Initial import of Cobalt 2.8885 2016-07-27
diff --git a/src/third_party/blink/Tools/Scripts/SpacingHeuristics.pm b/src/third_party/blink/Tools/Scripts/SpacingHeuristics.pm
new file mode 100644
index 0000000..ab489ff
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/SpacingHeuristics.pm
@@ -0,0 +1,99 @@
+# Copyright (C) 2006 Apple Computer, Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Helpers for removing extra blank lines from files during processing.
+# See split-class for an example usage (or other scripts in bugzilla).
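+#
+# A minimal usage sketch (hypothetical driver loop; $in and $out are
+# placeholder filehandles, and the "disallowed" pattern below is purely
+# illustrative):
+#
+#     use SpacingHeuristics;
+#     resetSpacingHeuristics();
+#     while (my $line = <$in>) {
+#         next if isOnlyWhiteSpace($line);     # buffers blank lines for later
+#         if ($line =~ m/^\s*;\s*$/) {         # hypothetical disallowed line
+#             ignoringLine($line);
+#             next;
+#         }
+#         applySpacingHeuristicsAndPrint($out, $line);
+#         setPreviousAllowedLine($line);
+#     }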
+
+BEGIN {
+ use Exporter ();
+ our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, %EXPORT_TAGS);
+ $VERSION = 1.00;
+ @ISA = qw(Exporter);
+    @EXPORT = qw(&resetSpacingHeuristics &isOnlyWhiteSpace &applySpacingHeuristicsAndPrint &setPreviousAllowedLine &printPendingEmptyLines &ignoringLine);
+ %EXPORT_TAGS = ();
+ @EXPORT_OK = ();
+}
+
+our @EXPORT_OK;
+
+my $justFoundEmptyLine = 0;
+my $previousLineWasDisallowed = 0;
+my $previousAllowedLine = "";
+my $pendingEmptyLines = "";
+
+sub resetSpacingHeuristics
+{
+ $justFoundEmptyLine = 0;
+ $previousLineWasDisallowed = 0;
+ $previousAllowedLine = "";
+ $pendingEmptyLines = "";
+}
+
+sub isOnlyWhiteSpace
+{
+ my $line = shift;
+ my $isOnlyWhiteSpace = ($line =~ m/^\s+$/);
+ $pendingEmptyLines .= $line if ($isOnlyWhiteSpace);
+ return $isOnlyWhiteSpace;
+}
+
+sub applySpacingHeuristicsAndPrint
+{
+ my ($out, $line) = @_;
+
+ printPendingEmptyLines($out, $line);
+ $previousLineWasDisallowed = 0;
+ print $out $line;
+}
+
+sub setPreviousAllowedLine
+{
+ my $line = shift;
+ $previousAllowedLine = $line;
+}
+
+sub printPendingEmptyLines
+{
+ my $out = shift;
+ my $line = shift;
+ if ($previousLineWasDisallowed) {
+ if (!($pendingEmptyLines eq "") && !($previousAllowedLine =~ m/{\s*$/) && !($line =~ m/^\s*}/)) {
+ $pendingEmptyLines = "\n";
+ } else {
+ $pendingEmptyLines = "";
+ }
+ }
+ print $out $pendingEmptyLines;
+ $pendingEmptyLines = "";
+}
+
+sub ignoringLine
+{
+ # my $line = shift; # ignoring input argument
+ $previousLineWasDisallowed = 1;
+}
+
+1;
diff --git a/src/third_party/blink/Tools/Scripts/VCSUtils.pm b/src/third_party/blink/Tools/Scripts/VCSUtils.pm
new file mode 100644
index 0000000..dcf0a32
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/VCSUtils.pm
@@ -0,0 +1,2155 @@
+# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Apple Inc. All rights reserved.
+# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+# Copyright (C) 2010, 2011 Research In Motion Limited. All rights reserved.
+# Copyright (C) 2012 Daniel Bates (dbates@intudata.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Module to share code to work with various version control systems.
+package VCSUtils;
+
+use strict;
+use warnings;
+
+use Cwd qw(); # "qw()" prevents warnings about redefining getcwd() with "use POSIX;"
+use English; # for $POSTMATCH, etc.
+use File::Basename;
+use File::Spec;
+use POSIX;
+use Term::ANSIColor qw(colored);
+
+BEGIN {
+ use Exporter ();
+ our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, %EXPORT_TAGS);
+ $VERSION = 1.00;
+ @ISA = qw(Exporter);
+ @EXPORT = qw(
+ &applyGitBinaryPatchDelta
+ &callSilently
+ &canonicalizePath
+ &changeLogEmailAddress
+ &changeLogFileName
+ &changeLogName
+ &chdirReturningRelativePath
+ &decodeGitBinaryChunk
+ &decodeGitBinaryPatch
+ &determineSVNRoot
+ &determineVCSRoot
+ &escapeSubversionPath
+ &exitStatus
+ &fixChangeLogPatch
+ &gitBranch
+ &gitdiff2svndiff
+ &isGit
+ &isGitSVN
+ &isGitBranchBuild
+ &isGitDirectory
+ &isSVN
+ &isSVNDirectory
+ &isSVNVersion16OrNewer
+ &makeFilePathRelative
+ &mergeChangeLogs
+ &normalizePath
+ &parseChunkRange
+ &parseFirstEOL
+ &parsePatch
+ &pathRelativeToSVNRepositoryRootForPath
+ &possiblyColored
+ &prepareParsedPatch
+ &removeEOL
+ &runCommand
+ &runPatchCommand
+ &scmMoveOrRenameFile
+ &scmToggleExecutableBit
+ &setChangeLogDateAndReviewer
+ &svnRevisionForDirectory
+ &svnStatus
+ &toWindowsLineEndings
+ &gitCommitForSVNRevision
+ &listOfChangedFilesBetweenRevisions
+ );
+ %EXPORT_TAGS = ( );
+ @EXPORT_OK = ();
+}
+
+our @EXPORT_OK;
+
+my $gitBranch;
+my $gitRoot;
+my $isGit;
+my $isGitSVN;
+my $isGitBranchBuild;
+my $isSVN;
+my $svnVersion;
+
+# Project time zone for Cupertino, CA, US
+my $changeLogTimeZone = "PST8PDT";
+
+my $gitDiffStartRegEx = qr#^diff --git (\w/)?(.+) (\w/)?([^\r\n]+)#;
+my $svnDiffStartRegEx = qr#^Index: ([^\r\n]+)#;
+my $svnPropertiesStartRegEx = qr#^Property changes on: ([^\r\n]+)#; # $1 is normally the same as the index path.
+my $svnPropertyStartRegEx = qr#^(Modified|Name|Added|Deleted): ([^\r\n]+)#; # $2 is the name of the property.
+my $svnPropertyValueStartRegEx = qr#^\s*(\+|-|Merged|Reverse-merged)\s*([^\r\n]+)#; # $2 is the start of the property's value (which may span multiple lines).
+my $svnPropertyValueNoNewlineRegEx = qr#\ No newline at end of property#;
+
+# This method is for portability. Return the system-appropriate exit
+# status of a child process.
+#
+# Args: pass the child error status returned by the last pipe close,
+# for example "$?".
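+#
+# Example (sketch): after running a child process with backticks or
+# system(), pass $? straight through:
+#
+#     `svn --version`;
+#     my $status = exitStatus($?);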
+sub exitStatus($)
+{
+ my ($returnvalue) = @_;
+ if ($^O eq "MSWin32") {
+ return $returnvalue >> 8;
+ }
+ if (!WIFEXITED($returnvalue)) {
+ return 254;
+ }
+ return WEXITSTATUS($returnvalue);
+}
+
+# Call a function while suppressing STDERR, and return the return values
+# as an array.
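+#
+# Example (sketch, using a code reference to a function defined elsewhere in
+# this module; $somePath is a placeholder):
+#
+#     my @results = callSilently(\&svnStatus, $somePath);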
+sub callSilently($@) {
+ my ($func, @args) = @_;
+
+ # The following pattern was taken from here:
+ # http://www.sdsc.edu/~moreland/courses/IntroPerl/docs/manual/pod/perlfunc/open.html
+ #
+ # Also see this Perl documentation (search for "open OLDERR"):
+ # http://perldoc.perl.org/functions/open.html
+ open(OLDERR, ">&STDERR");
+ close(STDERR);
+ my @returnValue = &$func(@args);
+ open(STDERR, ">&OLDERR");
+ close(OLDERR);
+
+ return @returnValue;
+}
+
+sub toWindowsLineEndings
+{
+ my ($text) = @_;
+ $text =~ s/\n/\r\n/g;
+ return $text;
+}
+
+# Note, this method will not error if the file corresponding to the $source path does not exist.
+sub scmMoveOrRenameFile
+{
+ my ($source, $destination) = @_;
+ return if ! -e $source;
+ if (isSVN()) {
+ my $escapedDestination = escapeSubversionPath($destination);
+ my $escapedSource = escapeSubversionPath($source);
+ system("svn", "move", $escapedSource, $escapedDestination);
+ } elsif (isGit()) {
+ system("git", "mv", $source, $destination);
+ }
+}
+
+# Note, this method will not error if the file corresponding to the path does not exist.
+sub scmToggleExecutableBit
+{
+ my ($path, $executableBitDelta) = @_;
+ return if ! -e $path;
+ if ($executableBitDelta == 1) {
+ scmAddExecutableBit($path);
+ } elsif ($executableBitDelta == -1) {
+ scmRemoveExecutableBit($path);
+ }
+}
+
+sub scmAddExecutableBit($)
+{
+ my ($path) = @_;
+
+ if (isSVN()) {
+ my $escapedPath = escapeSubversionPath($path);
+ system("svn", "propset", "svn:executable", "on", $escapedPath) == 0 or die "Failed to run 'svn propset svn:executable on $escapedPath'.";
+ } elsif (isGit()) {
+ chmod(0755, $path);
+ }
+}
+
+sub scmRemoveExecutableBit($)
+{
+ my ($path) = @_;
+
+ if (isSVN()) {
+ my $escapedPath = escapeSubversionPath($path);
+ system("svn", "propdel", "svn:executable", $escapedPath) == 0 or die "Failed to run 'svn propdel svn:executable $escapedPath'.";
+ } elsif (isGit()) {
+ chmod(0664, $path);
+ }
+}
+
+sub isGitDirectory($)
+{
+ my ($dir) = @_;
+ return system("cd $dir && git rev-parse > " . File::Spec->devnull() . " 2>&1") == 0;
+}
+
+sub isGit()
+{
+ return $isGit if defined $isGit;
+
+ $isGit = isGitDirectory(".");
+ return $isGit;
+}
+
+sub isGitSVN()
+{
+ return $isGitSVN if defined $isGitSVN;
+
+ # There doesn't seem to be an officially documented way to determine
+ # if you're in a git-svn checkout. The best suggestions seen so far
+ # all use something like the following:
+    my $output = `git config --get svn-remote.svn.fetch 2>&1`;
+ $isGitSVN = $output ne '';
+ return $isGitSVN;
+}
+
+sub gitBranch()
+{
+ unless (defined $gitBranch) {
+ chomp($gitBranch = `git symbolic-ref -q HEAD`);
+ $gitBranch = "" if exitStatus($?);
+ $gitBranch =~ s#^refs/heads/##;
+ $gitBranch = "" if $gitBranch eq "master";
+ }
+
+ return $gitBranch;
+}
+
+sub isGitBranchBuild()
+{
+ my $branch = gitBranch();
+ chomp(my $override = `git config --bool branch.$branch.webKitBranchBuild`);
+ return 1 if $override eq "true";
+ return 0 if $override eq "false";
+
+ unless (defined $isGitBranchBuild) {
+ chomp(my $gitBranchBuild = `git config --bool core.webKitBranchBuild`);
+ $isGitBranchBuild = $gitBranchBuild eq "true";
+ }
+
+ return $isGitBranchBuild;
+}
+
+sub isSVNDirectory($)
+{
+ my ($dir) = @_;
+ return system("cd $dir && svn info > " . File::Spec->devnull() . " 2>&1") == 0;
+}
+
+sub isSVN()
+{
+ return $isSVN if defined $isSVN;
+
+ $isSVN = isSVNDirectory(".");
+ return $isSVN;
+}
+
+sub svnVersion()
+{
+ return $svnVersion if defined $svnVersion;
+
+ if (!isSVN()) {
+ $svnVersion = 0;
+ } else {
+ chomp($svnVersion = `svn --version --quiet`);
+ }
+ return $svnVersion;
+}
+
+sub isSVNVersion16OrNewer()
+{
+ my $version = svnVersion();
+ return eval "v$version" ge v1.6;
+}
+
+sub chdirReturningRelativePath($)
+{
+ my ($directory) = @_;
+ my $previousDirectory = Cwd::getcwd();
+ chdir $directory;
+ my $newDirectory = Cwd::getcwd();
+ return "." if $newDirectory eq $previousDirectory;
+ return File::Spec->abs2rel($previousDirectory, $newDirectory);
+}
+
+sub determineGitRoot()
+{
+ chomp(my $gitDir = `git rev-parse --git-dir`);
+ return dirname($gitDir);
+}
+
+sub determineSVNRoot()
+{
+ my $last = '';
+ my $path = '.';
+ my $parent = '..';
+ my $repositoryRoot;
+ my $repositoryUUID;
+ while (1) {
+ my $thisRoot;
+ my $thisUUID;
+ my $escapedPath = escapeSubversionPath($path);
+ # Ignore error messages in case we've run past the root of the checkout.
+ open INFO, "svn info '$escapedPath' 2> " . File::Spec->devnull() . " |" or die;
+ while (<INFO>) {
+ if (/^Repository Root: (.+)/) {
+ $thisRoot = $1;
+ }
+ if (/^Repository UUID: (.+)/) {
+ $thisUUID = $1;
+ }
+ if ($thisRoot && $thisUUID) {
+ local $/ = undef;
+ <INFO>; # Consume the rest of the input.
+ }
+ }
+ close INFO;
+
+ # It's possible (e.g. for developers of some ports) to have a WebKit
+ # checkout in a subdirectory of another checkout. So abort if the
+ # repository root or the repository UUID suddenly changes.
+ last if !$thisUUID;
+ $repositoryUUID = $thisUUID if !$repositoryUUID;
+ last if $thisUUID ne $repositoryUUID;
+
+ last if !$thisRoot;
+ $repositoryRoot = $thisRoot if !$repositoryRoot;
+ last if $thisRoot ne $repositoryRoot;
+
+ $last = $path;
+ $path = File::Spec->catdir($parent, $path);
+ }
+
+ return File::Spec->rel2abs($last);
+}
+
+sub determineVCSRoot()
+{
+ if (isGit()) {
+ return determineGitRoot();
+ }
+
+ if (!isSVN()) {
+ # Some users have a workflow where svn-create-patch, svn-apply and
+        # svn-unapply are used outside of multiple svn working directories,
+ # so warn the user and assume Subversion is being used in this case.
+ warn "Unable to determine VCS root for '" . Cwd::getcwd() . "'; assuming Subversion";
+ $isSVN = 1;
+ }
+
+ return determineSVNRoot();
+}
+
+sub isWindows()
+{
+ return ($^O eq "MSWin32") || 0;
+}
+
+sub svnRevisionForDirectory($)
+{
+ my ($dir) = @_;
+ my $revision;
+
+ if (isSVNDirectory($dir)) {
+ my $escapedDir = escapeSubversionPath($dir);
+ my $command = "svn info $escapedDir | grep Revision:";
+ $command = "LC_ALL=C $command" if !isWindows();
+ my $svnInfo = `$command`;
+ ($revision) = ($svnInfo =~ m/Revision: (\d+).*/g);
+ } elsif (isGitDirectory($dir)) {
+ my $command = "git log --grep=\"git-svn-id: \" -n 1 | grep git-svn-id:";
+ $command = "LC_ALL=C $command" if !isWindows();
+ $command = "cd $dir && $command";
+ my $gitLog = `$command`;
+ ($revision) = ($gitLog =~ m/ +git-svn-id: .+@(\d+) /g);
+ }
+ if (!defined($revision)) {
+ $revision = "unknown";
+ warn "Unable to determine current SVN revision in $dir";
+ }
+ return $revision;
+}
+
+sub pathRelativeToSVNRepositoryRootForPath($)
+{
+ my ($file) = @_;
+ my $relativePath = File::Spec->abs2rel($file);
+
+ my $svnInfo;
+ if (isSVN()) {
+ my $escapedRelativePath = escapeSubversionPath($relativePath);
+ my $command = "svn info $escapedRelativePath";
+ $command = "LC_ALL=C $command" if !isWindows();
+ $svnInfo = `$command`;
+ } elsif (isGit()) {
+ my $command = "git svn info $relativePath";
+ $command = "LC_ALL=C $command" if !isWindows();
+ $svnInfo = `$command`;
+ }
+
+ $svnInfo =~ /.*^URL: (.*?)$/m;
+ my $svnURL = $1;
+
+ $svnInfo =~ /.*^Repository Root: (.*?)$/m;
+ my $repositoryRoot = $1;
+
+ $svnURL =~ s/$repositoryRoot\///;
+ return $svnURL;
+}
+
+sub makeFilePathRelative($)
+{
+ my ($path) = @_;
+ return $path unless isGit();
+
+ unless (defined $gitRoot) {
+ chomp($gitRoot = `git rev-parse --show-cdup`);
+ }
+ return $gitRoot . $path;
+}
+
+sub normalizePath($)
+{
+ my ($path) = @_;
+ $path =~ s/\\/\//g;
+ return $path;
+}
+
+sub possiblyColored($$)
+{
+ my ($colors, $string) = @_;
+
+ if (-t STDOUT) {
+ return colored([$colors], $string);
+ } else {
+ return $string;
+ }
+}
+
+sub adjustPathForRecentRenamings($)
+{
+ my ($fullPath) = @_;
+
+ $fullPath =~ s|WebCore/webaudio|WebCore/Modules/webaudio|g;
+ $fullPath =~ s|JavaScriptCore/wtf|WTF/wtf|g;
+ $fullPath =~ s|test_expectations.txt|TestExpectations|g;
+
+ return $fullPath;
+}
+
+sub canonicalizePath($)
+{
+ my ($file) = @_;
+
+ # Remove extra slashes and '.' directories in path
+ $file = File::Spec->canonpath($file);
+
+ # Remove '..' directories in path
+ my @dirs = ();
+ foreach my $dir (File::Spec->splitdir($file)) {
+ if ($dir eq '..' && $#dirs >= 0 && $dirs[$#dirs] ne '..') {
+ pop(@dirs);
+ } else {
+ push(@dirs, $dir);
+ }
+ }
+ return ($#dirs >= 0) ? File::Spec->catdir(@dirs) : ".";
+}
+
+sub removeEOL($)
+{
+ my ($line) = @_;
+ return "" unless $line;
+
+ $line =~ s/[\r\n]+$//g;
+ return $line;
+}
+
+sub parseFirstEOL($)
+{
+ my ($fileHandle) = @_;
+
+ # Make input record separator the new-line character to simplify regex matching below.
+ my $savedInputRecordSeparator = $INPUT_RECORD_SEPARATOR;
+ $INPUT_RECORD_SEPARATOR = "\n";
+ my $firstLine = <$fileHandle>;
+ $INPUT_RECORD_SEPARATOR = $savedInputRecordSeparator;
+
+ return unless defined($firstLine);
+
+ my $eol;
+ if ($firstLine =~ /\r\n/) {
+ $eol = "\r\n";
+ } elsif ($firstLine =~ /\r/) {
+ $eol = "\r";
+ } elsif ($firstLine =~ /\n/) {
+ $eol = "\n";
+ }
+ return $eol;
+}
+
+sub firstEOLInFile($)
+{
+ my ($file) = @_;
+ my $eol;
+ if (open(FILE, $file)) {
+ $eol = parseFirstEOL(*FILE);
+ close(FILE);
+ }
+ return $eol;
+}
+
+# Parses a chunk range line into its components.
+#
+# A chunk range line has the form: @@ -L_1,N_1 +L_2,N_2 @@, where the pairs (L_1, N_1),
+# (L_2, N_2) are ranges that represent the starting line number and line count in the
+# original file and new file, respectively.
+#
+# Note, some versions of GNU diff may omit the comma and trailing line count (e.g. N_1),
+# in which case the omitted line count defaults to 1. For example, GNU diff may output
+# @@ -1 +1 @@, which is equivalent to @@ -1,1 +1,1 @@.
+#
+# This subroutine returns undef if given an invalid or malformed chunk range.
+#
+# Args:
+# $line: the line to parse.
+# $chunkSentinel: the sentinel that surrounds the chunk range information (defaults to "@@").
+#
+# Returns $chunkRangeHashRef
+# $chunkRangeHashRef: a hash reference representing the parts of a chunk range, as follows--
+# startingLine: the starting line in the original file.
+# lineCount: the line count in the original file.
+# newStartingLine: the new starting line in the new file.
+# newLineCount: the new line count in the new file.
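+#
+# Example (illustrative): parseChunkRange("@@ -5,8 +5,11 @@") would return
+# { startingLine => 5, lineCount => 8, newStartingLine => 5, newLineCount => 11 }.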
+sub parseChunkRange($;$)
+{
+ my ($line, $chunkSentinel) = @_;
+ $chunkSentinel = "@@" if !$chunkSentinel;
+ my $chunkRangeRegEx = qr#^\Q$chunkSentinel\E -(\d+)(,(\d+))? \+(\d+)(,(\d+))? \Q$chunkSentinel\E#;
+ if ($line !~ /$chunkRangeRegEx/) {
+ return;
+ }
+ my %chunkRange;
+ $chunkRange{startingLine} = $1;
+ $chunkRange{lineCount} = defined($2) ? $3 : 1;
+ $chunkRange{newStartingLine} = $4;
+ $chunkRange{newLineCount} = defined($5) ? $6 : 1;
+ return \%chunkRange;
+}
+
+sub svnStatus($)
+{
+ my ($fullPath) = @_;
+ my $escapedFullPath = escapeSubversionPath($fullPath);
+ my $svnStatus;
+ open SVN, "svn status --non-interactive --non-recursive '$escapedFullPath' |" or die;
+ if (-d $fullPath) {
+ # When running "svn stat" on a directory, we can't assume that only one
+ # status will be returned (since any files with a status below the
+ # directory will be returned), and we can't assume that the directory will
+ # be first (since any files with unknown status will be listed first).
+ my $normalizedFullPath = File::Spec->catdir(File::Spec->splitdir($fullPath));
+ while (<SVN>) {
+ # Input may use a different EOL sequence than $/, so avoid chomp.
+ $_ = removeEOL($_);
+ my $normalizedStatPath = File::Spec->catdir(File::Spec->splitdir(substr($_, 7)));
+ if ($normalizedFullPath eq $normalizedStatPath) {
+ $svnStatus = "$_\n";
+ last;
+ }
+ }
+ # Read the rest of the svn command output to avoid a broken pipe warning.
+ local $/ = undef;
+ <SVN>;
+ }
+ else {
+ # Files will have only one status returned.
+ $svnStatus = removeEOL(<SVN>) . "\n";
+ }
+ close SVN;
+ return $svnStatus;
+}
+
+# Return whether the given file mode is executable in the source control
+# sense. We make this determination based on whether the executable bit
+# is set for "others" rather than the stronger condition that it be set
+# for the user, group, and others. This is sufficient for distinguishing
+# the default behavior in Git and SVN.
+#
+# Args:
+# $fileMode: A number or string representing a file mode in octal notation.
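+#
+# For example, isExecutable(100755) returns 1, while isExecutable(100644)
+# returns 0.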
+sub isExecutable($)
+{
+ my $fileMode = shift;
+
+ return $fileMode % 2;
+}
+
+# Parse the next Git diff header from the given file handle, and advance
+# the handle so the last line read is the first line after the header.
+#
+# This subroutine dies if given leading junk.
+#
+# Args:
+# $fileHandle: advanced so the last line read from the handle is the first
+# line of the header to parse. This should be a line
+# beginning with "diff --git".
+# $line: the line last read from $fileHandle
+#
+# Returns ($headerHashRef, $lastReadLine):
+# $headerHashRef: a hash reference representing a diff header, as follows--
+# copiedFromPath: the path from which the file was copied or moved if
+# the diff is a copy or move.
+# executableBitDelta: the value 1 or -1 if the executable bit was added or
+# removed, respectively. New and deleted files have
+# this value only if the file is executable, in which
+# case the value is 1 and -1, respectively.
+# indexPath: the path of the target file.
+# isBinary: the value 1 if the diff is for a binary file.
+# isDeletion: the value 1 if the diff is a file deletion.
+# isCopyWithChanges: the value 1 if the file was copied or moved and
+# the target file was changed in some way after being
+# copied or moved (e.g. if its contents or executable
+# bit were changed).
+# isNew: the value 1 if the diff is for a new file.
+# shouldDeleteSource: the value 1 if the file was copied or moved and
+# the source file was deleted -- i.e. if the copy
+# was actually a move.
+# svnConvertedText: the header text with some lines converted to SVN
+# format. Git-specific lines are preserved.
+# $lastReadLine: the line last read from $fileHandle.
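+#
+# Illustrative sketch (hypothetical input): for a header such as
+#
+#     diff --git a/foo.txt b/foo.txt
+#     old mode 100644
+#     new mode 100755
+#     --- a/foo.txt
+#     +++ b/foo.txt
+#
+# the returned hash would contain indexPath => "foo.txt" and
+# executableBitDelta => 1.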
+sub parseGitDiffHeader($$)
+{
+ my ($fileHandle, $line) = @_;
+
+ $_ = $line;
+
+ my $indexPath;
+ if (/$gitDiffStartRegEx/) {
+ # The first and second paths can differ in the case of copies
+ # and renames. We use the second file path because it is the
+ # destination path.
+ $indexPath = adjustPathForRecentRenamings($4);
+ # Use $POSTMATCH to preserve the end-of-line character.
+ $_ = "Index: $indexPath$POSTMATCH"; # Convert to SVN format.
+ } else {
+ die("Could not parse leading \"diff --git\" line: \"$line\".");
+ }
+
+ my $copiedFromPath;
+ my $foundHeaderEnding;
+ my $isBinary;
+ my $isDeletion;
+ my $isNew;
+ my $newExecutableBit = 0;
+ my $oldExecutableBit = 0;
+ my $shouldDeleteSource = 0;
+ my $similarityIndex = 0;
+ my $svnConvertedText;
+ while (1) {
+ # Temporarily strip off any end-of-line characters to simplify
+ # regex matching below.
+ s/([\n\r]+)$//;
+ my $eol = $1;
+
+ if (/^(deleted file|old) mode (\d+)/) {
+ $oldExecutableBit = (isExecutable($2) ? 1 : 0);
+ $isDeletion = 1 if $1 eq "deleted file";
+ } elsif (/^new( file)? mode (\d+)/) {
+ $newExecutableBit = (isExecutable($2) ? 1 : 0);
+ $isNew = 1 if $1;
+ } elsif (/^similarity index (\d+)%/) {
+ $similarityIndex = $1;
+ } elsif (/^copy from (\S+)/) {
+ $copiedFromPath = $1;
+ } elsif (/^rename from (\S+)/) {
+ # FIXME: Record this as a move rather than as a copy-and-delete.
+ # This will simplify adding rename support to svn-unapply.
+ # Otherwise, the hash for a deletion would have to know
+ # everything about the file being deleted in order to
+ # support undoing itself. Recording as a move will also
+            # permit us to use "svn move" and "git mv".
+ $copiedFromPath = $1;
+ $shouldDeleteSource = 1;
+ } elsif (/^--- \S+/) {
+ $_ = "--- $indexPath"; # Convert to SVN format.
+ } elsif (/^\+\+\+ \S+/) {
+ $_ = "+++ $indexPath"; # Convert to SVN format.
+ $foundHeaderEnding = 1;
+ } elsif (/^GIT binary patch$/ ) {
+ $isBinary = 1;
+ $foundHeaderEnding = 1;
+ # The "git diff" command includes a line of the form "Binary files
+ # <path1> and <path2> differ" if the --binary flag is not used.
+ } elsif (/^Binary files / ) {
+ die("Error: the Git diff contains a binary file without the binary data in ".
+ "line: \"$_\". Be sure to use the --binary flag when invoking \"git diff\" ".
+ "with diffs containing binary files.");
+ }
+
+ $svnConvertedText .= "$_$eol"; # Also restore end-of-line characters.
+
+ $_ = <$fileHandle>; # Not defined if end-of-file reached.
+
+ last if (!defined($_) || /$gitDiffStartRegEx/ || $foundHeaderEnding);
+ }
+
+ my $executableBitDelta = $newExecutableBit - $oldExecutableBit;
+
+ my %header;
+
+ $header{copiedFromPath} = $copiedFromPath if $copiedFromPath;
+ $header{executableBitDelta} = $executableBitDelta if $executableBitDelta;
+ $header{indexPath} = $indexPath;
+ $header{isBinary} = $isBinary if $isBinary;
+ $header{isCopyWithChanges} = 1 if ($copiedFromPath && ($similarityIndex != 100 || $executableBitDelta));
+ $header{isDeletion} = $isDeletion if $isDeletion;
+ $header{isNew} = $isNew if $isNew;
+ $header{shouldDeleteSource} = $shouldDeleteSource if $shouldDeleteSource;
+ $header{svnConvertedText} = $svnConvertedText;
+
+ return (\%header, $_);
+}
+
+# Parse the next SVN diff header from the given file handle, and advance
+# the handle so the last line read is the first line after the header.
+#
+# This subroutine dies if given leading junk or if it could not detect
+# the end of the header block.
+#
+# Args:
+# $fileHandle: advanced so the last line read from the handle is the first
+# line of the header to parse. This should be a line
+# beginning with "Index:".
+# $line: the line last read from $fileHandle
+#
+# Returns ($headerHashRef, $lastReadLine):
+# $headerHashRef: a hash reference representing a diff header, as follows--
+# copiedFromPath: the path from which the file was copied if the diff
+# is a copy.
+# indexPath: the path of the target file, which is the path found in
+# the "Index:" line.
+# isBinary: the value 1 if the diff is for a binary file.
+# isNew: the value 1 if the diff is for a new file.
+# sourceRevision: the revision number of the source, if it exists. This
+# is the same as the revision number the file was copied
+# from, in the case of a file copy.
+# svnConvertedText: the header text converted to a header with the paths
+# in some lines corrected.
+# $lastReadLine: the line last read from $fileHandle.
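+#
+# Illustrative sketch (hypothetical input; in a real diff a literal tab
+# separates the path from the parenthesized revision): for a header such as
+#
+#     Index: foo.txt
+#     ===================================================================
+#     --- foo.txt	(revision 12345)
+#     +++ foo.txt	(working copy)
+#
+# the returned hash would contain indexPath => "foo.txt" and
+# sourceRevision => 12345.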
+sub parseSvnDiffHeader($$)
+{
+ my ($fileHandle, $line) = @_;
+
+ $_ = $line;
+
+ my $indexPath;
+ if (/$svnDiffStartRegEx/) {
+ $indexPath = adjustPathForRecentRenamings($1);
+ } else {
+ die("First line of SVN diff does not begin with \"Index \": \"$_\"");
+ }
+
+ my $copiedFromPath;
+ my $foundHeaderEnding;
+ my $isBinary;
+ my $isNew;
+ my $sourceRevision;
+ my $svnConvertedText;
+ while (1) {
+ # Temporarily strip off any end-of-line characters to simplify
+ # regex matching below.
+ s/([\n\r]+)$//;
+ my $eol = $1;
+
+ # Fix paths on "---" and "+++" lines to match the leading
+ # index line.
+ if (s/^--- [^\t\n\r]+/--- $indexPath/) {
+ # ---
+ if (/^--- .+\(revision (\d+)\)/) {
+ $sourceRevision = $1;
+ $isNew = 1 if !$sourceRevision; # if revision 0.
+ if (/\(from (\S+):(\d+)\)$/) {
+ # The "from" clause is created by svn-create-patch, in
+ # which case there is always also a "revision" clause.
+ $copiedFromPath = $1;
+ die("Revision number \"$2\" in \"from\" clause does not match " .
+ "source revision number \"$sourceRevision\".") if ($2 != $sourceRevision);
+ }
+ }
+ } elsif (s/^\+\+\+ [^\t\n\r]+/+++ $indexPath/ || $isBinary && /^$/) {
+ $foundHeaderEnding = 1;
+ } elsif (/^Cannot display: file marked as a binary type.$/) {
+ $isBinary = 1;
+ # SVN 1.7 has an unusual display format for a binary diff. It repeats the first
+ # two lines of the diff header. For example:
+ # Index: test_file.swf
+ # ===================================================================
+ # Cannot display: file marked as a binary type.
+ # svn:mime-type = application/octet-stream
+ # Index: test_file.swf
+ # ===================================================================
+ # --- test_file.swf
+ # +++ test_file.swf
+ #
+ # ...
+ # Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+ # Therefore, we continue reading the diff header until we either encounter a line
+ # that begins with "+++" (SVN 1.7 or greater) or an empty line (SVN version less
+ # than 1.7).
+ }
+
+ $svnConvertedText .= "$_$eol"; # Also restore end-of-line characters.
+
+ $_ = <$fileHandle>; # Not defined if end-of-file reached.
+
+ last if (!defined($_) || !$isBinary && /$svnDiffStartRegEx/ || $foundHeaderEnding);
+ }
+
+ if (!$foundHeaderEnding) {
+ die("Did not find end of header block corresponding to index path \"$indexPath\".");
+ }
+
+ my %header;
+
+ $header{copiedFromPath} = $copiedFromPath if $copiedFromPath;
+ $header{indexPath} = $indexPath;
+ $header{isBinary} = $isBinary if $isBinary;
+ $header{isNew} = $isNew if $isNew;
+ $header{sourceRevision} = $sourceRevision if $sourceRevision;
+ $header{svnConvertedText} = $svnConvertedText;
+
+ return (\%header, $_);
+}
+
+# Parse the next diff header from the given file handle, and advance
+# the handle so the last line read is the first line after the header.
+#
+# This subroutine dies if given leading junk or if it could not detect
+# the end of the header block.
+#
+# Args:
+# $fileHandle: advanced so the last line read from the handle is the first
+# line of the header to parse. For SVN-formatted diffs, this
+# is a line beginning with "Index:". For Git, this is a line
+# beginning with "diff --git".
+# $line: the line last read from $fileHandle
+#
+# Returns ($headerHashRef, $lastReadLine):
+# $headerHashRef: a hash reference representing a diff header
+# copiedFromPath: the path from which the file was copied if the diff
+# is a copy.
+# executableBitDelta: the value 1 or -1 if the executable bit was added or
+# removed, respectively. New and deleted files have
+# this value only if the file is executable, in which
+# case the value is 1 and -1, respectively.
+# indexPath: the path of the target file.
+# isBinary: the value 1 if the diff is for a binary file.
+# isGit: the value 1 if the diff is Git-formatted.
+# isSvn: the value 1 if the diff is SVN-formatted.
+# sourceRevision: the revision number of the source, if it exists. This
+# is the same as the revision number the file was copied
+# from, in the case of a file copy.
+# svnConvertedText: the header text with some lines converted to SVN
+# format. Git-specific lines are preserved.
+# $lastReadLine: the line last read from $fileHandle.
+sub parseDiffHeader($$)
+{
+ my ($fileHandle, $line) = @_;
+
+ my $header; # This is a hash ref.
+ my $isGit;
+ my $isSvn;
+ my $lastReadLine;
+
+ if ($line =~ $svnDiffStartRegEx) {
+ $isSvn = 1;
+ ($header, $lastReadLine) = parseSvnDiffHeader($fileHandle, $line);
+ } elsif ($line =~ $gitDiffStartRegEx) {
+ $isGit = 1;
+ ($header, $lastReadLine) = parseGitDiffHeader($fileHandle, $line);
+ } else {
+ die("First line of diff does not begin with \"Index:\" or \"diff --git\": \"$line\"");
+ }
+
+ $header->{isGit} = $isGit if $isGit;
+ $header->{isSvn} = $isSvn if $isSvn;
+
+ return ($header, $lastReadLine);
+}
+
+# FIXME: The %diffHash "object" should not have an svnConvertedText property.
+# Instead, the hash object should store its information in a
+# structured way as properties. This should be done in a way so
+# that, if necessary, the text of an SVN or Git patch can be
+# reconstructed from the information in those hash properties.
+#
+# A %diffHash is a hash representing a source control diff of a single
+# file operation (e.g. a file modification, copy, or delete).
+#
+# These hashes appear, for example, in the parseDiff(), parsePatch(),
+# and prepareParsedPatch() subroutines of this package.
+#
+# The corresponding values are--
+#
+# copiedFromPath: the path from which the file was copied if the diff
+# is a copy.
+# executableBitDelta: the value 1 or -1 if the executable bit was added or
+# removed from the target file, respectively.
+# indexPath: the path of the target file. For SVN-formatted diffs,
+# this is the same as the path in the "Index:" line.
+# isBinary: the value 1 if the diff is for a binary file.
+# isDeletion: the value 1 if the diff is known from the header to be a deletion.
+# isGit: the value 1 if the diff is Git-formatted.
+#   isNew: the value 1 if the diff is known from the header to be a new file.
+# isSvn: the value 1 if the diff is SVN-formatted.
+# sourceRevision: the revision number of the source, if it exists. This
+# is the same as the revision number the file was copied
+# from, in the case of a file copy.
+# svnConvertedText: the diff with some lines converted to SVN format.
+# Git-specific lines are preserved.
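+#
+# For example (sketch), a plain Git-formatted modification of foo.txt might
+# be represented roughly as:
+#
+#   { indexPath => "foo.txt", isGit => 1, numTextChunks => 1,
+#     svnConvertedText => "Index: foo.txt\n..." }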
+
+# Parse one diff from a patch file created by svn-create-patch, and
+# advance the file handle so the last line read is the first line
+# of the next header block.
+#
+# This subroutine preserves any leading junk encountered before the header.
+#
+# Composition of an SVN diff
+#
+# There are three parts to an SVN diff: the header, the property change, and
+# the binary contents, in that order. Either the header or the property change
+# may be omitted, but not both. If there are binary changes, then you always
+# have all three.
+#
+# Args:
+# $fileHandle: a file handle advanced to the first line of the next
+# header block. Leading junk is okay.
+# $line: the line last read from $fileHandle.
+# $optionsHashRef: a hash reference representing optional options to use
+# when processing a diff.
+#       shouldNotUseIndexPathEOL: whether to use the line endings in the diff
+# instead of the line endings in the target file; the
+# value of 1 if svnConvertedText should use the line
+# endings in the diff.
+#
+# Returns ($diffHashRefs, $lastReadLine):
+# $diffHashRefs: A reference to an array of references to %diffHash hashes.
+# See the %diffHash documentation above.
+# $lastReadLine: the line last read from $fileHandle
+sub parseDiff($$;$)
+{
+ # FIXME: Adjust this method so that it dies if the first line does not
+ # match the start of a diff. This will require a change to
+ # parsePatch() so that parsePatch() skips over leading junk.
+ my ($fileHandle, $line, $optionsHashRef) = @_;
+
+ my $headerStartRegEx = $svnDiffStartRegEx; # SVN-style header for the default
+
+ my $headerHashRef; # Last header found, as returned by parseDiffHeader().
+ my $svnPropertiesHashRef; # Last SVN properties diff found, as returned by parseSvnDiffProperties().
+ my $svnText;
+ my $indexPathEOL;
+ my $numTextChunks = 0;
+ while (defined($line)) {
+ if (!$headerHashRef && ($line =~ $gitDiffStartRegEx)) {
+            # Then assume all diffs in the patch are Git-formatted. This
+            # block is entered at most once, since we assume all diffs in
+            # the patch are formatted the same way (SVN or Git).
+ $headerStartRegEx = $gitDiffStartRegEx;
+ }
+
+ if ($line =~ $svnPropertiesStartRegEx) {
+ my $propertyPath = $1;
+ if ($svnPropertiesHashRef || $headerHashRef && ($propertyPath ne $headerHashRef->{indexPath})) {
+ # This is the start of the second diff in the while loop, which happens to
+                # be a property diff. If $svnPropertiesHashRef is defined, then this is the
+ # second consecutive property diff, otherwise it's the start of a property
+ # diff for a file that only has property changes.
+ last;
+ }
+ ($svnPropertiesHashRef, $line) = parseSvnDiffProperties($fileHandle, $line);
+ next;
+ }
+ if ($line !~ $headerStartRegEx) {
+ # Then we are in the body of the diff.
+ my $isChunkRange = defined(parseChunkRange($line));
+ $numTextChunks += 1 if $isChunkRange;
+ my $nextLine = <$fileHandle>;
+ my $willAddNewLineAtEndOfFile = defined($nextLine) && $nextLine =~ /^\\ No newline at end of file$/;
+ if ($willAddNewLineAtEndOfFile) {
+                # Diff(1) always emits a LF character preceding the line "\ No newline at end of file".
+ # We must preserve both the added LF character and the line ending of this sentinel line
+ # or patch(1) will complain.
+ $svnText .= $line . $nextLine;
+ $line = <$fileHandle>;
+ next;
+ }
+ if ($indexPathEOL && !$isChunkRange) {
+                # The chunk range is part of the body of the diff, but its line endings shouldn't be
+ # modified or patch(1) will complain. So, we only modify non-chunk range lines.
+ $line =~ s/\r\n|\r|\n/$indexPathEOL/g;
+ }
+ $svnText .= $line;
+ $line = $nextLine;
+ next;
+ } # Otherwise, we found a diff header.
+
+ if ($svnPropertiesHashRef || $headerHashRef) {
+ # Then either we just processed an SVN property change or this
+ # is the start of the second diff header of this while loop.
+ last;
+ }
+
+ ($headerHashRef, $line) = parseDiffHeader($fileHandle, $line);
+ if (!$optionsHashRef || !$optionsHashRef->{shouldNotUseIndexPathEOL}) {
+ # FIXME: We shouldn't query the file system (via firstEOLInFile()) to determine the
+ # line endings of the file indexPath. Instead, either the caller to parseDiff()
+ # should provide this information or parseDiff() should take a delegate that it
+ # can use to query for this information.
+ $indexPathEOL = firstEOLInFile($headerHashRef->{indexPath}) if !$headerHashRef->{isNew} && !$headerHashRef->{isBinary};
+ }
+
+ $svnText .= $headerHashRef->{svnConvertedText};
+ }
+
+ my @diffHashRefs;
+
+ if ($headerHashRef->{shouldDeleteSource}) {
+ my %deletionHash;
+ $deletionHash{indexPath} = $headerHashRef->{copiedFromPath};
+ $deletionHash{isDeletion} = 1;
+ push @diffHashRefs, \%deletionHash;
+ }
+ if ($headerHashRef->{copiedFromPath}) {
+ my %copyHash;
+ $copyHash{copiedFromPath} = $headerHashRef->{copiedFromPath};
+ $copyHash{indexPath} = $headerHashRef->{indexPath};
+ $copyHash{sourceRevision} = $headerHashRef->{sourceRevision} if $headerHashRef->{sourceRevision};
+ if ($headerHashRef->{isSvn}) {
+ $copyHash{executableBitDelta} = $svnPropertiesHashRef->{executableBitDelta} if $svnPropertiesHashRef->{executableBitDelta};
+ }
+ push @diffHashRefs, \%copyHash;
+ }
+
+ # Note, the order of evaluation for the following if conditional has been explicitly chosen so that
+ # it evaluates to false when there is no headerHashRef (e.g. a property change diff for a file that
+ # only has property changes).
+ if ($headerHashRef->{isCopyWithChanges} || (%$headerHashRef && !$headerHashRef->{copiedFromPath})) {
+ # Then add the usual file modification.
+ my %diffHash;
+ # FIXME: We should expand this code to support other properties. In the future,
+ # parseSvnDiffProperties may return a hash whose keys are the properties.
+ if ($headerHashRef->{isSvn}) {
+ # SVN records the change to the executable bit in a separate property change diff
+ # that follows the contents of the diff, except for binary diffs. For binary
+ # diffs, the property change diff follows the diff header.
+ $diffHash{executableBitDelta} = $svnPropertiesHashRef->{executableBitDelta} if $svnPropertiesHashRef->{executableBitDelta};
+ } elsif ($headerHashRef->{isGit}) {
+ # Git records the change to the executable bit in the header of a diff.
+ $diffHash{executableBitDelta} = $headerHashRef->{executableBitDelta} if $headerHashRef->{executableBitDelta};
+ }
+ $diffHash{indexPath} = $headerHashRef->{indexPath};
+ $diffHash{isBinary} = $headerHashRef->{isBinary} if $headerHashRef->{isBinary};
+ $diffHash{isDeletion} = $headerHashRef->{isDeletion} if $headerHashRef->{isDeletion};
+ $diffHash{isGit} = $headerHashRef->{isGit} if $headerHashRef->{isGit};
+ $diffHash{isNew} = $headerHashRef->{isNew} if $headerHashRef->{isNew};
+ $diffHash{isSvn} = $headerHashRef->{isSvn} if $headerHashRef->{isSvn};
+ if (!$headerHashRef->{copiedFromPath}) {
+ # If the file was copied, then we have already incorporated the
+ # sourceRevision information into the change.
+ $diffHash{sourceRevision} = $headerHashRef->{sourceRevision} if $headerHashRef->{sourceRevision};
+ }
+ # FIXME: Remove the need for svnConvertedText. See the %diffHash
+ # code comments above for more information.
+ #
+ # Note, we may not always have SVN converted text since we intend
+ # to deprecate it in the future. For example, a property change
+ # diff for a file that only has property changes will not return
+ # any SVN converted text.
+ $diffHash{svnConvertedText} = $svnText if $svnText;
+ $diffHash{numTextChunks} = $numTextChunks if $svnText && !$headerHashRef->{isBinary};
+ push @diffHashRefs, \%diffHash;
+ }
+
+ if (!%$headerHashRef && $svnPropertiesHashRef) {
+ # A property change diff for a file that only has property changes.
+ my %propertyChangeHash;
+ $propertyChangeHash{executableBitDelta} = $svnPropertiesHashRef->{executableBitDelta} if $svnPropertiesHashRef->{executableBitDelta};
+ $propertyChangeHash{indexPath} = $svnPropertiesHashRef->{propertyPath};
+ $propertyChangeHash{isSvn} = 1;
+ push @diffHashRefs, \%propertyChangeHash;
+ }
+
+ return (\@diffHashRefs, $line);
+}
+
+# Parse an SVN property change diff from the given file handle, and advance
+# the handle so the last line read is the first line after this diff.
+#
+# For the case of an SVN binary diff, the binary contents will follow the
+# the property changes.
+#
+# This subroutine dies if the first line does not begin with "Property changes on"
+# or if the separator line that follows this line is missing.
+#
+# Args:
+# $fileHandle: advanced so the last line read from the handle is the first
+# line of the footer to parse. This line begins with
+# "Property changes on".
+# $line: the line last read from $fileHandle.
+#
+# Returns ($propertyHashRef, $lastReadLine):
+# $propertyHashRef: a hash reference representing an SVN diff footer.
+# propertyPath: the path of the target file.
+# executableBitDelta: the value 1 or -1 if the executable bit was added or
+# removed from the target file, respectively.
+# $lastReadLine: the line last read from $fileHandle.
+sub parseSvnDiffProperties($$)
+{
+ my ($fileHandle, $line) = @_;
+
+ $_ = $line;
+
+ my %footer;
+ if (/$svnPropertiesStartRegEx/) {
+ $footer{propertyPath} = $1;
+ } else {
+ die("Failed to find start of SVN property change, \"Property changes on \": \"$_\"");
+ }
+
+ # We advance $fileHandle two lines so that the next line that
+ # we process is $svnPropertyStartRegEx in a well-formed footer.
+ # A well-formed footer has the form:
+ # Property changes on: FileA
+ # ___________________________________________________________________
+ # Added: svn:executable
+ # + *
+ $_ = <$fileHandle>; # Not defined if end-of-file reached.
+ my $separator = "_" x 67;
+ if (defined($_) && /^$separator[\r\n]+$/) {
+ $_ = <$fileHandle>;
+ } else {
+ die("Failed to find separator line: \"$_\".");
+ }
+
+ # FIXME: We should expand this to support other SVN properties
+ # (e.g. return a hash of property key-values that represents
+ # all properties).
+ #
+ # Notice, we keep processing until we hit end-of-file or some
+ # line that does not resemble $svnPropertyStartRegEx, such as
+ # the empty line that precedes the start of the binary contents
+ # of a patch, or the start of the next diff (e.g. "Index:").
+ my $propertyHashRef;
+ while (defined($_) && /$svnPropertyStartRegEx/) {
+ ($propertyHashRef, $_) = parseSvnProperty($fileHandle, $_);
+ if ($propertyHashRef->{name} eq "svn:executable") {
+ # Notice, for SVN properties, propertyChangeDelta is always non-zero
+ # because a property can only be added or removed.
+ $footer{executableBitDelta} = $propertyHashRef->{propertyChangeDelta};
+ }
+ }
+
+ return(\%footer, $_);
+}
+
+# Parse the next SVN property from the given file handle, and advance the handle so the last
+# line read is the first line after the property.
+#
+# This subroutine dies if the first line is not a valid start of an SVN property,
+# or the property is missing a value, or the property change type (e.g. "Added")
+# does not correspond to the property value type (e.g. "+").
+#
+# Args:
+# $fileHandle: advanced so the last line read from the handle is the first
+# line of the property to parse. This should be a line
+# that matches $svnPropertyStartRegEx.
+# $line: the line last read from $fileHandle.
+#
+# Returns ($propertyHashRef, $lastReadLine):
+#   $propertyHashRef: a hash reference representing an SVN property.
+# name: the name of the property.
+# value: the last property value. For instance, suppose the property is "Modified".
+# Then it has both a '-' and '+' property value in that order. Therefore,
+# the value of this key is the value of the '+' property by ordering (since
+# it is the last value).
+# propertyChangeDelta: the value 1 or -1 if the property was added or
+# removed, respectively.
+# $lastReadLine: the line last read from $fileHandle.
+sub parseSvnProperty($$)
+{
+ my ($fileHandle, $line) = @_;
+
+ $_ = $line;
+
+ my $propertyName;
+ my $propertyChangeType;
+ if (/$svnPropertyStartRegEx/) {
+ $propertyChangeType = $1;
+ $propertyName = $2;
+ } else {
+ die("Failed to find SVN property: \"$_\".");
+ }
+
+ $_ = <$fileHandle>; # Not defined if end-of-file reached.
+
+ if (defined($_) && defined(parseChunkRange($_, "##"))) {
+ # FIXME: We should validate the chunk range line that is part of an SVN 1.7
+ # property diff. For now, we ignore this line.
+ $_ = <$fileHandle>;
+ }
+
+ # The "svn diff" command neither inserts newline characters between property values
+ # nor between successive properties.
+ #
+ # As of SVN 1.7, "svn diff" may insert "\ No newline at end of property" after a
+ # property value that doesn't end in a newline.
+ #
+    # FIXME: We do not support property values that contain trailing newline characters
+ # as it is difficult to disambiguate these trailing newlines from the empty
+ # line that precedes the contents of a binary patch.
+ my $propertyValue;
+ my $propertyValueType;
+ while (defined($_) && /$svnPropertyValueStartRegEx/) {
+ # Note, a '-' property may be followed by a '+' property in the case of a "Modified"
+ # or "Name" property. We only care about the ending value (i.e. the '+' property)
+ # in such circumstances. So, we take the property value for the property to be its
+ # last parsed property value.
+ #
+ # FIXME: We may want to consider strictly enforcing a '-', '+' property ordering or
+ # add error checking to prevent '+', '+', ..., '+' and other invalid combinations.
+ $propertyValueType = $1;
+ ($propertyValue, $_) = parseSvnPropertyValue($fileHandle, $_);
+ $_ = <$fileHandle> if defined($_) && /$svnPropertyValueNoNewlineRegEx/;
+ }
+
+ if (!$propertyValue) {
+ die("Failed to find the property value for the SVN property \"$propertyName\": \"$_\".");
+ }
+
+ my $propertyChangeDelta;
+ if ($propertyValueType eq "+" || $propertyValueType eq "Merged") {
+ $propertyChangeDelta = 1;
+ } elsif ($propertyValueType eq "-" || $propertyValueType eq "Reverse-merged") {
+ $propertyChangeDelta = -1;
+ } else {
+ die("Not reached.");
+ }
+
+ # We perform a simple validation that an "Added" or "Deleted" property
+ # change type corresponds with a "+" and "-" value type, respectively.
+ my $expectedChangeDelta;
+ if ($propertyChangeType eq "Added") {
+ $expectedChangeDelta = 1;
+ } elsif ($propertyChangeType eq "Deleted") {
+ $expectedChangeDelta = -1;
+ }
+
+ if ($expectedChangeDelta && $propertyChangeDelta != $expectedChangeDelta) {
+ die("The final property value type found \"$propertyValueType\" does not " .
+ "correspond to the property change type found \"$propertyChangeType\".");
+ }
+
+ my %propertyHash;
+ $propertyHash{name} = $propertyName;
+ $propertyHash{propertyChangeDelta} = $propertyChangeDelta;
+ $propertyHash{value} = $propertyValue;
+ return (\%propertyHash, $_);
+}
+
+# Parse the value of an SVN property from the given file handle, and advance
+# the handle so the last line read is the first line after the property value.
+#
+# This subroutine dies if the first line is an invalid SVN property value line
+# (i.e. a line that does not begin with " +" or " -").
+#
+# Args:
+# $fileHandle: advanced so the last line read from the handle is the first
+# line of the property value to parse. This should be a line
+# beginning with " +" or " -".
+# $line: the line last read from $fileHandle.
+#
+# Returns ($propertyValue, $lastReadLine):
+# $propertyValue: the value of the property.
+# $lastReadLine: the line last read from $fileHandle.
+sub parseSvnPropertyValue($$)
+{
+ my ($fileHandle, $line) = @_;
+
+ $_ = $line;
+
+ my $propertyValue;
+ my $eol;
+ if (/$svnPropertyValueStartRegEx/) {
+ $propertyValue = $2; # Does not include the end-of-line character(s).
+ $eol = $POSTMATCH;
+ } else {
+ die("Failed to find property value beginning with '+', '-', 'Merged', or 'Reverse-merged': \"$_\".");
+ }
+
+ while (<$fileHandle>) {
+ if (/^[\r\n]+$/ || /$svnPropertyValueStartRegEx/ || /$svnPropertyStartRegEx/ || /$svnPropertyValueNoNewlineRegEx/) {
+ # Note, we may encounter an empty line before the contents of a binary patch.
+ # Also, we check for $svnPropertyValueStartRegEx because a '-' property may be
+ # followed by a '+' property in the case of a "Modified" or "Name" property.
+ # We check for $svnPropertyStartRegEx because it indicates the start of the
+ # next property to parse.
+ last;
+ }
+
+ # Temporarily strip off any end-of-line characters. We add the end-of-line characters
+ # from the previously processed line to the start of this line so that the last line
+ # of the property value does not end in end-of-line characters.
+ s/([\n\r]+)$//;
+ $propertyValue .= "$eol$_";
+ $eol = $1;
+ }
+
+ return ($propertyValue, $_);
+}
+
+# Parse a patch file created by svn-create-patch.
+#
+# Args:
+# $fileHandle: A file handle to the patch file that has not yet been
+# read from.
+# $optionsHashRef: a hash reference representing optional options to use
+# when processing a diff.
+#       shouldNotUseIndexPathEOL: whether to use the line endings in the diff
+# instead of the line endings in the target file; the
+# value of 1 if svnConvertedText should use the line
+# endings in the diff.
+#
+# Returns:
+# @diffHashRefs: an array of diff hash references.
+# See the %diffHash documentation above.
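+#
+# Example usage (sketch; "change.patch" is a placeholder file name):
+#
+#     open(my $fh, "<", "change.patch") or die;
+#     my @diffHashRefs = parsePatch($fh);
+#     close($fh);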
+sub parsePatch($;$)
+{
+ my ($fileHandle, $optionsHashRef) = @_;
+
+ my $newDiffHashRefs;
+ my @diffHashRefs; # return value
+
+ my $line = <$fileHandle>;
+
+ while (defined($line)) { # Otherwise, at EOF.
+
+ ($newDiffHashRefs, $line) = parseDiff($fileHandle, $line, $optionsHashRef);
+
+ push @diffHashRefs, @$newDiffHashRefs;
+ }
+
+ return @diffHashRefs;
+}
+
+# Prepare the results of parsePatch() for use in svn-apply and svn-unapply.
+#
+# Args:
+# $shouldForce: Whether to continue processing if an unexpected
+# state occurs.
+# @diffHashRefs: An array of references to %diffHashes.
+# See the %diffHash documentation above.
+#
+# Returns $preparedPatchHashRef:
+# copyDiffHashRefs: A reference to an array of the $diffHashRefs in
+# @diffHashRefs that represent file copies. The original
+# ordering is preserved.
+# nonCopyDiffHashRefs: A reference to an array of the $diffHashRefs in
+# @diffHashRefs that do not represent file copies.
+# The original ordering is preserved.
+# sourceRevisionHash: A reference to a hash of source path to source
+# revision number.
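+#
+# Example usage (sketch):
+#
+#     my $prepared = prepareParsedPatch(0, @diffHashRefs);
+#     my @copies = @{$prepared->{copyDiffHashRefs}};
+#     my @others = @{$prepared->{nonCopyDiffHashRefs}};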
+sub prepareParsedPatch($@)
+{
+ my ($shouldForce, @diffHashRefs) = @_;
+
+ my %copiedFiles;
+
+ # Return values
+ my @copyDiffHashRefs = ();
+ my @nonCopyDiffHashRefs = ();
+ my %sourceRevisionHash = ();
+ for my $diffHashRef (@diffHashRefs) {
+ my $copiedFromPath = $diffHashRef->{copiedFromPath};
+ my $indexPath = $diffHashRef->{indexPath};
+ my $sourceRevision = $diffHashRef->{sourceRevision};
+ my $sourcePath;
+
+ if (defined($copiedFromPath)) {
+ # Then the diff is a copy operation.
+ $sourcePath = $copiedFromPath;
+
+ # FIXME: Consider printing a warning or exiting if
+ # exists($copiedFiles{$indexPath}) is true -- i.e. if
+ # $indexPath appears twice as a copy target.
+ $copiedFiles{$indexPath} = $sourcePath;
+
+ push @copyDiffHashRefs, $diffHashRef;
+ } else {
+ # Then the diff is not a copy operation.
+ $sourcePath = $indexPath;
+
+ push @nonCopyDiffHashRefs, $diffHashRef;
+ }
+
+ if (defined($sourceRevision)) {
+ if (exists($sourceRevisionHash{$sourcePath}) &&
+ ($sourceRevisionHash{$sourcePath} != $sourceRevision)) {
+ if (!$shouldForce) {
+ die "Two revisions of the same file required as a source:\n".
+ " $sourcePath:$sourceRevisionHash{$sourcePath}\n".
+ " $sourcePath:$sourceRevision";
+ }
+ }
+ $sourceRevisionHash{$sourcePath} = $sourceRevision;
+ }
+ }
+
+ my %preparedPatchHash;
+
+ $preparedPatchHash{copyDiffHashRefs} = \@copyDiffHashRefs;
+ $preparedPatchHash{nonCopyDiffHashRefs} = \@nonCopyDiffHashRefs;
+ $preparedPatchHash{sourceRevisionHash} = \%sourceRevisionHash;
+
+ return \%preparedPatchHash;
+}
+
+# Return localtime() for the project's time zone, given an integer time as
+# returned by Perl's time() function.
+sub localTimeInProjectTimeZone($)
+{
+ my $epochTime = shift;
+
+ # Change the time zone temporarily for the localtime() call.
+ my $savedTimeZone = $ENV{'TZ'};
+ $ENV{'TZ'} = $changeLogTimeZone;
+ my @localTime = localtime($epochTime);
+ if (defined $savedTimeZone) {
+ $ENV{'TZ'} = $savedTimeZone;
+ } else {
+ delete $ENV{'TZ'};
+ }
+
+ return @localTime;
+}
+
+# Set the reviewer and date in a ChangeLog patch, and return the new patch.
+#
+# Args:
+# $patch: a ChangeLog patch as a string.
+# $reviewer: the name of the reviewer, or undef if the reviewer should not be set.
+# $epochTime: an integer time as returned by Perl's time() function.
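+#
+# Example (sketch; the reviewer name is a placeholder):
+#
+#     my $updated = setChangeLogDateAndReviewer($patch, "Jane Doe", time());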
+sub setChangeLogDateAndReviewer($$$)
+{
+ my ($patch, $reviewer, $epochTime) = @_;
+
+ my @localTime = localTimeInProjectTimeZone($epochTime);
+ my $newDate = strftime("%Y-%m-%d", @localTime);
+
+ my $firstChangeLogLineRegEx = qr#(\n\+)\d{4}-[^-]{2}-[^-]{2}( )#;
+ $patch =~ s/$firstChangeLogLineRegEx/$1$newDate$2/;
+
+ if (defined($reviewer)) {
+ # We include a leading plus ("+") in the regular expression to make
+ # the regular expression less likely to match text in the leading junk
+ # for the patch, if the patch has leading junk.
+ $patch =~ s/(\n\+.*)NOBODY \(OOPS!\)/$1$reviewer/;
+ }
+
+ return $patch;
+}
+
+# If possible, returns a ChangeLog patch equivalent to the given one,
+# but with the newest ChangeLog entry inserted at the top of the
+# file -- i.e. no leading context and all lines starting with "+".
+#
+# If given a patch string not representable as a patch with the above
+# properties, it returns the input back unchanged.
+#
+# WARNING: This subroutine can return an inequivalent patch string if
+# both the beginning of the new ChangeLog file matches the beginning
+# of the source ChangeLog, and the source beginning was modified.
+# Otherwise, it is guaranteed to return an equivalent patch string,
+# if it returns.
+#
+# Applying this subroutine to ChangeLog patches allows svn-apply to
+# insert new ChangeLog entries at the top of the ChangeLog file.
+# svn-apply uses patch with --fuzz=3 to do this. We need to apply
+# this subroutine because the diff(1) command is greedy when matching
+# lines. A new ChangeLog entry with the same date and author as the
+# previous will match and cause the diff to have lines of starting
+# context.
+#
+# This subroutine has unit tests in VCSUtils_unittest.pl.
+#
+# Returns $changeLogHashRef:
+# $changeLogHashRef: a hash reference representing a change log patch.
+# patch: a ChangeLog patch equivalent to the given one, but with the
+# newest ChangeLog entry inserted at the top of the file, if possible.
+sub fixChangeLogPatch($)
+{
+ my $patch = shift; # $patch will only contain patch fragments for ChangeLog.
+
+ $patch =~ s|test_expectations.txt:|TestExpectations:|g;
+
+ $patch =~ /(\r?\n)/;
+ my $lineEnding = $1;
+ my @lines = split(/$lineEnding/, $patch);
+
+ my $i = 0; # We reuse the same index throughout.
+
+ # Skip to beginning of first chunk.
+ for (; $i < @lines; ++$i) {
+ if (substr($lines[$i], 0, 1) eq "@") {
+ last;
+ }
+ }
+ my $chunkStartIndex = ++$i;
+ my %changeLogHashRef;
+
+ # Optimization: do not process if new lines already begin the chunk.
+ if (substr($lines[$i], 0, 1) eq "+") {
+ $changeLogHashRef{patch} = $patch;
+ return \%changeLogHashRef;
+ }
+
+ # Skip to first line of newly added ChangeLog entry.
+ # For example, +2009-06-03 Eric Seidel <eric@webkit.org>
+ my $dateStartRegEx = '^\+(\d{4}-\d{2}-\d{2})' # leading "+" and date
+ . '\s+(.+)\s+' # name
+ . '<([^<>]+)>$'; # e-mail address
+
+ for (; $i < @lines; ++$i) {
+ my $line = $lines[$i];
+ my $firstChar = substr($line, 0, 1);
+ if ($line =~ /$dateStartRegEx/) {
+ last;
+ } elsif ($firstChar eq " " or $firstChar eq "+") {
+ next;
+ }
+ $changeLogHashRef{patch} = $patch; # Do not change if, for example, "-" or "@" found.
+ return \%changeLogHashRef;
+ }
+ if ($i >= @lines) {
+ $changeLogHashRef{patch} = $patch; # Do not change if date not found.
+ return \%changeLogHashRef;
+ }
+ my $dateStartIndex = $i;
+
+ # Rewrite overlapping lines to lead with " ".
+ my @overlappingLines = (); # These will include a leading "+".
+ for (; $i < @lines; ++$i) {
+ my $line = $lines[$i];
+ if (substr($line, 0, 1) ne "+") {
+ last;
+ }
+ push(@overlappingLines, $line);
+ $lines[$i] = " " . substr($line, 1);
+ }
+
+ # Remove excess ending context, if necessary.
+ my $shouldTrimContext = 1;
+ for (; $i < @lines; ++$i) {
+ my $firstChar = substr($lines[$i], 0, 1);
+ if ($firstChar eq " ") {
+ next;
+ } elsif ($firstChar eq "@") {
+ last;
+ }
+ $shouldTrimContext = 0; # For example, if "+" or "-" encountered.
+ last;
+ }
+ my $deletedLineCount = 0;
+ if ($shouldTrimContext) { # Also occurs if end of file reached.
+ splice(@lines, $i - @overlappingLines, @overlappingLines);
+ $deletedLineCount = @overlappingLines;
+ }
+
+ # Work backwards, shifting overlapping lines towards front
+ # while checking that patch stays equivalent.
+ for ($i = $dateStartIndex - 1; @overlappingLines && $i >= $chunkStartIndex; --$i) {
+ my $line = $lines[$i];
+ if (substr($line, 0, 1) ne " ") {
+ next;
+ }
+ my $text = substr($line, 1);
+ my $newLine = pop(@overlappingLines);
+ if ($text ne substr($newLine, 1)) {
+ $changeLogHashRef{patch} = $patch; # Unexpected difference.
+ return \%changeLogHashRef;
+ }
+ $lines[$i] = "+$text";
+ }
+
+ # If @overlappingLines > 0, this is where we make use of the
+ # assumption that the beginning of the source file was not modified.
+ splice(@lines, $chunkStartIndex, 0, @overlappingLines);
+
+ # Update the date start index as it may have changed after shifting
+ # the overlapping lines towards the front.
+ for ($i = $chunkStartIndex; $i < $dateStartIndex; ++$i) {
+ $dateStartIndex = $i if $lines[$i] =~ /$dateStartRegEx/;
+ }
+ splice(@lines, $chunkStartIndex, $dateStartIndex - $chunkStartIndex); # Remove context of later entry.
+ $deletedLineCount += $dateStartIndex - $chunkStartIndex;
+
+ # Update the initial chunk range.
+ my $chunkRangeHashRef = parseChunkRange($lines[$chunkStartIndex - 1]);
+ if (!$chunkRangeHashRef) {
+ # FIXME: Handle errors differently from ChangeLog files that
+ # are okay but should not be altered. That way we can find out
+ # if improvements to the script ever become necessary.
+ $changeLogHashRef{patch} = $patch; # Error: unexpected patch string format.
+ return \%changeLogHashRef;
+ }
+ my $oldSourceLineCount = $chunkRangeHashRef->{lineCount};
+ my $oldTargetLineCount = $chunkRangeHashRef->{newLineCount};
+
+ my $sourceLineCount = $oldSourceLineCount + @overlappingLines - $deletedLineCount;
+ my $targetLineCount = $oldTargetLineCount + @overlappingLines - $deletedLineCount;
+ $lines[$chunkStartIndex - 1] = "@@ -1,$sourceLineCount +1,$targetLineCount @@";
+
+ $changeLogHashRef{patch} = join($lineEnding, @lines) . "\n"; # patch(1) expects an extra trailing newline.
+ return \%changeLogHashRef;
+}
+
+# This is a supporting method for runPatchCommand.
+#
+# Arg: the optional $args parameter passed to runPatchCommand (can be undefined).
+#
+# Returns ($patchCommand, $isForcing).
+#
+# This subroutine has unit tests in VCSUtils_unittest.pl.
+sub generatePatchCommand($)
+{
+ my ($passedArgsHashRef) = @_;
+
+ my $argsHashRef = { # Defaults
+ ensureForce => 0,
+ shouldReverse => 0,
+ options => []
+ };
+
+ # Merges hash references. It's okay here if passed hash reference is undefined.
+ @{$argsHashRef}{keys %{$passedArgsHashRef}} = values %{$passedArgsHashRef};
+
+ my $ensureForce = $argsHashRef->{ensureForce};
+ my $shouldReverse = $argsHashRef->{shouldReverse};
+ my $options = $argsHashRef->{options};
+
+ if (! $options) {
+ $options = [];
+ } else {
+ $options = [@{$options}]; # Copy to avoid side effects.
+ }
+
+ my $isForcing = 0;
+ if (grep /^--force$/, @{$options}) {
+ $isForcing = 1;
+ } elsif ($ensureForce) {
+ push @{$options}, "--force";
+ $isForcing = 1;
+ }
+
+ if ($shouldReverse) { # No check: --reverse should never be passed explicitly.
+ push @{$options}, "--reverse";
+ }
+
+ @{$options} = sort(@{$options}); # For easier testing.
+
+ my $patchCommand = join(" ", "patch -p0", @{$options});
+
+ return ($patchCommand, $isForcing);
+}
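+
+# For illustration (hypothetical arguments):
+#
+#     my ($command, $isForcing) = generatePatchCommand({ ensureForce => 1,
+#                                                        shouldReverse => 1 });
+#
+# yields $command equal to "patch -p0 --force --reverse" and $isForcing
+# equal to 1.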
+
+# Apply the given patch using the patch(1) command.
+#
+# On success, return the resulting exit status. Otherwise, exit with the
+# exit status. If "--force" is passed as an option, however, then never
+# exit and always return the exit status.
+#
+# Args:
+# $patch: a patch string.
+# $repositoryRootPath: an absolute path to the repository root.
+# $pathRelativeToRoot: the path of the file to be patched, relative to the
+# repository root. This should normally be the path
+# found in the patch's "Index:" line. It is passed
+# explicitly rather than reparsed from the patch
+# string for optimization purposes.
+# This is used only for error reporting. The
+# patch command gleans the actual file to patch
+# from the patch string.
+# $args: a reference to a hash of optional arguments. The possible
+# keys are --
+# ensureForce: whether to ensure --force is passed (defaults to 0).
+# shouldReverse: whether to pass --reverse (defaults to 0).
+# options: a reference to an array of options to pass to the
+# patch command. The subroutine passes the -p0 option
+# no matter what. This should not include --reverse.
+#
+# This subroutine has unit tests in VCSUtils_unittest.pl.
+sub runPatchCommand($$$;$)
+{
+ my ($patch, $repositoryRootPath, $pathRelativeToRoot, $args) = @_;
+
+ my ($patchCommand, $isForcing) = generatePatchCommand($args);
+
+ # Temporarily change the working directory since the path found
+ # in the patch's "Index:" line is relative to the repository root
+ # (i.e. the same as $pathRelativeToRoot).
+ my $cwd = Cwd::getcwd();
+ chdir $repositoryRootPath;
+
+ open PATCH, "| $patchCommand" or die "Could not call \"$patchCommand\" for file \"$pathRelativeToRoot\": $!";
+ print PATCH $patch;
+ close PATCH;
+ my $exitStatus = exitStatus($?);
+
+ chdir $cwd;
+
+ if ($exitStatus && !$isForcing) {
+ print "Calling \"$patchCommand\" for file \"$pathRelativeToRoot\" returned " .
+ "status $exitStatus. Pass --force to ignore patch failures.\n";
+ exit $exitStatus;
+ }
+
+ return $exitStatus;
+}
+
+# Merge ChangeLog patches using a three-file approach.
+#
+# This is used by resolve-ChangeLogs when it's operated as a merge driver
+# and when it's used to merge conflicts after a patch is applied or after
+# an svn update.
+#
+# It's also used for traditional rejected patches.
+#
+# Args:
+#   $fileMine:  The merged version of the file.  Also known in git as the
+#               other branch's version (%B) or "theirs".
+# For traditional patch rejects, this is the *.rej file.
+# $fileOlder: The base version of the file. Also known in git as the
+# ancestor version (%O) or "base".
+# For traditional patch rejects, this is the *.orig file.
+#   $fileNewer: The current version of the file.  Also known in git as the
+#               current version (%A) or "ours".
+# For traditional patch rejects, this is the original-named
+# file.
+#
+# Returns 1 if merge was successful, else 0.
+sub mergeChangeLogs($$$)
+{
+ my ($fileMine, $fileOlder, $fileNewer) = @_;
+
+ my $traditionalReject = $fileMine =~ /\.rej$/ ? 1 : 0;
+
+ local $/ = undef;
+
+ my $patch;
+ if ($traditionalReject) {
+ open(DIFF, "<", $fileMine) or die $!;
+ $patch = <DIFF>;
+ close(DIFF);
+ rename($fileMine, "$fileMine.save");
+ rename($fileOlder, "$fileOlder.save");
+ } else {
+ open(DIFF, "diff -u -a --binary \"$fileOlder\" \"$fileMine\" |") or die $!;
+ $patch = <DIFF>;
+ close(DIFF);
+ }
+
+ unlink("${fileNewer}.orig");
+ unlink("${fileNewer}.rej");
+
+ open(PATCH, "| patch --force --fuzz=3 --binary \"$fileNewer\" > " . File::Spec->devnull()) or die $!;
+ if ($traditionalReject) {
+ print PATCH $patch;
+ } else {
+ my $changeLogHash = fixChangeLogPatch($patch);
+ print PATCH $changeLogHash->{patch};
+ }
+ close(PATCH);
+
+ my $result = !exitStatus($?);
+
+ # Refuse to merge the patch if it did not apply cleanly
+ if (-e "${fileNewer}.rej") {
+ unlink("${fileNewer}.rej");
+ if (-f "${fileNewer}.orig") {
+ unlink($fileNewer);
+ rename("${fileNewer}.orig", $fileNewer);
+ }
+ } else {
+ unlink("${fileNewer}.orig");
+ }
+
+ if ($traditionalReject) {
+ rename("$fileMine.save", $fileMine);
+ rename("$fileOlder.save", $fileOlder);
+ }
+
+ return $result;
+}
+
+sub gitConfig($)
+{
+ return unless $isGit;
+
+ my ($config) = @_;
+
+ my $result = `git config $config`;
+ chomp $result;
+ return $result;
+}
+
+sub changeLogSuffix()
+{
+ my $rootPath = determineVCSRoot();
+ my $changeLogSuffixFile = File::Spec->catfile($rootPath, ".changeLogSuffix");
+ return "" if ! -e $changeLogSuffixFile;
+ open FILE, $changeLogSuffixFile or die "Could not open $changeLogSuffixFile: $!";
+ my $changeLogSuffix = <FILE>;
+ chomp $changeLogSuffix;
+ close FILE;
+ return $changeLogSuffix;
+}
+
+sub changeLogFileName()
+{
+    return "ChangeLog" . changeLogSuffix();
+}
+
+sub changeLogNameError($)
+{
+ my ($message) = @_;
+ print STDERR "$message\nEither:\n";
+ print STDERR " set CHANGE_LOG_NAME in your environment\n";
+ print STDERR " OR pass --name= on the command line\n";
+    print STDERR "  OR set REAL_NAME in your environment\n";
+ print STDERR " OR git users can set 'git config user.name'\n";
+ exit(1);
+}
+
+sub changeLogName()
+{
+ my $name = $ENV{CHANGE_LOG_NAME} || $ENV{REAL_NAME} || gitConfig("user.name") || (split /\s*,\s*/, (getpwuid $<)[6])[0];
+
+ changeLogNameError("Failed to determine ChangeLog name.") unless $name;
+    # getpwuid seems to always succeed on Windows, returning the username instead of the full name. This check will catch that case.
+ changeLogNameError("'$name' does not contain a space! ChangeLogs should contain your full name.") unless ($name =~ /\S\s\S/);
+
+ return $name;
+}
+
+sub changeLogEmailAddressError($)
+{
+ my ($message) = @_;
+ print STDERR "$message\nEither:\n";
+ print STDERR " set CHANGE_LOG_EMAIL_ADDRESS in your environment\n";
+ print STDERR " OR pass --email= on the command line\n";
+ print STDERR " OR set EMAIL_ADDRESS in your environment\n";
+ print STDERR " OR git users can set 'git config user.email'\n";
+ exit(1);
+}
+
+sub changeLogEmailAddress()
+{
+ my $emailAddress = $ENV{CHANGE_LOG_EMAIL_ADDRESS} || $ENV{EMAIL_ADDRESS} || gitConfig("user.email");
+
+ changeLogEmailAddressError("Failed to determine email address for ChangeLog.") unless $emailAddress;
+ changeLogEmailAddressError("Email address '$emailAddress' does not contain '\@' and is likely invalid.") unless ($emailAddress =~ /\@/);
+
+ return $emailAddress;
+}
+
+# http://tools.ietf.org/html/rfc1924
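+#
+# Worked example (illustrative): each group of five characters encodes one
+# 32-bit big-endian value. The group "0000F" decodes as
+# ((((0*85 + 0)*85 + 0)*85 + 0)*85 + 15) = 15, which emits the four bytes
+# 0x00 0x00 0x00 0x0F.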
+sub decodeBase85($)
+{
+ my ($encoded) = @_;
+ my %table;
+ my @characters = ('0'..'9', 'A'..'Z', 'a'..'z', '!', '#', '$', '%', '&', '(', ')', '*', '+', '-', ';', '<', '=', '>', '?', '@', '^', '_', '`', '{', '|', '}', '~');
+ for (my $i = 0; $i < 85; $i++) {
+ $table{$characters[$i]} = $i;
+ }
+
+ my $decoded = '';
+ my @encodedChars = $encoded =~ /./g;
+
+ for (my $encodedIter = 0; defined($encodedChars[$encodedIter]);) {
+ my $digit = 0;
+ for (my $i = 0; $i < 5; $i++) {
+ $digit *= 85;
+ my $char = $encodedChars[$encodedIter];
+ $digit += $table{$char};
+ $encodedIter++;
+ }
+
+ for (my $i = 0; $i < 4; $i++) {
+ $decoded .= chr(($digit >> (3 - $i) * 8) & 255);
+ }
+ }
+
+ return $decoded;
+}
+
+sub decodeGitBinaryChunk($$)
+{
+ my ($contents, $fullPath) = @_;
+
+    # Load this module lazily in case the user doesn't have it installed
+    # and won't be handling git binary patches.
+ require Compress::Zlib;
+
+ my $encoded = "";
+ my $compressedSize = 0;
+ while ($contents =~ /^([A-Za-z])(.*)$/gm) {
+ my $line = $2;
+ next if $line eq "";
+ die "$fullPath: unexpected size of a line: $&" if length($2) % 5 != 0;
+ my $actualSize = length($2) / 5 * 4;
+ my $encodedExpectedSize = ord($1);
+ my $expectedSize = $encodedExpectedSize <= ord("Z") ? $encodedExpectedSize - ord("A") + 1 : $encodedExpectedSize - ord("a") + 27;
+
+ die "$fullPath: unexpected size of a line: $&" if int(($expectedSize + 3) / 4) * 4 != $actualSize;
+ $compressedSize += $expectedSize;
+ $encoded .= $line;
+ }
+
+ my $compressed = decodeBase85($encoded);
+ $compressed = substr($compressed, 0, $compressedSize);
+ return Compress::Zlib::uncompress($compressed);
+}
+
+sub decodeGitBinaryPatch($$)
+{
+ my ($contents, $fullPath) = @_;
+
+    # A git binary patch has two chunks: one for normal patching and
+    # another for reverse patching.
+    #
+    # Each chunk begins with a line that starts with either "literal" or
+    # "delta", followed by a number giving the decoded size of the chunk.
+    #
+    # The content of the chunk comes next. To decode the content, we need
+    # to decode it with base85 first, and then decompress it with zlib.
+ my $gitPatchRegExp = '(literal|delta) ([0-9]+)\n([A-Za-z0-9!#$%&()*+-;<=>?@^_`{|}~\\n]*?)\n\n';
+ if ($contents !~ m"\nGIT binary patch\n$gitPatchRegExp$gitPatchRegExp\Z") {
+ die "$fullPath: unknown git binary patch format"
+ }
+
+ my $binaryChunkType = $1;
+ my $binaryChunkExpectedSize = $2;
+ my $encodedChunk = $3;
+ my $reverseBinaryChunkType = $4;
+ my $reverseBinaryChunkExpectedSize = $5;
+ my $encodedReverseChunk = $6;
+
+ my $binaryChunk = decodeGitBinaryChunk($encodedChunk, $fullPath);
+ my $binaryChunkActualSize = length($binaryChunk);
+ my $reverseBinaryChunk = decodeGitBinaryChunk($encodedReverseChunk, $fullPath);
+ my $reverseBinaryChunkActualSize = length($reverseBinaryChunk);
+
+    die "$fullPath: unexpected size of the first chunk (expected $binaryChunkExpectedSize but was $binaryChunkActualSize)" if ($binaryChunkType eq "literal" and $binaryChunkExpectedSize != $binaryChunkActualSize);
+    die "$fullPath: unexpected size of the second chunk (expected $reverseBinaryChunkExpectedSize but was $reverseBinaryChunkActualSize)" if ($reverseBinaryChunkType eq "literal" and $reverseBinaryChunkExpectedSize != $reverseBinaryChunkActualSize);
+
+ return ($binaryChunkType, $binaryChunk, $reverseBinaryChunkType, $reverseBinaryChunk);
+}
+
+sub readByte($$)
+{
+ my ($data, $location) = @_;
+
+ # Return the byte at $location in $data as a numeric value.
+ return ord(substr($data, $location, 1));
+}
+
+# The git binary delta format is undocumented, except in code:
+# - https://github.com/git/git/blob/master/delta.h:get_delta_hdr_size is the source
+# of the algorithm in decodeGitBinaryPatchDeltaSize.
+# - https://github.com/git/git/blob/master/patch-delta.c:patch_delta is the source
+# of the algorithm in applyGitBinaryPatchDelta.
+sub decodeGitBinaryPatchDeltaSize($)
+{
+ my ($binaryChunk) = @_;
+
+ # Source and destination buffer sizes are stored in 7-bit chunks at the
+ # start of the binary delta patch data. The highest bit in each byte
+ # except the last is set; the remaining 7 bits provide the next
+ # chunk of the size. The chunks are stored in ascending significance
+ # order.
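+    #
+    # Worked example (illustrative): the byte sequence 0x91 0x2E decodes
+    # as (0x91 & 0x7f) = 17 (high bit set, so keep reading), then
+    # (0x2E & 0x7f) << 7 = 5888 (high bit clear, so stop), giving
+    # ($size, $i) = (5905, 2).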
+ my $cmd;
+ my $size = 0;
+ my $shift = 0;
+ for (my $i = 0; $i < length($binaryChunk);) {
+ $cmd = readByte($binaryChunk, $i++);
+ $size |= ($cmd & 0x7f) << $shift;
+ $shift += 7;
+ if (!($cmd & 0x80)) {
+ return ($size, $i);
+ }
+ }
+}
+
+sub applyGitBinaryPatchDelta($$)
+{
+ my ($binaryChunk, $originalContents) = @_;
+
+ # Git delta format consists of two headers indicating source buffer size
+ # and result size, then a series of commands. Each command is either
+ # a copy-from-old-version (the 0x80 bit is set) or a copy-from-delta
+ # command. Commands are applied sequentially to generate the result.
+ #
+    # A copy-from-old-version command encodes an offset and size in the
+    # bytes that follow (the low bits of the command indicate which bytes
+    # are present), while a copy-from-delta command consists only of the
+    # number of bytes to copy from the delta.
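+    #
+    # Worked example (illustrative): a command byte of 0x91 (0b10010001)
+    # has the 0x80 bit set, so it copies from the original contents; its
+    # 0x01 and 0x10 bits say that one offset byte and one size byte
+    # follow. If those bytes are 0x10 and 0x20, the command copies 0x20
+    # bytes starting at offset 0x10 of the original data.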
+
+ # We don't use these values, but we need to know how big they are so that
+ # we can skip to the diff data.
+ my ($size, $bytesUsed) = decodeGitBinaryPatchDeltaSize($binaryChunk);
+ $binaryChunk = substr($binaryChunk, $bytesUsed);
+ ($size, $bytesUsed) = decodeGitBinaryPatchDeltaSize($binaryChunk);
+ $binaryChunk = substr($binaryChunk, $bytesUsed);
+
+ my $out = "";
+ for (my $i = 0; $i < length($binaryChunk); ) {
+ my $cmd = ord(substr($binaryChunk, $i++, 1));
+ if ($cmd & 0x80) {
+ # Extract an offset and size from the delta data, then copy
+ # $size bytes from $offset in the original data into the output.
+ my $offset = 0;
+ my $size = 0;
+ if ($cmd & 0x01) { $offset = readByte($binaryChunk, $i++); }
+ if ($cmd & 0x02) { $offset |= readByte($binaryChunk, $i++) << 8; }
+ if ($cmd & 0x04) { $offset |= readByte($binaryChunk, $i++) << 16; }
+ if ($cmd & 0x08) { $offset |= readByte($binaryChunk, $i++) << 24; }
+ if ($cmd & 0x10) { $size = readByte($binaryChunk, $i++); }
+ if ($cmd & 0x20) { $size |= readByte($binaryChunk, $i++) << 8; }
+ if ($cmd & 0x40) { $size |= readByte($binaryChunk, $i++) << 16; }
+ if ($size == 0) { $size = 0x10000; }
+ $out .= substr($originalContents, $offset, $size);
+ } elsif ($cmd) {
+ # Copy $cmd bytes from the delta data into the output.
+ $out .= substr($binaryChunk, $i, $cmd);
+ $i += $cmd;
+ } else {
+ die "unexpected delta opcode 0";
+ }
+ }
+
+ return $out;
+}
+
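+# Subversion interprets a trailing "@<suffix>" in a path as a peg revision,
+# so a path that contains "@" must be given an extra trailing "@" to be
+# taken literally. For example, "Icon@2x.png" escapes to "Icon@2x.png@".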
+sub escapeSubversionPath($)
+{
+ my ($path) = @_;
+ $path .= "@" if $path =~ /@/;
+ return $path;
+}
+
+sub runCommand(@)
+{
+ my @args = @_;
+ my $pid = open(CHILD, "-|");
+ if (!defined($pid)) {
+ die "Failed to fork(): $!";
+ }
+ if ($pid) {
+ # Parent process
+ my $childStdout;
+ while (<CHILD>) {
+ $childStdout .= $_;
+ }
+ close(CHILD);
+ my %childOutput;
+ $childOutput{exitStatus} = exitStatus($?);
+ $childOutput{stdout} = $childStdout if $childStdout;
+ return \%childOutput;
+ }
+ # Child process
+ # FIXME: Consider further hardening of this function, including sanitizing the environment.
+ exec { $args[0] } @args or die "Failed to exec(): $!";
+}
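+
+# For illustration (hypothetical command):
+#
+#     my $result = runCommand("/bin/echo", "hello");
+#     # $result->{exitStatus} is 0 and $result->{stdout} is "hello\n".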
+
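+# Maps an SVN revision number to the corresponding git commit hash by
+# running "git svn find-rev r<revision>" (a standard git-svn subcommand).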
+sub gitCommitForSVNRevision
+{
+ my ($svnRevision) = @_;
+ my $command = "git svn find-rev r" . $svnRevision;
+ $command = "LC_ALL=C $command" if !isWindows();
+ my $gitHash = `$command`;
+ if (!defined($gitHash)) {
+ $gitHash = "unknown";
+ warn "Unable to determine GIT commit from SVN revision";
+ } else {
+ chop($gitHash);
+ }
+ return $gitHash;
+}
+
+sub listOfChangedFilesBetweenRevisions
+{
+ my ($sourceDir, $firstRevision, $lastRevision) = @_;
+ my $command;
+
+ if ($firstRevision eq "unknown" or $lastRevision eq "unknown") {
+ return ();
+ }
+
+ # Some VCS functions don't work from within the build dir, so always
+ # go to the source dir first.
+ my $cwd = Cwd::getcwd();
+ chdir $sourceDir;
+
+ if (isGit()) {
+ my $firstCommit = gitCommitForSVNRevision($firstRevision);
+ my $lastCommit = gitCommitForSVNRevision($lastRevision);
+ $command = "git diff --name-status $firstCommit..$lastCommit";
+ } elsif (isSVN()) {
+ $command = "svn diff --summarize -r $firstRevision:$lastRevision";
+ }
+
+ my @result = ();
+
+ if ($command) {
+ my $diffOutput = `$command`;
+ $diffOutput =~ s/^[A-Z]\s+//gm;
+ @result = split(/[\r\n]+/, $diffOutput);
+ }
+
+ chdir $cwd;
+
+ return @result;
+}
+
+
+1;
diff --git a/src/third_party/blink/Tools/Scripts/add-include b/src/third_party/blink/Tools/Scripts/add-include
new file mode 100755
index 0000000..3560c33
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/add-include
@@ -0,0 +1,135 @@
+#!/usr/bin/perl -w
+
+# Copyright 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Helper script to add includes to source files.
+
+use strict;
+
+my $headerPattern = '[\"<][A-Za-z][A-Za-z0-9_/]+(\.h)?[\">]'; # " Make Xcode formatter happy.
+
+my $headerToAdd = shift @ARGV or die;
+$headerToAdd =~ /^([A-Za-z][A-Za-z0-9]+)\.h$/ or die "Header to add must be a .h file: $headerToAdd.\n";
+
+sub includesParagraph;
+
+FILE: for my $filename (@ARGV) {
+ unless ($filename =~ /(\w+)\.cpp$/) { print STDERR "Command line args must be .cpp files: $filename.\n"; next FILE; }
+
+ my $base = $1;
+
+ my $sawConfig = 0;
+ my $sawSelfInclude = 0;
+
+ my $pastIncludes = 0;
+ my %includes;
+
+ my $beforeIncludes = "";
+ my $afterIncludes = "";
+
+ my $currentCondition = "";
+
+ my $entireFileCondition = "";
+
+ unless (open INPUT, "<", $filename) { print STDERR "File does not exist: $filename\n"; next FILE; }
+ while (my $line = <INPUT>) {
+ if ($line =~ /^\s*#(include|import)\s*($headerPattern)\s*\n/) {
+ my $include = $2;
+ if ($pastIncludes) { print STDERR "Saw more includes after include section in $filename, line $.\n"; next FILE; }
+ if ($include eq "\"config.h\"") {
+ $sawConfig = 1;
+ } else {
+ unless ($sawConfig) { print STDERR "First include must be config.h in $filename, line $.\n"; next FILE; }
+ if ($include eq "\"$base.h\"") {
+ $sawSelfInclude = 1;
+ } else {
+ unless ($sawSelfInclude) { print STDERR "Second include must be $base.h in $filename, line $.\n"; next FILE; }
+ $includes{$currentCondition}{$include} = 1;
+ }
+ }
+ } else {
+ if ($sawConfig && !$pastIncludes) {
+ if ($line =~ /^\s*#\s*if\s+(.+?)\s*$/) {
+ my $condition = $1;
+ if (!$sawSelfInclude) {
+ $entireFileCondition = $1;
+ next;
+ }
+ unless ($currentCondition eq "") { print STDERR "Nested #if in include section in $filename, line $.\n"; next FILE; }
+ $currentCondition = $condition;
+ next;
+ }
+ if ($line =~ /^\s*#\s*endif\s*$/) {
+ unless ($currentCondition ne "") { print STDERR "Extra #endif in include section in $filename, line $.\n"; next FILE; }
+ $currentCondition = "";
+ next;
+ }
+ }
+ if (!$sawConfig) {
+ $beforeIncludes .= $line;
+ } else {
+ $pastIncludes = 1 if $line !~ /^\s*$/;
+ if ($pastIncludes) {
+ unless ($currentCondition eq "") { print STDERR "Unterminated #if in include section in $filename, line $.\n"; next FILE; }
+ $afterIncludes .= $line;
+ }
+ }
+ }
+ }
+ close INPUT or die;
+
+ $includes{""}{"\"$headerToAdd\""} = 1;
+
+ $beforeIncludes =~ s/\n+$//;
+ $afterIncludes =~ s/^\n+//;
+
+ my $contents = $beforeIncludes;
+ $contents .= "\n\n#include \"config.h\"\n";
+ $contents .= "\n#if $entireFileCondition\n" if $entireFileCondition ne "";
+ $contents .= "#include \"$base.h\"\n\n";
+ for my $condition (sort keys %includes) {
+ $contents .= "#if $condition\n" unless $condition eq "";
+ $contents .= includesParagraph($includes{$condition});
+ $contents .= "#endif\n" unless $condition eq "";
+ $contents .= "\n";
+ }
+ $contents .= $afterIncludes;
+
+ unless (open OUTPUT, ">", $filename) { print STDERR "Could not open file for writing: $filename\n"; next FILE; };
+ print OUTPUT $contents;
+ close OUTPUT or die;
+}
+
+sub includesParagraph
+{
+ my ($includes) = @_;
+
+ my $paragraph = "";
+
+ for my $include (sort keys %{$includes}) {
+ $paragraph .= "#include $include\n";
+ }
+
+ return $paragraph;
+}
diff --git a/src/third_party/blink/Tools/Scripts/bencher b/src/third_party/blink/Tools/Scripts/bencher
new file mode 100755
index 0000000..9183262
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/bencher
@@ -0,0 +1,2101 @@
+#!/usr/bin/env ruby
+
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require 'rubygems'
+
+require 'getoptlong'
+require 'pathname'
+require 'tempfile'
+require 'socket'
+
+begin
+ require 'json'
+rescue LoadError => e
+ $stderr.puts "It does not appear that you have the 'json' package installed. Try running 'sudo gem install json'."
+ exit 1
+end
+
+# Configuration
+
+CONFIGURATION_FLNM = ENV["HOME"]+"/.bencher"
+
+unless FileTest.exist? CONFIGURATION_FLNM
+ $stderr.puts "Error: no configuration file at ~/.bencher."
+ $stderr.puts "This file should contain paths to SunSpider, V8, and Kraken, as well as a"
+ $stderr.puts "temporary directory that bencher can use for its remote mode. It should be"
+ $stderr.puts "formatted in JSON. For example:"
+ $stderr.puts "{"
+ $stderr.puts " \"sunSpiderPath\": \"/Volumes/Data/pizlo/OpenSource/PerformanceTests/SunSpider/tests/sunspider-1.0\","
+ $stderr.puts " \"v8Path\": \"/Volumes/Data/pizlo/OpenSource/PerformanceTests/SunSpider/tests/v8-v6\","
+ $stderr.puts " \"krakenPath\": \"/Volumes/Data/pizlo/kraken/kraken-e119421cb325/tests/kraken-1.1\","
+ $stderr.puts " \"tempPath\": \"/Volumes/Data/pizlo/bencher/temp\""
+ $stderr.puts "}"
+ exit 1
+end
+
+CONFIGURATION = JSON.parse(File::read(CONFIGURATION_FLNM))
+
+SUNSPIDER_PATH = CONFIGURATION["sunSpiderPath"]
+V8_PATH = CONFIGURATION["v8Path"]
+KRAKEN_PATH = CONFIGURATION["krakenPath"]
+TEMP_PATH = CONFIGURATION["tempPath"]
+BENCH_DATA_PATH = TEMP_PATH + "/benchdata"
+
+IBR_LOOKUP=[0.00615583, 0.0975, 0.22852, 0.341628, 0.430741, 0.500526, 0.555933,
+ 0.600706, 0.637513, 0.668244, 0.694254, 0.716537, 0.735827, 0.752684,
+ 0.767535, 0.780716, 0.792492, 0.803074, 0.812634, 0.821313, 0.829227,
+ 0.836472, 0.843129, 0.849267, 0.854943, 0.860209, 0.865107, 0.869674,
+ 0.873942, 0.877941, 0.881693, 0.885223, 0.888548, 0.891686, 0.894652,
+ 0.897461, 0.900124, 0.902652, 0.905056, 0.907343, 0.909524, 0.911604,
+ 0.91359, 0.91549, 0.917308, 0.919049, 0.920718, 0.92232, 0.923859, 0.925338,
+ 0.926761, 0.92813, 0.929449, 0.930721, 0.931948, 0.933132, 0.934275, 0.93538,
+ 0.936449, 0.937483, 0.938483, 0.939452, 0.940392, 0.941302, 0.942185,
+ 0.943042, 0.943874, 0.944682, 0.945467, 0.94623, 0.946972, 0.947694,
+ 0.948396, 0.94908, 0.949746, 0.950395, 0.951027, 0.951643, 0.952244,
+ 0.952831, 0.953403, 0.953961, 0.954506, 0.955039, 0.955559, 0.956067,
+ 0.956563, 0.957049, 0.957524, 0.957988, 0.958443, 0.958887, 0.959323,
+ 0.959749, 0.960166, 0.960575, 0.960975, 0.961368, 0.961752, 0.962129,
+ 0.962499, 0.962861, 0.963217, 0.963566, 0.963908, 0.964244, 0.964574,
+ 0.964897, 0.965215, 0.965527, 0.965834, 0.966135, 0.966431, 0.966722,
+ 0.967007, 0.967288, 0.967564, 0.967836, 0.968103, 0.968366, 0.968624,
+ 0.968878, 0.969128, 0.969374, 0.969617, 0.969855, 0.97009, 0.970321,
+ 0.970548, 0.970772, 0.970993, 0.97121, 0.971425, 0.971636, 0.971843,
+ 0.972048, 0.97225, 0.972449, 0.972645, 0.972839, 0.973029, 0.973217,
+ 0.973403, 0.973586, 0.973766, 0.973944, 0.97412, 0.974293, 0.974464,
+ 0.974632, 0.974799, 0.974963, 0.975125, 0.975285, 0.975443, 0.975599,
+ 0.975753, 0.975905, 0.976055, 0.976204, 0.97635, 0.976495, 0.976638,
+ 0.976779, 0.976918, 0.977056, 0.977193, 0.977327, 0.97746, 0.977592,
+ 0.977722, 0.97785, 0.977977, 0.978103, 0.978227, 0.978349, 0.978471,
+ 0.978591, 0.978709, 0.978827, 0.978943, 0.979058, 0.979171, 0.979283,
+ 0.979395, 0.979504, 0.979613, 0.979721, 0.979827, 0.979933, 0.980037,
+ 0.98014, 0.980242, 0.980343, 0.980443, 0.980543, 0.980641, 0.980738,
+ 0.980834, 0.980929, 0.981023, 0.981116, 0.981209, 0.9813, 0.981391, 0.981481,
+ 0.981569, 0.981657, 0.981745, 0.981831, 0.981916, 0.982001, 0.982085,
+ 0.982168, 0.982251, 0.982332, 0.982413, 0.982493, 0.982573, 0.982651,
+ 0.982729, 0.982807, 0.982883, 0.982959, 0.983034, 0.983109, 0.983183,
+ 0.983256, 0.983329, 0.983401, 0.983472, 0.983543, 0.983613, 0.983683,
+ 0.983752, 0.98382, 0.983888, 0.983956, 0.984022, 0.984089, 0.984154,
+ 0.984219, 0.984284, 0.984348, 0.984411, 0.984474, 0.984537, 0.984599,
+ 0.98466, 0.984721, 0.984782, 0.984842, 0.984902, 0.984961, 0.985019,
+ 0.985077, 0.985135, 0.985193, 0.985249, 0.985306, 0.985362, 0.985417,
+ 0.985472, 0.985527, 0.985582, 0.985635, 0.985689, 0.985742, 0.985795,
+ 0.985847, 0.985899, 0.985951, 0.986002, 0.986053, 0.986103, 0.986153,
+ 0.986203, 0.986252, 0.986301, 0.98635, 0.986398, 0.986446, 0.986494,
+ 0.986541, 0.986588, 0.986635, 0.986681, 0.986727, 0.986773, 0.986818,
+ 0.986863, 0.986908, 0.986953, 0.986997, 0.987041, 0.987084, 0.987128,
+ 0.987171, 0.987213, 0.987256, 0.987298, 0.98734, 0.987381, 0.987423,
+ 0.987464, 0.987504, 0.987545, 0.987585, 0.987625, 0.987665, 0.987704,
+ 0.987744, 0.987783, 0.987821, 0.98786, 0.987898, 0.987936, 0.987974,
+ 0.988011, 0.988049, 0.988086, 0.988123, 0.988159, 0.988196, 0.988232,
+ 0.988268, 0.988303, 0.988339, 0.988374, 0.988409, 0.988444, 0.988479,
+ 0.988513, 0.988547, 0.988582, 0.988615, 0.988649, 0.988682, 0.988716,
+ 0.988749, 0.988782, 0.988814, 0.988847, 0.988879, 0.988911, 0.988943,
+ 0.988975, 0.989006, 0.989038, 0.989069, 0.9891, 0.989131, 0.989161, 0.989192,
+ 0.989222, 0.989252, 0.989282, 0.989312, 0.989342, 0.989371, 0.989401,
+ 0.98943, 0.989459, 0.989488, 0.989516, 0.989545, 0.989573, 0.989602, 0.98963,
+ 0.989658, 0.989685, 0.989713, 0.98974, 0.989768, 0.989795, 0.989822,
+ 0.989849, 0.989876, 0.989902, 0.989929, 0.989955, 0.989981, 0.990007,
+ 0.990033, 0.990059, 0.990085, 0.99011, 0.990136, 0.990161, 0.990186,
+ 0.990211, 0.990236, 0.990261, 0.990285, 0.99031, 0.990334, 0.990358,
+ 0.990383, 0.990407, 0.99043, 0.990454, 0.990478, 0.990501, 0.990525,
+ 0.990548, 0.990571, 0.990594, 0.990617, 0.99064, 0.990663, 0.990686,
+ 0.990708, 0.990731, 0.990753, 0.990775, 0.990797, 0.990819, 0.990841,
+ 0.990863, 0.990885, 0.990906, 0.990928, 0.990949, 0.99097, 0.990991,
+ 0.991013, 0.991034, 0.991054, 0.991075, 0.991096, 0.991116, 0.991137,
+ 0.991157, 0.991178, 0.991198, 0.991218, 0.991238, 0.991258, 0.991278,
+ 0.991298, 0.991317, 0.991337, 0.991356, 0.991376, 0.991395, 0.991414,
+ 0.991433, 0.991452, 0.991471, 0.99149, 0.991509, 0.991528, 0.991547,
+ 0.991565, 0.991584, 0.991602, 0.99162, 0.991639, 0.991657, 0.991675,
+ 0.991693, 0.991711, 0.991729, 0.991746, 0.991764, 0.991782, 0.991799,
+ 0.991817, 0.991834, 0.991851, 0.991869, 0.991886, 0.991903, 0.99192,
+ 0.991937, 0.991954, 0.991971, 0.991987, 0.992004, 0.992021, 0.992037,
+ 0.992054, 0.99207, 0.992086, 0.992103, 0.992119, 0.992135, 0.992151,
+ 0.992167, 0.992183, 0.992199, 0.992215, 0.99223, 0.992246, 0.992262,
+ 0.992277, 0.992293, 0.992308, 0.992324, 0.992339, 0.992354, 0.992369,
+ 0.992384, 0.9924, 0.992415, 0.992429, 0.992444, 0.992459, 0.992474, 0.992489,
+ 0.992503, 0.992518, 0.992533, 0.992547, 0.992561, 0.992576, 0.99259,
+ 0.992604, 0.992619, 0.992633, 0.992647, 0.992661, 0.992675, 0.992689,
+ 0.992703, 0.992717, 0.99273, 0.992744, 0.992758, 0.992771, 0.992785,
+ 0.992798, 0.992812, 0.992825, 0.992839, 0.992852, 0.992865, 0.992879,
+ 0.992892, 0.992905, 0.992918, 0.992931, 0.992944, 0.992957, 0.99297,
+ 0.992983, 0.992995, 0.993008, 0.993021, 0.993034, 0.993046, 0.993059,
+ 0.993071, 0.993084, 0.993096, 0.993109, 0.993121, 0.993133, 0.993145,
+ 0.993158, 0.99317, 0.993182, 0.993194, 0.993206, 0.993218, 0.99323, 0.993242,
+ 0.993254, 0.993266, 0.993277, 0.993289, 0.993301, 0.993312, 0.993324,
+ 0.993336, 0.993347, 0.993359, 0.99337, 0.993382, 0.993393, 0.993404,
+ 0.993416, 0.993427, 0.993438, 0.993449, 0.99346, 0.993472, 0.993483,
+ 0.993494, 0.993505, 0.993516, 0.993527, 0.993538, 0.993548, 0.993559,
+ 0.99357, 0.993581, 0.993591, 0.993602, 0.993613, 0.993623, 0.993634,
+ 0.993644, 0.993655, 0.993665, 0.993676, 0.993686, 0.993697, 0.993707,
+ 0.993717, 0.993727, 0.993738, 0.993748, 0.993758, 0.993768, 0.993778,
+ 0.993788, 0.993798, 0.993808, 0.993818, 0.993828, 0.993838, 0.993848,
+ 0.993858, 0.993868, 0.993877, 0.993887, 0.993897, 0.993907, 0.993916,
+ 0.993926, 0.993935, 0.993945, 0.993954, 0.993964, 0.993973, 0.993983,
+ 0.993992, 0.994002, 0.994011, 0.99402, 0.99403, 0.994039, 0.994048, 0.994057,
+ 0.994067, 0.994076, 0.994085, 0.994094, 0.994103, 0.994112, 0.994121,
+ 0.99413, 0.994139, 0.994148, 0.994157, 0.994166, 0.994175, 0.994183,
+ 0.994192, 0.994201, 0.99421, 0.994218, 0.994227, 0.994236, 0.994244,
+ 0.994253, 0.994262, 0.99427, 0.994279, 0.994287, 0.994296, 0.994304,
+ 0.994313, 0.994321, 0.994329, 0.994338, 0.994346, 0.994354, 0.994363,
+ 0.994371, 0.994379, 0.994387, 0.994395, 0.994404, 0.994412, 0.99442,
+ 0.994428, 0.994436, 0.994444, 0.994452, 0.99446, 0.994468, 0.994476,
+ 0.994484, 0.994492, 0.9945, 0.994508, 0.994516, 0.994523, 0.994531, 0.994539,
+ 0.994547, 0.994554, 0.994562, 0.99457, 0.994577, 0.994585, 0.994593, 0.9946,
+ 0.994608, 0.994615, 0.994623, 0.994631, 0.994638, 0.994645, 0.994653,
+ 0.99466, 0.994668, 0.994675, 0.994683, 0.99469, 0.994697, 0.994705, 0.994712,
+ 0.994719, 0.994726, 0.994734, 0.994741, 0.994748, 0.994755, 0.994762,
+ 0.994769, 0.994777, 0.994784, 0.994791, 0.994798, 0.994805, 0.994812,
+ 0.994819, 0.994826, 0.994833, 0.99484, 0.994847, 0.994854, 0.99486, 0.994867,
+ 0.994874, 0.994881, 0.994888, 0.994895, 0.994901, 0.994908, 0.994915,
+ 0.994922, 0.994928, 0.994935, 0.994942, 0.994948, 0.994955, 0.994962,
+ 0.994968, 0.994975, 0.994981, 0.994988, 0.994994, 0.995001, 0.995007,
+ 0.995014, 0.99502, 0.995027, 0.995033, 0.99504, 0.995046, 0.995052, 0.995059,
+ 0.995065, 0.995071, 0.995078, 0.995084, 0.99509, 0.995097, 0.995103,
+ 0.995109, 0.995115, 0.995121, 0.995128, 0.995134, 0.99514, 0.995146,
+ 0.995152, 0.995158, 0.995164, 0.995171, 0.995177, 0.995183, 0.995189,
+ 0.995195, 0.995201, 0.995207, 0.995213, 0.995219, 0.995225, 0.995231,
+ 0.995236, 0.995242, 0.995248, 0.995254, 0.99526, 0.995266, 0.995272,
+ 0.995277, 0.995283, 0.995289, 0.995295, 0.995301, 0.995306, 0.995312,
+ 0.995318, 0.995323, 0.995329, 0.995335, 0.99534, 0.995346, 0.995352,
+ 0.995357, 0.995363, 0.995369, 0.995374, 0.99538, 0.995385, 0.995391,
+ 0.995396, 0.995402, 0.995407, 0.995413, 0.995418, 0.995424, 0.995429,
+ 0.995435, 0.99544, 0.995445, 0.995451, 0.995456, 0.995462, 0.995467,
+ 0.995472, 0.995478, 0.995483, 0.995488, 0.995493, 0.995499, 0.995504,
+ 0.995509, 0.995515, 0.99552, 0.995525, 0.99553, 0.995535, 0.995541, 0.995546,
+ 0.995551, 0.995556, 0.995561, 0.995566, 0.995571, 0.995577, 0.995582,
+ 0.995587, 0.995592, 0.995597, 0.995602, 0.995607, 0.995612, 0.995617,
+ 0.995622, 0.995627, 0.995632, 0.995637, 0.995642, 0.995647, 0.995652,
+ 0.995657, 0.995661, 0.995666, 0.995671, 0.995676, 0.995681, 0.995686,
+ 0.995691, 0.995695, 0.9957, 0.995705, 0.99571, 0.995715, 0.995719, 0.995724,
+ 0.995729, 0.995734, 0.995738, 0.995743, 0.995748, 0.995753, 0.995757,
+ 0.995762, 0.995767, 0.995771, 0.995776, 0.995781, 0.995785, 0.99579,
+ 0.995794, 0.995799, 0.995804, 0.995808, 0.995813, 0.995817, 0.995822,
+ 0.995826, 0.995831, 0.995835, 0.99584, 0.995844, 0.995849, 0.995853,
+ 0.995858, 0.995862, 0.995867, 0.995871, 0.995876, 0.99588, 0.995885,
+ 0.995889, 0.995893, 0.995898, 0.995902, 0.995906, 0.995911, 0.995915,
+ 0.99592, 0.995924, 0.995928, 0.995932, 0.995937, 0.995941, 0.995945, 0.99595,
+ 0.995954, 0.995958, 0.995962, 0.995967, 0.995971, 0.995975, 0.995979,
+ 0.995984, 0.995988, 0.995992, 0.995996, 0.996, 0.996004, 0.996009, 0.996013,
+ 0.996017, 0.996021, 0.996025, 0.996029, 0.996033, 0.996037, 0.996041,
+ 0.996046, 0.99605, 0.996054, 0.996058, 0.996062, 0.996066, 0.99607, 0.996074,
+ 0.996078, 0.996082, 0.996086, 0.99609, 0.996094, 0.996098, 0.996102,
+ 0.996106, 0.99611, 0.996114, 0.996117, 0.996121, 0.996125, 0.996129,
+ 0.996133, 0.996137, 0.996141, 0.996145, 0.996149, 0.996152, 0.996156,
+ 0.99616, 0.996164]
+
+# Run-time configuration parameters (can be set with command-line options)
+
+$rerun=1
+$inner=3
+$warmup=1
+$outer=4
+$includeSunSpider=true
+$includeV8=true
+$includeKraken=true
+$measureGC=false
+$benchmarkPattern=nil
+$verbosity=0
+$timeMode=:preciseTime
+$forceVMKind=nil
+$brief=false
+$silent=false
+$remoteHosts=[]
+$alsoLocal=false
+$sshOptions=[]
+$vms = []
+$needToCopyVMs = false
+$dontCopyVMs = false
+
+$prepare = true
+$run = true
+$analyze = []
+
+# Helpful functions and classes
+
+def smallUsage
+ puts "Use the --help option to get basic usage information."
+ exit 1
+end
+
+def usage
+ puts "bencher [options] <vm1> [<vm2> ...]"
+ puts
+ puts "Runs one or more JavaScript runtimes against SunSpider, V8, and/or Kraken"
+ puts "benchmarks, and reports detailed statistics. What makes bencher special is"
+ puts "that each benchmark/VM configuration is run in a single VM invocation, and"
+ puts "the invocations are run in random order. This minimizes systematics due to"
+ puts "one benchmark polluting the running time of another. The fine-grained"
+ puts "interleaving of VM invocations further minimizes systematics due to changes in"
+ puts "the performance or behavior of your machine."
+ puts
+ puts "Bencher is highly configurable. You can compare as many VMs as you like. You"
+ puts "can change the amount of warm-up iterations, number of iterations executed per"
+ puts "VM invocation, and the number of VM invocations per benchmark. By default,"
+    puts "SunSpider, V8, and Kraken are all run; but you can run any combination of these"
+ puts "suites."
+ puts
+ puts "The <vm> should be either a path to a JavaScript runtime executable (such as"
+ puts "jsc), or a string of the form <name>:<path>, where the <path> is the path to"
+ puts "the executable and <name> is the name that you would like to give the"
+    puts "configuration for the purpose of reporting. If no name is given, a generic name"
+ puts "of the form Conf#<n> will be ascribed to the configuration automatically."
+ puts
+ puts "Options:"
+ puts "--rerun <n> Set the number of iterations of the benchmark that"
+ puts " contribute to the measured run time. Default is #{$rerun}."
+ puts "--inner <n> Set the number of inner (per-runtime-invocation)"
+ puts " iterations. Default is #{$inner}."
+ puts "--outer <n> Set the number of runtime invocations for each benchmark."
+ puts " Default is #{$outer}."
+ puts "--warmup <n> Set the number of warm-up runs per invocation. Default"
+ puts " is #{$warmup}."
+ puts "--timing-mode Set the way that bencher measures time. Possible values"
+ puts " are 'preciseTime' and 'date'. Default is 'preciseTime'."
+ puts "--force-vm-kind Turn off auto-detection of VM kind, and assume that it is"
+ puts " the one specified. Valid arguments are 'jsc' or"
+ puts " 'DumpRenderTree'."
+ puts "--force-vm-copy Force VM builds to be copied to bencher's working directory."
+ puts " This may reduce pathologies resulting from path names."
+ puts "--dont-copy-vms Don't copy VMs even when doing a remote benchmarking run;"
+ puts " instead assume that they are already there."
+ puts "--v8-only Only run V8."
+ puts "--sunspider-only Only run SunSpider."
+ puts "--kraken-only Only run Kraken."
+ puts "--exclude-v8 Exclude V8 (only run SunSpider and Kraken)."
+ puts "--exclude-sunspider Exclude SunSpider (only run V8 and Kraken)."
+ puts "--exclude-kraken Exclude Kraken (only run SunSpider and V8)."
+ puts "--benchmarks Only run benchmarks matching the given regular expression."
+ puts "--measure-gc Turn off manual calls to gc(), so that GC time is measured."
+ puts " Works best with large values of --inner. You can also say"
+ puts " --measure-gc <conf>, which turns this on for one"
+ puts " configuration only."
+ puts "--verbose or -v Print more stuff."
+ puts "--brief Print only the final result for each VM."
+ puts "--silent Don't print progress. This might slightly reduce some"
+ puts " performance perturbation."
+    puts "--remote <sshhosts>  Perform performance measurements remotely, on the given"
+ puts " SSH host(s). Easiest way to use this is to specify the SSH"
+ puts " user@host string. However, you can also supply a comma-"
+ puts " separated list of SSH hosts. Alternatively, you can use this"
+ puts " option multiple times to specify multiple hosts. This"
+ puts " automatically copies the WebKit release builds of the VMs"
+ puts " you specified to all of the hosts."
+ puts "--ssh-options Pass additional options to SSH."
+ puts "--local Also do a local benchmark run even when doing --remote."
+ puts "--prepare-only Only prepare the bencher runscript (a shell script that"
+ puts " invokes the VMs to run benchmarks) but don't run it."
+ puts "--analyze Only read the output of the runscript but don't do anything"
+ puts " else. This requires passing the same arguments to bencher"
+ puts " that you passed when running --prepare-only."
+ puts "--help or -h Display this message."
+ puts
+ puts "Example:"
+ puts "bencher TipOfTree:/Volumes/Data/pizlo/OpenSource/WebKitBuild/Release/jsc MyChanges:/Volumes/Data/pizlo/secondary/OpenSource/WebKitBuild/Release/jsc"
+ exit 1
+end
+
+def fail(reason)
+ if reason.respond_to? :backtrace
+ puts "FAILED: #{reason}"
+ puts "Stack trace:"
+ puts reason.backtrace.join("\n")
+ else
+ puts "FAILED: #{reason}"
+ end
+ smallUsage
+end
+
+def quickFail(r1,r2)
+ $stderr.puts "#{$0}: #{r1}"
+ puts
+ fail(r2)
+end
+
+def intArg(argName,arg,min,max)
+ result=arg.to_i
+ unless result.to_s == arg
+ quickFail("Expected an integer value for #{argName}, but got #{arg}.",
+ "Invalid argument for command-line option")
+ end
+ if min and result<min
+ quickFail("Argument for #{argName} cannot be smaller than #{min}.",
+ "Invalid argument for command-line option")
+ end
+ if max and result>max
+ quickFail("Argument for #{argName} cannot be greater than #{max}.",
+ "Invalid argument for command-line option")
+ end
+ result
+end
+
+def computeMean(array)
+ sum=0.0
+ array.each {
+ | value |
+ sum += value
+ }
+ sum/array.length
+end
+
+def computeGeometricMean(array)
+ mult=1.0
+ array.each {
+ | value |
+ mult*=value
+ }
+ mult**(1.0/array.length)
+end
+
+def computeHarmonicMean(array)
+ 1.0 / computeMean(array.collect{ | value | 1.0 / value })
+end
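+
+# For example, for [1.0, 4.0] the arithmetic, geometric, and harmonic means
+# are 2.5, 2.0, and 1.6 respectively; each BenchmarkSuite below declares
+# which of these it prefers for reporting via its preferredMean argument.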
+
+def computeStdDev(array)
+ case array.length
+ when 0
+ 0.0/0.0
+ when 1
+ 0.0
+ else
+ begin
+ mean=computeMean(array)
+ sum=0.0
+ array.each {
+ | value |
+ sum += (value-mean)**2
+ }
+ Math.sqrt(sum/(array.length-1))
+ rescue
+ 0.0/0.0
+ end
+ end
+end
+
+class Array
+ def shuffle!
+ size.downto(1) { |n| push delete_at(rand(n)) }
+ self
+ end
+end
+
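+# Looks up the inverse of the regularized incomplete beta function for small
+# integer arguments; the IBR_LOOKUP values appear to be precomputed so that
+# Stats#confInt below can compute Student's t confidence intervals without
+# an external statistics library.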
+def inverseBetaRegularized(n)
+ IBR_LOOKUP[n-1]
+end
+
+def numToStr(num)
+ "%.4f"%(num.to_f)
+end
+
+class NoChange
+ attr_reader :amountFaster
+
+ def initialize(amountFaster)
+ @amountFaster = amountFaster
+ end
+
+ def shortForm
+ " "
+ end
+
+ def longForm
+ " might be #{numToStr(@amountFaster)}x faster"
+ end
+
+ def to_s
+ if @amountFaster < 1.01
+ ""
+ else
+ longForm
+ end
+ end
+end
+
+class Faster
+ attr_reader :amountFaster
+
+ def initialize(amountFaster)
+ @amountFaster = amountFaster
+ end
+
+ def shortForm
+ "^"
+ end
+
+ def longForm
+ "^ definitely #{numToStr(@amountFaster)}x faster"
+ end
+
+ def to_s
+ longForm
+ end
+end
+
+class Slower
+ attr_reader :amountSlower
+
+ def initialize(amountSlower)
+ @amountSlower = amountSlower
+ end
+
+ def shortForm
+ "!"
+ end
+
+ def longForm
+ "! definitely #{numToStr(@amountSlower)}x slower"
+ end
+
+ def to_s
+ longForm
+ end
+end
+
+class MayBeSlower
+ attr_reader :amountSlower
+
+ def initialize(amountSlower)
+ @amountSlower = amountSlower
+ end
+
+ def shortForm
+ "?"
+ end
+
+ def longForm
+ "? might be #{numToStr(@amountSlower)}x slower"
+ end
+
+ def to_s
+ if @amountSlower < 1.01
+ "?"
+ else
+ longForm
+ end
+ end
+end
+
+class Stats
+ def initialize
+ @array = []
+ end
+
+ def add(value)
+ if value.is_a? Stats
+ add(value.array)
+ elsif value.respond_to? :each
+ value.each {
+ | v |
+ add(v)
+ }
+ else
+ @array << value.to_f
+ end
+ end
+
+ def array
+ @array
+ end
+
+ def sum
+ result=0
+ @array.each {
+ | value |
+ result += value
+ }
+ result
+ end
+
+ def min
+ @array.min
+ end
+
+ def max
+ @array.max
+ end
+
+ def size
+ @array.length
+ end
+
+ def mean
+ computeMean(array)
+ end
+
+ def arithmeticMean
+ mean
+ end
+
+ def stdDev
+ computeStdDev(array)
+ end
+
+ def stdErr
+ stdDev/Math.sqrt(size)
+ end
+
+ # Computes a 95% Student's t distribution confidence interval
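+    # The expression below appears to be a closed form of
+    # stdErr * t(0.975, size - 1), with the inverse t quantile written in
+    # terms of the precomputed inverse regularized incomplete beta function.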
+ def confInt
+ if size < 2
+ 0.0/0.0
+ else
+ raise if size > 1000
+ Math.sqrt(size-1.0)*stdErr*Math.sqrt(-1.0+1.0/inverseBetaRegularized(size-1))
+ end
+ end
+
+ def lower
+ mean-confInt
+ end
+
+ def upper
+ mean+confInt
+ end
+
+ def geometricMean
+ computeGeometricMean(array)
+ end
+
+ def harmonicMean
+ computeHarmonicMean(array)
+ end
+
+ def compareTo(other)
+ if upper < other.lower
+ Faster.new(other.mean/mean)
+ elsif lower > other.upper
+ Slower.new(mean/other.mean)
+ elsif mean > other.mean
+ MayBeSlower.new(mean/other.mean)
+ else
+ NoChange.new(other.mean/mean)
+ end
+ end
+
+ def to_s
+ "size = #{size}, mean = #{mean}, stdDev = #{stdDev}, stdErr = #{stdErr}, confInt = #{confInt}"
+ end
+end
+
+def doublePuts(out1,out2,msg)
+ out1.puts "#{out2.path}: #{msg}" if $verbosity>=3
+ out2.puts msg
+end
+
+class Benchfile < File
+ @@counter = 0
+
+ attr_reader :filename, :basename
+
+ def initialize(name)
+ @basename, @filename = Benchfile.uniqueFilename(name)
+ super(@filename, "w")
+ end
+
+ def self.uniqueFilename(name)
+ if name.is_a? Array
+ basename = name[0] + @@counter.to_s + name[1]
+ else
+ basename = name + @@counter.to_s
+ end
+ filename = BENCH_DATA_PATH + "/" + basename
+ @@counter += 1
+ raise "Benchfile #{filename} already exists" if FileTest.exist?(filename)
+ [basename, filename]
+ end
+
+ def self.create(name)
+ file = Benchfile.new(name)
+ yield file
+ file.close
+ file.basename
+ end
+end
+
+$dataFiles={}
+def ensureFile(key, filename)
+ unless $dataFiles[key]
+ $dataFiles[key] = Benchfile.create(key) {
+ | outp |
+ doublePuts($stderr,outp,IO::read(filename))
+ }
+ end
+ $dataFiles[key]
+end
+
+def emitBenchRunCodeFile(name, plan, benchDataPath, benchPath)
+ case plan.vm.vmType
+ when :jsc
+ Benchfile.create("bencher") {
+ | file |
+ case $timeMode
+ when :preciseTime
+ doublePuts($stderr,file,"function __bencher_curTimeMS() {")
+ doublePuts($stderr,file," return preciseTime()*1000")
+ doublePuts($stderr,file,"}")
+ when :date
+ doublePuts($stderr,file,"function __bencher_curTimeMS() {")
+ doublePuts($stderr,file," return Date.now()")
+ doublePuts($stderr,file,"}")
+ else
+ raise
+ end
+
+ if benchDataPath
+ doublePuts($stderr,file,"load(#{benchDataPath.inspect});")
+ doublePuts($stderr,file,"gc();")
+ doublePuts($stderr,file,"for (var __bencher_index = 0; __bencher_index < #{$warmup+$inner}; ++__bencher_index) {")
+ doublePuts($stderr,file," before = __bencher_curTimeMS();")
+ $rerun.times {
+ doublePuts($stderr,file," load(#{benchPath.inspect});")
+ }
+ doublePuts($stderr,file," after = __bencher_curTimeMS();")
+ doublePuts($stderr,file," if (__bencher_index >= #{$warmup}) print(\"#{name}: #{plan.vm}: #{plan.iteration}: \" + (__bencher_index - #{$warmup}) + \": Time: \"+(after-before));");
+ doublePuts($stderr,file," gc();") unless plan.vm.shouldMeasureGC
+ doublePuts($stderr,file,"}")
+ else
+ doublePuts($stderr,file,"function __bencher_run(__bencher_what) {")
+ doublePuts($stderr,file," var __bencher_before = __bencher_curTimeMS();")
+ $rerun.times {
+ doublePuts($stderr,file," run(__bencher_what);")
+ }
+ doublePuts($stderr,file," var __bencher_after = __bencher_curTimeMS();")
+ doublePuts($stderr,file," return __bencher_after - __bencher_before;")
+ doublePuts($stderr,file,"}")
+ $warmup.times {
+ doublePuts($stderr,file,"__bencher_run(#{benchPath.inspect})")
+ doublePuts($stderr,file,"gc();") unless plan.vm.shouldMeasureGC
+ }
+ $inner.times {
+ | innerIndex |
+ doublePuts($stderr,file,"print(\"#{name}: #{plan.vm}: #{plan.iteration}: #{innerIndex}: Time: \"+__bencher_run(#{benchPath.inspect}));")
+ doublePuts($stderr,file,"gc();") unless plan.vm.shouldMeasureGC
+ }
+ end
+ }
+ when :dumpRenderTree
+ mainCode = Benchfile.create("bencher") {
+ | file |
+ doublePuts($stderr,file,"__bencher_count = 0;")
+ doublePuts($stderr,file,"function __bencher_doNext(result) {")
+ doublePuts($stderr,file," if (__bencher_count >= #{$warmup})")
+ doublePuts($stderr,file," debug(\"#{name}: #{plan.vm}: #{plan.iteration}: \" + (__bencher_count - #{$warmup}) + \": Time: \" + result);")
+ doublePuts($stderr,file," __bencher_count++;")
+ doublePuts($stderr,file," if (__bencher_count < #{$inner+$warmup})")
+ doublePuts($stderr,file," __bencher_runImpl(__bencher_doNext);")
+ doublePuts($stderr,file," else")
+ doublePuts($stderr,file," quit();")
+ doublePuts($stderr,file,"}")
+ doublePuts($stderr,file,"__bencher_runImpl(__bencher_doNext);")
+ }
+
+ cssCode = Benchfile.create("bencher-css") {
+ | file |
+ doublePuts($stderr,file,".pass {\n font-weight: bold;\n color: green;\n}\n.fail {\n font-weight: bold;\n color: red;\n}\n\#console {\n white-space: pre-wrap;\n font-family: monospace;\n}")
+ }
+
+ preCode = Benchfile.create("bencher-pre") {
+ | file |
+ doublePuts($stderr,file,"if (window.testRunner) {")
+ doublePuts($stderr,file," if (window.enablePixelTesting) {")
+ doublePuts($stderr,file," testRunner.dumpAsTextWithPixelResults();")
+ doublePuts($stderr,file," } else {")
+ doublePuts($stderr,file," testRunner.dumpAsText();")
+ doublePuts($stderr,file," }")
+ doublePuts($stderr,file,"}")
+ doublePuts($stderr,file,"")
+ doublePuts($stderr,file,"function debug(msg)")
+ doublePuts($stderr,file,"{")
+ doublePuts($stderr,file," var span = document.createElement(\"span\");")
+ doublePuts($stderr,file," document.getElementById(\"console\").appendChild(span); // insert it first so XHTML knows the namespace")
+ doublePuts($stderr,file," span.innerHTML = msg + '<br />';")
+ doublePuts($stderr,file,"}")
+ doublePuts($stderr,file,"")
+ doublePuts($stderr,file,"function quit() {")
+ doublePuts($stderr,file," testRunner.notifyDone();")
+ doublePuts($stderr,file,"}")
+ doublePuts($stderr,file,"")
+ doublePuts($stderr,file,"__bencher_continuation=null;")
+ doublePuts($stderr,file,"")
+ doublePuts($stderr,file,"function reportResult(result) {")
+ doublePuts($stderr,file," __bencher_continuation(result);")
+ doublePuts($stderr,file,"}")
+ doublePuts($stderr,file,"")
+ doublePuts($stderr,file,"function __bencher_runImpl(continuation) {")
+ doublePuts($stderr,file," function doit() {")
+ doublePuts($stderr,file," document.getElementById(\"frameparent\").innerHTML = \"\";")
+ doublePuts($stderr,file," document.getElementById(\"frameparent\").innerHTML = \"<iframe id='testframe'>\";")
+ doublePuts($stderr,file," var testFrame = document.getElementById(\"testframe\");")
+ doublePuts($stderr,file," testFrame.contentDocument.open();")
+ doublePuts($stderr,file," testFrame.contentDocument.write(\"<!DOCTYPE html>\\n<head></head><body><div id=\\\"console\\\"></div>\");")
+ if benchDataPath
+ doublePuts($stderr,file," testFrame.contentDocument.write(\"<script src=\\\"#{benchDataPath}\\\"></script>\");")
+ end
+ doublePuts($stderr,file," testFrame.contentDocument.write(\"<script type=\\\"text/javascript\\\">__bencher_before = Date.now();</script><script src=\\\"#{benchPath}\\\"></script><script type=\\\"text/javascript\\\">window.parent.reportResult(Date.now() - __bencher_before);</script></body></html>\");")
+ doublePuts($stderr,file," testFrame.contentDocument.close();")
+ doublePuts($stderr,file," }")
+ doublePuts($stderr,file," __bencher_continuation = continuation;")
+ doublePuts($stderr,file," window.setTimeout(doit, 10);")
+ doublePuts($stderr,file,"}")
+ }
+
+ Benchfile.create(["bencher-htmldoc",".html"]) {
+ | file |
+ doublePuts($stderr,file,"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML//EN\">\n<html><head><link rel=\"stylesheet\" href=\"#{cssCode}\"><script src=\"#{preCode}\"></script></head><body><div id=\"console\"></div><div id=\"frameparent\"></div><script src=\"#{mainCode}\"></script></body></html>")
+ }
+ else
+ raise
+ end
+end
+
+def emitBenchRunCode(name, plan, benchDataPath, benchPath)
+ plan.vm.emitRunCode(emitBenchRunCodeFile(name, plan, benchDataPath, benchPath))
+end
+
+def planForDescription(plans, benchFullname, vmName, iteration)
+ raise unless benchFullname =~ /\//
+ suiteName = $~.pre_match
+ benchName = $~.post_match
+ result = plans.select{|v| v.suite.name == suiteName and v.benchmark.name == benchName and v.vm.name == vmName and v.iteration == iteration}
+ raise unless result.size == 1
+ result[0]
+end
+
+class ParsedResult
+ attr_reader :plan, :innerIndex, :time
+
+ def initialize(plan, innerIndex, time)
+ @plan = plan
+ @innerIndex = innerIndex
+ @time = time
+
+ raise unless @plan.is_a? BenchPlan
+ raise unless @innerIndex.is_a? Integer
+ raise unless @time.is_a? Numeric
+ end
+
+ def benchmark
+ plan.benchmark
+ end
+
+ def suite
+ plan.suite
+ end
+
+ def vm
+ plan.vm
+ end
+
+ def outerIndex
+ plan.iteration
+ end
+
+ def self.parse(plans, string)
+ if string =~ /([a-zA-Z0-9\/-]+): ([a-zA-Z0-9_# ]+): ([0-9]+): ([0-9]+): Time: /
+ benchFullname = $1
+ vmName = $2
+ outerIndex = $3.to_i
+ innerIndex = $4.to_i
+ time = $~.post_match.to_f
+ ParsedResult.new(planForDescription(plans, benchFullname, vmName, outerIndex), innerIndex, time)
+ else
+ nil
+ end
+ end
+end
+
+class VM
+ def initialize(origPath, name, nameKind, svnRevision)
+ @origPath = origPath.to_s
+ @path = origPath.to_s
+ @name = name
+ @nameKind = nameKind
+
+ if $forceVMKind
+ @vmType = $forceVMKind
+ else
+ if @origPath =~ /DumpRenderTree$/
+ @vmType = :dumpRenderTree
+ else
+ @vmType = :jsc
+ end
+ end
+
+ @svnRevision = svnRevision
+
+ # Try to detect information about the VM.
+ if path =~ /\/WebKitBuild\/Release\/([a-zA-Z]+)$/
+ @checkoutPath = $~.pre_match
+ # FIXME: Use some variant of this:
+ # <bdash> def retrieve_revision
+ # <bdash> `perl -I#{@path}/Tools/Scripts -MVCSUtils -e 'print svnRevisionForDirectory("#{@path}");'`.to_i
+ # <bdash> end
+ unless @svnRevision
+ begin
+ Dir.chdir(@checkoutPath) {
+ $stderr.puts ">> cd #{@checkoutPath} && svn info" if $verbosity>=2
+ IO.popen("svn info", "r") {
+ | inp |
+ inp.each_line {
+ | line |
+ if line =~ /Revision: ([0-9]+)/
+ @svnRevision = $1
+ end
+ }
+ }
+ }
+ unless @svnRevision
+ $stderr.puts "Warning: running svn info for #{name} silently failed."
+ end
+ rescue => e
+ # Failed to detect svn revision.
+ $stderr.puts "Warning: could not get svn revision information for #{name}: #{e}"
+ end
+ end
+ else
+ $stderr.puts "Warning: could not identify checkout location for #{name}"
+ end
+
+ if @path =~ /\/Release\/([a-zA-Z]+)$/
+ @libPath, @relativeBinPath = $~.pre_match+"/Release", "./#{$1}"
+ elsif @path =~ /\/Contents\/Resources\/([a-zA-Z]+)$/
+ @libPath = $~.pre_match
+ elsif @path =~ /\/JavaScriptCore.framework\/Resources\/([a-zA-Z]+)$/
+ @libPath, @relativeBinPath = $~.pre_match, $&[1..-1]
+ end
+ end
+
+ def canCopyIntoBenchPath
+ if @libPath and @relativeBinPath
+ true
+ else
+ false
+ end
+ end
+
+ def copyIntoBenchPath
+ raise unless canCopyIntoBenchPath
+ basename, filename = Benchfile.uniqueFilename("vm")
+ raise unless Dir.mkdir(filename)
+ cmd = "cp -a #{@libPath.inspect}/* #{filename.inspect}"
+ $stderr.puts ">> #{cmd}" if $verbosity>=2
+ raise unless system(cmd)
+ @path = "#{basename}/#{@relativeBinPath}"
+ @libPath = basename
+ end
+
+ def to_s
+ @name
+ end
+
+ def name
+ @name
+ end
+
+ def shouldMeasureGC
+ $measureGC == true or ($measureGC == name)
+ end
+
+ def origPath
+ @origPath
+ end
+
+ def path
+ @path
+ end
+
+ def nameKind
+ @nameKind
+ end
+
+ def vmType
+ @vmType
+ end
+
+ def checkoutPath
+ @checkoutPath
+ end
+
+ def svnRevision
+ @svnRevision
+ end
+
+ def printFunction
+ case @vmType
+ when :jsc
+ "print"
+ when :dumpRenderTree
+ "debug"
+ else
+ raise @vmType
+ end
+ end
+
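+  # Append shell commands to the generated run script: point dyld at this
+  # VM's library directory (an empty string when none is known) and then
+  # invoke the VM binary on the given file.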
+ def emitRunCode(fileToRun)
+ myLibPath = @libPath
+ myLibPath = "" unless myLibPath
+ $script.puts "export DYLD_LIBRARY_PATH=#{myLibPath.to_s.inspect}"
+ $script.puts "export DYLD_FRAMEWORK_PATH=#{myLibPath.to_s.inspect}"
+ $script.puts "#{path} #{fileToRun}"
+ end
+end
+
+class StatsAccumulator
+ def initialize
+ @stats = []
+ ($outer*$inner).times {
+ @stats << Stats.new
+ }
+ end
+
+ def statsForIteration(outerIteration, innerIteration)
+ @stats[outerIteration*$inner + innerIteration]
+ end
+
+ def stats
+ result = Stats.new
+ @stats.each {
+ | stat |
+ result.add(yield stat)
+ }
+ result
+ end
+
+ def geometricMeanStats
+ stats {
+ | stat |
+ stat.geometricMean
+ }
+ end
+
+ def arithmeticMeanStats
+ stats {
+ | stat |
+ stat.arithmeticMean
+ }
+ end
+end
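+
+# StatsAccumulator flattens the $outer x $inner iteration grid into one
+# array with a Stats object per (outer, inner) pair. A minimal sketch,
+# assuming the illustrative values $outer = 2 and $inner = 3:
+#
+#   acc = StatsAccumulator.new     # holds 2 * 3 == 6 Stats objects
+#   acc.statsForIteration(1, 2)    # => @stats[1 * 3 + 2], i.e. @stats[5]
+#   acc.geometricMeanStats         # one geomean sample per (outer, inner) pair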
+
+module Benchmark
+ attr_accessor :benchmarkSuite
+ attr_reader :name
+
+ def fullname
+ benchmarkSuite.name + "/" + name
+ end
+
+ def to_s
+ fullname
+ end
+end
+
+class SunSpiderBenchmark
+ include Benchmark
+
+ def initialize(name)
+ @name = name
+ end
+
+ def emitRunCode(plan)
+ emitBenchRunCode(fullname, plan, nil, ensureFile("SunSpider-#{@name}", "#{SUNSPIDER_PATH}/#{@name}.js"))
+ end
+end
+
+class V8Benchmark
+ include Benchmark
+
+ def initialize(name)
+ @name = name
+ end
+
+ def emitRunCode(plan)
+ emitBenchRunCode(fullname, plan, nil, ensureFile("V8-#{@name}", "#{V8_PATH}/v8-#{@name}.js"))
+ end
+end
+
+class KrakenBenchmark
+ include Benchmark
+
+ def initialize(name)
+ @name = name
+ end
+
+ def emitRunCode(plan)
+ emitBenchRunCode(fullname, plan, ensureFile("KrakenData-#{@name}", "#{KRAKEN_PATH}/#{@name}-data.js"), ensureFile("Kraken-#{@name}", "#{KRAKEN_PATH}/#{@name}.js"))
+ end
+end
+
+class BenchmarkSuite
+ def initialize(name, path, preferredMean)
+ @name = name
+ @path = path
+ @preferredMean = preferredMean
+ @benchmarks = []
+ end
+
+ def name
+ @name
+ end
+
+ def to_s
+ @name
+ end
+
+ def path
+ @path
+ end
+
+ def add(benchmark)
+ if not $benchmarkPattern or "#{@name}/#{benchmark.name}" =~ $benchmarkPattern
+ benchmark.benchmarkSuite = self
+ @benchmarks << benchmark
+ end
+ end
+
+ def benchmarks
+ @benchmarks
+ end
+
+ def benchmarkForName(name)
+ result = @benchmarks.select{|v| v.name == name}
+ raise unless result.length == 1
+ result[0]
+ end
+
+ def empty?
+ @benchmarks.empty?
+ end
+
+ def retain_if
+ @benchmarks.delete_if {
+ | benchmark |
+ not yield benchmark
+ }
+ end
+
+ def preferredMean
+ @preferredMean
+ end
+
+ def computeMean(stat)
+ stat.send @preferredMean
+ end
+end
+
+class BenchRunPlan
+ def initialize(benchmark, vm, iteration)
+ @benchmark = benchmark
+ @vm = vm
+ @iteration = iteration
+ end
+
+ def benchmark
+ @benchmark
+ end
+
+ def suite
+ @benchmark.benchmarkSuite
+ end
+
+ def vm
+ @vm
+ end
+
+ def iteration
+ @iteration
+ end
+
+ def emitRunCode
+ @benchmark.emitRunCode(self)
+ end
+end
+
+class BenchmarkOnVM
+ def initialize(benchmark, suiteOnVM)
+ @benchmark = benchmark
+ @suiteOnVM = suiteOnVM
+ @stats = Stats.new
+ end
+
+ def to_s
+ "#{@benchmark} on #{@suiteOnVM.vm}"
+ end
+
+ def benchmark
+ @benchmark
+ end
+
+ def vm
+ @suiteOnVM.vm
+ end
+
+ def vmStats
+ @suiteOnVM.vmStats
+ end
+
+ def suite
+ @benchmark.benchmarkSuite
+ end
+
+ def suiteOnVM
+ @suiteOnVM
+ end
+
+ def stats
+ @stats
+ end
+
+ def parseResult(result)
+ raise "VM mismatch; I've got #{vm} and they've got #{result.vm}" unless result.vm == vm
+ raise unless result.benchmark == @benchmark
+ @stats.add(result.time)
+ end
+end
+
+class SuiteOnVM < StatsAccumulator
+ def initialize(vm, vmStats, suite)
+ super()
+ @vm = vm
+ @vmStats = vmStats
+ @suite = suite
+
+ raise unless @vm.is_a? VM
+ raise unless @vmStats.is_a? StatsAccumulator
+ raise unless @suite.is_a? BenchmarkSuite
+ end
+
+ def to_s
+ "#{@suite} on #{@vm}"
+ end
+
+ def suite
+ @suite
+ end
+
+ def vm
+ @vm
+ end
+
+ def vmStats
+ raise unless @vmStats
+ @vmStats
+ end
+end
+
+class BenchPlan
+ def initialize(benchmarkOnVM, iteration)
+ @benchmarkOnVM = benchmarkOnVM
+ @iteration = iteration
+ end
+
+ def to_s
+ "#{@benchmarkOnVM} \##{@iteration+1}"
+ end
+
+ def benchmarkOnVM
+ @benchmarkOnVM
+ end
+
+ def benchmark
+ @benchmarkOnVM.benchmark
+ end
+
+ def suite
+ @benchmarkOnVM.suite
+ end
+
+ def vm
+ @benchmarkOnVM.vm
+ end
+
+ def iteration
+ @iteration
+ end
+
+ def parseResult(result)
+ raise unless result.plan == self
+ @benchmarkOnVM.parseResult(result)
+ @benchmarkOnVM.vmStats.statsForIteration(@iteration, result.innerIndex).add(result.time)
+ @benchmarkOnVM.suiteOnVM.statsForIteration(@iteration, result.innerIndex).add(result.time)
+ end
+end
+
+def lpad(str,chars)
+ if str.length>chars
+ str
+ else
+ "%#{chars}s"%(str)
+ end
+end
+
+def rpad(str,chars)
+ while str.length<chars
+ str+=" "
+ end
+ str
+end
+
+def center(str,chars)
+ while str.length<chars
+ str+=" "
+ if str.length<chars
+ str=" "+str
+ end
+ end
+ str
+end
+
+def statsToStr(stats)
+ if $inner*$outer == 1
+ string = numToStr(stats.mean)
+ raise unless string =~ /\./
+ left = $~.pre_match
+ right = $~.post_match
+ lpad(left,12)+"."+rpad(right,9)
+ else
+ lpad(numToStr(stats.mean),11)+"+-"+rpad(numToStr(stats.confInt),9)
+ end
+end
+
+def plural(num)
+ if num == 1
+ ""
+ else
+ "s"
+ end
+end
+
+def wrap(str, columns)
+ array = str.split
+ result = ""
+ curLine = array.shift
+ array.each {
+ | curStr |
+ if (curLine + " " + curStr).size > columns
+ result += curLine + "\n"
+ curLine = curStr
+ else
+ curLine += " " + curStr
+ end
+ }
+ result + curLine + "\n"
+end
+
+def runAndGetResults
+ results = nil
+ Dir.chdir(BENCH_DATA_PATH) {
+ IO.popen("sh ./runscript", "r") {
+ | inp |
+ results = inp.read
+ }
+ raise "Script did not complete correctly: #{$?}" unless $?.success?
+ }
+ raise unless results
+ results
+end
+
+def parseAndDisplayResults(results)
+ vmStatses = []
+ $vms.each {
+ vmStatses << StatsAccumulator.new
+ }
+
+ suitesOnVMs = []
+ suitesOnVMsForSuite = {}
+ $suites.each {
+ | suite |
+ suitesOnVMsForSuite[suite] = []
+ }
+ suitesOnVMsForVM = {}
+ $vms.each {
+ | vm |
+ suitesOnVMsForVM[vm] = []
+ }
+
+ benchmarksOnVMs = []
+ benchmarksOnVMsForBenchmark = {}
+ $benchmarks.each {
+ | benchmark |
+ benchmarksOnVMsForBenchmark[benchmark] = []
+ }
+
+ $vms.each_with_index {
+ | vm, vmIndex |
+ vmStats = vmStatses[vmIndex]
+ $suites.each {
+ | suite |
+ suiteOnVM = SuiteOnVM.new(vm, vmStats, suite)
+ suitesOnVMs << suiteOnVM
+ suitesOnVMsForSuite[suite] << suiteOnVM
+ suitesOnVMsForVM[vm] << suiteOnVM
+ suite.benchmarks.each {
+ | benchmark |
+ benchmarkOnVM = BenchmarkOnVM.new(benchmark, suiteOnVM)
+ benchmarksOnVMs << benchmarkOnVM
+ benchmarksOnVMsForBenchmark[benchmark] << benchmarkOnVM
+ }
+ }
+ }
+
+ plans = []
+ benchmarksOnVMs.each {
+ | benchmarkOnVM |
+ $outer.times {
+ | iteration |
+ plans << BenchPlan.new(benchmarkOnVM, iteration)
+ }
+ }
+
+ hostname = nil
+ hwmodel = nil
+ results.each_line {
+ | line |
+ line.chomp!
+ if line =~ /HOSTNAME:([^.]+)/
+ hostname = $1
+ elsif line =~ /HARDWARE:hw\.model: /
+ hwmodel = $~.post_match.chomp
+ else
+ result = ParsedResult.parse(plans, line.chomp)
+ if result
+ result.plan.parseResult(result)
+ end
+ end
+ }
+
+ # Compute the geomean of the preferred means of results on a SuiteOnVM
+ overallResults = []
+ $vms.each {
+ | vm |
+ result = Stats.new
+ $outer.times {
+ | outerIndex |
+ $inner.times {
+ | innerIndex |
+ curResult = Stats.new
+ suitesOnVMsForVM[vm].each {
+ | suiteOnVM |
+ # For a given iteration, suite, and VM, compute the suite's preferred mean
+ # over the data collected for all benchmarks in that suite. We'll have one
+ # sample per benchmark. For example on V8 this will be the geomean of 1
+ # sample for crypto, 1 sample for deltablue, and so on, and 1 sample for
+ # splay.
+ curResult.add(suiteOnVM.suite.computeMean(suiteOnVM.statsForIteration(outerIndex, innerIndex)))
+ }
+
+ # curResult now holds 1 sample for each of the means computed in the above
+ # loop. Compute the geomean over this, and store it.
+ result.add(curResult.geometricMean)
+ }
+ }
+
+ # $overallResults will have a Stats for each VM. That Stats object will hold
+ # $inner*$outer geomeans, allowing us to compute the arithmetic mean and
+ # confidence interval of the geomeans of preferred means. Convoluted, but
+ # useful and probably sound.
+ overallResults << result
+ }
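+
+  # A minimal sketch of the computation above, with illustrative numbers and
+  # $outer = $inner = 1: if a VM scored a SunSpider <arithmetic> mean of 250.0
+  # and a V8 <geometric> mean of 100.0 for its single iteration, then
+  # geomean(250.0, 100.0) == sqrt(250.0 * 100.0) ~= 158.1 is the one sample
+  # stored in that VM's entry of overallResults.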
+
+ if $verbosity >= 2
+ benchmarksOnVMs.each {
+ | benchmarkOnVM |
+ $stderr.puts "#{benchmarkOnVM}: #{benchmarkOnVM.stats}"
+ }
+
+ $vms.each_with_index {
+ | vm, vmIndex |
+ vmStats = vmStatses[vmIndex]
+ $stderr.puts "#{vm} (arithmeticMean): #{vmStats.arithmeticMeanStats}"
+ $stderr.puts "#{vm} (geometricMean): #{vmStats.geometricMeanStats}"
+ }
+ end
+
+ reportName =
+ (if ($vms.collect {
+ | vm |
+ vm.nameKind
+ }.index :auto)
+ ""
+ else
+ $vms.collect {
+ | vm |
+ vm.to_s
+ }.join("_") + "_"
+ end) +
+ ($suites.collect {
+ | suite |
+ suite.to_s
+ }.join("")) + "_" +
+ (if hostname
+ hostname + "_"
+ else
+ ""
+ end)+
+ (begin
+ time = Time.now
+ "%04d%02d%02d_%02d%02d" %
+ [ time.year, time.month, time.day,
+ time.hour, time.min ]
+ end) +
+ "_benchReport.txt"
+
+ unless $brief
+ puts "Generating benchmark report at #{reportName}"
+ end
+
+ outp = $stdout
+ begin
+ outp = File.open(reportName,"w")
+ rescue => e
+ $stderr.puts "Error: could not save report to #{reportName}: #{e}"
+ $stderr.puts
+ end
+
+ def createVMsString
+ result = ""
+ result += " " if $suites.size > 1
+ result += rpad("", $benchpad)
+ result += " "
+ $vms.size.times {
+ | index |
+ if index != 0
+ result += " "+NoChange.new(0).shortForm
+ end
+ result += lpad(center($vms[index].name, 9+9+2), 11+9+2)
+ }
+ result += " "
+ if $vms.size >= 3
+ result += center("#{$vms[-1].name} v. #{$vms[0].name}",26)
+ elsif $vms.size >= 2
+ result += " "*26
+ end
+ result
+ end
+
+ columns = [createVMsString.size, 78].max
+
+ outp.print "Benchmark report for "
+ if $suites.size == 1
+ outp.print $suites[0].to_s
+ elsif $suites.size == 2
+ outp.print "#{$suites[0]} and #{$suites[1]}"
+ else
+ outp.print "#{$suites[0..-2].join(', ')}, and #{$suites[-1]}"
+ end
+ if hostname
+ outp.print " on #{hostname}"
+ end
+ if hwmodel
+ outp.print " (#{hwmodel})"
+ end
+ outp.puts "."
+ outp.puts
+
+ # This looks stupid; revisit later.
+ if false
+ $suites.each {
+ | suite |
+ outp.puts "#{suite} at #{suite.path}"
+ }
+
+ outp.puts
+ end
+
+ outp.puts "VMs tested:"
+ $vms.each {
+ | vm |
+ outp.print "\"#{vm.name}\" at #{vm.origPath}"
+ if vm.svnRevision
+ outp.print " (r#{vm.svnRevision})"
+ end
+ outp.puts
+ }
+
+ outp.puts
+
+ outp.puts wrap("Collected #{$outer*$inner} sample#{plural($outer*$inner)} per benchmark/VM, "+
+ "with #{$outer} VM invocation#{plural($outer)} per benchmark."+
+ (if $rerun > 1 then (" Ran #{$rerun} benchmark iterations, and measured the "+
+ "total time of those iterations, for each sample.")
+ else "" end)+
+ (if $measureGC == true then (" No manual garbage collection invocations were "+
+ "emitted.")
+ elsif $measureGC then (" Emitted a call to gc() between sample measurements for "+
+ "all VMs except #{$measureGC}.")
+ else (" Emitted a call to gc() between sample measurements.") end)+
+ (if $warmup == 0 then (" Did not include any warm-up iterations; measurements "+
+ "began with the very first iteration.")
+ else (" Used #{$warmup*$rerun} benchmark iteration#{plural($warmup*$rerun)} per VM "+
+ "invocation for warm-up.") end)+
+ (case $timeMode
+ when :preciseTime then (" Used the jsc-specific preciseTime() function to get "+
+ "microsecond-level timing.")
+ when :date then (" Used the portable Date.now() method to get millisecond-"+
+ "level timing.")
+ else raise end)+
+ " Reporting benchmark execution times with 95% confidence "+
+ "intervals in milliseconds.",
+ columns)
+
+ outp.puts
+
+ def printVMs(outp)
+ outp.puts createVMsString
+ end
+
+ def summaryStats(outp, accumulators, name, &proc)
+ outp.print " " if $suites.size > 1
+ outp.print rpad(name, $benchpad)
+ outp.print " "
+ accumulators.size.times {
+ | index |
+ if index != 0
+ outp.print " "+accumulators[index].stats(&proc).compareTo(accumulators[index-1].stats(&proc)).shortForm
+ end
+ outp.print statsToStr(accumulators[index].stats(&proc))
+ }
+ if accumulators.size>=2
+ outp.print(" "+accumulators[-1].stats(&proc).compareTo(accumulators[0].stats(&proc)).longForm)
+ end
+ outp.puts
+ end
+
+ def meanName(currentMean, preferredMean)
+ result = "<#{currentMean}>"
+ if "#{currentMean}Mean" == preferredMean.to_s
+ result += " *"
+ end
+ result
+ end
+
+ def allSummaryStats(outp, accumulators, preferredMean)
+ summaryStats(outp, accumulators, meanName("arithmetic", preferredMean)) {
+ | stat |
+ stat.arithmeticMean
+ }
+
+ summaryStats(outp, accumulators, meanName("geometric", preferredMean)) {
+ | stat |
+ stat.geometricMean
+ }
+
+ summaryStats(outp, accumulators, meanName("harmonic", preferredMean)) {
+ | stat |
+ stat.harmonicMean
+ }
+ end
+
+ $suites.each {
+ | suite |
+ printVMs(outp)
+ if $suites.size > 1
+ outp.puts "#{suite.name}:"
+ else
+ outp.puts
+ end
+ suite.benchmarks.each {
+ | benchmark |
+ outp.print " " if $suites.size > 1
+ outp.print rpad(benchmark.name, $benchpad)
+ outp.print " "
+ myConfigs = benchmarksOnVMsForBenchmark[benchmark]
+ myConfigs.size.times {
+ | index |
+ if index != 0
+ outp.print " "+myConfigs[index].stats.compareTo(myConfigs[index-1].stats).shortForm
+ end
+ outp.print statsToStr(myConfigs[index].stats)
+ }
+ if $vms.size>=2
+ outp.print(" "+myConfigs[-1].stats.compareTo(myConfigs[0].stats).to_s)
+ end
+ outp.puts
+ }
+ outp.puts
+ allSummaryStats(outp, suitesOnVMsForSuite[suite], suite.preferredMean)
+ outp.puts if $suites.size > 1
+ }
+
+ if $suites.size > 1
+ printVMs(outp)
+ outp.puts "All benchmarks:"
+ allSummaryStats(outp, vmStatses, nil)
+
+ outp.puts
+ printVMs(outp)
+ outp.puts "Geomean of preferred means:"
+ outp.print " "
+ outp.print rpad("<scaled-result>", $benchpad)
+ outp.print " "
+ $vms.size.times {
+ | index |
+ if index != 0
+ outp.print " "+overallResults[index].compareTo(overallResults[index-1]).shortForm
+ end
+ outp.print statsToStr(overallResults[index])
+ }
+ if overallResults.size>=2
+ outp.print(" "+overallResults[-1].compareTo(overallResults[0]).longForm)
+ end
+ outp.puts
+ end
+ outp.puts
+
+ if outp != $stdout
+ outp.close
+ end
+
+ if outp != $stdout and not $brief
+ puts
+ File.open(reportName) {
+ | inp |
+ puts inp.read
+ }
+ end
+
+ if $brief
+ puts(overallResults.collect{|stats| stats.mean}.join("\t"))
+ puts(overallResults.collect{|stats| stats.confInt}.join("\t"))
+ end
+
+
+end
+
+begin
+ $sawBenchOptions = false
+
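+  # All three suites are enabled by default; the first explicit --sunspider,
+  # --v8, or --kraken flag switches selection to opt-in by clearing the
+  # defaults exactly once.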
+ def resetBenchOptionsIfNecessary
+ unless $sawBenchOptions
+ $includeSunSpider = false
+ $includeV8 = false
+ $includeKraken = false
+ $sawBenchOptions = true
+ end
+ end
+
+ GetoptLong.new(['--rerun', GetoptLong::REQUIRED_ARGUMENT],
+ ['--inner', GetoptLong::REQUIRED_ARGUMENT],
+ ['--outer', GetoptLong::REQUIRED_ARGUMENT],
+ ['--warmup', GetoptLong::REQUIRED_ARGUMENT],
+ ['--timing-mode', GetoptLong::REQUIRED_ARGUMENT],
+ ['--sunspider-only', GetoptLong::NO_ARGUMENT],
+ ['--v8-only', GetoptLong::NO_ARGUMENT],
+ ['--kraken-only', GetoptLong::NO_ARGUMENT],
+ ['--exclude-sunspider', GetoptLong::NO_ARGUMENT],
+ ['--exclude-v8', GetoptLong::NO_ARGUMENT],
+ ['--exclude-kraken', GetoptLong::NO_ARGUMENT],
+ ['--sunspider', GetoptLong::NO_ARGUMENT],
+ ['--v8', GetoptLong::NO_ARGUMENT],
+ ['--kraken', GetoptLong::NO_ARGUMENT],
+ ['--benchmarks', GetoptLong::REQUIRED_ARGUMENT],
+ ['--measure-gc', GetoptLong::OPTIONAL_ARGUMENT],
+ ['--force-vm-kind', GetoptLong::REQUIRED_ARGUMENT],
+ ['--force-vm-copy', GetoptLong::NO_ARGUMENT],
+ ['--dont-copy-vms', GetoptLong::NO_ARGUMENT],
+ ['--verbose', '-v', GetoptLong::NO_ARGUMENT],
+ ['--brief', GetoptLong::NO_ARGUMENT],
+ ['--silent', GetoptLong::NO_ARGUMENT],
+ ['--remote', GetoptLong::REQUIRED_ARGUMENT],
+ ['--local', GetoptLong::NO_ARGUMENT],
+ ['--ssh-options', GetoptLong::REQUIRED_ARGUMENT],
+ ['--slave', GetoptLong::NO_ARGUMENT],
+ ['--prepare-only', GetoptLong::NO_ARGUMENT],
+ ['--analyze', GetoptLong::REQUIRED_ARGUMENT],
+ ['--vms', GetoptLong::REQUIRED_ARGUMENT],
+ ['--help', '-h', GetoptLong::NO_ARGUMENT]).each {
+ | opt, arg |
+ case opt
+ when '--rerun'
+ $rerun = intArg(opt,arg,1,nil)
+ when '--inner'
+ $inner = intArg(opt,arg,1,nil)
+ when '--outer'
+ $outer = intArg(opt,arg,1,nil)
+ when '--warmup'
+ $warmup = intArg(opt,arg,0,nil)
+ when '--timing-mode'
+ if arg.upcase == "PRECISETIME"
+ $timeMode = :preciseTime
+ elsif arg.upcase == "DATE"
+ $timeMode = :date
+ elsif arg.upcase == "AUTO"
+ $timeMode = :auto
+ else
+        quickFail("Expected either 'preciseTime', 'date', or 'auto' for --timing-mode, but got '#{arg}'.",
+ "Invalid argument for command-line option")
+ end
+ when '--force-vm-kind'
+ if arg.upcase == "JSC"
+ $forceVMKind = :jsc
+ elsif arg.upcase == "DUMPRENDERTREE"
+ $forceVMKind = :dumpRenderTree
+ elsif arg.upcase == "AUTO"
+ $forceVMKind = nil
+ else
+        quickFail("Expected 'jsc', 'DumpRenderTree', or 'auto' for --force-vm-kind, but got '#{arg}'.",
+ "Invalid argument for command-line option")
+ end
+ when '--force-vm-copy'
+ $needToCopyVMs = true
+ when '--dont-copy-vms'
+ $dontCopyVMs = true
+ when '--sunspider-only'
+ $includeV8 = false
+ $includeKraken = false
+ when '--v8-only'
+ $includeSunSpider = false
+ $includeKraken = false
+ when '--kraken-only'
+ $includeSunSpider = false
+ $includeV8 = false
+ when '--exclude-sunspider'
+ $includeSunSpider = false
+ when '--exclude-v8'
+ $includeV8 = false
+ when '--exclude-kraken'
+ $includeKraken = false
+ when '--sunspider'
+ resetBenchOptionsIfNecessary
+ $includeSunSpider = true
+ when '--v8'
+ resetBenchOptionsIfNecessary
+ $includeV8 = true
+ when '--kraken'
+ resetBenchOptionsIfNecessary
+ $includeKraken = true
+ when '--benchmarks'
+ $benchmarkPattern = Regexp.new(arg)
+ when '--measure-gc'
+ if arg == ''
+ $measureGC = true
+ else
+ $measureGC = arg
+ end
+ when '--verbose'
+ $verbosity += 1
+ when '--brief'
+ $brief = true
+ when '--silent'
+ $silent = true
+ when '--remote'
+ $remoteHosts += arg.split(',')
+ $needToCopyVMs = true
+ when '--ssh-options'
+ $sshOptions << arg
+ when '--local'
+ $alsoLocal = true
+ when '--prepare-only'
+ $run = false
+ when '--analyze'
+ $prepare = false
+ $run = false
+ $analyze << arg
+ when '--help'
+ usage
+ else
+ raise "bad option: #{opt}"
+ end
+ }
+
+ # If the --dont-copy-vms option was passed, it overrides the --force-vm-copy option.
+ if $dontCopyVMs
+ $needToCopyVMs = false
+ end
+
+ SUNSPIDER = BenchmarkSuite.new("SunSpider", SUNSPIDER_PATH, :arithmeticMean)
+ ["3d-cube", "3d-morph", "3d-raytrace", "access-binary-trees",
+ "access-fannkuch", "access-nbody", "access-nsieve",
+ "bitops-3bit-bits-in-byte", "bitops-bits-in-byte", "bitops-bitwise-and",
+ "bitops-nsieve-bits", "controlflow-recursive", "crypto-aes",
+ "crypto-md5", "crypto-sha1", "date-format-tofte", "date-format-xparb",
+ "math-cordic", "math-partial-sums", "math-spectral-norm", "regexp-dna",
+ "string-base64", "string-fasta", "string-tagcloud",
+ "string-unpack-code", "string-validate-input"].each {
+ | name |
+ SUNSPIDER.add SunSpiderBenchmark.new(name)
+ }
+
+ V8 = BenchmarkSuite.new("V8", V8_PATH, :geometricMean)
+ ["crypto", "deltablue", "earley-boyer", "raytrace",
+ "regexp", "richards", "splay"].each {
+ | name |
+ V8.add V8Benchmark.new(name)
+ }
+
+ KRAKEN = BenchmarkSuite.new("Kraken", KRAKEN_PATH, :arithmeticMean)
+ ["ai-astar", "audio-beat-detection", "audio-dft", "audio-fft",
+ "audio-oscillator", "imaging-darkroom", "imaging-desaturate",
+ "imaging-gaussian-blur", "json-parse-financial",
+ "json-stringify-tinderbox", "stanford-crypto-aes",
+ "stanford-crypto-ccm", "stanford-crypto-pbkdf2",
+ "stanford-crypto-sha256-iterative"].each {
+ | name |
+ KRAKEN.add KrakenBenchmark.new(name)
+ }
+
+ ARGV.each {
+ | vm |
+ if vm =~ /([a-zA-Z0-9_ ]+):/
+ name = $1
+ nameKind = :given
+ vm = $~.post_match
+ else
+ name = "Conf\##{$vms.length+1}"
+ nameKind = :auto
+ end
+ $stderr.puts "#{name}: #{vm}" if $verbosity >= 1
+ $vms << VM.new(Pathname.new(vm).realpath, name, nameKind, nil)
+ }
+
+ if $vms.empty?
+    quickFail("Please specify at least one configuration on the command line.",
+ "Insufficient arguments")
+ end
+
+ $vms.each {
+ | vm |
+ if vm.vmType != :jsc and $timeMode != :date
+ $timeMode = :date
+ $stderr.puts "Warning: using Date.now() instead of preciseTime() because #{vm} doesn't support the latter."
+ end
+ }
+
+ if FileTest.exist? BENCH_DATA_PATH
+ cmd = "rm -rf #{BENCH_DATA_PATH}"
+ $stderr.puts ">> #{cmd}" if $verbosity >= 2
+ raise unless system cmd
+ end
+
+ Dir.mkdir BENCH_DATA_PATH
+
+ if $needToCopyVMs
+ canCopyIntoBenchPath = true
+ $vms.each {
+ | vm |
+ canCopyIntoBenchPath = false unless vm.canCopyIntoBenchPath
+ }
+
+ if canCopyIntoBenchPath
+ $vms.each {
+ | vm |
+ $stderr.puts "Copying #{vm} into #{BENCH_DATA_PATH}..."
+ vm.copyIntoBenchPath
+ }
+ $stderr.puts "All VMs are in place."
+ else
+ $stderr.puts "Warning: don't know how to copy some VMs into #{BENCH_DATA_PATH}, so I won't do it."
+ end
+ end
+
+ if $measureGC and $measureGC != true
+ found = false
+ $vms.each {
+ | vm |
+ if vm.name == $measureGC
+ found = true
+ end
+ }
+ unless found
+ $stderr.puts "Warning: --measure-gc option ignored because no VM is named #{$measureGC}"
+ end
+ end
+
+ if $outer*$inner == 1
+ $stderr.puts "Warning: will only collect one sample per benchmark/VM. Confidence interval calculation will fail."
+ end
+
+ $stderr.puts "Using timeMode = #{$timeMode}." if $verbosity >= 1
+
+ $suites = []
+
+ if $includeSunSpider and not SUNSPIDER.empty?
+ $suites << SUNSPIDER
+ end
+
+ if $includeV8 and not V8.empty?
+ $suites << V8
+ end
+
+ if $includeKraken and not KRAKEN.empty?
+ $suites << KRAKEN
+ end
+
+ $benchmarks = []
+ $suites.each {
+ | suite |
+ $benchmarks += suite.benchmarks
+ }
+
+ $runPlans = []
+ $vms.each {
+ | vm |
+ $benchmarks.each {
+ | benchmark |
+ $outer.times {
+ | iteration |
+ $runPlans << BenchRunPlan.new(benchmark, vm, iteration)
+ }
+ }
+ }
+
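+  # Randomize the execution order so that slow drift in machine state (e.g.
+  # thermal throttling or background tasks) is spread across benchmarks, VMs,
+  # and iterations instead of systematically biasing one configuration.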
+ $runPlans.shuffle!
+
+ $suitepad = $suites.collect {
+ | suite |
+ suite.to_s.size
+ }.max + 1
+
+ $benchpad = ($benchmarks +
+ ["<arithmetic> *", "<geometric> *", "<harmonic> *"]).collect {
+ | benchmark |
+ if benchmark.respond_to? :name
+ benchmark.name.size
+ else
+ benchmark.size
+ end
+ }.max + 1
+
+ $vmpad = $vms.collect {
+ | vm |
+ vm.to_s.size
+ }.max + 1
+
+ if $prepare
+ File.open("#{BENCH_DATA_PATH}/runscript", "w") {
+ | file |
+ file.puts "echo \"HOSTNAME:\\c\""
+ file.puts "hostname"
+ file.puts "echo"
+ file.puts "echo \"HARDWARE:\\c\""
+ file.puts "/usr/sbin/sysctl hw.model"
+ file.puts "echo"
+ file.puts "set -e"
+ $script = file
+ $runPlans.each_with_index {
+ | plan, idx |
+ if $verbosity == 0 and not $silent
+ text1 = lpad(idx.to_s,$runPlans.size.to_s.size)+"/"+$runPlans.size.to_s
+ text2 = plan.benchmark.to_s+"/"+plan.vm.to_s
+ file.puts("echo "+("\r#{text1} #{rpad(text2,$suitepad+1+$benchpad+1+$vmpad)}".inspect)[0..-2]+"\\c\" 1>&2")
+ file.puts("echo "+("\r#{text1} #{text2}".inspect)[0..-2]+"\\c\" 1>&2")
+ end
+ plan.emitRunCode
+ }
+ if $verbosity == 0 and not $silent
+ file.puts("echo "+("\r#{$runPlans.size}/#{$runPlans.size} #{' '*($suitepad+1+$benchpad+1+$vmpad)}".inspect)[0..-2]+"\\c\" 1>&2")
+ file.puts("echo "+("\r#{$runPlans.size}/#{$runPlans.size}".inspect)+" 1>&2")
+ end
+ }
+ end
+
+ if $run
+ unless $remoteHosts.empty?
+ $stderr.puts "Packaging benchmarking directory for remote hosts..." if $verbosity==0
+ Dir.chdir(TEMP_PATH) {
+ cmd = "tar -czf payload.tar.gz benchdata"
+ $stderr.puts ">> #{cmd}" if $verbosity>=2
+ raise unless system(cmd)
+ }
+
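+      # Turn a "host:port" spec into ssh arguments; a bare host is just
+      # quoted. For example (illustrative host name):
+      #
+      #   grokHost("box:2222")  # => "-p 2222 \"box\""
+      #   grokHost("box")       # => "\"box\""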
+ def grokHost(host)
+ if host =~ /:([0-9]+)$/
+ "-p " + $1 + " " + $~.pre_match.inspect
+ else
+ host.inspect
+ end
+ end
+
+ def sshRead(host, command)
+ cmd = "ssh #{$sshOptions.collect{|x| x.inspect}.join(' ')} #{grokHost(host)} #{command.inspect}"
+ $stderr.puts ">> #{cmd}" if $verbosity>=2
+ result = ""
+ IO.popen(cmd, "r") {
+ | inp |
+ inp.each_line {
+ | line |
+ $stderr.puts "#{host}: #{line}" if $verbosity>=2
+ result += line
+ }
+ }
+ raise "#{$?}" unless $?.success?
+ result
+ end
+
+ def sshWrite(host, command, data)
+ cmd = "ssh #{$sshOptions.collect{|x| x.inspect}.join(' ')} #{grokHost(host)} #{command.inspect}"
+ $stderr.puts ">> #{cmd}" if $verbosity>=2
+ IO.popen(cmd, "w") {
+ | outp |
+ outp.write(data)
+ }
+ raise "#{$?}" unless $?.success?
+ end
+
+ $remoteHosts.each {
+ | host |
+ $stderr.puts "Sending benchmark payload to #{host}..." if $verbosity==0
+
+ remoteTempPath = JSON::parse(sshRead(host, "cat ~/.bencher"))["tempPath"]
+ raise unless remoteTempPath
+
+ sshWrite(host, "cd #{remoteTempPath.inspect} && rm -rf benchdata && tar -xz", IO::read("#{TEMP_PATH}/payload.tar.gz"))
+
+ $stderr.puts "Running on #{host}..." if $verbosity==0
+
+ parseAndDisplayResults(sshRead(host, "cd #{(remoteTempPath+'/benchdata').inspect} && sh runscript"))
+ }
+ end
+
+ if not $remoteHosts.empty? and $alsoLocal
+ $stderr.puts "Running locally..."
+ end
+
+ if $remoteHosts.empty? or $alsoLocal
+ parseAndDisplayResults(runAndGetResults)
+ end
+ end
+
+ $analyze.each_with_index {
+ | filename, index |
+ if index >= 1
+ puts
+ end
+ parseAndDisplayResults(IO::read(filename))
+ }
+
+ if $prepare and not $run and $analyze.empty?
+ puts wrap("Benchmarking script and data are in #{BENCH_DATA_PATH}. You can run "+
+ "the benchmarks and get the results by doing:", 78)
+ puts
+ puts "cd #{BENCH_DATA_PATH}"
+ puts "sh runscript > results.txt"
+ puts
+ puts wrap("Then you can analyze the results by running bencher with the same arguments "+
+ "as now, but replacing --prepare-only with --analyze results.txt.", 78)
+ end
+rescue => e
+ fail(e)
+end
+
+
diff --git a/src/third_party/blink/Tools/Scripts/bisect-test-ordering b/src/third_party/blink/Tools/Scripts/bisect-test-ordering
new file mode 100755
index 0000000..fb29644
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/bisect-test-ordering
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+
+from webkitpy.layout_tests import bisect_test_ordering
+
+sys.exit(bisect_test_ordering.main(sys.argv[1:]))
diff --git a/src/third_party/blink/Tools/Scripts/check-blink-deps b/src/third_party/blink/Tools/Scripts/check-blink-deps
new file mode 100755
index 0000000..e360a97
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/check-blink-deps
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A utility script for running Chromium's dependency checker script on Blink."""
+
+import os
+import subprocess
+import sys
+
+def show_help():
+ print 'Usage: %s [dir=Source]' % os.path.basename(sys.argv[0])
+
+def main():
+ start_dir = None
+ if len(sys.argv) > 1:
+ start_dir = sys.argv[1]
+
+ if start_dir == '--help':
+ show_help()
+ return
+
+ root_dir = os.path.realpath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
+ if not start_dir:
+ start_dir = os.path.join(root_dir, 'Source')
+
+ check_deps = os.path.realpath(os.path.join(root_dir, os.pardir, os.pardir, 'buildtools', 'checkdeps', 'checkdeps.py'))
+ subprocess.call([sys.executable, check_deps, '--root', root_dir, start_dir])
+
+if '__main__' == __name__:
+ main()
diff --git a/src/third_party/blink/Tools/Scripts/check-dom-results b/src/third_party/blink/Tools/Scripts/check-dom-results
new file mode 100755
index 0000000..de4f77d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/check-dom-results
@@ -0,0 +1,141 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2005 Apple Computer, Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Script to check status of W3C DOM tests that are part of the WebKit tests.
+
+use strict;
+use FindBin;
+use Cwd;
+use lib $FindBin::Bin;
+use webkitdirs;
+
+chdirWebKit();
+
+my $verbose = $ARGV[0] && $ARGV[0] eq "-v";
+
+my $workingDir = getcwd();
+my $testDirectory = "$workingDir/LayoutTests";
+
+my @suites = ( {"name" => "DOM Level 1 Core (html)", "directory" => "dom/html/level1/core"},
+ {"name" => "DOM Level 2 Core (html)", "directory" => "dom/html/level2/core"},
+ {"name" => "DOM Level 2 Events (html)", "directory" => "dom/html/level2/events"},
+ {"name" => "DOM Level 2 HTML (html)", "directory" => "dom/html/level2/html"},
+ {"name" => "DOM Level 1 Core (xhtml)", "directory" => "dom/xhtml/level1/core"},
+ {"name" => "DOM Level 2 Core (xhtml)", "directory" => "dom/xhtml/level2/core"},
+ {"name" => "DOM Level 2 Events (xhtml)", "directory" => "dom/xhtml/level2/events"},
+ {"name" => "DOM Level 2 HTML (xhtml)", "directory" => "dom/xhtml/level2/html"},
+ {"name" => "DOM Level 3 Core (xhtml)", "directory" => "dom/xhtml/level3/core"},
+ {"name" => "DOM Level 3 XPath (svg)", "directory" => "dom/svg/level3/xpath"});
+
+my $totalCount = 0;
+my $totalSuccesses = 0;
+my $totalDisabled = 0;
+my $totalFailed = 0;
+
+foreach my $suite (@suites) {
+
+ my %suite = %$suite;
+ my $directory = $suite{"directory"};
+ my $name = $suite{"name"};
+ my @results = `find "${testDirectory}/${directory}" -name "*-expected.txt"`;
+ my @disabled = `find "${testDirectory}/${directory}" -name "*-disabled"`;
+
+ my @failures = ();
+ my $count = 0;
+
+ foreach my $result (@results) {
+ $count++;
+ my $success = 0;
+ open RESULT, "<$result";
+ while (<RESULT>) {
+ if (/Success/) {
+ $success = 1;
+ last;
+ }
+ }
+ close RESULT;
+ if (!$success) {
+ push @failures, $result;
+ }
+ }
+
+ my $disabledCount = (scalar @disabled);
+ my $failureCount = (scalar @failures);
+
+ $count += $disabledCount;
+
+ my $successCount = $count - $failureCount - $disabledCount;
+ my $percentage = (sprintf "%.1f", ($successCount * 100.0 / $count));
+
+ if ($percentage == 100) {
+ print "${name}: all ${count} tests succeeded";
+ } else {
+ print "${name}: ${successCount} out of ${count} tests succeeded (${percentage}%)";
+ }
+ print " ($disabledCount disabled)" if $disabledCount;
+ print "\n";
+ if ($verbose) {
+ print "\n";
+ if (@disabled) {
+ print " Disabled:\n";
+
+ foreach my $failure (sort @disabled) {
+ $failure =~ s|.*/||;
+ $failure =~ s|-disabled||;
+ print " ${directory}/${failure}";
+ }
+ }
+ if (@failures) {
+ print " Failed:\n";
+
+ foreach my $failure (sort @failures) {
+ $directory =~ m|^dom/(\w+)|;
+ my $extension = $1;
+ $failure =~ s|.*/||;
+ $failure =~ s|-expected\.txt|.${extension}|;
+ print " ${directory}/${failure}";
+ }
+ }
+
+ print "\n";
+ }
+
+ $totalCount += $count;
+ $totalSuccesses += $successCount;
+ $totalDisabled += $disabledCount;
+ $totalFailed += $failureCount;
+}
+
+
+my $totalPercentage = (sprintf "%.1f", ($totalSuccesses * 100.0 / $totalCount));
+my $totalDisabledPercentage = (sprintf "%.1f", ($totalDisabled * 100.0 / $totalCount));
+my $totalFailedPercentage = (sprintf "%.1f", ($totalFailed * 100.0 / $totalCount));
+
+print "Total: ${totalSuccesses} out of ${totalCount} tests succeeded (${totalPercentage}%)\n";
+print " ${totalDisabled} tests disabled (${totalDisabledPercentage}%)\n";
+print " ${totalFailed} tests failed (${totalFailedPercentage}%)\n";
diff --git a/src/third_party/blink/Tools/Scripts/check-for-exit-time-destructors b/src/third_party/blink/Tools/Scripts/check-for-exit-time-destructors
new file mode 100755
index 0000000..6d94b5a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/check-for-exit-time-destructors
@@ -0,0 +1,152 @@
+#!/usr/bin/perl
+
+# Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# "check-for-exit-time-destructors" script for WebKit Open Source Project
+
+# Intended to be invoked from an Xcode build step to check if there are
+# any exit-time destructors in a target.
+
+use warnings;
+use strict;
+
+use File::Basename;
+
+sub touch($);
+sub printFunctions($$);
+
+my $arch = $ENV{'CURRENT_ARCH'};
+my $configuration = $ENV{'CONFIGURATION'};
+my $target = $ENV{'TARGET_NAME'};
+my $variant = $ENV{'CURRENT_VARIANT'};
+my $coverageBuild = $ENV{'WEBKIT_COVERAGE_BUILD'};
+my $debugRoot = $ENV{'WEBKIT_DEBUG_ROOT'};
+
+$arch = $ENV{'NATIVE_ARCH'} if !$arch; # for Xcode 2.1, which does not have CURRENT_ARCH
+$variant = "normal" if !$variant; # for Xcode 2.1, which does not have CURRENT_VARIANT
+
+my $executablePath = "$ENV{'TARGET_BUILD_DIR'}/$ENV{'EXECUTABLE_PATH'}";
+
+my $buildTimestampPath = $ENV{'TARGET_TEMP_DIR'} . "/" . basename($0) . ".timestamp";
+my $buildTimestampAge = -M $buildTimestampPath;
+my $scriptAge = -M $0;
+
+my $list = $ENV{"LINK_FILE_LIST_${variant}_${arch}"};
+
+if (!open LIST, $list) {
+ print "ERROR: Could not open $list\n";
+ exit 1;
+}
+
+my @files = <LIST>;
+chomp @files;
+close LIST;
+
+my $sawError = 0;
+
+for my $file (sort @files) {
+ if (defined $buildTimestampAge && $buildTimestampAge < $scriptAge) {
+ my $fileAge = -M $file;
+ next if defined $fileAge && $fileAge > $buildTimestampAge;
+ }
+ if (!open NM, "(nm '$file' | sed 's/^/STDOUT:/') 2>&1 |") {
+ print "ERROR: Could not open $file\n";
+ $sawError = 1;
+ next;
+ }
+ my $sawAtExit = 0;
+ my $shortName = $file;
+ $shortName =~ s/.*\///;
+
+ while (<NM>) {
+ if (/^STDOUT:/) {
+            # With GC logging enabled, Heap.o may contain finalizers, so we ignore them.
+ $sawAtExit = 1 if (/___cxa_atexit/ && ($shortName ne "Heap.o"));
+ } else {
+ print STDERR if $_ ne "nm: no name list\n";
+ }
+ }
+ close NM;
+ next unless $sawAtExit;
+
+ $sawError = 1 if printFunctions($shortName, $file);
+}
+
+if ($sawError and !$coverageBuild) {
+ print "ERROR: Use DEFINE_STATIC_LOCAL from <wtf/StdLibExtras.h>\n";
+ unlink $executablePath;
+ exit 1;
+}
+
+touch($buildTimestampPath);
+exit 0;
+
+sub touch($)
+{
+ my ($path) = @_;
+ open(TOUCH, ">", $path) or die "$!";
+ close(TOUCH);
+}
+
+sub demangle($)
+{
+ my ($symbol) = @_;
+ if (!open FILT, "c++filt $symbol |") {
+ print "ERROR: Could not open c++filt\n";
+ return;
+ }
+ my $result = <FILT>;
+ close FILT;
+ chomp $result;
+ return $result;
+}
+
+sub printFunctions($$)
+{
+ my ($shortName, $path) = @_;
+ if (!open OTOOL, "otool -tV '$path' |") {
+ print "WARNING: Could not open $path\n";
+ return 0;
+ }
+ my %functions;
+ my $currentSymbol = "";
+ while (<OTOOL>) {
+ $currentSymbol = $1 if /^(\w+):$/;
+ next unless $currentSymbol;
+ $functions{demangle($currentSymbol)} = 1 if /___cxa_atexit/;
+ }
+ close OTOOL;
+ my $result = 0;
+ for my $function (sort keys %functions) {
+ if (!$result) {
+ print "ERROR: $shortName has exit time destructors in it! ($path)\n";
+ $result = 1;
+ }
+ print "ERROR: In function $function\n";
+ }
+ return $result;
+}
diff --git a/src/third_party/blink/Tools/Scripts/check-for-global-initializers b/src/third_party/blink/Tools/Scripts/check-for-global-initializers
new file mode 100755
index 0000000..4de9f2f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/check-for-global-initializers
@@ -0,0 +1,167 @@
+#!/usr/bin/perl
+
+# Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# "check-for-global-initializers" script for WebKit Open Source Project
+
+# Intended to be invoked from an Xcode build step to check if there are
+# any global initializers in a target.
+
+use warnings;
+use strict;
+
+use File::Basename;
+
+sub touch($);
+sub demangle($);
+
+my $arch = $ENV{'CURRENT_ARCH'};
+my $configuration = $ENV{'CONFIGURATION'};
+my $target = $ENV{'TARGET_NAME'};
+my $variant = $ENV{'CURRENT_VARIANT'};
+my $coverageBuild = $ENV{'WEBKIT_COVERAGE_BUILD'};
+my $debugRoot = $ENV{'WEBKIT_DEBUG_ROOT'};
+
+$arch = $ENV{'NATIVE_ARCH'} if !$arch; # for Xcode 2.1, which does not have CURRENT_ARCH
+$variant = "normal" if !$variant; # for Xcode 2.1, which does not have CURRENT_VARIANT
+
+my $executablePath = "$ENV{'TARGET_BUILD_DIR'}/$ENV{'EXECUTABLE_PATH'}";
+
+my $buildTimestampPath = $ENV{'TARGET_TEMP_DIR'} . "/" . basename($0) . ".timestamp";
+my $buildTimestampAge = -M $buildTimestampPath;
+my $scriptAge = -M $0;
+
+my $list = $ENV{"LINK_FILE_LIST_${variant}_${arch}"};
+
+if (!open LIST, $list) {
+ print "ERROR: Could not open $list\n";
+ exit 1;
+}
+
+my @files = <LIST>;
+chomp @files;
+close LIST;
+
+my $sawError = 0;
+
+for my $file (sort @files) {
+ if (defined $buildTimestampAge && $buildTimestampAge < $scriptAge) {
+ my $fileAge = -M $file;
+ next if defined $fileAge && $fileAge > $buildTimestampAge;
+ }
+ if (!open NM, "(nm '$file' | sed 's/^/STDOUT:/') 2>&1 |") {
+ print "ERROR: Could not open $file\n";
+ $sawError = 1;
+ next;
+ }
+ my $sawGlobal = 0;
+ my @globals;
+ while (<NM>) {
+ if (/^STDOUT:/) {
+ my $line = $_;
+ if ($line =~ /__GLOBAL__I(.+)$/) {
+ $sawGlobal = 1;
+ push(@globals, demangle($1));
+ }
+ } else {
+ print STDERR if $_ ne "nm: no name list\n";
+ }
+ }
+ close NM;
+ if ($sawGlobal) {
+ my $shortName = $file;
+ $shortName =~ s/.*\///;
+
+ # Special cases for files that have initializers in debug builds.
+ if ($configuration eq "Debug" or $variant eq "debug" or $debugRoot) {
+ if ($target eq "JavaScriptCore") {
+ next if $shortName eq "AllInOneFile.o";
+ next if $shortName eq "Opcode.o";
+ next if $shortName eq "Structure.o";
+ next if $shortName eq "nodes.o";
+ }
+ if ($target eq "WebCore") {
+ next if $shortName eq "BidiRun.o";
+ next if $shortName eq "CachedPage.o";
+ next if $shortName eq "CachedResource.o";
+ next if $shortName eq "FEGaussianBlur.o";
+ next if $shortName eq "Frame.o";
+ next if $shortName eq "JSCustomSQLTransactionCallback.o";
+ next if $shortName eq "JSLazyEventListener.o";
+ next if $shortName eq "Node.o";
+ next if $shortName eq "Page.o";
+ next if $shortName eq "Range.o";
+ next if $shortName eq "RenderObject.o";
+ next if $shortName eq "SVGElementInstance.o";
+ next if $shortName eq "SubresourceLoader.o";
+ next if $shortName eq "XMLHttpRequest.o";
+ }
+ if ($target eq "WebKit") {
+ next if $shortName eq "HostedNetscapePluginStream.o";
+ next if $shortName eq "NetscapePluginInstanceProxy.o";
+ }
+ if ($target eq "WebKit2") {
+ next if $shortName eq "WebContext.o";
+ next if $shortName eq "WebFrame.o";
+ next if $shortName eq "WebPage.o";
+ next if $shortName eq "WebPageProxy.o";
+ }
+ }
+
+ print "ERROR: $shortName has one or more global initializers in it! ($file), near @globals\n";
+ $sawError = 1;
+ }
+}
+
+if ($sawError and !$coverageBuild) {
+ unlink $executablePath;
+ exit 1;
+}
+
+touch($buildTimestampPath);
+exit 0;
+
+sub touch($)
+{
+ my ($path) = @_;
+ open(TOUCH, ">", $path) or die "$!";
+ close(TOUCH);
+}
+
+sub demangle($)
+{
+ my ($symbol) = @_;
+ if (!open FILT, "c++filt $symbol |") {
+ print "ERROR: Could not open c++filt\n";
+ return;
+ }
+ my $result = <FILT>;
+ close FILT;
+ chomp $result;
+ return $result;
+}
+
diff --git a/src/third_party/blink/Tools/Scripts/check-for-weak-vtables-and-externals b/src/third_party/blink/Tools/Scripts/check-for-weak-vtables-and-externals
new file mode 100755
index 0000000..b191fbc
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/check-for-weak-vtables-and-externals
@@ -0,0 +1,120 @@
+#!/usr/bin/perl
+
+# Copyright (C) 2006, 2007, 2008, 2010 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# "check-for-weak-vtables-and-externals" script for WebKit Open Source Project
+
+# Intended to be invoked from an Xcode build step to check if there are
+# any weak vtables or weak externals in a target.
+
+use warnings;
+use strict;
+
+use File::Basename;
+
+sub touch($);
+
+my $arch = $ENV{'CURRENT_ARCH'};
+my $configuration = $ENV{'CONFIGURATION'};
+my $target = $ENV{'TARGET_NAME'};
+my $variant = $ENV{'CURRENT_VARIANT'};
+my $coverageBuild = $ENV{'WEBKIT_COVERAGE_BUILD'};
+my $debugRoot = $ENV{'WEBKIT_DEBUG_ROOT'};
+
+$arch = $ENV{'NATIVE_ARCH'} if !$arch; # for Xcode 2.1, which does not have CURRENT_ARCH
+$variant = "normal" if !$variant; # for Xcode 2.1, which does not have CURRENT_VARIANT
+
+my $executablePath = "$ENV{'TARGET_BUILD_DIR'}/$ENV{'EXECUTABLE_PATH'}";
+
+my $buildTimestampPath = $ENV{'TARGET_TEMP_DIR'} . "/" . basename($0) . ".timestamp";
+my $buildTimestampAge = -M $buildTimestampPath;
+my $executablePathAge = -M $executablePath;
+
+my $sawError = 0;
+
+if (!defined $executablePathAge || !defined $buildTimestampAge || $executablePathAge < $buildTimestampAge) {
+ if (!open NM, "(nm -m '$executablePath' | c++filt | sed 's/^/STDOUT:/') 2>&1 |") {
+ print "ERROR: Could not open $executablePath\n";
+ $sawError = 1;
+        # "next" is invalid outside a loop and would be a fatal runtime error
+        # here; fail the check explicitly instead.
+        exit 1;
+ }
+ my @weakVTableClasses = ();
+ my @weakExternalSymbols = ();
+ while (<NM>) {
+ if (/^STDOUT:/) {
+ # Ignore undefined, RTTI and typeinfo symbols.
+ next if /\bundefined\b/ or /\b__ZT[IS]/;
+
+ if (/weak external vtable for (.*)$/) {
+ push @weakVTableClasses, $1;
+ } elsif (/weak external (.*)$/) {
+ push @weakExternalSymbols, $1;
+ }
+ } else {
+ print STDERR if $_ ne "nm: no name list\n";
+ }
+ }
+ close NM;
+
+ my $shortName = $executablePath;
+ $shortName =~ s/.*\///;
+
+ if (@weakVTableClasses) {
+ print "ERROR: $shortName has a weak vtable in it ($executablePath)\n";
+        print "ERROR: Fix by making sure the first virtual function in each of these classes is not inline:\n";
+ for my $class (sort @weakVTableClasses) {
+ print "ERROR: class $class\n";
+ }
+ $sawError = 1;
+ }
+
+ if (@weakExternalSymbols) {
+ print "ERROR: $shortName has a weak external symbol in it ($executablePath)\n";
+ print "ERROR: A weak external symbol is generated when a symbol is defined in multiple compilation units and is also marked as being exported from the library.\n";
+ print "ERROR: A common cause of weak external symbols is when an inline function is listed in the linker export file.\n";
+ for my $symbol (sort @weakExternalSymbols) {
+ print "ERROR: symbol $symbol\n";
+ }
+ $sawError = 1;
+ }
+}
+
+if ($sawError and !$coverageBuild) {
+ unlink $executablePath;
+ exit 1;
+}
+
+touch($buildTimestampPath);
+
+exit 0;
+
+sub touch($)
+{
+ my ($path) = @_;
+ open(TOUCH, ">", $path) or die "$!";
+ close(TOUCH);
+}
diff --git a/src/third_party/blink/Tools/Scripts/check-testharness-expected-pass b/src/third_party/blink/Tools/Scripts/check-testharness-expected-pass
new file mode 100755
index 0000000..3d7e992
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/check-testharness-expected-pass
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Check if a LayoutTest expected file is a passing testharness result.
+
+The intent of this script is to identify expected files that are passing
+testharness.js results. Those files are not needed because the test
+infrastructure will read the output of testharness.js tests if there are no
+expected files."""
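+
+# Typical invocation (paths are illustrative):
+#
+#   check-testharness-expected-pass foo/bar-expected.txt baz/qux-expected.txt
+#
+# Any listed file whose content is a passing testharness.js result is
+# reported on stderr and the script exits non-zero.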
+
+
+import fileinput
+import sys
+
+from webkitpy.layout_tests.models import testharness_results
+
+paths = []
+
+for path in sys.argv[1:]:
+ content = open(path, 'r').read()
+ if testharness_results.is_testharness_output(content) and \
+ testharness_results.is_testharness_output_passing(content):
+ paths.append(path)
+
+if len(paths) > 0:
+    sys.stderr.write('* The following files are passing testharness results; they should be removed:\n ')
+ sys.stderr.write('\n '.join(paths))
+ sys.stderr.write('\n')
+ sys.exit("ERROR: found passing testharness results.")
diff --git a/src/third_party/blink/Tools/Scripts/check-webkit-style b/src/third_party/blink/Tools/Scripts/check-webkit-style
new file mode 100755
index 0000000..54ca276
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/check-webkit-style
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2011 Google Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Does WebKit-lint on C/C++ or text files.
+
+The goal of this script is to identify places in the code that *may*
+be in non-compliance with WebKit style. It does not attempt to fix
+up these problems -- the point is to educate. It does also not
+attempt to find all problems, or to ensure that everything it does
+find is legitimately a problem."""
+
+import sys
+
+import webkitpy.common.version_check
+
+from webkitpy.style.main import CheckWebKitStyle
+
+
+if __name__ == "__main__":
+ sys.exit(CheckWebKitStyle().main())
diff --git a/src/third_party/blink/Tools/Scripts/clean-header-guards b/src/third_party/blink/Tools/Scripts/clean-header-guards
new file mode 100755
index 0000000..848439f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/clean-header-guards
@@ -0,0 +1,53 @@
+#!/usr/bin/ruby
+
+require 'find'
+require 'optparse'
+
+options = {}
+OptionParser.new do |opts|
+ opts.banner = "Usage: clean-header-guards [options]"
+
+ opts.on("--prefix [PREFIX]", "Append a header prefix to all guards") do |prefix|
+ options[:prefix] = prefix
+ end
+end.parse!
+
+IgnoredFilenamePatterns = [
+    # ignore headers which are known not to have guards
+ /WebCorePrefix/,
+ /ForwardingHeaders/,
+ %r|bindings/objc|,
+ /vcproj/, # anything inside a vcproj is in the windows wasteland
+
+ # we don't own any of these headers
+ %r|icu/unicode|,
+ %r|platform/graphics/cairo|,
+ %r|platform/image-decoders|,
+
+ /config.h/ # changing this one sounds scary
+].freeze
+
+IgnoreFileNamesPattern = Regexp.union(*IgnoredFilenamePatterns).freeze
+
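+# A minimal sketch of the rewrite performed below (illustrative names): for a
+# header Node.h whose guard is
+#
+#   #ifndef NodeGuard
+#   #define NodeGuard
+#
+# every occurrence of NodeGuard is replaced with Node_h, or with
+# WebCore_Node_h when invoked as: clean-header-guards --prefix WebCore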
+Find::find(".") do |filename|
+ next unless filename =~ /\.h$/
+ next if filename.match(IgnoreFileNamesPattern)
+
+ File.open(filename, "r+") do |file|
+ contents = file.read
+ match_results = contents.match(/#ifndef (\S+)\n#define \1/s)
+ if match_results
+ current_guard = match_results[1]
+ new_guard = File.basename(filename).sub('.', '_')
+ new_guard = options[:prefix] + '_' + new_guard if options[:prefix]
+ contents.gsub!(/#{current_guard}\b/, new_guard)
+ else
+ puts "Ignoring #{filename}, failed to find existing header guards."
+ end
+ tmp_filename = filename + ".tmp"
+ File.open(tmp_filename, "w+") do |new_file|
+ new_file.write(contents)
+ end
+ File.rename tmp_filename, filename
+ end
+end
diff --git a/src/third_party/blink/Tools/Scripts/commit-log-editor b/src/third_party/blink/Tools/Scripts/commit-log-editor
new file mode 100755
index 0000000..e790569
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/commit-log-editor
@@ -0,0 +1,371 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2006, 2007, 2008, 2009, 2010 Apple Inc. All rights reserved.
+# Copyright (C) 2009 Torch Mobile Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Script to use ChangeLog entries as the default check-in comment.
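+#
+# One way to wire this up for git (adjust the path to your checkout):
+#   git config core.editor /path/to/Tools/Scripts/commit-log-editor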
+
+use strict;
+use Getopt::Long;
+use File::Basename;
+use File::Spec;
+use FindBin;
+use lib $FindBin::Bin;
+use VCSUtils;
+use webkitdirs;
+
+sub createCommitMessage(@);
+sub loadTermReadKey();
+sub normalizeLineEndings($$);
+sub patchAuthorshipString($$$);
+sub removeLongestCommonPrefixEndingInDoubleNewline(\%);
+sub isCommitLogEditor($);
+
+my $endl = "\n";
+
+sub printUsageAndExit
+{
+ my $programName = basename($0);
+ print STDERR <<EOF;
+Usage: $programName [--regenerate-log] <log file>
+ $programName --print-log <ChangeLog file> [<ChangeLog file>...]
+ $programName --help
+EOF
+ exit 1;
+}
+
+my $help = 0;
+my $printLog = 0;
+my $regenerateLog = 0;
+
+my $getOptionsResult = GetOptions(
+ 'help' => \$help,
+ 'print-log' => \$printLog,
+ 'regenerate-log' => \$regenerateLog,
+);
+
+if (!$getOptionsResult || $help) {
+ printUsageAndExit();
+}
+
+die "Can't specify both --print-log and --regenerate-log\n" if $printLog && $regenerateLog;
+
+if ($printLog) {
+ printUsageAndExit() unless @ARGV;
+ print createCommitMessage(@ARGV);
+ exit 0;
+}
+
+my $log = $ARGV[0];
+if (!$log) {
+ printUsageAndExit();
+}
+
+my $baseDir = baseProductDir();
+
+my $editor = $ENV{SVN_LOG_EDITOR};
+$editor = $ENV{CVS_LOG_EDITOR} if !$editor;
+$editor = "" if $editor && isCommitLogEditor($editor);
+
+my $splitEditor = 1;
+if (!$editor) {
+ my $builtEditorApplication = "$baseDir/Release/Commit Log Editor.app/Contents/MacOS/Commit Log Editor";
+ if (-x $builtEditorApplication) {
+ $editor = $builtEditorApplication;
+ $splitEditor = 0;
+ }
+}
+if (!$editor) {
+ my $builtEditorApplication = "$baseDir/Debug/Commit Log Editor.app/Contents/MacOS/Commit Log Editor";
+ if (-x $builtEditorApplication) {
+ $editor = $builtEditorApplication;
+ $splitEditor = 0;
+ }
+}
+if (!$editor) {
+ my $builtEditorApplication = "$ENV{HOME}/Applications/Commit Log Editor.app/Contents/MacOS/Commit Log Editor";
+ if (-x $builtEditorApplication) {
+ $editor = $builtEditorApplication;
+ $splitEditor = 0;
+ }
+}
+
+$editor = $ENV{EDITOR} if !$editor;
+$editor = "/usr/bin/vi" if !$editor;
+
+my @editor;
+if ($splitEditor) {
+ @editor = split ' ', $editor;
+} else {
+ @editor = ($editor);
+}
+
+my $inChangesToBeCommitted = !isGit();
+my @changeLogs = ();
+my $logContents = "";
+my $existingLog = 0;
+open LOG, $log or die "Could not open log file $log.";
+while (my $curLine = <LOG>) {
+ if (isGit()) {
+ if ($curLine =~ /^# Changes to be committed:$/) {
+ $inChangesToBeCommitted = 1;
+ } elsif ($inChangesToBeCommitted && $curLine =~ /^# \S/) {
+ $inChangesToBeCommitted = 0;
+ }
+ }
+
+ if (!isGit() || $curLine =~ /^#/) {
+ $logContents .= $curLine;
+ } else {
+        # $curLine is part of the existing git commit message (not a
+        # "#" comment line); we don't copy it into $logContents.
+ }
+ $existingLog = isGit() && !($curLine =~ /^#/ || $curLine =~ /^\s*$/) unless $existingLog;
+ my $changeLogFileName = changeLogFileName();
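+    # Collect ChangeLog files named in the status lines, matching both the
+    # svn-style "M....path/ChangeLog" form and the git-style
+    # "#\tmodified: path/ChangeLog" form, while skipping files that merely
+    # end in "-ChangeLog".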
+ push @changeLogs, makeFilePathRelative($1) if $inChangesToBeCommitted && ($curLine =~ /^(?:M|A)....(.*$changeLogFileName)\r?\n?$/ || $curLine =~ /^#\t(?:modified|new file): (.*$changeLogFileName)$/) && $curLine !~ /-$changeLogFileName$/;
+}
+close LOG;
+
+# We want to match the line endings of the existing log file in case they're
+# different from perl's line endings.
+$endl = $1 if $logContents =~ /(\r?\n)/;
+
+my $keepExistingLog = 1;
+if ($regenerateLog && $existingLog && scalar(@changeLogs) > 0 && loadTermReadKey()) {
+ print "Existing log message detected, Use 'r' to regenerate log message from ChangeLogs, or any other key to keep the existing message.\n";
+ Term::ReadKey::ReadMode('cbreak');
+ my $key = Term::ReadKey::ReadKey(0);
+ Term::ReadKey::ReadMode('normal');
+ $keepExistingLog = 0 if ($key eq "r");
+}
+
+# Don't change anything if there's already a log message (as can happen with git-commit --amend).
+exec (@editor, @ARGV) if $existingLog && $keepExistingLog;
+
+my $first = 1;
+open NEWLOG, ">$log.edit" or die;
+if (isGit() && @changeLogs == 0) {
+ # populate git commit message with WebKit-format ChangeLog entries unless explicitly disabled
+ my $branch = gitBranch();
+ chomp(my $webkitGenerateCommitMessage = `git config --bool branch.$branch.webkitGenerateCommitMessage`);
+ if ($webkitGenerateCommitMessage eq "") {
+ chomp($webkitGenerateCommitMessage = `git config --bool core.webkitGenerateCommitMessage`);
+ }
+ if ($webkitGenerateCommitMessage ne "false") {
+ open CHANGELOG_ENTRIES, "-|", "$FindBin::Bin/prepare-ChangeLog --git-index --no-write" or die "prepare-ChangeLog failed: $!.\n";
+ while (<CHANGELOG_ENTRIES>) {
+ print NEWLOG normalizeLineEndings($_, $endl);
+ }
+ close CHANGELOG_ENTRIES;
+ }
+} else {
+ print NEWLOG createCommitMessage(@changeLogs);
+}
+print NEWLOG $logContents;
+close NEWLOG;
+
+system (@editor, "$log.edit");
+
+open NEWLOG, "$log.edit" or exit;
+my $foundComment = 0;
+while (<NEWLOG>) {
+ $foundComment = 1 if (/\S/ && !/^CVS:/);
+}
+close NEWLOG;
+
+if ($foundComment) {
+ open NEWLOG, "$log.edit" or die;
+ open LOG, ">$log" or die;
+ while (<NEWLOG>) {
+ print LOG;
+ }
+ close LOG;
+ close NEWLOG;
+}
+
+unlink "$log.edit";
+
+sub createCommitMessage(@)
+{
+ my @changeLogs = @_;
+
+ my $topLevel = determineVCSRoot();
+
+ my %changeLogSort;
+ my %changeLogContents;
+ for my $changeLog (@changeLogs) {
+ open CHANGELOG, $changeLog or die "Can't open $changeLog";
+ my $contents = "";
+ my $blankLines = "";
+ my $lineCount = 0;
+ my $date = "";
+ my $author = "";
+ my $email = "";
+ my $hasAuthorInfoToWrite = 0;
+ while (<CHANGELOG>) {
+ if (/^\S/) {
+ last if $contents;
+ }
+ if (/\S/) {
+ $contents .= $blankLines if $contents;
+ $blankLines = "";
+
+ my $line = $_;
+
+ # Remove indentation spaces
+ $line =~ s/^ {8}//;
+
+ # Grab the author and the date line
+ if ($line =~ m/^([0-9]{4}-[0-9]{2}-[0-9]{2})\s+(.*[^\s])\s+<(.*)>/ && $lineCount == 0) {
+ $date = $1;
+ $author = $2;
+ $email = $3;
+ $hasAuthorInfoToWrite = 1;
+ next;
+ }
+
+ if ($hasAuthorInfoToWrite) {
+ my $isReviewedByLine = $line =~ m/^(?:Reviewed|Rubber[ \-]?stamped) by/;
+ my $isModifiedFileLine = $line =~ m/^\* .*:/;
+
+ # Insert the authorship line if needed just above the "Reviewed by" line or the
+ # first modified file (whichever comes first).
+ if ($isReviewedByLine || $isModifiedFileLine) {
+ $hasAuthorInfoToWrite = 0;
+ my $authorshipString = patchAuthorshipString($author, $email, $date);
+ if ($authorshipString) {
+ $contents .= "$authorshipString\n";
+ $contents .= "\n" if $isModifiedFileLine;
+ }
+ }
+ }
+
+
+ $lineCount++;
+ $contents .= $line;
+ } else {
+ $blankLines .= $_;
+ }
+ }
+ if ($hasAuthorInfoToWrite) {
+ # We didn't find anywhere to put the authorship info, so just put it at the end.
+ my $authorshipString = patchAuthorshipString($author, $email, $date);
+ $contents .= "\n$authorshipString\n" if $authorshipString;
+ $hasAuthorInfoToWrite = 0;
+ }
+
+ close CHANGELOG;
+
+ $changeLog = File::Spec->abs2rel(File::Spec->rel2abs($changeLog), $topLevel);
+
+ my $label = dirname($changeLog);
+ $label = "top level" unless length $label;
+
+ my $sortKey = lc $label;
+ if ($label eq "top level") {
+ $sortKey = "";
+ } elsif ($label eq "LayoutTests") {
+ $sortKey = lc "~, LayoutTests last";
+ }
+
+ $changeLogSort{$sortKey} = $label;
+ $changeLogContents{$label} = $contents;
+ }
+
+ my $commonPrefix = removeLongestCommonPrefixEndingInDoubleNewline(%changeLogContents);
+
+ my $first = 1;
+ my @result;
+ push @result, normalizeLineEndings($commonPrefix, $endl);
+ for my $sortKey (sort keys %changeLogSort) {
+ my $label = $changeLogSort{$sortKey};
+ if (keys %changeLogSort > 1) {
+ push @result, normalizeLineEndings("\n", $endl) if !$first;
+ $first = 0;
+ push @result, normalizeLineEndings("$label: ", $endl);
+ }
+ push @result, normalizeLineEndings($changeLogContents{$label}, $endl);
+ }
+
+ return join '', @result;
+}
+
+sub loadTermReadKey()
+{
+ eval { require Term::ReadKey; };
+ return !$@;
+}
+
+sub normalizeLineEndings($$)
+{
+ my ($string, $endl) = @_;
+ $string =~ s/\r?\n/$endl/g;
+ return $string;
+}
+
+sub patchAuthorshipString($$$)
+{
+ my ($authorName, $authorEmail, $authorDate) = @_;
+
+ return if $authorEmail eq changeLogEmailAddress();
+ return "Patch by $authorName <$authorEmail> on $authorDate";
+}
+
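+# Finds the longest prefix shared by all the ChangeLog entries, cut back to
+# end at the last blank line inside it, removes that prefix from every entry,
+# and returns it so the caller can print it once at the top of the message.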
+sub removeLongestCommonPrefixEndingInDoubleNewline(\%)
+{
+ my ($hashOfStrings) = @_;
+
+ my @strings = values %{$hashOfStrings};
+ return "" unless @strings > 1;
+
+ my $prefix = shift @strings;
+ my $prefixLength = length $prefix;
+ foreach my $string (@strings) {
+ while ($prefixLength) {
+ last if substr($string, 0, $prefixLength) eq $prefix;
+ --$prefixLength;
+ $prefix = substr($prefix, 0, -1);
+ }
+ last unless $prefixLength;
+ }
+
+ return "" unless $prefixLength;
+
+ my $lastDoubleNewline = rindex($prefix, "\n\n");
+ return "" unless $lastDoubleNewline > 0;
+
+ foreach my $key (keys %{$hashOfStrings}) {
+ $hashOfStrings->{$key} = substr($hashOfStrings->{$key}, $lastDoubleNewline);
+ }
+ return substr($prefix, 0, $lastDoubleNewline + 2);
+}
+
+sub isCommitLogEditor($)
+{
+ my $editor = shift;
+ return $editor =~ m/commit-log-editor/;
+}
diff --git a/src/third_party/blink/Tools/Scripts/compare-timing-files b/src/third_party/blink/Tools/Scripts/compare-timing-files
new file mode 100755
index 0000000..89f70b1
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/compare-timing-files
@@ -0,0 +1,88 @@
+#!/usr/bin/perl
+
+# Copyright (C) 2006 Apple Computer, Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script takes two files that are lists of timings and compares them.
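+#
+# Example (file names are illustrative; each input file holds one timing per line):
+#   compare-timing-files --count 3 old-times.txt new-times.txt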
+
+use warnings;
+use strict;
+use Getopt::Long;
+
+my $usage = "compare-timing-files [-c|--count results] oldFile newFile";
+
+my $count = 1;
+GetOptions("c|count=i" => \$count);
+
+my ($file1, $file2) = @ARGV;
+die "$usage\n" unless ($file1 && $file2 && @ARGV == 2);
+
+my ($oldAverage, $oldRange, $oldRangePercent) = parseResults($file1);
+my ($newAverage, $newRange, $newRangePercent) = parseResults($file2);
+
+print "\n===== $file1 =====\n";
+if ($count == 1) {
+ print("fastest run: $oldAverage\n");
+} else {
+ print("average of fastest $count runs: $oldAverage\n");
+ printf("range of fastest $count runs: %.2f%% (%d)\n", $oldRangePercent, $oldRange);
+}
+
+print "\n===== $file2 =====\n";
+if ($count == 1) {
+ print("fastest run: $newAverage\n");
+} else {
+ print("average of fastest $count runs: $newAverage\n");
+ printf("range of fastest $count runs: %.2f%% (%d)\n", $newRangePercent, $newRange);
+}
+
+my $gainOrLoss = $newAverage <= $oldAverage ? "GAIN" : "LOSS";
+my $difference = abs($newAverage - $oldAverage);
+my $differencePercent = $difference / $oldAverage * 100;
+printf("\nperformance %s of %.2f%% (%.1f / %.1f)\n", $gainOrLoss, $differencePercent, $difference, $oldAverage);
+print "\n";
+
+sub parseResults
+{
+ my ($file) = @_;
+
+ open(FILE, $file) or die "Couldn't open file: $file";
+ my @results = <FILE>;
+ close(FILE);
+
+    s/\D*// for @results; # strip leading non-digits from each line
+    @results = sort { $a <=> $b } @results; # sort numerically; a bare sort() would compare as strings
+    my $total = 0;
+    for (my $i = 0; $i < $count; $i++) {
+        $total += $results[$i];
+    }
+    my $average = $total / $count;
+    my $range = $results[$count - 1] - $results[0];
+    my $rangePercent = $range / $results[$count - 1] * 100;
+
+ return ($average, $range, $rangePercent);
+}
+
diff --git a/src/third_party/blink/Tools/Scripts/debug-test-runner b/src/third_party/blink/Tools/Scripts/debug-test-runner
new file mode 100755
index 0000000..251849f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/debug-test-runner
@@ -0,0 +1,37 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2010 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+# Simplified "debug" script for debugging the WebKitTestRunner.
+
+use strict;
+use FindBin;
+use lib $FindBin::Bin;
+use webkitdirs;
+
+printHelpAndExitForRunAndDebugWebKitAppIfNeeded(INCLUDE_OPTIONS_FOR_DEBUGGING);
+
+setConfiguration();
+
+exit exitStatus(debugWebKitTestRunner());
diff --git a/src/third_party/blink/Tools/Scripts/display-profiler-output b/src/third_party/blink/Tools/Scripts/display-profiler-output
new file mode 100755
index 0000000..0a8c15e
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/display-profiler-output
@@ -0,0 +1,938 @@
+#!/usr/bin/env ruby
+
+# Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require 'rubygems'
+
+require 'readline'
+
+begin
+ require 'json'
+ require 'highline'
+rescue LoadError
+ $stderr.puts "Error: some required gems are not installed!"
+ $stderr.puts
+ $stderr.puts "Try running:"
+ $stderr.puts
+ $stderr.puts "sudo gem install json"
+ $stderr.puts "sudo gem install highline"
+ exit 1
+end
+
+class Bytecode
+ attr_accessor :bytecodes, :bytecodeIndex, :opcode, :description, :topCounts, :bottomCounts, :machineInlinees, :osrExits
+
+ def initialize(bytecodes, bytecodeIndex, opcode, description)
+ @bytecodes = bytecodes
+ @bytecodeIndex = bytecodeIndex
+ @opcode = opcode
+ @description = description
+ @topCounts = [] # "source" counts
+ @bottomCounts = {} # "machine" counts, maps compilations to counts
+ @machineInlinees = {} # maps my compilation to a set of inlinees
+ @osrExits = []
+ end
+
+ def shouldHaveCounts?
+ @opcode != "op_call_put_result"
+ end
+
+ def addTopCount(count)
+ @topCounts << count
+ end
+
+ def addBottomCountForCompilation(count, compilation)
+ @bottomCounts[compilation] = [] unless @bottomCounts[compilation]
+ @bottomCounts[compilation] << count
+ end
+
+ def addMachineInlinee(compilation, inlinee)
+ @machineInlinees[compilation] = {} unless @machineInlinees[compilation]
+ @machineInlinees[compilation][inlinee] = true
+ end
+
+ def totalTopExecutionCount
+ sum = 0
+ @topCounts.each {
+ | value |
+ sum += value.count
+ }
+ sum
+ end
+
+ def topExecutionCount(engine)
+ sum = 0
+ @topCounts.each {
+ | value |
+ if value.engine == engine
+ sum += value.count
+ end
+ }
+ sum
+ end
+
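+  # A compilation can report several machine-level counters for the same
+  # bytecode; take the largest count per compilation, then sum across
+  # compilations.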
+ def totalBottomExecutionCount
+ sum = 0
+ @bottomCounts.each_value {
+ | counts |
+ max = 0
+ counts.each {
+ | value |
+ max = [max, value.count].max
+ }
+ sum += max
+ }
+ sum
+ end
+
+ def bottomExecutionCount(engine)
+ sum = 0
+ @bottomCounts.each_pair {
+ | compilation, counts |
+ if compilation.engine == engine
+ max = 0
+ counts.each {
+ | value |
+ max = [max, value.count].max
+ }
+ sum += max
+ end
+ }
+ sum
+ end
+
+ def totalExitCount
+ sum = 0
+ @osrExits.each {
+ | exit |
+ sum += exit.count
+ }
+ sum
+ end
+end
+
+class Bytecodes
+ attr_accessor :codeHash, :inferredName, :source, :instructionCount, :machineInlineSites, :compilations
+
+ def initialize(json)
+ @codeHash = json["hash"].to_s
+ @inferredName = json["inferredName"].to_s
+ @source = json["sourceCode"].to_s
+ @instructionCount = json["instructionCount"].to_i
+ @bytecode = {}
+ json["bytecode"].each {
+ | subJson |
+ index = subJson["bytecodeIndex"].to_i
+ @bytecode[index] = Bytecode.new(self, index, subJson["opcode"].to_s, subJson["description"].to_s)
+ }
+ @machineInlineSites = {} # maps compilation to a set of origins
+ @compilations = []
+ end
+
+ def name(limit)
+ if to_s.size > limit
+ "\##{@codeHash}"
+ else
+ to_s
+ end
+ end
+
+ def to_s
+ "#{@inferredName}\##{@codeHash}"
+ end
+
+ def matches(pattern)
+ if pattern =~ /^#/
+ $~.post_match == @codeHash
+ elsif pattern =~ /#/
+ pattern == to_s
+ else
+ pattern == @inferredName or pattern == @codeHash
+ end
+ end
+
+ def each
+ @bytecode.values.sort{|a, b| a.bytecodeIndex <=> b.bytecodeIndex}.each {
+ | value |
+ yield value
+ }
+ end
+
+ def bytecode(bytecodeIndex)
+ @bytecode[bytecodeIndex]
+ end
+
+ def addMachineInlineSite(compilation, origin)
+ @machineInlineSites[compilation] = {} unless @machineInlineSites[compilation]
+ @machineInlineSites[compilation][origin] = true
+ end
+
+ def totalMachineInlineSites
+ sum = 0
+ @machineInlineSites.each_value {
+ | set |
+ sum += set.size
+ }
+ sum
+ end
+
+ def sourceMachineInlineSites
+ set = {}
+ @machineInlineSites.each_value {
+ | mySet |
+ set.merge!(mySet)
+ }
+ set.size
+ end
+
+ def totalMaxTopExecutionCount
+ max = 0
+ @bytecode.each_value {
+ | bytecode |
+ max = [max, bytecode.totalTopExecutionCount].max
+ }
+ max
+ end
+
+ def maxTopExecutionCount(engine)
+ max = 0
+ @bytecode.each_value {
+ | bytecode |
+ max = [max, bytecode.topExecutionCount(engine)].max
+ }
+ max
+ end
+
+ def totalMaxBottomExecutionCount
+ max = 0
+ @bytecode.each_value {
+ | bytecode |
+ max = [max, bytecode.totalBottomExecutionCount].max
+ }
+ max
+ end
+
+ def maxBottomExecutionCount(engine)
+ max = 0
+ @bytecode.each_value {
+ | bytecode |
+ max = [max, bytecode.bottomExecutionCount(engine)].max
+ }
+ max
+ end
+
+ def totalExitCount
+ sum = 0
+ each {
+ | bytecode |
+ sum += bytecode.totalExitCount
+ }
+ sum
+ end
+end
+
+class ProfiledBytecode
+ attr_reader :bytecodeIndex, :description
+
+ def initialize(json)
+ @bytecodeIndex = json["bytecodeIndex"].to_i
+ @description = json["description"].to_s
+ end
+end
+
+class ProfiledBytecodes
+ attr_reader :header, :bytecodes
+
+ def initialize(json)
+ @header = json["header"]
+ @bytecodes = $bytecodes[json["bytecodesID"].to_i]
+ @sequence = json["bytecode"].map {
+ | subJson |
+ ProfiledBytecode.new(subJson)
+ }
+ end
+
+ def each
+ @sequence.each {
+ | description |
+ yield description
+ }
+ end
+end
+
+def originStackFromJSON(json)
+ json.map {
+ | subJson |
+ $bytecodes[subJson["bytecodesID"].to_i].bytecode(subJson["bytecodeIndex"].to_i)
+ }
+end
+
+class CompiledBytecode
+ attr_accessor :origin, :description
+
+ def initialize(json)
+ @origin = originStackFromJSON(json["origin"])
+ @description = json["description"].to_s
+ end
+end
+
+class ExecutionCounter
+ attr_accessor :origin, :engine, :count
+
+ def initialize(origin, engine, count)
+ @origin = origin
+ @engine = engine
+ @count = count
+ end
+end
+
+class OSRExit
+ attr_reader :compilation, :origin, :codeAddresses, :exitKind, :isWatchpoint, :count
+
+ def initialize(compilation, origin, codeAddresses, exitKind, isWatchpoint, count)
+ @compilation = compilation
+ @origin = origin
+ @codeAddresses = codeAddresses
+ @exitKind = exitKind
+ @isWatchpoint = isWatchpoint
+ @count = count
+ end
+
+ def dumpForDisplay(prefix)
+ puts(prefix + "EXIT: due to #{@exitKind}, #{@count} times")
+ end
+end
+
+class Compilation
+ attr_accessor :bytecode, :engine, :descriptions, :counters, :compilationIndex
+ attr_accessor :osrExits, :profiledBytecodes, :numInlinedGetByIds, :numInlinedPutByIds
+ attr_accessor :numInlinedCalls
+
+ def initialize(json)
+ @bytecode = $bytecodes[json["bytecodesID"].to_i]
+ @bytecode.compilations << self
+ @compilationIndex = @bytecode.compilations.size
+ @engine = json["compilationKind"]
+ @descriptions = json["descriptions"].map {
+ | subJson |
+ CompiledBytecode.new(subJson)
+ }
+ @descriptions.each {
+ | description |
+ next if description.origin.empty?
+ description.origin[1..-1].each_with_index {
+ | inlinee, index |
+ description.origin[0].addMachineInlinee(self, inlinee.bytecodes)
+ inlinee.bytecodes.addMachineInlineSite(self, description.origin[0...index])
+ }
+ }
+ @counters = {}
+ json["counters"].each {
+ | subJson |
+ origin = originStackFromJSON(subJson["origin"])
+ counter = ExecutionCounter.new(origin, @engine, subJson["executionCount"].to_i)
+ @counters[origin] = counter
+ origin[-1].addTopCount(counter)
+ origin[0].addBottomCountForCompilation(counter, self)
+ }
+ @osrExits = {}
+ json["osrExits"].each {
+ | subJson |
+ osrExit = OSRExit.new(self, originStackFromJSON(subJson["origin"]),
+ json["osrExitSites"][subJson["id"]].map {
+ | value |
+ value.hex
+ }, subJson["exitKind"], subJson["isWatchpoint"],
+ subJson["count"])
+ osrExit.codeAddresses.each {
+ | codeAddress |
+ osrExits[codeAddress] = [] unless osrExits[codeAddress]
+ osrExits[codeAddress] << osrExit
+ }
+ osrExit.origin[-1].osrExits << osrExit
+ }
+ @profiledBytecodes = []
+ json["profiledBytecodes"].each {
+ | subJson |
+ @profiledBytecodes << ProfiledBytecodes.new(subJson)
+ }
+ @numInlinedGetByIds = json["numInlinedGetByIds"]
+ @numInlinedPutByIds = json["numInlinedPutByIds"]
+ @numInlinedCalls = json["numInlinedCalls"]
+ end
+
+ def counter(origin)
+ @counters[origin]
+ end
+
+ def to_s
+ "#{bytecode}-#{compilationIndex}-#{engine}"
+ end
+end
+
+class DescriptionLine
+ attr_reader :actualCountsString, :sourceCountsString, :disassembly, :shouldShow
+
+ def initialize(actualCountsString, sourceCountsString, disassembly, shouldShow)
+ @actualCountsString = actualCountsString
+ @sourceCountsString = sourceCountsString
+ @disassembly = disassembly
+ @shouldShow = shouldShow
+ end
+
+ def codeAddress
+ if @disassembly =~ /^\s*(0x[0-9a-fA-F]+):/
+ $1.hex
+ else
+ nil
+ end
+ end
+end
+
+if ARGV.length != 1
+ $stderr.puts "Usage: display-profiler-output <path to profiler output file>"
+ $stderr.puts
+ $stderr.puts "The typical usage pattern for the profiler currently looks something like:"
+ $stderr.puts
+ $stderr.puts "Path/To/jsc -p profile.json myprogram.js"
+ $stderr.puts "display-profiler-output profile.json"
+ exit 1
+end
+
+$json = JSON::parse(IO::read(ARGV[0]))
+$bytecodes = $json["bytecodes"].map {
+ | subJson |
+ Bytecodes.new(subJson)
+}
+$compilations = $json["compilations"].map {
+ | subJson |
+ Compilation.new(subJson)
+}
+$engines = ["Baseline", "DFG"]
+
+def lpad(str, chars)
+  if str.length > chars
+    str
+  else
+    "%#{chars}s" % str
+  end
+end
+
+def rpad(str, chars)
+ while str.length < chars
+ str += " "
+ end
+ str
+end
+
+def center(str, chars)
+ while str.length < chars
+ str += " "
+ if str.length < chars
+ str = " " + str
+ end
+ end
+ str
+end
+
+def mayBeHash(hash)
+ hash =~ /#/ or hash.size == 6
+end
+
+def sourceOnOneLine(source, limit)
+ source.gsub(/\s+/, ' ')[0...limit]
+end
+
+def screenWidth
+ if $stdin.tty?
+ HighLine::SystemExtensions.terminal_size[0]
+ else
+ 200
+ end
+end
+
+def summary(mode)
+ remaining = screenWidth
+
+ # Figure out how many columns we need for the code block names, and for counts
+ maxCount = 0
+ maxName = 0
+ $bytecodes.each {
+ | bytecodes |
+ maxCount = ([maxCount] + $engines.map {
+ | engine |
+ bytecodes.maxTopExecutionCount(engine)
+ } + $engines.map {
+ | engine |
+ bytecodes.maxBottomExecutionCount(engine)
+ }).max
+ maxName = [bytecodes.to_s.size, maxName].max
+ }
+ maxCountDigits = maxCount.to_s.size
+
+ hashCols = [[maxName, 30].min, "CodeBlock".size].max
+ remaining -= hashCols + 1
+
+ countCols = [maxCountDigits * $engines.size, "Source Counts".size].max
+ remaining -= countCols + 1
+
+ if mode == :full
+ instructionCountCols = 6
+ remaining -= instructionCountCols + 1
+
+ machineCountCols = [maxCountDigits * $engines.size, "Machine Counts".size].max
+ remaining -= machineCountCols + 1
+
+ compilationsCols = 7
+ remaining -= compilationsCols + 1
+
+ inlinesCols = 9
+ remaining -= inlinesCols + 1
+
+ exitCountCols = 7
+ remaining -= exitCountCols + 1
+
+ recentOptsCols = 12
+ remaining -= recentOptsCols + 1
+ end
+
+ if remaining > 0
+ sourceCols = remaining
+ else
+ sourceCols = nil
+ end
+
+ print(center("CodeBlock", hashCols))
+ if mode == :full
+ print(" " + center("#Instr", instructionCountCols))
+ end
+ print(" " + center("Source Counts", countCols))
+ if mode == :full
+ print(" " + center("Machine Counts", machineCountCols))
+ print(" " + center("#Compil", compilationsCols))
+ print(" " + center("Inlines", inlinesCols))
+ print(" " + center("#Exits", exitCountCols))
+ print(" " + center("Last Opts", recentOptsCols))
+ end
+ if sourceCols
+ print(" " + center("Source", sourceCols))
+ end
+ puts
+
+ print(center("", hashCols))
+ if mode == :full
+ print(" " + (" " * instructionCountCols))
+ end
+ print(" " + center("Base/DFG", countCols))
+ if mode == :full
+ print(" " + center("Base/DFG", machineCountCols))
+ print(" " + (" " * compilationsCols))
+ print(" " + center("Src/Total", inlinesCols))
+ print(" " + (" " * exitCountCols))
+ print(" " + center("Get/Put/Call", recentOptsCols))
+ end
+ puts
+ $bytecodes.sort {
+ | a, b |
+ b.totalMaxTopExecutionCount <=> a.totalMaxTopExecutionCount
+ }.each {
+ | bytecode |
+ print(center(bytecode.name(hashCols), hashCols))
+ if mode == :full
+ print(" " + center(bytecode.instructionCount.to_s, instructionCountCols))
+ end
+ print(" " +
+ center($engines.map {
+ | engine |
+ bytecode.maxTopExecutionCount(engine).to_s
+ }.join("/"), countCols))
+ if mode == :full
+ print(" " + center($engines.map {
+ | engine |
+ bytecode.maxBottomExecutionCount(engine).to_s
+ }.join("/"), machineCountCols))
+ print(" " + center(bytecode.compilations.size.to_s, compilationsCols))
+ print(" " + center(bytecode.sourceMachineInlineSites.to_s + "/" + bytecode.totalMachineInlineSites.to_s, inlinesCols))
+ print(" " + center(bytecode.totalExitCount.to_s, exitCountCols))
+ lastCompilation = bytecode.compilations[-1]
+ if lastCompilation
+ optData = [lastCompilation.numInlinedGetByIds,
+ lastCompilation.numInlinedPutByIds,
+ lastCompilation.numInlinedCalls]
+ else
+ optData = ["N/A"]
+ end
+ print(" " + center(optData.join('/'), recentOptsCols))
+ end
+ if sourceCols
+ print(" " + sourceOnOneLine(bytecode.source, sourceCols))
+ end
+ puts
+ }
+end
+
+def executeCommand(*commandArray)
+ command = commandArray[0]
+ args = commandArray[1..-1]
+ case command
+ when "help", "h", "?"
+ puts "summary (s) Print a summary of code block execution rates."
+ puts "full (f) Same as summary, but prints more information."
+ puts "source Show the source for a code block."
+ puts "bytecode (b) Show the bytecode for a code block, with counts."
+ puts "profiling (p) Show the (internal) profiling data for a code block."
+ puts "display (d) Display details for a code block."
+ puts "inlines Show all inlining stacks that the code block was on."
+ puts "help (h) Print this message."
+ puts "quit (q) Quit."
+ when "quit", "q", "exit"
+ exit 0
+ when "summary", "s"
+ summary(:summary)
+ when "full", "f"
+ summary(:full)
+ when "source"
+ if args.length != 1
+ puts "Usage: source <code block hash>"
+ return
+ end
+ $bytecodes.each {
+ | bytecode |
+ if bytecode.matches(args[0])
+ puts bytecode.source
+ end
+ }
+ when "bytecode", "b"
+ if args.length != 1
+ puts "Usage: source <code block hash>"
+ return
+ end
+
+ hash = args[0]
+
+ countCols = 10 * $engines.size
+ machineCols = 10 * $engines.size
+ pad = 1
+ while (countCols + 1 + machineCols + pad) % 8 != 0
+ pad += 1
+ end
+
+ $bytecodes.each {
+ | bytecodes |
+ next unless bytecodes.matches(hash)
+ puts(center("Source Counts", countCols) + " " + center("Machine Counts", machineCols) +
+ (" " * pad) + center("Bytecode for #{bytecodes}", screenWidth - pad - countCols - 1 - machineCols))
+ puts(center("Base/DFG", countCols) + " " + center("Base/DFG", countCols))
+ bytecodes.each {
+ | bytecode |
+ if bytecode.shouldHaveCounts?
+ countsString = $engines.map {
+ | myEngine |
+ bytecode.topExecutionCount(myEngine)
+ }.join("/")
+ machineString = $engines.map {
+ | myEngine |
+ bytecode.bottomExecutionCount(myEngine)
+ }.join("/")
+ else
+ countsString = ""
+ machineString = ""
+ end
+ puts(center(countsString, countCols) + " " + center(machineString, machineCols) + (" " * pad) + bytecode.description.chomp)
+ bytecode.osrExits.each {
+ | exit |
+ puts(center("!!!!!", countCols) + " " + center("!!!!!", machineCols) + (" " * (pad + 10)) +
+ "EXIT: in #{exit.compilation} due to #{exit.exitKind}, #{exit.count} times")
+ }
+ }
+ }
+ when "profiling", "p"
+ if args.length != 1
+ puts "Usage: profiling <code block hash>"
+ return
+ end
+
+ hash = args[0]
+
+ first = true
+ $compilations.each {
+ | compilation |
+
+ compilation.profiledBytecodes.each {
+ | profiledBytecodes |
+ if profiledBytecodes.bytecodes.matches(hash)
+ if first
+ first = false
+ else
+ puts
+ end
+
+ puts "Compilation #{compilation}:"
+ profiledBytecodes.header.each {
+ | header |
+ puts(" " * 6 + header)
+ }
+ profiledBytecodes.each {
+ | bytecode |
+ puts(" " * 8 + bytecode.description)
+ profiledBytecodes.bytecodes.bytecode(bytecode.bytecodeIndex).osrExits.each {
+ | exit |
+ if exit.compilation == compilation
+ puts(" !!!!! EXIT: due to #{exit.exitKind}, #{exit.count} times")
+ end
+ }
+ }
+ end
+ }
+ }
+ when "inlines"
+ if args.length != 1
+ puts "Usage: inlines <code block hash>"
+ return
+ end
+
+ hash = args[0]
+
+ $bytecodes.each {
+ | bytecodes |
+ next unless bytecodes.matches(hash)
+
+ # FIXME: print something useful to say more about which code block this is.
+
+ $compilations.each {
+ | compilation |
+ myOrigins = []
+ compilation.descriptions.each {
+ | description |
+ if description.origin.index {
+ | myBytecode |
+ bytecodes == myBytecode.bytecodes
+ }
+ myOrigins << description.origin
+ end
+ }
+ myOrigins.uniq!
+ myOrigins.sort! {
+ | a, b |
+ result = 0
+ [a.size, b.size].min.times {
+ | index |
+ result = a[index].bytecodeIndex <=> b[index].bytecodeIndex
+ break if result != 0
+ }
+ result
+ }
+
+ next if myOrigins.empty?
+
+ printArray = []
+ lastPrintStack = []
+
+ def originToPrintStack(origin)
+ (0...(origin.size - 1)).map {
+ | index |
+ "bc\##{origin[index].bytecodeIndex} --> #{origin[index + 1].bytecodes}"
+ }
+ end
+
+ def printStack(printArray, stack, lastStack)
+ stillCommon = true
+ stack.each_with_index {
+ | entry, index |
+ next if stillCommon and entry == lastStack[index]
+ printArray << (" " * (index + 1) + entry)
+ stillCommon = false
+ }
+ end
+
+ myOrigins.each {
+ | origin |
+ currentPrintStack = originToPrintStack(origin)
+ printStack(printArray, currentPrintStack, lastPrintStack)
+ lastPrintStack = currentPrintStack
+ }
+
+ next if printArray.empty?
+
+ puts "Compilation #{compilation}:"
+ printArray.each {
+ | entry |
+ puts entry
+ }
+ }
+ }
+ when "display", "d"
+ compilationIndex = nil
+
+ case args.length
+ when 1
+ if args[0] == "*"
+ hash = nil
+ else
+ hash = args[0]
+ end
+ engine = nil
+ when 2
+ if mayBeHash(args[0])
+ hash = args[0]
+ engine = args[1]
+ else
+ engine = args[0]
+ hash = args[1]
+ end
+ else
+ puts "Usage: summary <code block hash> <engine>"
+ return
+ end
+
+ if hash and hash =~ /-([0-9]+)-/
+ hash = $~.pre_match
+ engine = $~.post_match
+ compilationIndex = $1.to_i
+ end
+
+ if engine and not $engines.index(engine)
+ pattern = Regexp.new(Regexp.escape(engine), "i")
+ trueEngine = nil
+ $engines.each {
+ | myEngine |
+ if myEngine =~ pattern
+ trueEngine = myEngine
+ break
+ end
+ }
+ unless trueEngine
+ puts "#{engine} is not a valid engine, try #{$engines.join(' or ')}."
+ return
+ end
+ engine = trueEngine
+ end
+
+ actualCountCols = 13
+ sourceCountCols = 10 * $engines.size
+
+ first = true
+ $compilations.each {
+ | compilation |
+ next if hash and not compilation.bytecode.matches(hash)
+ next if engine and compilation.engine != engine
+ next if compilationIndex and compilation.compilationIndex != compilationIndex
+
+ if first
+ first = false
+ else
+ puts
+ end
+
+ puts("Compilation #{compilation}:")
+ puts(" Num inlined: GetByIds: #{compilation.numInlinedGetByIds} PutByIds: #{compilation.numInlinedPutByIds} Calls: #{compilation.numInlinedCalls}")
+ puts(center("Actual Counts", actualCountCols) + " " + center("Source Counts", sourceCountCols) + " " + center("Disassembly in #{compilation.engine}", screenWidth - 1 - sourceCountCols - 1 - actualCountCols))
+ puts((" " * actualCountCols) + " " + center("Base/DFG", sourceCountCols))
+
+ lines = []
+
+ compilation.descriptions.each {
+ | description |
+ # FIXME: We should have a better way of detecting things like CountExecution nodes
+ # and slow path entries in the baseline JIT.
+ if description.description =~ /CountExecution\(/ and compilation.engine == "DFG"
+ shouldShow = false
+ else
+ shouldShow = true
+ end
+ if description.origin.empty? or not description.origin[-1].shouldHaveCounts? or (compilation.engine == "Baseline" and description.description =~ /^\s*\(S\)/)
+ actualCountsString = ""
+ sourceCountsString = ""
+ else
+ actualCountsString = compilation.counter(description.origin).count.to_s
+ sourceCountsString = $engines.map {
+ | myEngine |
+ description.origin[-1].topExecutionCount(myEngine)
+ }.join("/")
+ end
+ description.description.split("\n").each {
+ | line |
+ lines << DescriptionLine.new(actualCountsString, sourceCountsString, line.chomp, shouldShow)
+ }
+ }
+
+ exitPrefix = center("!!!!!", actualCountCols) + " " + center("!!!!!", sourceCountCols) + (" " * 25)
+
+ lines.each_with_index {
+ | line, index |
+ codeAddress = line.codeAddress
+ if codeAddress
+ list = compilation.osrExits[codeAddress]
+ if list
+ list.each {
+ | exit |
+ if exit.isWatchpoint
+ exit.dumpForDisplay(exitPrefix)
+ end
+ }
+ end
+ end
+ if line.shouldShow
+ puts(center(line.actualCountsString, actualCountCols) + " " + center(line.sourceCountsString, sourceCountCols) + " " + line.disassembly)
+ end
+ if codeAddress
+ # Find the next disassembly address.
+ endIndex = index + 1
+ endAddress = nil
+ while endIndex < lines.size
+ myAddress = lines[endIndex].codeAddress
+ if myAddress
+ endAddress = myAddress
+ break
+ end
+ endIndex += 1
+ end
+
+ if endAddress
+ list = compilation.osrExits[endAddress]
+ if list
+ list.each {
+ | exit |
+ unless exit.isWatchpoint
+ exit.dumpForDisplay(exitPrefix)
+ end
+ }
+ end
+ end
+ end
+ }
+ }
+ else
+ puts "Invalid command: #{command}"
+ end
+end
+
+if $stdin.tty?
+ executeCommand("full")
+end
+
+while commandLine = Readline.readline("> ", true)
+ executeCommand(*commandLine.split)
+end
+
diff --git a/src/third_party/blink/Tools/Scripts/do-file-rename b/src/third_party/blink/Tools/Scripts/do-file-rename
new file mode 100755
index 0000000..b9ccdfe
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/do-file-rename
@@ -0,0 +1,116 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2006, 2008 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Script to do file renaming.
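+#
+# Fill in %renames below with "OldFileName.ext" => "NewFileName.ext" pairs
+# (full file names); the script renames each file in version control and then
+# rewrites references to the old names in all source files.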
+
+use strict;
+use File::Find;
+use FindBin;
+use lib $FindBin::Bin;
+use webkitdirs;
+use VCSUtils;
+
+setConfiguration();
+chdirWebKit();
+
+my %words;
+
+# find all files we want to process
+
+my @paths;
+find(\&wanted, "Source/JavaScriptCore");
+find(\&wanted, "Source/WebCore");
+find(\&wanted, "WebKit");
+find(\&wanted, "Source/WebKit2");
+
+sub wanted
+{
+ my $file = $_;
+
+ if ($file eq "icu") {
+ $File::Find::prune = 1;
+ return;
+ }
+
+ if ($file =~ /^\../) {
+ $File::Find::prune = 1;
+ return;
+ }
+
+ return if $file =~ /^ChangeLog/;
+ return if -d $file;
+
+ push @paths, $File::Find::name;
+}
+
+my %renames = (
+);
+
+my %renamesContemplatedForTheFuture = (
+);
+
+# rename files
+
+my %newFile;
+for my $file (sort @paths) {
+ my $f = $file;
+ $f = "$1$renames{$2}" if $f =~ /^(.*\/)(\w+\.\w+)$/ && $renames{$2};
+ $newFile{$file} = $f if $f ne $file;
+}
+
+for my $file (sort @paths) {
+ if ($newFile{$file}) {
+ my $newFile = $newFile{$file};
+ print "Renaming $file to $newFile\n";
+ scmMoveOrRenameFile($file, $newFile);
+ }
+}
+
+# change all file contents
+
+for my $file (sort @paths) {
+ $file = $newFile{$file} if $newFile{$file};
+ my $contents;
+ {
+ local $/;
+ open FILE, $file or die;
+ $contents = <FILE>;
+ close FILE;
+ }
+ my $newContents = $contents;
+
+ for my $from (keys %renames) {
+        $newContents =~ s/\b\Q$from\E(?!\w)/$renames{$from}/g;
+ }
+
+ if ($newContents ne $contents) {
+ open FILE, ">", $file or die;
+ print FILE $newContents;
+ close FILE;
+ }
+}
diff --git a/src/third_party/blink/Tools/Scripts/do-webcore-rename b/src/third_party/blink/Tools/Scripts/do-webcore-rename
new file mode 100755
index 0000000..bd04939
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/do-webcore-rename
@@ -0,0 +1,251 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Script to do a rename in JavaScriptCore, WebCore, and WebKit.
+
+use strict;
+
+use File::Basename;
+use File::Find;
+use FindBin;
+use Getopt::Long qw(:config pass_through);
+
+use lib $FindBin::Bin;
+use webkitdirs;
+use VCSUtils;
+
+setConfiguration();
+chdirWebKit();
+
+my $showHelp;
+my $verbose;
+
+my $programName = basename($0);
+my $usage = <<EOF;
+Usage: $programName [options]
+ -h|--help Show this help message
+ -v|--verbose More verbose output
+EOF
+
+my $getOptionsResult = GetOptions(
+ 'help|h' => \$showHelp,
+ 'verbose|v' => \$verbose,
+);
+
+if (!$getOptionsResult || $showHelp) {
+ print STDERR $usage;
+ exit 1;
+}
+
+my @directoriesToIgnoreList = (
+ "icu",
+);
+my %directoriesToIgnore = map { $_ => 1 } @directoriesToIgnoreList;
+
+# find all files we want to process
+
+my @paths;
+find(\&wanted, "Source/JavaScriptCore");
+find(\&wanted, "Source/WebCore");
+find(\&wanted, "Source/WebKit");
+find(\&wanted, "Source/WebKit2");
+find(\&wanted, "Tools/DumpRenderTree");
+
+sub wanted
+{
+ my $file = $_;
+
+ # Ignore excluded and hidden files/directories.
+ if ($directoriesToIgnore{$file} or $file =~ /^\../ or $file =~ /^ChangeLog/) {
+ print "Ignoring $File::Find::name\n" if $verbose;
+ $File::Find::prune = 1;
+ return;
+ }
+
+ return if -d $file;
+
+ push @paths, $File::Find::name;
+}
+
+# Setting isDOMTypeRename to 1 rather than 0 expands the regexps used
+# below to handle custom JavaScript bindings.
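+# For example, an "Oscillator" => "OscillatorNode" entry also renames
+# JSOscillatorCustom.cpp and rewrites identifiers such as JSOscillator,
+# V8Oscillator, and toOscillator.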
+my $isDOMTypeRename = 1;
+my %renames = (
+ # Renames go here in the form of:
+ "JavaScriptAudioNode" => "ScriptProcessorNode",
+ "RealtimeAnalyserNode" => "AnalyserNode",
+ "AudioGainNode" => "GainNode",
+ "AudioPannerNode" => "PannerNode",
+ "AudioChannelSplitter" => "ChannelSplitterNode",
+ "AudioChannelMerger" => "ChannelMergerNode",
+ "Oscillator" => "OscillatorNode",
+);
+
+my %renamesContemplatedForTheFuture = (
+ "HTMLPlugInImageElement" => "HTMLEmbeddedObjectElement",
+
+ "DOMObject" => "JSDOMObject",
+
+ "runtimeObjectGetter" => "pluginElementGetter",
+ "runtimeObjectPropertyGetter" => "pluginElementPropertyGetter",
+ "runtimeObjectCustomGetOwnPropertySlot" => "pluginElementCustomGetOwnPropertySlot",
+ "runtimeObjectCustomPut" => "pluginElementCustomPut",
+ "runtimeObjectImplementsCall" => "pluginElementImplementsCall",
+ "runtimeObjectCallAsFunction" => "pluginElementCallAsFunction",
+
+ "CLONE_CONTENTS" => "Clone",
+ "DELETE_CONTENTS" => "Delete",
+ "EXTRACT_CONTENTS" => "Extract",
+
+ "DateInstance" => "JSDate",
+ "ErrorInstance" => "JSError",
+
+ "KURL" => "URL",
+ "KURLCFNet" => "URLCF",
+ "KURLHash" => "URLHash",
+ "KURLMac" => "URLMac",
+ "KURL_h" => "URL_h",
+
+ "TreeShared" => "TreeRefCounted",
+
+ "StringImpl" => "SharedString",
+
+ "RenderView" => "RenderViewport",
+
+ "ObjcFallbackObjectImp" => "ObjCFallbackObject",
+ "RuntimeObjectImp" => "ForeignObject",
+
+ "runtime_array" => "BridgedArray",
+ "runtime_method" => "BridgedFunction",
+ "runtime_object" => "BridgedObject",
+ "objc_runtime" => "ObjCBridge",
+
+ "equalIgnoringCase" => "equalFoldingCase",
+
+ "FTPDirectoryTokenizer" => "FTPDirectoryDocumentBuilder",
+ "HTMLTokenizer" => "HTMLDocumentBuilder",
+ "ImageTokenizer" => "ImageDocumentBuilder",
+ "PluginTokenizer" => "PluginDocumentBuilder",
+ "TextTokenizer" => "TextDocumentBuilder",
+ "Tokenizer" => "DocumentBuilder",
+ "Tokenizer_h" => "DocumentBuilder_h",
+ "XMLTokenizer" => "XMLDocumentBuilder",
+ "isHTMLTokenizer" => "isHTMLDocumentBuilder",
+ "m_tokenizer" => "m_builder",
+ "createTokenizer" => "createBuilder",
+ "tokenizerProcessedData" => "documentBuilderProcessedData",
+
+ "WTF_UNICODE_H" => "Unicode_h",
+ "WTF_UNICODE_ICU_H" => "UnicodeICU_h",
+ "WTF_UNICODE_QT4_H" => "UnicodeQt4_h",
+ "UnicodeIcu" => "UnicodeICU",
+
+ "m_invertibleCTM" => "m_transformIsInvertible",
+
+ "NativeFunctionWrapper_h" => "JSHostFunction_h",
+ "NativeFunctionWrapper" => "JSHostFunction",
+ "nativeFunctionThunk" => "hostFunctionThunk",
+ "nativeFunction" => "hostFunction",
+ "NativeFunction" => "HostFunction",
+);
+
+# Sort the keys of the renames hash in order of decreasing length. This
+# handles the case where some of the renames are substrings of others;
+# i.e., "Foo" => "Bar" and "FooBuffer" => "BarBuffer".
+my @sortedRenameKeys = sort { length($b) - length($a) } keys %renames;
+
+# rename files
+
+sub renameFile
+{
+ my $file = shift;
+
+ if ($isDOMTypeRename) {
+ # Find the longest key in %renames which matches this more permissive regexp.
+ # (The old regexp would match ".../Foo.cpp" but not ".../JSFooCustom.cpp".)
+ # This handles renaming of custom JavaScript bindings even when some of the
+ # renames are substrings of others. The only reason we don't do this all the
+ # time is to avoid accidental file renamings for short, non-DOM renames.
+ for my $key (@sortedRenameKeys) {
+ my $newFile = "";
+ $newFile = "$1$renames{$2}$3" if $file =~ /^(.*\/\w*)($key)(\w*\.\w+)$/;
+ if ($newFile ne "") {
+ return $newFile;
+ }
+ }
+ } else {
+ $file = "$1$renames{$2}$3" if $file =~ /^(.*\/)(\w+)(\.\w+)$/ && $renames{$2};
+ }
+ return $file;
+}
+
+my %newFile;
+for my $file (sort @paths) {
+ my $f = renameFile($file);
+ if ($f ne $file) {
+ $newFile{$file} = $f;
+ }
+}
+
+for my $file (sort @paths) {
+ if ($newFile{$file}) {
+ my $newFile = $newFile{$file};
+ print "Renaming $file to $newFile\n";
+ scmMoveOrRenameFile($file, $newFile);
+ }
+}
+
+# change all file contents
+
+for my $file (sort @paths) {
+ $file = $newFile{$file} if $newFile{$file};
+ my $contents;
+ {
+ local $/;
+ open FILE, $file or die "Failed to open $file";
+ $contents = <FILE>;
+ close FILE;
+ }
+ my $newContents = $contents;
+
+ if ($isDOMTypeRename) {
+ for my $from (@sortedRenameKeys) {
+ # Handle JavaScript custom bindings.
+ $newContents =~ s/\b(JS|V8|to|)$from/$1$renames{$from}/g;
+ }
+ } else {
+ for my $from (@sortedRenameKeys) {
+ $newContents =~ s/\b$from(?!["\w])/$renames{$from}/g; # this " unconfuses Xcode syntax highlighting
+ }
+ }
+
+ if ($newContents ne $contents) {
+ open FILE, ">", $file or die "Failed to open $file";
+ print FILE $newContents;
+ close FILE;
+ }
+}
diff --git a/src/third_party/blink/Tools/Scripts/export-w3c-performance-wg-tests b/src/third_party/blink/Tools/Scripts/export-w3c-performance-wg-tests
new file mode 100755
index 0000000..3b98a42
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/export-w3c-performance-wg-tests
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script exports newly added tests to the W3C Web Performance WG's test
+# suite.
+#
+# You must have checked out the 'webperf' repository from https://dvcs.w3.org/hg/
+#
+# This script exports the 'resources' and 'submission' subdirectories of
+# LayoutTests/http/tests/w3c/webperf to a local 'webperf' checkout.
+#
+# The main step in exporting the tests is updating all of the URLs to account
+# for the differences in directory layout.
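+#
+# Example (checkout paths are illustrative):
+#   export-w3c-performance-wg-tests ~/checkouts/WebKit ~/checkouts/webperf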
+
+import os
+import shutil
+import sys
+
+if len(sys.argv) != 3:
+ print 'USAGE: %s path_to_webkit_checkout_root path_to_webperf_checkout_root' % sys.argv[0]
+ sys.exit(1)
+
+source_directory = os.path.join(sys.argv[1], 'LayoutTests', 'http', 'tests', 'w3c', 'webperf')
+destination_directory = os.path.join(sys.argv[2], 'tests')
+
+directories_to_copy = ['resources', 'submission']
+replacements = [
+ ('localhost:8000', 'www.w3c-test.org'), # This is the alternate host for cross-server requests.
+ ('127.0.0.1:8000', 'w3c-test.org'), # This is the primary test server.
+ ('w3c/webperf', 'webperf/tests'), # We prepend /w3c to all of our paths.
+ ('/w3c/resources/', '/resources/'),
+    ('\n', '\r\n'), # Convert Unix (LF) line endings to Windows (CRLF).
+]
+
+for directory_to_copy in directories_to_copy:
+ destination_subdirectory = os.path.join(destination_directory, directory_to_copy)
+ if not os.path.exists(destination_subdirectory):
+ os.makedirs(destination_subdirectory)
+ for root, dirs, files in os.walk(os.path.join(source_directory, directory_to_copy)):
+ root = os.path.relpath(root, source_directory)
+ for dirname in dirs:
+ destination_subdirectory = os.path.join(destination_directory, root, dirname)
+ if not os.path.exists(destination_subdirectory):
+ os.makedirs(destination_subdirectory)
+ for filename in files:
+ if filename.endswith('-expected.txt'):
+ continue
+ with open(os.path.join(source_directory, root, filename), 'r') as in_file:
+ with open(os.path.join(destination_directory, root, filename), 'w') as out_file:
+ for line in in_file:
+ for to_find, replace_with in replacements:
+ line = line.replace(to_find, replace_with)
+ out_file.write(line)
+ print 'Exported %s' % os.path.join(root, filename)
diff --git a/src/third_party/blink/Tools/Scripts/find-extra-includes b/src/third_party/blink/Tools/Scripts/find-extra-includes
new file mode 100755
index 0000000..fedddc5
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/find-extra-includes
@@ -0,0 +1,102 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2005 Apple Computer, Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# "find-extra-includes" script for WebKit Open Source Project
+
+use strict;
+use File::Find;
+
+find(\&wanted, @ARGV ? @ARGV : ".");
+
+my %paths;
+my %includes;
+
+sub wanted
+{
+ my $file = $_;
+
+ if ($file eq "icu") {
+ $File::Find::prune = 1;
+ return;
+ }
+
+ if ($file !~ /^\./ && $file =~ /\.(h|cpp|c|mm|m)$/) {
+ $paths{$file} = $File::Find::name;
+ open FILE, $file or die;
+ while (<FILE>) {
+ if (m-^\s*#\s*(include|import)\s+["<]((\S+/)*)(\S+)[">]-) {
+ my $include = ($2 eq "sys/" ? $2 : "") . $4;
+ $includes{$file}{$include}++;
+ }
+ }
+ close FILE;
+ }
+}
+
+my %totalIncludes;
+
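+# Recursively collects every header reachable from $file's direct includes,
+# memoizing the result in %totalIncludes.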
+sub fillOut
+{
+ my ($file) = @_;
+
+ return if defined $totalIncludes{$file};
+
+ for my $include (keys %{ $includes{$file} }) {
+ $totalIncludes{$file}{$include} = 1;
+ fillOut($include);
+ for my $i (keys %{ $totalIncludes{$include} }) {
+ $totalIncludes{$file}{$i} = 1;
+ }
+ }
+}
+
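+# Reports direct includes of $file that are redundant because another direct
+# include already pulls them in transitively; the file's own header (for
+# example, Foo.cpp including Foo.h) is not reported.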
+sub check
+{
+ my ($file) = @_;
+
+ for my $include (keys %{ $includes{$file} }) {
+ fillOut($include);
+ }
+ for my $i1 (sort keys %{ $includes{$file} }) {
+ for my $i2 (keys %{ $includes{$file} }) {
+ next if $i1 eq $i2;
+ if ($totalIncludes{$i2}{$i1}) {
+ my $b1 = $i1;
+ my $b2 = $file;
+ $b1 =~ s/\..+$//;
+ $b2 =~ s/\..+$//;
+ print "$paths{$file} does not need to include $i1, because $i2 does\n" if $b1 ne $b2;
+ last;
+ }
+ }
+ }
+}
+
+for my $file (sort keys %includes) {
+ check($file);
+}
diff --git a/src/third_party/blink/Tools/Scripts/find-included-framework-headers b/src/third_party/blink/Tools/Scripts/find-included-framework-headers
new file mode 100755
index 0000000..759a60b
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/find-included-framework-headers
@@ -0,0 +1,30 @@
+#!/bin/sh
+# Copyright (C) 2007, 2008, 2009, 2010 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# A script that lists which headers from the given frameworks are included
+# by files in the current directory (and its subdirectories).
+
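+# Example invocation (hypothetical framework names):
+#   find-included-framework-headers Foundation JavaScriptCore
+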
+for framework in "$@"; do
+ printf '\n%s\n==================\n' "$framework"
+ find . \( -name '*.cpp' -o -name '*.h' -o -name '*.m' -o -name '*.mm' \) -exec grep "<$framework/" {} ';' | sed -e 's|.*/\(.*\.h\).*|\1|' | sort -u
+done
diff --git a/src/third_party/blink/Tools/Scripts/format-webkitpy b/src/third_party/blink/Tools/Scripts/format-webkitpy
new file mode 100755
index 0000000..d96c0c4
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/format-webkitpy
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+from webkitpy.formatter.main import main
+
+
+sys.exit(main())
diff --git a/src/third_party/blink/Tools/Scripts/import-w3c-performance-wg-tests b/src/third_party/blink/Tools/Scripts/import-w3c-performance-wg-tests
new file mode 100755
index 0000000..7423dc8
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/import-w3c-performance-wg-tests
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script imports the W3C Web Performance WG's test suite into WebKit.
+#
+# You must have checked out the 'webperf' repository from https://dvcs.w3.org/hg/
+#
+# This script will populate the LayoutTests directory with the new tests. If the
+# tests already exist, the script will refuse to run. Please clear out the
+# w3c/webperf directory first.
+#
+# The main step in importing the tests is updating all of the URLs to match our
+# directory layout.
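+#
+# Example invocation (hypothetical checkout paths):
+#   import-w3c-performance-wg-tests ~/w3c/webperf ~/src/webkit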
+
+import os
+import sys
+
+if len(sys.argv) != 3:
+ print 'USAGE: %s path_to_webperf_checkout_root path_to_webkit_checkout_root' % sys.argv[0]
+ sys.exit(1)
+
+source_directory = os.path.join(sys.argv[1], 'tests')
+destination_directory = os.path.join(sys.argv[2], 'LayoutTests', 'http', 'tests', 'w3c', 'webperf')
+
+if os.path.exists(destination_directory):
+ print 'Refusing to overwrite existing directory: %s' % destination_directory
+ sys.exit(1)
+os.makedirs(destination_directory)
+
+directories_to_copy = ['approved', 'resources', 'submission']
+directories_to_ignore = ['html5'] # These are just duplicates of the sibling directory 'html'.
+replacements = [
+ ('www.w3c-test.org', 'localhost:8000'), # This is the alternate host for cross-server requests.
+ ('w3c-test.org', '127.0.0.1:8000'), # This is the primary test server.
+ ('webperf/tests', 'w3c/webperf'), # We prepend /w3c to all of our paths.
+ ('"/resources/', '"/w3c/resources/'),
+ ('+ "(" + reloadTime[time] + ")"', ''), # Remove dynamic values from the output. We'll still see PASS.
+ ('+ "(" + startingTime[time] + ")"', ''),
+ ('+ expectedStartTime', ''),
+ ('+ expectedDuration', ''),
+ ('\t', ' '), # Convert tabs to spaces.
+]
+
+for directory_to_copy in directories_to_copy:
+ os.makedirs(os.path.join(destination_directory, directory_to_copy))
+ os.chdir(source_directory)
+ for root, dirs, files in os.walk(directory_to_copy):
+ for dirname in directories_to_ignore:
+ if dirname in dirs:
+ dirs.remove(dirname)
+ for dirname in dirs:
+ os.makedirs(os.path.join(destination_directory, root, dirname))
+ for filename in files:
+ with open(os.path.join(source_directory, root, filename), 'r') as in_file:
+ with open(os.path.join(destination_directory, root, filename), 'w') as out_file:
+ for line in in_file:
+ if filename.endswith(('.htm', '.html', '.css', '.js', '.php')):
+ for to_find, replace_with in replacements:
+ line = line.replace(to_find, replace_with).rstrip() + '\n'
+ assert 'w3c-test.org' not in line, 'Imported test must not depend on live site. Bad line: "%s"' % line
+ out_file.write(line)
diff --git a/src/third_party/blink/Tools/Scripts/import-w3c-tests b/src/third_party/blink/Tools/Scripts/import-w3c-tests
new file mode 100755
index 0000000..bb72096
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/import-w3c-tests
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import sys
+
+from webkitpy.w3c import test_importer
+
+
+sys.exit(test_importer.main(sys.argv[1:], sys.stdout, sys.stderr))
diff --git a/src/third_party/blink/Tools/Scripts/lint-test-expectations b/src/third_party/blink/Tools/Scripts/lint-test-expectations
new file mode 100755
index 0000000..c56eb77
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/lint-test-expectations
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+
+from webkitpy.common import version_check
+from webkitpy.layout_tests import lint_test_expectations
+
+
+sys.exit(lint_test_expectations.main(sys.argv[1:], sys.stdout, sys.stderr))
+
diff --git a/src/third_party/blink/Tools/Scripts/lint-webkitpy b/src/third_party/blink/Tools/Scripts/lint-webkitpy
new file mode 100755
index 0000000..4e6af9f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/lint-webkitpy
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+
+from webkitpy.style.checkers.python import PythonChecker
+
+for path in sys.argv[1:]:
+ checker = PythonChecker(path, lambda *args: None)
+ sys.stdout.write(checker._run_pylint(path))
diff --git a/src/third_party/blink/Tools/Scripts/malloc-tree b/src/third_party/blink/Tools/Scripts/malloc-tree
new file mode 100755
index 0000000..fa8df42
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/malloc-tree
@@ -0,0 +1,210 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
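+# Formats malloc_history output as a nested call tree, largest allocation
+# totals first. Example invocation (hypothetical; see --help for flags):
+#   malloc_history <pid> -allBySize | malloc-tree -s
+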
+import sys
+from optparse import OptionParser
+
+oneK = 1024
+oneM = 1024 * 1024
+oneG = 1024 * 1024 * 1024
+
+hotspot = False
+scaleSize = True
+showBars = True
+
+def byteString(bytes):
+ if scaleSize:
+ format = ' %4d '
+ val = bytes
+
+ if bytes >= oneG:
+ format = '%8.1fG'
+ val = float(bytes) / oneG
+ elif bytes >= oneM:
+ format = '%8.1fM'
+ val = float(bytes) / oneM
+ elif bytes >= oneK:
+ format = '%8.1fK'
+ val = float(bytes) / oneK
+
+ return format % val
+ if hotspot:
+ return '%d' % bytes
+ return '%12d' % bytes
+
+class Node:
+ def __init__(self, name, level = 0, bytes = 0):
+ self.name = name
+ self.level = level
+ self.children = {}
+ self.totalBytes = bytes
+
+ def hasChildren(self):
+ return len(self.children) > 0
+
+ def getChild(self, name):
+ if not name in self.children:
+ newChild = Node(name, self.level + 1)
+ self.children[name] = newChild
+
+ return self.children[name]
+
+ def getBytes(self):
+ return self.totalBytes
+
+ def addBytes(self, bytes):
+ self.totalBytes = self.totalBytes + bytes
+
+ def processLine(self, bytes, line):
+ sep = line.find('|')
+ if sep < 0:
+ childName = line.strip()
+ line = ''
+ else:
+ childName = line[:sep].strip()
+ line = line[sep+1:]
+
+ child = self.getChild(childName)
+ child.addBytes(bytes)
+
+ if len(line) > 0:
+ child.processLine(bytes, line)
+
+ def printNode(self, prefix = ' '):
+ global hotspot
+ global scaleSize
+ global showBars
+
+ if self.hasChildren():
+ byteStr = byteString(self.totalBytes)
+
+ if hotspot:
+ print(' %s%s %s' % (self.level * ' ', byteStr, self.name))
+ else:
+ print('%s %s%s' % (byteStr, prefix[:-1], self.name))
+
+ sortedChildren = sorted(self.children.values(), key=sortKeyByBytes, reverse=True)
+
+ if showBars and len(self.children) > 1:
+ newPrefix = prefix + '|'
+ else:
+ newPrefix = prefix + ' '
+
+ childrenLeft = len(sortedChildren)
+ for child in sortedChildren:
+ if childrenLeft <= 1:
+ newPrefix = prefix + ' '
+ else:
+ childrenLeft = childrenLeft - 1
+ child.printNode(newPrefix)
+ else:
+ byteStr = byteString(self.totalBytes)
+
+ if hotspot:
+ print(' %s%s %s' % (self.level * ' ', byteStr, self.name))
+ else:
+ print('%s %s%s' % (byteStr, prefix[:-1], self.name))
+
+def sortKeyByBytes(node):
+ return node.getBytes()
+
+def main():
+ global hotspot
+ global scaleSize
+ global showBars
+
+ # parse command line options
+ parser = OptionParser(usage='malloc-tree [options] [malloc_history-file]',
+ description='Format malloc_history output as a nested tree',
+ epilog='stdin used if malloc_history-file is missing')
+
+ parser.add_option('-n', '--nobars', action='store_false', dest='showBars',
+ default=True, help='don\'t show bars lining up siblings in tree')
+ parser.add_option('-b', '--size-in-bytes', action='store_false', dest='scaleSize',
+ default=None, help='show sizes in bytes')
+ parser.add_option('-s', '--size-scale', action='store_true', dest='scaleSize',
+ default=None, help='show sizes with appropriate scale suffix [K,M,G]')
+ parser.add_option('-t', '--hotspot', action='store_true', dest='hotspot',
+ default=False, help='output in HotSpotFinder format, implies -b')
+
+ (options, args) = parser.parse_args()
+
+ hotspot = options.hotspot
+ if options.scaleSize is None:
+ if hotspot:
+ scaleSize = False
+ else:
+ scaleSize = True
+ else:
+ scaleSize = options.scaleSize
+ showBars = options.showBars
+
+ if len(args) < 1:
+ inputFile = sys.stdin
+ else:
+ inputFile = open(args[0], "r")
+
+ line = inputFile.readline()
+
+ rootNodes = {}
+
+ while line:
+ firstSep = line.find('|')
+ if firstSep > 0:
+ firstPart = line[:firstSep].strip()
+ lineRemain = line[firstSep+1:]
+ bytesSep = firstPart.find('bytes:')
+ if bytesSep >= 0:
+ name = firstPart[bytesSep+7:]
+ stats = firstPart.split(' ')
+ bytes = int(stats[3].replace(',', ''))
+
+ if not name in rootNodes:
+ node = Node(name, 0, bytes)
+ rootNodes[name] = node
+ else:
+ node = rootNodes[name]
+ node.addBytes(bytes)
+
+ node.processLine(bytes, lineRemain)
+
+ line = inputFile.readline()
+
+ sortedRootNodes = sorted(rootNodes.values(), key=sortKeyByBytes, reverse=True)
+
+ print 'Call graph:'
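+ # The bare except below likely guards against IOError from a closed pipe
+ # (for example, when output is piped into head).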
+ try:
+ for node in sortedRootNodes:
+ node.printNode()
+ print
+ except:
+ pass
+
+if __name__ == "__main__":
+ main()
diff --git a/src/third_party/blink/Tools/Scripts/move-layout-tests b/src/third_party/blink/Tools/Scripts/move-layout-tests
new file mode 100755
index 0000000..5cee8c4
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/move-layout-tests
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+
+from webkitpy.layout_tests import layout_tests_mover
+
+sys.exit(layout_tests_mover.main(sys.argv[1:]))
diff --git a/src/third_party/blink/Tools/Scripts/parse-malloc-history b/src/third_party/blink/Tools/Scripts/parse-malloc-history
new file mode 100755
index 0000000..53ec374
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/parse-malloc-history
@@ -0,0 +1,177 @@
+#!/usr/bin/perl
+
+# Copyright (C) 2007 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Parses the callstacks in a file with malloc_history formatted content, sorting
+# based on total number of bytes allocated, and filtering based on command-line
+# parameters.
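+#
+# Example invocation (hypothetical file name):
+#   parse-malloc-history --merge-depth 5 --byte-minimum 1024 malloc_history.txt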
+
+use Getopt::Long;
+use File::Basename;
+
+use strict;
+use warnings;
+
+sub commify($);
+
+sub main()
+{
+ my $usage =
+ "Usage: " . basename($0) . " [options] malloc_history.txt\n" .
+ " --grep-regexp Include only call stacks that match this regular expression.\n" .
+ " --byte-minimum Include only call stacks with allocation sizes >= this value.\n" .
+ " --merge-regexp Merge all call stacks that match this regular expression.\n" .
+ " --merge-depth Merge all call stacks that match at this stack depth and above.\n";
+
+ my $grepRegexp = "";
+ my $byteMinimum = "";
+ my @mergeRegexps = ();
+ my $mergeDepth = "";
+ my $getOptionsResult = GetOptions(
+ "grep-regexp:s" => \$grepRegexp,
+ "byte-minimum:i" => \$byteMinimum,
+ "merge-regexp:s" => \@mergeRegexps,
+ "merge-depth:i" => \$mergeDepth
+ );
+ die $usage if (!$getOptionsResult || !scalar(@ARGV));
+
+ my @lines = ();
+ foreach my $fileName (@ARGV) {
+ open FILE, "<$fileName" or die "bad file: $fileName";
+ push(@lines, <FILE>);
+ close FILE;
+ }
+
+ my %callstacks = ();
+ my $byteCountTotal = 0;
+
+ for (my $i = 0; $i < @lines; $i++) {
+ my $line = $lines[$i];
+ my ($callCount, $byteCount);
+
+ # First try malloc_history format
+ # 6 calls for 664 bytes thread_ffffffff |0x0 | start
+ ($callCount, $byteCount) = ($line =~ /(\d+) calls for (\d+) bytes/);
+
+ # Then try leaks format
+ # Leak: 0x0ac3ca40 size=48
+ # 0x00020001 0x00000001 0x00000000 0x00000000 ................
+ # Call stack: [thread ffffffff]: | 0x0 | start
+ if (!$callCount || !$byteCount) {
+ $callCount = 1;
+ ($byteCount) = ($line =~ /Leak: [x[:xdigit:]]* size=(\d+)/);
+
+ if ($byteCount) {
+ while (!($line =~ "Call stack: ")) {
+ $i++;
+ $line = $lines[$i];
+ }
+ }
+ }
+
+ # Then try LeakFinder format
+ # --------------- Key: 213813, 84 bytes ---------
+ # c:\cygwin\home\buildbot\webkit\opensource\webcore\rendering\renderarena.cpp(78): WebCore::RenderArena::allocate
+ # c:\cygwin\home\buildbot\webkit\opensource\webcore\rendering\renderobject.cpp(82): WebCore::RenderObject::operator new
+ if (!$callCount || !$byteCount) {
+ $callCount = 1;
+ ($byteCount) = ($line =~ /Key: (?:\d+), (\d+) bytes/);
+ if ($byteCount) {
+ $line = $lines[++$i];
+ my @tempStack;
+ while ($lines[$i+1] !~ /^(?:-|\d)/) {
+ if ($line =~ /\): (.*)$/) {
+ my $call = $1;
+ $call =~ s/\r$//;
+ unshift(@tempStack, $call);
+ }
+ $line = $lines[++$i];
+ }
+ $line = join(" | ", @tempStack);
+ }
+ }
+
+ # Then give up
+ next if (!$callCount || !$byteCount);
+
+ $byteCountTotal += $byteCount;
+
+ next if ($grepRegexp && !($line =~ $grepRegexp));
+
+ my $callstackBegin = 0;
+ if ($mergeDepth) {
+ # count stack frames backwards from end of callstack
+ $callstackBegin = length($line);
+ for (my $pipeCount = 0; $pipeCount < $mergeDepth; $pipeCount++) {
+ my $rindexResult = rindex($line, "|", $callstackBegin - 1);
+ last if $rindexResult == -1;
+ $callstackBegin = $rindexResult;
+ }
+ } else {
+ # start at beginning of callstack
+ $callstackBegin = index($line, "|");
+ }
+
+ my $callstack = substr($line, $callstackBegin + 2); # + 2 skips "| "
+ for my $regexp (@mergeRegexps) {
+ if ($callstack =~ $regexp) {
+ $callstack = $regexp . "\n";
+ last;
+ }
+ }
+
+ if (!$callstacks{$callstack}) {
+ $callstacks{$callstack} = {"callCount" => 0, "byteCount" => 0};
+ }
+
+ $callstacks{$callstack}{"callCount"} += $callCount;
+ $callstacks{$callstack}{"byteCount"} += $byteCount;
+ }
+
+ my $byteCountTotalReported = 0;
+ for my $callstack (sort { $callstacks{$b}{"byteCount"} <=> $callstacks{$a}{"byteCount"} } keys %callstacks) {
+ my $callCount = $callstacks{$callstack}{"callCount"};
+ my $byteCount = $callstacks{$callstack}{"byteCount"};
+ last if ($byteMinimum && $byteCount < $byteMinimum);
+
+ $byteCountTotalReported += $byteCount;
+ print commify($callCount) . " calls for " . commify($byteCount) . " bytes: $callstack\n";
+ }
+
+ print "total: " . commify($byteCountTotalReported) . " bytes (" . commify($byteCountTotal - $byteCountTotalReported) . " bytes excluded).\n";
+ return 0;
+}
+
+exit(main());
+
+# Copied from perldoc -- please excuse the style
+sub commify($)
+{
+ local $_ = shift;
+ 1 while s/^([-+]?\d+)(\d{3})/$1,$2/;
+ return $_;
+}
diff --git a/src/third_party/blink/Tools/Scripts/print-json-test-results b/src/third_party/blink/Tools/Scripts/print-json-test-results
new file mode 100755
index 0000000..e6d07606
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/print-json-test-results
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+import json
+import optparse
+import os
+import sys
+
+from webkitpy.common.host import Host
+from webkitpy.layout_tests.port import platform_options, configuration_options
+
+
+def main(argv):
+ parser = optparse.OptionParser(usage='%prog [path-to-results.json]')
+ parser.add_option('--failures', action='store_true',
+ help='show failing tests')
+ parser.add_option('--flakes', action='store_true',
+ help='show flaky tests')
+ parser.add_option('--expected', action='store_true',
+ help='include expected results along with unexpected')
+ parser.add_option('--passes', action='store_true',
+ help='show passing tests')
+ parser.add_option('--ignored-failures-path', action='store',
+ help='ignore failures seen in a previous run')
+ parser.add_options(platform_options())
+ parser.add_options(configuration_options())
+ options, args = parser.parse_args(argv)
+
+ host = Host()
+ if args:
+ if args[0] == '-':
+ txt = sys.stdin.read()
+ elif os.path.exists(args[0]):
+ with open(args[0], 'r') as fp:
+ txt = fp.read()
+ else:
+ print >> sys.stderr, "file not found: %s" % args[0]
+ sys.exit(1)
+ else:
+ txt = host.filesystem.read_text_file(host.filesystem.join(host.port_factory.get(options=options).results_directory(), 'full_results.json'))
+
+ if txt.startswith('ADD_RESULTS(') and txt.endswith(');'):
+ txt = txt[12:-2] # ignore optional JSONP wrapper
+ results = json.loads(txt)
+
+ passes, failures, flakes = decode_results(results, options.expected)
+
+ tests_to_print = []
+ if options.passes:
+ tests_to_print += passes.keys()
+ if options.failures:
+ tests_to_print += failures.keys()
+ if options.flakes:
+ tests_to_print += flakes.keys()
+ print "\n".join(sorted(tests_to_print))
+
+ if options.ignored_failures_path:
+ with open(options.ignored_failures_path, 'r') as fp:
+ txt = fp.read()
+ if txt.startswith('ADD_RESULTS(') and txt.endswith(');'):
+ txt = txt[12:-2] # ignore optional JSONP wrapper
+ results = json.loads(txt)
+ _, ignored_failures, _ = decode_results(results, options.expected)
+ new_failures = set(failures.keys()) - set(ignored_failures.keys())
+ if new_failures:
+ print "New failures:"
+ print "\n".join(sorted(new_failures))
+ print
+ if ignored_failures:
+ print "Ignored failures:"
+ print "\n".join(sorted(ignored_failures.keys()))
+ if new_failures:
+ return 1
+ return 0
+
+
+def decode_results(results, include_expected=False):
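+ # A retried test whose second result matches an expectation is flaky;
+ # otherwise the first actual result is reported as the failure type.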
+ tests = convert_trie_to_flat_paths(results['tests'])
+ failures = {}
+ flakes = {}
+ passes = {}
+ for (test, result) in tests.iteritems():
+ if include_expected or result.get('is_unexpected'):
+ actual_results = result['actual'].split()
+ expected_results = result['expected'].split()
+ if len(actual_results) > 1:
+ if actual_results[1] in expected_results:
+ flakes[test] = actual_results[0]
+ else:
+ # We report the first failure type back, even if the second
+ # was more severe.
+ failures[test] = actual_results[0]
+ elif actual_results[0] == 'PASS':
+ passes[test] = result
+ else:
+ failures[test] = actual_results[0]
+
+ return (passes, failures, flakes)
+
+
+def convert_trie_to_flat_paths(trie, prefix=None):
+ # Cloned from webkitpy.layout_tests.layout_package.json_results_generator
+ # so that this code can stand alone.
+ result = {}
+ for name, data in trie.iteritems():
+ if prefix:
+ name = prefix + "/" + name
+
+ if len(data) and "actual" not in data and "expected" not in data:
+ result.update(convert_trie_to_flat_paths(data, name))
+ else:
+ result[name] = data
+
+ return result
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/src/third_party/blink/Tools/Scripts/print-layout-test-times b/src/third_party/blink/Tools/Scripts/print-layout-test-times
new file mode 100755
index 0000000..2001995
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/print-layout-test-times
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+
+from webkitpy.common import host
+from webkitpy.layout_tests import print_layout_test_times
+
+print_layout_test_times.main(host.Host(), sys.argv[1:])
diff --git a/src/third_party/blink/Tools/Scripts/print-layout-test-types b/src/third_party/blink/Tools/Scripts/print-layout-test-types
new file mode 100755
index 0000000..3cc36c6
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/print-layout-test-types
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+
+from webkitpy.common import host
+from webkitpy.layout_tests import print_layout_test_types
+
+print_layout_test_types.main(host.Host(), sys.argv[1:])
diff --git a/src/third_party/blink/Tools/Scripts/print-stale-test-expectations-entries b/src/third_party/blink/Tools/Scripts/print-stale-test-expectations-entries
new file mode 100755
index 0000000..4836318
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/print-stale-test-expectations-entries
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Prints lists of bug numbers / tests whose bugs haven't been modified recently."""
+
+import datetime
+import json
+import optparse
+import re
+import sys
+import urllib2
+
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.webkit_finder import WebKitFinder
+
+google_code_url = 'https://www.googleapis.com/projecthosting/v2/projects/chromium/issues/%s?key=AIzaSyDgCqT1Dt5AZWLHo4QJjyMHaCjhnFacGF0'
+crbug_prefix = 'crbug.com/'
+
+class StaleTestPrinter(object):
+ def __init__(self, options):
+ self._days = options.days
+
+ def is_stale(self, bug_number):
+ url = google_code_url % bug_number
+ response = urllib2.urlopen(url)
+ parsed = json.loads(response.read())
+ last_updated = parsed['updated']
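+ # The tracker returns ISO 8601 timestamps; strip fractional seconds and
+ # parse the rest as UTC (day-level precision is all is_stale needs).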
+ parsed_time = datetime.datetime.strptime(last_updated.split(".")[0]+"UTC", "%Y-%m-%dT%H:%M:%S%Z")
+ time_delta = datetime.datetime.now() - parsed_time
+ return time_delta.days > self._days
+
+ def print_stale_tests(self):
+ finder = WebKitFinder(FileSystem())
+ path_to_expectations = finder.path_from_webkit_base('LayoutTests', 'TestExpectations')
+ expectations = open(path_to_expectations)
+
+ for line in expectations:
+ comment_index = line.find("#")
+ if comment_index == -1:
+ comment_index = len(line)
+
+ remaining_string = re.sub(r"\s+", " ", line[:comment_index].strip())
+ if len(remaining_string) == 0:
+ continue
+
+ is_bug_stale = True
+ parts = line.split(' ')
+ for part in parts:
+ if part.startswith(crbug_prefix):
+ bug_number = part.split('/')[1]
+ try:
+ if not self.is_stale(bug_number):
+ is_bug_stale = False
+ break
+ except urllib2.HTTPError as error:
+ if error.code == 404:
+ print '%s%s does not exist.' % (crbug_prefix, bug_number)
+ elif error.code == 403:
+ print '%s%s is not accessible. Not able to tell if it\'s stale.' % (crbug_prefix, bug_number)
+ is_bug_stale = False
+ else:
+ raise error
+
+ if is_bug_stale:
+ print line.strip()
+
+def main(argv):
+ option_parser = optparse.OptionParser()
+ option_parser.add_option('--days', type='int', default=90, help='Number of days without modification before a bug is considered stale.')
+ options, args = option_parser.parse_args(argv)
+
+ printer = StaleTestPrinter(options)
+ printer.print_stale_tests()
+ return 0
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/src/third_party/blink/Tools/Scripts/print-test-ordering b/src/third_party/blink/Tools/Scripts/print-test-ordering
new file mode 100755
index 0000000..0d3997d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/print-test-ordering
@@ -0,0 +1,85 @@
+#!/usr/bin/python
+
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import optparse
+import sys
+
+from webkitpy.common.host import Host
+
+def main(argv):
+ parser = optparse.OptionParser(usage='%prog [stats.json]')
+ parser.description = "Prints the tests each worker ran, in the order they ran, as recorded in the stats.json file."
+ options, args = parser.parse_args(argv)
+
+ if args and args[0]:
+ stats_path = args[0]
+ else:
+ host = Host()
+ stats_path = host.filesystem.join(host.port_factory.get().results_directory(), 'stats.json')
+
+ with open(stats_path, 'r') as fp:
+ stats_trie = json.load(fp)
+
+ stats = convert_trie_to_flat_paths(stats_trie)
+ stats_by_worker = {}
+ for test_name, data in stats.items():
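+ # results[0] is the worker that ran the test and results[1] its position
+ # in that worker's run (per the stats.json layout used here).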
+ worker = "worker/" + str(data["results"][0])
+ if worker not in stats_by_worker:
+ stats_by_worker[worker] = []
+ test_number = data["results"][1]
+ stats_by_worker[worker].append({
+ "name": test_name,
+ "number": test_number
+ })
+
+ for worker in sorted(stats_by_worker.keys()):
+ print worker + ':'
+ for test in sorted(stats_by_worker[worker], key=lambda test: test["number"]):
+ print test["name"]
+ print
+
+def convert_trie_to_flat_paths(trie, prefix=None):
+ # Cloned from webkitpy.layout_tests.layout_package.json_results_generator
+ # so that this code can stand alone.
+ result = {}
+ for name, data in trie.iteritems():
+ if prefix:
+ name = prefix + "/" + name
+ if "results" in data:
+ result[name] = data
+ else:
+ result.update(convert_trie_to_flat_paths(data, name))
+
+ return result
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/src/third_party/blink/Tools/Scripts/read-checksum-from-png b/src/third_party/blink/Tools/Scripts/read-checksum-from-png
new file mode 100755
index 0000000..fb03f28
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/read-checksum-from-png
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import with_statement
+import sys
+
+from webkitpy.common import read_checksum_from_png
+
+
+if '__main__' == __name__:
+ for filename in sys.argv[1:]:
+ with open(filename, 'rb') as filehandle:
+ print "%s: %s" % (read_checksum_from_png.read_checksum(filehandle), filename)
diff --git a/src/third_party/blink/Tools/Scripts/report-include-statistics b/src/third_party/blink/Tools/Scripts/report-include-statistics
new file mode 100755
index 0000000..a58fffe
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/report-include-statistics
@@ -0,0 +1,114 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2005, 2006 Apple Computer, Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# "report-include-statistics" script for WebKit Open Source Project
+
+use strict;
+use File::Find;
+
+find(\&wanted, @ARGV ? @ARGV : ".");
+
+my %paths;
+my %sources;
+my %includes;
+
+sub wanted
+{
+ my $file = $_;
+
+ if ($file eq "icu") {
+ $File::Find::prune = 1;
+ return;
+ }
+
+ if ($file !~ /^\./ && $file =~ /\.(h|cpp|c|mm|m)$/) {
+ $paths{$file} = $File::Find::name;
+ $sources{$file} = $File::Find::name if $file !~ /\.h/;
+ open FILE, $file or die;
+ while (<FILE>) {
+ if (m-^\s*#\s*(include|import)\s+["<]((\S+/)*)(\S+)[">]-) {
+ my $include = ($2 eq "sys/" ? $2 : "") . $4;
+ $includes{$file}{$include}++;
+ }
+ }
+ close FILE;
+ }
+}
+
+my %totalIncludes;
+
+sub fillOut
+{
+ my ($file) = @_;
+
+ return if defined $totalIncludes{$file};
+
+ for my $include (keys %{ $includes{$file} }) {
+ $totalIncludes{$file}{$include} = 1;
+ fillOut($include);
+ for my $i (keys %{ $totalIncludes{$include} }) {
+ $totalIncludes{$file}{$i} = 1;
+ }
+ }
+}
+
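+# Count how many source files pull in each header, directly or transitively.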
+my %inclusionCounts;
+for my $file (keys %includes) {
+ $inclusionCounts{$file} = 0;
+ fillOut($file);
+}
+
+for my $file (keys %sources) {
+ for my $include (keys %{ $totalIncludes{$file} }) {
+ $inclusionCounts{$include}++;
+ }
+}
+
+for my $file (sort mostincludedcmp keys %includes) {
+ next if !$paths{$file};
+ my $count = $inclusionCounts{$file};
+ my $numIncludes = keys %{ $includes{$file} };
+ my $numTotalIncludes = keys %{ $totalIncludes{$file} };
+ print "$file is included $count times, includes $numIncludes files directly, $numTotalIncludes files total.\n"
+}
+
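+# The second PythonChecker argument is the style-error handler; a no-op
+# lambda is passed because only pylint's raw output is wanted here.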
+# Sort most-included files first.
+sub mostincludedcmp($$)
+{
+ my ($filea, $fileb) = @_;
+
+ my $counta = $inclusionCounts{$filea} || 0;
+ my $countb = $inclusionCounts{$fileb} || 0;
+ return $countb <=> $counta if $counta != $countb;
+
+ my $ta = keys %{ $totalIncludes{$filea} };
+ my $tb = keys %{ $totalIncludes{$fileb} };
+ return $ta <=> $tb if $ta != $tb;
+
+ return $filea cmp $fileb;
+}
diff --git a/src/third_party/blink/Tools/Scripts/run-bindings-tests b/src/third_party/blink/Tools/Scripts/run-bindings-tests
new file mode 100755
index 0000000..1cba1c0
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/run-bindings-tests
@@ -0,0 +1,50 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+import sys
+
+from webkitpy.bindings.main import run_bindings_tests
+
+def main(argv):
+ """Runs Blink bindings IDL compiler on test IDL files and compares the
+ results with reference files.
+
+ Please execute the script whenever changes are made to the compiler
+ (this is automatically done as a presubmit script),
+ and submit changes to the test results in the same patch.
+ This makes it easier to track and review changes in generated code.
+
+ Options:
+ --reset-results: Overwrites reference files with the generated results.
+ --verbose: Show output on success and logging messages (not just failure)
+ """
+ reset_results = '--reset-results' in argv
+ verbose = '--verbose' in argv
+
+ return run_bindings_tests(reset_results, verbose)
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/src/third_party/blink/Tools/Scripts/run-blink-httpd b/src/third_party/blink/Tools/Scripts/run-blink-httpd
new file mode 100755
index 0000000..a3a01a4
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/run-blink-httpd
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import webkitpy.common.version_check
+
+from webkitpy.layout_tests.servers import cli_wrapper
+from webkitpy.layout_tests.servers import apache_http
+
+cli_wrapper.main(apache_http.ApacheHTTP, additional_dirs={}, number_of_servers=4)
diff --git a/src/third_party/blink/Tools/Scripts/run-blink-websocketserver b/src/third_party/blink/Tools/Scripts/run-blink-websocketserver
new file mode 100755
index 0000000..4053e8b
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/run-blink-websocketserver
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import webkitpy.common.version_check
+
+from webkitpy.layout_tests.servers import cli_wrapper
+from webkitpy.layout_tests.servers import pywebsocket
+
+cli_wrapper.main(pywebsocket.PyWebSocket)
diff --git a/src/third_party/blink/Tools/Scripts/run-inspector-perf-tests b/src/third_party/blink/Tools/Scripts/run-inspector-perf-tests
new file mode 100755
index 0000000..d282185
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/run-inspector-perf-tests
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Run Inspector's perf tests in perf mode."""
+
+import logging
+import sys
+
+from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
+
+_log = logging.getLogger(__name__)
+
+if '__main__' == __name__:
+ logging.basicConfig(level=logging.INFO, format='%(message)s')
+ sys.exit(PerfTestsRunner(args=['inspector']).run())
diff --git a/src/third_party/blink/Tools/Scripts/run-perf-tests b/src/third_party/blink/Tools/Scripts/run-perf-tests
new file mode 100755
index 0000000..1bf8ec8
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/run-perf-tests
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Run performance tests."""
+
+import logging
+import sys
+
+from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
+
+if '__main__' == __name__:
+ logging.basicConfig(level=logging.INFO, format="%(message)s")
+
+ sys.exit(PerfTestsRunner().run())
diff --git a/src/third_party/blink/Tools/Scripts/run-webkit-tests b/src/third_party/blink/Tools/Scripts/run-webkit-tests
new file mode 100755
index 0000000..4d6826e
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/run-webkit-tests
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Wrapper around webkitpy/layout_tests/run_webkit_tests.py"""
+from webkitpy.common import multiprocessing_bootstrap
+
+multiprocessing_bootstrap.run('webkitpy', 'layout_tests', 'run_webkit_tests.py')
diff --git a/src/third_party/blink/Tools/Scripts/run-webkit-tests.bat b/src/third_party/blink/Tools/Scripts/run-webkit-tests.bat
new file mode 100755
index 0000000..68426f2
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/run-webkit-tests.bat
@@ -0,0 +1 @@
+@python %~dp0\run-webkit-tests %*
diff --git a/src/third_party/blink/Tools/Scripts/sampstat b/src/third_party/blink/Tools/Scripts/sampstat
new file mode 100755
index 0000000..b8a2ca9
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/sampstat
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2007, 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import math
+import sys
+import re
+import fileinput
+from optparse import OptionParser
+
+usage = "usage: %prog [options] [FILES]\n Compute the mean and 95% confidence interval of a sample set.\n Standard input or files must contain two or more decimal numbers, one per line."
+parser = OptionParser(usage=usage)
+parser.add_option("-u", "--unit", dest="unit", default="",
+ help="assume values are in units of UNIT", metavar="UNIT")
+parser.add_option("-v", "--verbose",
+ action="store_true", dest="verbose", default=False,
+ help="print all values (with units)")
+(options, files) = parser.parse_args()
+
+def sum(items):
+ return reduce(lambda x,y: x+y, items)
+
+def arithmeticMean(items):
+ return sum(items) / len(items)
+
+def standardDeviation(mean, items):
+ deltaSquares = [(item - mean) ** 2 for item in items]
+ return math.sqrt(sum(deltaSquares) / (len(items) - 1))
+
+def standardError(stdDev, items):
+ return stdDev / math.sqrt(len(items))
+
+# t-distribution for 2-sided 95% confidence intervals
+tDistribution = [float('NaN'), float('NaN'), 12.71, 4.30, 3.18, 2.78, 2.57, 2.45, 2.36, 2.31, 2.26, 2.23, 2.20, 2.18, 2.16, 2.14, 2.13, 2.12, 2.11, 2.10, 2.09, 2.09, 2.08, 2.07, 2.07, 2.06, 2.06, 2.06, 2.05, 2.05, 2.05, 2.04, 2.04, 2.04, 2.03, 2.03, 2.03, 2.03, 2.03, 2.02, 2.02, 2.02, 2.02, 2.02, 2.02, 2.02, 2.01, 2.01, 2.01, 2.01, 2.01, 2.01, 2.01, 2.01, 2.01, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.96]
+tMax = len(tDistribution)
+tLimit = 1.96
+
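+# tDistribution is indexed by sample count n and holds the two-sided 95%
+# critical value for n - 1 degrees of freedom; entries 0 and 1 are NaN because
+# an interval needs at least two samples. For large n the values converge to
+# the normal-distribution limit of 1.96 (tLimit).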
+def tDist(n):
+ # tDistribution has tMax entries (indices 0..tMax - 1), so clamp at n >= tMax.
+ if n >= tMax:
+ return tLimit
+ return tDistribution[n]
+
+def twoSidedConfidenceInterval(items):
+ mean = arithmeticMean(items)
+ stdDev = standardDeviation(mean, items)
+ stdErr = standardError(stdDev, items)
+ return tDist(len(items)) * stdErr
+
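+# Worked example (illustrative): for samples [1.0, 2.0, 3.0] the mean is 2.0,
+# the sample standard deviation is 1.0, and the standard error is
+# 1.0 / sqrt(3) ~= 0.577; tDist(3) is 4.30, so the reported interval is
+# 2.00 +/- 2.48 (124.1%).
+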
+results = []
+
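+# Pull the first decimal number from each input line; lines without one are
+# silently ignored.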
+decimalNumberPattern = re.compile(r"\d+\.?\d*")
+for line in fileinput.input(files):
+ match = re.search(decimalNumberPattern, line)
+ if match:
+ results.append(float(match.group(0)))
+
+if len(results) == 0:
+ parser.print_help()
+ quit()
+
+
+mean = arithmeticMean(results)
+confidenceInterval = twoSidedConfidenceInterval(results)
+confidencePercent = 100 * confidenceInterval / mean
+
+if options.verbose:
+ length = 7
+ for item in results:
+ line = " %.2f %s" % (item, options.unit)
+ print line
+ length = len(line) if len(line) > length else length
+
+ print "-" * length
+
+prefix = "Mean: " if options.verbose else ""
+print "%s%.2f %s +/- %.2f %s (%.1f%%)" % (prefix, mean, options.unit, confidenceInterval, options.unit, confidencePercent)
+
diff --git a/src/third_party/blink/Tools/Scripts/show-pretty-diff b/src/third_party/blink/Tools/Scripts/show-pretty-diff
new file mode 100755
index 0000000..b20dac8
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/show-pretty-diff
@@ -0,0 +1,79 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+use strict;
+use FindBin;
+use File::Temp qw(tempfile);
+use lib $FindBin::Bin;
+use webkitdirs;
+
+my $inputPath = "";
+if ($ARGV[0]) {
+ $inputPath = $ARGV[0];
+} else {
+ # Create a temporary file for STDIN.
+ # FIXME: We can probably avoid putting this on the disk by directly piping
+ # to prettify.rb via IPC::Open2.
+ my $inputTempFileHandle;
+ ($inputTempFileHandle, $inputPath) = tempfile(
+ "inputtemp-XXXXXXXX",
+ DIR => File::Spec->tmpdir(),
+ SUFFIX => ".diff",
+ UNLINK => 0,
+ );
+
+ while (<STDIN>) {
+ print $inputTempFileHandle $_;
+ }
+
+ close($inputTempFileHandle);
+}
+
+# Create a temporary file for prettified patch.
+my ($prettydiffFileHandle, $prettydiffPath) = tempfile(
+ "prettydiff-XXXXXXXX",
+ DIR => File::Spec->tmpdir(),
+ SUFFIX => ".html",
+ UNLINK => 0,
+);
+close($prettydiffFileHandle);
+
+my $prettyPatchDir = sourceDir() . "/Tools/Scripts/webkitruby/PrettyPatch/";
+my $prettyPatchTool = sourceDir() . "/Tools/Scripts/webkitruby/PrettyPatch/prettify.rb";
+
+my $pathToPrettify = "ruby -I $prettyPatchDir $prettyPatchTool";
+system "$pathToPrettify " . quotemeta($inputPath) . " > $prettydiffPath";
+
+if (isAppleMacWebKit()) {
+ system "open", $prettydiffPath;
+} elsif (isCygwin()) {
+ system "cygstart",$prettydiffPath;
+} elsif (isWindows()) {
+ system "start", $prettydiffPath;
+} elsif (isLinux() && `which xdg-open`) {
+ system "xdg-open", $prettydiffPath;
+} else {
+ print "Created prettified diff at " . $prettydiffPath . ".";
+}
diff --git a/src/third_party/blink/Tools/Scripts/split-file-by-class b/src/third_party/blink/Tools/Scripts/split-file-by-class
new file mode 100755
index 0000000..4dd8348
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/split-file-by-class
@@ -0,0 +1,159 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2006 Apple Computer, Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Used for splitting a single file into multiple class files
+# Usage: split-file-by-class <header file> [header file ...]
+
+use strict;
+use File::Copy;
+use FindBin;
+use lib $FindBin::Bin;
+use SpacingHeuristics;
+
+
+for my $filename (@ARGV) {
+
+ $filename =~ m/^(\w+)\.h$/ or die "Command line args must be .h files.\n";
+ my $basename = $1;
+
+ open(OLDFILE, "<", $filename) or die "File does not exist: $filename\n";
+ print "Splitting class $filename.{h,cpp}:\n";
+
+ my $currentClassName = "";
+ my $classIndent = "";
+ my $fileContent = "";
+ my %classDefs = ();
+ while (my $line = <OLDFILE>) {
+ if ($currentClassName) {
+ $classDefs{$currentClassName} .= $line;
+ if ($line =~ /^$classIndent};\s*$/) {
+ $currentClassName = "";
+ }
+ } else {
+ if ($line =~ /^(\s*)class\s+(\w+)\s+[^;]*$/) {
+ $classIndent = $1;
+ $currentClassName = $2;
+ $classDefs{$currentClassName} .= $line;
+ $fileContent .= "###CLASS###$currentClassName\n";
+ } else {
+ $fileContent .= $line;
+ }
+ }
+ }
+ close(OLDFILE);
+
+ if (scalar(keys(%classDefs)) == 1) { # degenerate case
+ my ($classname) = keys(%classDefs);
+ if ($classname eq $basename) {
+ print "Skipping $filename, already correctly named.\n";
+ } else {
+ print "$filename only includes one class, renaming to $classname.h\n";
+ system("svn rm --force $classname.h") if (-r "$classname.h");
+ system "svn mv $basename.h $classname.h";
+ }
+ } else {
+ while (my ($classname, $classDef) = each(%classDefs)) {
+ if (($classname eq $basename)) {
+ print "Skipping $filename, already correctly named.\n";
+ } else {
+ print "Using SVN to copy $basename.{h,cpp} to $classname.{h,cpp}\n";
+
+ system("svn rm --force $classname.h") if (-r "$classname.h");
+ system "svn cp $basename.h $classname.h";
+
+ system("svn rm --force $classname.cpp") if (-r "$classname.cpp");
+ system "svn cp $basename.cpp $classname.cpp";
+ }
+
+ print "Fixing $classname.h as much as possible.\n";
+ open(NEWHEADER, ">", "$classname.h") or die "Failed to open $classname.h for writing: $!\n";
+ my @lines = split("\n", $fileContent);
+ foreach my $line (@lines) {
+ if ($line =~ /^###CLASS###(\w+)/) {
+ if ($1 eq $classname) {
+ print NEWHEADER $classDef . "\n";
+ }
+ } else {
+ print NEWHEADER $line . "\n";
+ }
+ }
+ close(NEWHEADER);
+
+ print "Fixing $classname.cpp as much as possible.\n";
+ copy("$classname.cpp", "$classname.cpp.original");
+ open(OLDCPP, "<", "$classname.cpp.original") or die "Failed to copy file for reading: $filename\n";
+ open(NEWCPP, ">", "$classname.cpp") or die "Failed to open $classname.cpp for writing: $!\n";
+ my $insideMemberFunction = 0;
+ my $shouldPrintMemberFunction = 0;
+ resetSpacingHeuristics();
+ while (my $line = <OLDCPP>) {
+ if ($insideMemberFunction) {
+ if ($shouldPrintMemberFunction) {
+ print NEWCPP $line;
+ #setPreviousAllowedLine($line);
+ } else {
+ ignoringLine($line);
+ }
+ if ($line =~ /^}\s*$/) {
+ $insideMemberFunction = 0;
+ }
+ } elsif ($line =~ /$filename/) {
+ print NEWCPP "#include \"config.h\"\n";
+ print NEWCPP "#include \"$classname.h\"\n";
+ } elsif ($line =~ /#include/ || $line =~ /#import/) {
+ next; # Skip includes; they're generally wrong or unnecessary anyway.
+ } else {
+ $line =~ s/DOM:://;
+ $line =~ s/khtml:://;
+ $line =~ s/namespace DOM/namespace WebCore/;
+ $line =~ s/namespace khtml/namespace WebCore/;
+
+ if ($line =~ /^(.*?\s+)?(\*|&)?(\w+)::(~)?\w+\s*\(/) {
+ $insideMemberFunction = 1;
+ $shouldPrintMemberFunction = ($classname eq $3);
+ if ($shouldPrintMemberFunction) {
+ printPendingEmptyLines(*NEWCPP, $line);
+ print NEWCPP $line;
+ }
+ } else {
+ next if isOnlyWhiteSpace($line);
+ next if ($line =~ m/------------/);
+ printPendingEmptyLines(*NEWCPP, $line);
+ applySpacingHeuristicsAndPrint(*NEWCPP, $line);
+ }
+ }
+ }
+ close(NEWCPP);
+ close(OLDCPP);
+ unlink("$classname.cpp.original");
+ }
+ }
+
+ print "Opening new files...\n";
+ system("open " . join(".* ", keys(%classDefs)) . ".*");
+}
\ No newline at end of file
diff --git a/src/third_party/blink/Tools/Scripts/svn-apply b/src/third_party/blink/Tools/Scripts/svn-apply
new file mode 100755
index 0000000..0d3e7c4
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/svn-apply
@@ -0,0 +1,475 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2005, 2006, 2007 Apple Inc. All rights reserved.
+# Copyright (C) 2009 Cameron McCormack <cam@mcc.id.au>
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# "patch" script for WebKit Open Source Project, used to apply patches.
+
+# Differences from invoking "patch -p0":
+#
+# Handles added files (does a svn add with logic to handle local changes).
+# Handles added directories (does a svn add).
+# Handles removed files (does a svn rm with logic to handle local changes).
+# Handles removed directories--those with no more files or directories left in them
+# (does a svn rm).
+# Has mode where it will roll back to svn version numbers in the patch file so svn
+# can do a 3-way merge.
+# Paths from Index: lines are used rather than the paths on the patch lines, which
+# makes patches generated by "cvs diff" work (increasingly unimportant since we
+# use Subversion now).
+# ChangeLog patches use --fuzz=3 to prevent rejects.
+# Handles binary files (requires patches made by svn-create-patch).
+# Handles copied and moved files (requires patches made by svn-create-patch).
+# Handles git-diff patches (without binary changes) created at the top-level directory
+#
+# Missing features:
+#
+# Handle property changes.
+# Handle copied and moved directories (would require patches made by svn-create-patch).
+# When doing a removal, check that old file matches what's being removed.
+# Notice a patch that's being applied at the "wrong level" and make it work anyway.
+# Do a dry run on the whole patch and don't do anything if part of the patch is
+# going to fail (probably too strict unless we exclude ChangeLog).
+# Handle git-diff patches with binary delta
+
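+# Typical invocations (illustrative; the patch file names are hypothetical):
+#
+#   svn-apply fix.patch
+#   svn-apply --reviewer "Jane Doe" --force fix1.patch fix2.patch
+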
+use strict;
+use warnings;
+
+use Digest::MD5;
+use File::Basename;
+use File::Spec;
+use Getopt::Long;
+use MIME::Base64;
+use POSIX qw(strftime);
+
+use FindBin;
+use lib $FindBin::Bin;
+use VCSUtils;
+
+sub addDirectoriesIfNeeded($);
+sub applyPatch($$;$);
+sub checksum($);
+sub handleBinaryChange($$);
+sub handleGitBinaryChange($$);
+sub isDirectoryEmptyForRemoval($);
+sub patch($);
+sub removeDirectoriesIfNeeded();
+
+# These should be replaced by an scm class/module:
+sub scmKnowsOfFile($);
+sub scmCopy($$);
+sub scmAdd($);
+sub scmRemove($);
+
+my $merge = 0;
+my $showHelp = 0;
+my $reviewer;
+my $force = 0;
+
+my $optionParseSuccess = GetOptions(
+ "merge!" => \$merge,
+ "help!" => \$showHelp,
+ "reviewer=s" => \$reviewer,
+ "force!" => \$force
+);
+
+if (!$optionParseSuccess || $showHelp) {
+ print STDERR basename($0) . " [-h|--help] [--force] [-m|--merge] [-r|--reviewer name] patch1 [patch2 ...]\n";
+ exit 1;
+}
+
+my %removeDirectoryIgnoreList = (
+ '.' => 1,
+ '..' => 1,
+ '.git' => 1,
+ '.svn' => 1,
+ '_svn' => 1,
+);
+
+my $epochTime = time(); # This is used to set the date in ChangeLog files.
+my $globalExitStatus = 0;
+
+my $repositoryRootPath = determineVCSRoot();
+
+my %checkedDirectories;
+
+# Need to use a typeglob to pass the file handle as a parameter,
+# otherwise get a bareword error.
+my @diffHashRefs = parsePatch(*ARGV);
+
+print "Parsed " . @diffHashRefs . " diffs from patch file(s).\n";
+
+my $preparedPatchHash = prepareParsedPatch($force, @diffHashRefs);
+
+my @copyDiffHashRefs = @{$preparedPatchHash->{copyDiffHashRefs}};
+my @nonCopyDiffHashRefs = @{$preparedPatchHash->{nonCopyDiffHashRefs}};
+my %sourceRevisions = %{$preparedPatchHash->{sourceRevisionHash}};
+
+if ($merge) {
+ die "--merge is currently only supported for SVN" unless isSVN();
+ # How do we handle Git patches applied to an SVN checkout here?
+ for my $file (sort keys %sourceRevisions) {
+ my $version = $sourceRevisions{$file};
+ print "Getting version $version of $file\n";
+ my $escapedFile = escapeSubversionPath($file);
+ system("svn", "update", "-r", $version, $escapedFile) == 0 or die "Failed to run svn update -r $version $escapedFile.";
+ }
+}
+
+# Handle copied and moved files first since moved files may have their
+# source deleted before the move.
+for my $copyDiffHashRef (@copyDiffHashRefs) {
+ my $indexPath = $copyDiffHashRef->{indexPath};
+ my $copiedFromPath = $copyDiffHashRef->{copiedFromPath};
+
+ addDirectoriesIfNeeded(dirname($indexPath));
+ scmCopy($copiedFromPath, $indexPath);
+}
+
+for my $diffHashRef (@nonCopyDiffHashRefs) {
+ patch($diffHashRef);
+}
+
+removeDirectoriesIfNeeded();
+
+exit $globalExitStatus;
+
+sub addDirectoriesIfNeeded($)
+{
+ # Git removes a directory once the last file in it is removed. We need to
+ # explicitly check for the existence of each directory along the path
+ # (and create it if it doesn't exist) to support patches that move all files in
+ # directory A to A/B. That is, we cannot depend on %checkedDirectories.
+ my ($path) = @_;
+ my @dirs = File::Spec->splitdir($path);
+ my $dir = ".";
+ while (scalar @dirs) {
+ $dir = File::Spec->catdir($dir, shift @dirs);
+ next if !isGit() && exists $checkedDirectories{$dir};
+ if (! -e $dir) {
+ mkdir $dir or die "Failed to create required directory '$dir' for path '$path'\n";
+ scmAdd($dir);
+ $checkedDirectories{$dir} = 1;
+ }
+ elsif (-d $dir) {
+ # SVN prints "svn: warning: 'directory' is already under version control"
+ # if you try to add a directory which is already in the repository.
+ # Git will ignore the add, but re-adding large directories can be slow,
+ # so we first check whether the directory is already under version control.
+ if (!scmKnowsOfFile($dir)) {
+ scmAdd($dir);
+ }
+ $checkedDirectories{$dir} = 1;
+ }
+ else {
+ die "'$dir' exists, but is not a directory";
+ }
+ }
+}
+
+# Args:
+# $patch: a patch string.
+# $pathRelativeToRoot: the path of the file to be patched, relative to the
+# repository root. This should normally be the path
+# found in the patch's "Index:" line.
+# $options: a reference to an array of options to pass to the patch command.
+sub applyPatch($$;$)
+{
+ my ($patch, $pathRelativeToRoot, $options) = @_;
+
+ my $optionalArgs = {options => $options, ensureForce => $force};
+
+ my $exitStatus = runPatchCommand($patch, $repositoryRootPath, $pathRelativeToRoot, $optionalArgs);
+
+ if ($exitStatus) {
+ $globalExitStatus = $exitStatus;
+ }
+}
+
+sub checksum($)
+{
+ my $file = shift;
+ open(FILE, $file) or die "Can't open '$file': $!";
+ binmode(FILE);
+ my $checksum = Digest::MD5->new->addfile(*FILE)->hexdigest();
+ close(FILE);
+ return $checksum;
+}
+
+sub handleBinaryChange($$)
+{
+ my ($fullPath, $contents) = @_;
+ # [A-Za-z0-9+/] is the class of allowed base64 characters.
+ # One or more lines, at most 76 characters in length.
+ # The last line is allowed to have up to two '=' characters at the end (to signify padding).
+ if ($contents =~ m#((\n[A-Za-z0-9+/]{76})*\n[A-Za-z0-9+/]{2,74}?[A-Za-z0-9+/=]{2}\n)#) {
+ # Addition or Modification
+ open FILE, ">", $fullPath or die "Failed to open $fullPath.";
+ print FILE decode_base64($1);
+ close FILE;
+ if (!scmKnowsOfFile($fullPath)) {
+ # Addition
+ scmAdd($fullPath);
+ }
+ } else {
+ # Deletion
+ scmRemove($fullPath);
+ }
+}
+
+sub handleGitBinaryChange($$)
+{
+ my ($fullPath, $diffHashRef) = @_;
+
+ my $contents = $diffHashRef->{svnConvertedText};
+
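+ # A git binary patch carries both a forward chunk (old -> new) and a reverse
+ # chunk (new -> old); the reverse chunk is used below to verify that the
+ # on-disk contents match what the patch expects.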
+ my ($binaryChunkType, $binaryChunk, $reverseBinaryChunkType, $reverseBinaryChunk) = decodeGitBinaryPatch($contents, $fullPath);
+
+ my $isFileAddition = $diffHashRef->{isNew};
+ my $isFileDeletion = $diffHashRef->{isDeletion};
+
+ my $originalContents = "";
+ if (open FILE, $fullPath) {
+ die "$fullPath already exists" if $isFileAddition;
+
+ $originalContents = join("", <FILE>);
+ close FILE;
+ }
+
+ if ($reverseBinaryChunkType eq "literal") {
+ die "Original content of $fullPath mismatches" if $originalContents ne $reverseBinaryChunk;
+ }
+
+ if ($isFileDeletion) {
+ scmRemove($fullPath);
+ } else {
+ # Addition or Modification
+ my $out = "";
+ if ($binaryChunkType eq "delta") {
+ $out = applyGitBinaryPatchDelta($binaryChunk, $originalContents);
+ } else {
+ $out = $binaryChunk;
+ }
+ if ($reverseBinaryChunkType eq "delta") {
+ die "Original content of $fullPath mismatches" if $originalContents ne applyGitBinaryPatchDelta($reverseBinaryChunk, $out);
+ }
+ open FILE, ">", $fullPath or die "Failed to open $fullPath.";
+ print FILE $out;
+ close FILE;
+ if ($isFileAddition) {
+ scmAdd($fullPath);
+ }
+ }
+}
+
+sub isDirectoryEmptyForRemoval($)
+{
+ my ($dir) = @_;
+ return 1 unless -d $dir;
+ my $directoryIsEmpty = 1;
+ opendir DIR, $dir or die "Could not open '$dir' to list files: $?";
+ for (my $item = readdir DIR; $item && $directoryIsEmpty; $item = readdir DIR) {
+ next if exists $removeDirectoryIgnoreList{$item};
+ if (-d File::Spec->catdir($dir, $item)) {
+ $directoryIsEmpty = 0;
+ } else {
+ next if (scmWillDeleteFile(File::Spec->catdir($dir, $item)));
+ $directoryIsEmpty = 0;
+ }
+ }
+ closedir DIR;
+ return $directoryIsEmpty;
+}
+
+# Args:
+# $diffHashRef: a diff hash reference of the type returned by parsePatch().
+sub patch($)
+{
+ my ($diffHashRef) = @_;
+
+ # Make sure $patch is initialized to some value. A deletion can have no
+ # svnConvertedText property in the case of a deletion resulting from a
+ # Git rename.
+ my $patch = $diffHashRef->{svnConvertedText} || "";
+
+ my $fullPath = $diffHashRef->{indexPath};
+ my $isBinary = $diffHashRef->{isBinary};
+ my $isGit = $diffHashRef->{isGit};
+ my $hasTextChunks = $patch && $diffHashRef->{numTextChunks};
+
+ my $deletion = 0;
+ my $addition = 0;
+
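+ # Fall back to the hunk headers when the parser did not flag the change:
+ # "@@ -0,0" means the old file had no lines (an addition), and "+0,0 @@"
+ # means the new file has none (a deletion).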
+ $addition = 1 if ($diffHashRef->{isNew} || $patch =~ /\n@@ -0,0 .* @@/);
+ $deletion = 1 if ($diffHashRef->{isDeletion} || $patch =~ /\n@@ .* \+0,0 @@/);
+
+ if (!$addition && !$deletion && !$isBinary && $hasTextChunks) {
+ # Standard patch, patch tool can handle this.
+ if (basename($fullPath) eq "ChangeLog") {
+ my $changeLogDotOrigExisted = -f "${fullPath}.orig";
+ my $changeLogHash = fixChangeLogPatch($patch);
+ my $newPatch = setChangeLogDateAndReviewer($changeLogHash->{patch}, $reviewer, $epochTime);
+ applyPatch($newPatch, $fullPath, ["--fuzz=3"]);
+ unlink("${fullPath}.orig") if (! $changeLogDotOrigExisted);
+ } else {
+ applyPatch($patch, $fullPath);
+ }
+ } else {
+ # Either a deletion, an addition or a binary change.
+
+ addDirectoriesIfNeeded(dirname($fullPath));
+
+ if ($isBinary) {
+ if ($isGit) {
+ handleGitBinaryChange($fullPath, $diffHashRef);
+ } else {
+ handleBinaryChange($fullPath, $patch) if $patch;
+ }
+ } elsif ($deletion) {
+ applyPatch($patch, $fullPath, ["--force"]) if $patch;
+ scmRemove($fullPath);
+ } elsif ($addition) {
+ # Addition
+ rename($fullPath, "$fullPath.orig") if -e $fullPath;
+ applyPatch($patch, $fullPath) if $patch;
+ unlink("$fullPath.orig") if -e "$fullPath.orig" && checksum($fullPath) eq checksum("$fullPath.orig");
+ scmAdd($fullPath);
+ my $escapedFullPath = escapeSubversionPath("$fullPath.orig");
+ # What is this for?
+ system("svn", "stat", "$escapedFullPath") if isSVN() && -e "$fullPath.orig";
+ }
+ }
+
+ scmToggleExecutableBit($fullPath, $diffHashRef->{executableBitDelta}) if defined($diffHashRef->{executableBitDelta});
+}
+
+sub removeDirectoriesIfNeeded()
+{
+ foreach my $dir (reverse sort keys %checkedDirectories) {
+ if (isDirectoryEmptyForRemoval($dir)) {
+ scmRemove($dir);
+ }
+ }
+}
+
+# This could be made into a more general "status" call, except svn and git
+# have different ideas about "moving" files which might get confusing.
+sub scmWillDeleteFile($)
+{
+ my ($path) = @_;
+ if (isSVN()) {
+ my $svnOutput = svnStatus($path);
+ return 1 if $svnOutput && substr($svnOutput, 0, 1) eq "D";
+ } elsif (isGit()) {
+ my $command = runCommand("git", "diff-index", "--name-status", "HEAD", "--", $path);
+ return 1 if $command->{stdout} && substr($command->{stdout}, 0, 1) eq "D";
+ }
+ return 0;
+}
+
+# Return whether the file at the given path is known to Git.
+#
+# This method outputs a message like the following to STDERR when
+# returning false:
+#
+# "error: pathspec 'test.png' did not match any file(s) known to git.
+# Did you forget to 'git add'?"
+sub gitKnowsOfFile($)
+{
+ my $path = shift;
+
+ `git ls-files --error-unmatch -- $path`;
+ my $exitStatus = exitStatus($?);
+ return $exitStatus == 0;
+}
+
+sub scmKnowsOfFile($)
+{
+ my ($path) = @_;
+ if (isSVN()) {
+ my $svnOutput = svnStatus($path);
+ # This will match more than intended. ? might not be the first field in the status
+ if ($svnOutput && $svnOutput =~ m#\?\s+$path\n#) {
+ return 0;
+ }
+ # This does not handle errors well.
+ return 1;
+ } elsif (isGit()) {
+ my @result = callSilently(\&gitKnowsOfFile, $path);
+ return $result[0];
+ }
+}
+
+sub scmCopy($$)
+{
+ my ($source, $destination) = @_;
+ if (isSVN()) {
+ my $escapedSource = escapeSubversionPath($source);
+ my $escapedDestination = escapeSubversionPath($destination);
+ system("svn", "copy", $escapedSource, $escapedDestination) == 0 or die "Failed to svn copy $escapedSource $escapedDestination.";
+ } elsif (isGit()) {
+ system("cp", $source, $destination) == 0 or die "Failed to copy $source $destination.";
+ system("git", "add", $destination) == 0 or die "Failed to git add $destination.";
+ }
+}
+
+sub scmAdd($)
+{
+ my ($path) = @_;
+ if (isSVN()) {
+ my $escapedPath = escapeSubversionPath($path);
+ system("svn", "add", $escapedPath) == 0 or die "Failed to svn add $escapedPath.";
+ } elsif (isGit()) {
+ system("git", "add", $path) == 0 or die "Failed to git add $path.";
+ }
+}
+
+sub scmRemove($)
+{
+ my ($path) = @_;
+ if (isSVN()) {
+ # SVN is very verbose when removing directories. Squelch all output except the last line.
+ my $svnOutput;
+ my $escapedPath = escapeSubversionPath($path);
+ open SVN, "svn rm --force '$escapedPath' |" or die "svn rm --force '$escapedPath' failed!";
+ # Only print the last line; Subversion outputs a status line for everything below $path.
+ while (<SVN>) {
+ $svnOutput = $_;
+ }
+ close SVN;
+ print $svnOutput if $svnOutput;
+ } elsif (isGit()) {
+ # Git removes a directory if it becomes empty when the last file it contains is
+ # removed by `git rm`. In svn-apply this can happen when a directory is being
+ # removed in a patch, and all of the files inside of the directory are removed
+ # before attempting to remove the directory itself. In this case, Git will have
+ # already deleted the directory and `git rm` would exit with an error claiming
+ # there was no file. The --ignore-unmatch switch gracefully handles this case.
+ system("git", "rm", "--force", "--ignore-unmatch", $path) == 0 or die "Failed to git rm --force --ignore-unmatch $path.";
+ }
+}
diff --git a/src/third_party/blink/Tools/Scripts/svn-create-patch b/src/third_party/blink/Tools/Scripts/svn-create-patch
new file mode 100755
index 0000000..2a6e2f1
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/svn-create-patch
@@ -0,0 +1,437 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2005, 2006 Apple Computer, Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Extended "svn diff" script for WebKit Open Source Project, used to make patches.
+
+# Differences from standard "svn diff":
+#
+# Uses the real diff, not svn's built-in diff.
+# Always passes "-p" to diff so it will try to include function names.
+# Handles binary files (encoded as a base64 chunk of text).
+# Sorts the diffs alphabetically by text files, then binary files.
+# Handles copied and moved files.
+#
+# Missing features:
+#
+# Handle copied and moved directories.
+
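+# Typical invocations (illustrative; the directory names are hypothetical):
+#
+#   svn-create-patch > WebKit.patch
+#   svn-create-patch --ignore-changelogs WebCore > WebKit.patch
+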
+use strict;
+use warnings;
+
+use Config;
+use File::Basename;
+use File::Spec;
+use File::stat;
+use FindBin;
+use Getopt::Long;
+use lib $FindBin::Bin;
+use MIME::Base64;
+use POSIX qw(:errno_h);
+use Time::gmtime;
+use VCSUtils;
+
+sub binarycmp($$);
+sub diffOptionsForFile($);
+sub findBaseUrl($);
+sub findMimeType($;$);
+sub findModificationType($);
+sub findSourceFileAndRevision($);
+sub generateDiff($$);
+sub generateFileList($\%);
+sub hunkHeaderLineRegExForFile($);
+sub isBinaryMimeType($);
+sub manufacturePatchForAdditionWithHistory($);
+sub numericcmp($$);
+sub outputBinaryContent($);
+sub patchpathcmp($$);
+sub pathcmp($$);
+sub processPaths(\@);
+sub splitpath($);
+sub testfilecmp($$);
+
+$ENV{'LC_ALL'} = 'C';
+
+my $showHelp;
+my $ignoreChangelogs = 0;
+my $devNull = File::Spec->devnull();
+
+my $result = GetOptions(
+ "help" => \$showHelp,
+ "ignore-changelogs" => \$ignoreChangelogs
+);
+if (!$result || $showHelp) {
+ print STDERR basename($0) . " [-h|--help] [--ignore-changelogs] [svndir1 [svndir2 ...]]\n";
+ exit 1;
+}
+
+# Sort the diffs for easier reviewing.
+my %paths = processPaths(@ARGV);
+
+# Generate a list of files requiring diffs.
+my %diffFiles;
+for my $path (keys %paths) {
+ generateFileList($path, %diffFiles);
+}
+
+my $svnRoot = determineSVNRoot();
+my $prefix = chdirReturningRelativePath($svnRoot);
+
+my $patchSize = 0;
+
+# Generate the diffs, in an order chosen for easy reviewing.
+for my $path (sort patchpathcmp values %diffFiles) {
+ $patchSize += generateDiff($path, $prefix);
+}
+
+if ($patchSize > 20480) {
+ print STDERR "WARNING: Patch's size is " . int($patchSize/1024) . " kbytes.\n";
+ print STDERR "Patches 20k or smaller are more likely to be reviewed. Larger patches may sit unreviewed for a long time.\n";
+}
+
+exit 0;
+
+# Overall sort, considering multiple criteria.
+sub patchpathcmp($$)
+{
+ my ($a, $b) = @_;
+
+ # All binary files come after all non-binary files.
+ my $result = binarycmp($a, $b);
+ return $result if $result;
+
+ # All test files come after all non-test files.
+ $result = testfilecmp($a, $b);
+ return $result if $result;
+
+ # Final sort is a "smart" sort by directory and file name.
+ return pathcmp($a, $b);
+}
+
+# Sort so text files appear before binary files.
+sub binarycmp($$)
+{
+ my ($fileDataA, $fileDataB) = @_;
+ return $fileDataA->{isBinary} <=> $fileDataB->{isBinary};
+}
+
+sub diffOptionsForFile($)
+{
+ my ($file) = @_;
+
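+ # diff options: -u unified output, -a treat all files as text, -N treat
+ # absent files as empty, -p show which function each change is in.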
+ my $options = "uaNp";
+
+ if (my $hunkHeaderLineRegEx = hunkHeaderLineRegExForFile($file)) {
+ $options .= "F'$hunkHeaderLineRegEx'";
+ }
+
+ return $options;
+}
+
+sub findBaseUrl($)
+{
+ my ($infoPath) = @_;
+ my $baseUrl;
+ my $escapedInfoPath = escapeSubversionPath($infoPath);
+ open INFO, "svn info '$escapedInfoPath' |" or die;
+ while (<INFO>) {
+ if (/^URL: (.+?)[\r\n]*$/) {
+ $baseUrl = $1;
+ }
+ }
+ close INFO;
+ return $baseUrl;
+}
+
+sub findMimeType($;$)
+{
+ my ($file, $revision) = @_;
+ my $args = $revision ? "--revision $revision" : "";
+ my $escapedFile = escapeSubversionPath($file);
+ open PROPGET, "svn propget svn:mime-type $args '$escapedFile' |" or die;
+ my $mimeType = <PROPGET>;
+ close PROPGET;
+ # svn may output a different EOL sequence than $/, so avoid chomp.
+ if ($mimeType) {
+ $mimeType =~ s/[\r\n]+$//g;
+ }
+ return $mimeType;
+}
+
+sub findModificationType($)
+{
+ my ($stat) = @_;
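+ # In "svn stat" output, column 1 is the item status and column 2 the
+ # property status; a "+" in column 4 marks an addition with history
+ # (a copy or move).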
+ my $fileStat = substr($stat, 0, 1);
+ my $propertyStat = substr($stat, 1, 1);
+ if ($fileStat eq "A" || $fileStat eq "R") {
+ my $additionWithHistory = substr($stat, 3, 1);
+ return $additionWithHistory eq "+" ? "additionWithHistory" : "addition";
+ }
+ return "modification" if ($fileStat eq "M" || $propertyStat eq "M");
+ return "deletion" if ($fileStat eq "D");
+ return undef;
+}
+
+sub findSourceFileAndRevision($)
+{
+ my ($file) = @_;
+ my $baseUrl = findBaseUrl(".");
+ my $sourceFile;
+ my $sourceRevision;
+ my $escapedFile = escapeSubversionPath($file);
+ open INFO, "svn info '$escapedFile' |" or die;
+ while (<INFO>) {
+ if (/^Copied From URL: (.+?)[\r\n]*$/) {
+ $sourceFile = File::Spec->abs2rel($1, $baseUrl);
+ } elsif (/^Copied From Rev: ([0-9]+)/) {
+ $sourceRevision = $1;
+ }
+ }
+ close INFO;
+ return ($sourceFile, $sourceRevision);
+}
+
+sub generateDiff($$)
+{
+ my ($fileData, $prefix) = @_;
+ my $file = File::Spec->catdir($prefix, $fileData->{path});
+
+ if ($ignoreChangelogs && basename($file) eq "ChangeLog") {
+ return 0;
+ }
+
+ my $patch = "";
+ if ($fileData->{modificationType} eq "additionWithHistory") {
+ manufacturePatchForAdditionWithHistory($fileData);
+ }
+
+ my $diffOptions = diffOptionsForFile($file);
+ my $escapedFile = escapeSubversionPath($file);
+ open DIFF, "svn diff --diff-cmd diff -x -$diffOptions '$escapedFile' |" or die;
+ while (<DIFF>) {
+ $patch .= $_;
+ }
+ close DIFF;
+ if (basename($file) eq "ChangeLog") {
+ my $changeLogHash = fixChangeLogPatch($patch);
+ $patch = $changeLogHash->{patch};
+ }
+ print $patch;
+ if ($fileData->{isBinary}) {
+ print "\n" if ($patch && $patch =~ m/\n\S+$/m);
+ outputBinaryContent($file);
+ }
+ return length($patch);
+}
+
+sub generateFileList($\%)
+{
+ my ($statPath, $diffFiles) = @_;
+ my %testDirectories = map { $_ => 1 } qw(LayoutTests);
+ my $escapedStatPath = escapeSubversionPath($statPath);
+ open STAT, "svn stat '$escapedStatPath' |" or die;
+ while (my $line = <STAT>) {
+ # svn may output a different EOL sequence than $/, so avoid chomp.
+ $line =~ s/[\r\n]+$//g;
+ my $stat;
+ my $path;
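+ # svn 1.6 widened the status field (it added a tree-conflict column), so
+ # the path starts at a different offset depending on the client version.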
+ if (isSVNVersion16OrNewer()) {
+ $stat = substr($line, 0, 8);
+ $path = substr($line, 8);
+ } else {
+ $stat = substr($line, 0, 7);
+ $path = substr($line, 7);
+ }
+ next if -d $path;
+ my $modificationType = findModificationType($stat);
+ if ($modificationType) {
+ $diffFiles->{$path}->{path} = $path;
+ $diffFiles->{$path}->{modificationType} = $modificationType;
+ $diffFiles->{$path}->{isBinary} = isBinaryMimeType($path);
+ $diffFiles->{$path}->{isTestFile} = exists $testDirectories{(File::Spec->splitdir($path))[0]} ? 1 : 0;
+ if ($modificationType eq "additionWithHistory") {
+ my ($sourceFile, $sourceRevision) = findSourceFileAndRevision($path);
+ $diffFiles->{$path}->{sourceFile} = $sourceFile;
+ $diffFiles->{$path}->{sourceRevision} = $sourceRevision;
+ }
+ } else {
+ print STDERR $line, "\n";
+ }
+ }
+ close STAT;
+}
+
+sub hunkHeaderLineRegExForFile($)
+{
+ my ($file) = @_;
+
+ my $startOfObjCInterfaceRegEx = "@(implementation\\|interface\\|protocol)";
+ return "^[-+]\\|$startOfObjCInterfaceRegEx" if $file =~ /\.mm?$/;
+ return "^$startOfObjCInterfaceRegEx" if $file =~ /^(.*\/)?(mac|objc)\// && $file =~ /\.h$/;
+}
+
+sub isBinaryMimeType($)
+{
+ my ($file) = @_;
+ my $mimeType = findMimeType($file);
+ return 0 if (!$mimeType || substr($mimeType, 0, 5) eq "text/");
+ return 1;
+}
+
+sub manufacturePatchForAdditionWithHistory($)
+{
+ my ($fileData) = @_;
+ my $file = $fileData->{path};
+ print "Index: ${file}\n";
+ print "=" x 67, "\n";
+ my $sourceFile = $fileData->{sourceFile};
+ my $sourceRevision = $fileData->{sourceRevision};
+ print "--- ${file}\t(revision ${sourceRevision})\t(from ${sourceFile}:${sourceRevision})\n";
+ print "+++ ${file}\t(working copy)\n";
+ if ($fileData->{isBinary}) {
+ print "\nCannot display: file marked as a binary type.\n";
+ my $mimeType = findMimeType($file, $sourceRevision);
+ print "svn:mime-type = ${mimeType}\n\n";
+ } else {
+ my $escapedSourceFile = escapeSubversionPath($sourceFile);
+ print `svn cat ${escapedSourceFile} | diff -u $devNull - | tail -n +3`;
+ }
+}
+
+# Sort numeric parts of strings as numbers, other parts as strings.
+# Makes 1.33 come after 1.3, which is cool.
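+# For example, "r9.txt" sorts before "r10.txt" because the numeric chunks
+# 9 and 10 are compared as numbers rather than as strings.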
+sub numericcmp($$)
+{
+ my ($aa, $bb) = @_;
+
+ my @a = split /(\d+)/, $aa;
+ my @b = split /(\d+)/, $bb;
+
+ # Compare one chunk at a time.
+ # Each chunk is either all numeric digits, or all not numeric digits.
+ while (@a && @b) {
+ my $a = shift @a;
+ my $b = shift @b;
+
+ # Use numeric comparison if chunks are non-equal numbers.
+ return $a <=> $b if $a =~ /^\d/ && $b =~ /^\d/ && $a != $b;
+
+ # Use string comparison if chunks are any other kind of non-equal string.
+ return $a cmp $b if $a ne $b;
+ }
+
+ # One of the two is now empty; compare lengths for result in this case.
+ return @a <=> @b;
+}
+
+sub outputBinaryContent($)
+{
+ my ($path) = @_;
+ # Deletion
+ return if (! -e $path);
+ # Addition or Modification
+ my $buffer;
+ open BINARY, $path or die;
+ while (read(BINARY, $buffer, 60*57)) {
+ print encode_base64($buffer);
+ }
+ close BINARY;
+ print "\n";
+}
+
+# Sort first by directory, then by file, so all paths in one directory are grouped
+# rather than being interspersed with items from subdirectories.
+# Use numericcmp to sort directory and filenames to make order logical.
+# Also include a special case for ChangeLog, which comes first in any directory.
+sub pathcmp($$)
+{
+ my ($fileDataA, $fileDataB) = @_;
+
+ my ($dira, $namea) = splitpath($fileDataA->{path});
+ my ($dirb, $nameb) = splitpath($fileDataB->{path});
+
+ return numericcmp($dira, $dirb) if $dira ne $dirb;
+ return -1 if $namea eq "ChangeLog" && $nameb ne "ChangeLog";
+ return +1 if $namea ne "ChangeLog" && $nameb eq "ChangeLog";
+ return numericcmp($namea, $nameb);
+}
+
+sub processPaths(\@)
+{
+ my ($paths) = @_;
+ return ("." => 1) if (!@{$paths});
+
+ my %result = ();
+
+ for my $file (@{$paths}) {
+ die "can't handle absolute paths like \"$file\"\n" if File::Spec->file_name_is_absolute($file);
+ die "can't handle empty string path\n" if $file eq "";
+ die "can't handle path with single quote in the name like \"$file\"\n" if $file =~ /'/; # ' (keep Xcode syntax highlighting happy)
+
+ my $untouchedFile = $file;
+
+ $file = canonicalizePath($file);
+
+ die "can't handle paths with .. like \"$untouchedFile\"\n" if $file =~ m|/\.\./|;
+
+ $result{$file} = 1;
+ }
+
+ return ("." => 1) if ($result{"."});
+
+ # Remove any paths that also have a parent listed.
+ for my $path (keys %result) {
+ for (my $parent = dirname($path); $parent ne '.'; $parent = dirname($parent)) {
+ if ($result{$parent}) {
+ delete $result{$path};
+ last;
+ }
+ }
+ }
+
+ return %result;
+}
+
+# Break up a path into the directory (with slash) and base name.
+sub splitpath($)
+{
+ my ($path) = @_;
+
+ my $pathSeparator = "/";
+ my $dirname = dirname($path) . $pathSeparator;
+ $dirname = "" if $dirname eq "." . $pathSeparator;
+
+ return ($dirname, basename($path));
+}
+
+# Sort so source code files appear before test files.
+sub testfilecmp($$)
+{
+ my ($fileDataA, $fileDataB) = @_;
+ return $fileDataA->{isTestFile} <=> $fileDataB->{isTestFile};
+}
+
diff --git a/src/third_party/blink/Tools/Scripts/svn-unapply b/src/third_party/blink/Tools/Scripts/svn-unapply
new file mode 100755
index 0000000..56db50b
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/svn-unapply
@@ -0,0 +1,283 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2005, 2006, 2007 Apple Inc. All rights reserved.
+# Copyright (C) 2009 Cameron McCormack <cam@mcc.id.au>
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# "unpatch" script for WebKit Open Source Project, used to remove patches.
+
+# Differences from invoking "patch -p0 -R":
+#
+# Handles added files (does a svn revert with additional logic to handle local changes).
+# Handles added directories (does a svn revert and a rmdir).
+# Handles removed files (does a svn revert with additional logic to handle local changes).
+# Handles removed directories (does a svn revert).
+# Paths from Index: lines are used rather than the paths on the patch lines, which
+# makes patches generated by "cvs diff" work (increasingly unimportant since we
+# use Subversion now).
+# ChangeLog patches use --fuzz=3 to prevent rejects, and the entry date is reset in
+# the patch before it is applied (svn-apply sets it when applying a patch).
+# Handles binary files (requires patches made by svn-create-patch).
+# Handles copied and moved files (requires patches made by svn-create-patch).
+# Handles git-diff patches (without binary changes) created at the top-level directory
+#
+# Missing features:
+#
+# Handle property changes.
+# Handle copied and moved directories (would require patches made by svn-create-patch).
+# Use version numbers in the patch file and do a 3-way merge.
+# When reversing an addition, check that the file matches what's being removed.
+# Notice a patch that's being unapplied at the "wrong level" and make it work anyway.
+# Do a dry run on the whole patch and don't do anything if part of the patch is
+# going to fail (probably too strict unless we exclude ChangeLog).
+# Handle git-diff patches with binary changes
+
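+# Typical invocation (illustrative; the patch file name is hypothetical):
+#
+#   svn-unapply fix.patch
+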
+use strict;
+use warnings;
+
+use Cwd;
+use Digest::MD5;
+use Fcntl qw(:DEFAULT :seek);
+use File::Basename;
+use File::Spec;
+use File::Temp qw(tempfile);
+use Getopt::Long;
+
+use FindBin;
+use lib $FindBin::Bin;
+use VCSUtils;
+
+sub checksum($);
+sub patch($);
+sub revertDirectories();
+sub unapplyPatch($$;$);
+sub unsetChangeLogDate($$);
+
+my $force = 0;
+my $showHelp = 0;
+
+my $optionParseSuccess = GetOptions(
+ "force!" => \$force,
+ "help!" => \$showHelp
+);
+
+if (!$optionParseSuccess || $showHelp) {
+ print STDERR basename($0) . " [-h|--help] [--force] patch1 [patch2 ...]\n";
+ exit 1;
+}
+
+my $globalExitStatus = 0;
+
+my $repositoryRootPath = determineVCSRoot();
+
+my @copiedFiles;
+my %directoriesToCheck;
+
+# Need to use a typeglob to pass the file handle as a parameter,
+# otherwise get a bareword error.
+my @diffHashRefs = parsePatch(*ARGV);
+
+print "Parsed " . @diffHashRefs . " diffs from patch file(s).\n";
+
+my $preparedPatchHash = prepareParsedPatch($force, @diffHashRefs);
+
+my @copyDiffHashRefs = @{$preparedPatchHash->{copyDiffHashRefs}};
+my @nonCopyDiffHashRefs = @{$preparedPatchHash->{nonCopyDiffHashRefs}};
+
+for my $diffHashRef (@nonCopyDiffHashRefs) {
+ patch($diffHashRef);
+}
+
+# Handle copied and moved files last since they may have had post-copy changes that have now been unapplied
+for my $diffHashRef (@copyDiffHashRefs) {
+ patch($diffHashRef);
+}
+
+if (isSVN()) {
+ revertDirectories();
+}
+
+exit $globalExitStatus;
+
+sub checksum($)
+{
+ my $file = shift;
+ open(FILE, $file) or die "Can't open '$file': $!";
+ binmode(FILE);
+ my $checksum = Digest::MD5->new->addfile(*FILE)->hexdigest();
+ close(FILE);
+ return $checksum;
+}
+
+# Args:
+# $diffHashRef: a diff hash reference of the type returned by parsePatch().
+sub patch($)
+{
+ my ($diffHashRef) = @_;
+
+ # Make sure $patch is initialized to some value. There is no
+ # svnConvertedText when reversing an svn copy/move.
+ my $patch = $diffHashRef->{svnConvertedText} || "";
+
+ my $fullPath = $diffHashRef->{indexPath};
+ my $isSvnBinary = $diffHashRef->{isBinary} && $diffHashRef->{isSvn};
+ my $hasTextChunks = $patch && $diffHashRef->{numTextChunks};
+
+ $directoriesToCheck{dirname($fullPath)} = 1;
+
+ my $deletion = 0;
+ my $addition = 0;
+
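+ # As in svn-apply, fall back to the hunk headers: "@@ -0,0" marks an
+ # addition and "+0,0 @@" marks a deletion.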
+ $addition = 1 if ($diffHashRef->{isNew} || $diffHashRef->{copiedFromPath} || $patch =~ /\n@@ -0,0 .* @@/);
+ $deletion = 1 if ($diffHashRef->{isDeletion} || $patch =~ /\n@@ .* \+0,0 @@/);
+
+ if (!$addition && !$deletion && !$isSvnBinary && $hasTextChunks) {
+ # Standard patch, patch tool can handle this.
+ if (basename($fullPath) eq "ChangeLog") {
+ my $changeLogDotOrigExisted = -f "${fullPath}.orig";
+ my $changeLogHash = fixChangeLogPatch($patch);
+ unapplyPatch(unsetChangeLogDate($fullPath, $changeLogHash->{patch}), $fullPath, ["--fuzz=3"]);
+ unlink("${fullPath}.orig") if (! $changeLogDotOrigExisted);
+ } else {
+ unapplyPatch($patch, $fullPath);
+ }
+ } else {
+ # Either a deletion, an addition or a binary change.
+
+ my $escapedFullPath = escapeSubversionPath($fullPath);
+ # FIXME: Add support for Git binary files.
+ if ($isSvnBinary) {
+ # Reverse binary change
+ unlink($fullPath) if (-e $fullPath);
+ system "svn", "revert", $escapedFullPath;
+ } elsif ($deletion) {
+ # Reverse deletion
+ rename($fullPath, "$fullPath.orig") if -e $fullPath;
+
+ unapplyPatch($patch, $fullPath);
+
+ # If we don't ask for the filehandle here, we always get a warning.
+ my ($fh, $tempPath) = tempfile(basename($fullPath) . "-XXXXXXXX",
+ DIR => dirname($fullPath), UNLINK => 1);
+ close($fh);
+
+ # Keep the version from the patch in case it's different from svn.
+ rename($fullPath, $tempPath);
+ system "svn", "revert", $escapedFullPath;
+ rename($tempPath, $fullPath);
+
+ # This works around a bug in the svn client.
+ # [Issue 1960] file modifications get lost due to FAT 2s time resolution
+ # http://subversion.tigris.org/issues/show_bug.cgi?id=1960
+ system "touch", $fullPath;
+
+ # Remove $fullPath.orig if it is the same as $fullPath
+ unlink("$fullPath.orig") if -e "$fullPath.orig" && checksum($fullPath) eq checksum("$fullPath.orig");
+
+ # Show status if the file is modified.
+ system "svn", "stat", $escapedFullPath;
+ } elsif ($addition) {
+ # Reverse addition
+ #
+ # FIXME: This should use the same logic as svn-apply's deletion
+ # code. In particular, svn-apply's scmRemove() subroutine
+ # should be used here.
+ unapplyPatch($patch, $fullPath, ["--force"]) if $patch;
+ unlink($fullPath) if -z $fullPath;
+ system "svn", "revert", $escapedFullPath;
+ }
+ }
+
+ scmToggleExecutableBit($fullPath, -1 * $diffHashRef->{executableBitDelta}) if defined($diffHashRef->{executableBitDelta});
+}
+
+sub revertDirectories()
+{
+ chdir $repositoryRootPath;
+ my %checkedDirectories;
+ foreach my $path (reverse sort keys %directoriesToCheck) {
+ my @dirs = File::Spec->splitdir($path);
+ while (scalar @dirs) {
+ my $dir = File::Spec->catdir(@dirs);
+ pop(@dirs);
+ next if (exists $checkedDirectories{$dir});
+ if (-d $dir) {
+ my $svnOutput = svnStatus($dir);
+ my $escapedDir = escapeSubversionPath($dir);
+ if ($svnOutput && $svnOutput =~ m#A\s+$dir\n#) {
+ system "svn", "revert", $escapedDir;
+ rmdir $dir;
+ }
+ elsif ($svnOutput && $svnOutput =~ m#D\s+$dir\n#) {
+ system "svn", "revert", $escapedDir;
+ }
+ else {
+ # Modification
+ print $svnOutput if $svnOutput;
+ }
+ $checkedDirectories{$dir} = 1;
+ }
+ else {
+ die "'$dir' is not a directory";
+ }
+ }
+ }
+}
+
+# Args:
+# $patch: a patch string.
+# $pathRelativeToRoot: the path of the file to be patched, relative to the
+# repository root. This should normally be the path
+# found in the patch's "Index:" line.
+# $options: a reference to an array of options to pass to the patch command.
+# Do not include --reverse in this array.
+sub unapplyPatch($$;$)
+{
+ my ($patch, $pathRelativeToRoot, $options) = @_;
+
+ my $optionalArgs = {options => $options, ensureForce => $force, shouldReverse => 1};
+
+ my $exitStatus = runPatchCommand($patch, $repositoryRootPath, $pathRelativeToRoot, $optionalArgs);
+
+ if ($exitStatus) {
+ $globalExitStatus = $exitStatus;
+ }
+}
+
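+# Args:
+#   $fullPath: the path to the ChangeLog file on disk.
+#   $patch: a patch string for that ChangeLog.
+#
+# Rewrites the date of the first added ChangeLog entry in $patch to the date
+# currently at the start of the on-disk ChangeLog (its first 10 bytes), so
+# that reverse-applying can match the entry even if the dates differ. For
+# example, a leading "+2011-06-20" in the patch would become "+2011-06-21"
+# if the ChangeLog on disk starts with "2011-06-21" (dates illustrative).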
+sub unsetChangeLogDate($$)
+{
+ my $fullPath = shift;
+ my $patch = shift;
+ my $newDate;
+ sysopen(CHANGELOG, $fullPath, O_RDONLY) or die "Failed to open $fullPath: $!";
+ sysseek(CHANGELOG, 0, SEEK_SET);
+ my $byteCount = sysread(CHANGELOG, $newDate, 10);
+ die "Failed reading $fullPath: $!" if !$byteCount || $byteCount != 10;
+ close(CHANGELOG);
+ $patch =~ s/(\n\+)\d{4}-[^-]{2}-[^-]{2}( )/$1$newDate$2/;
+ return $patch;
+}
diff --git a/src/third_party/blink/Tools/Scripts/test-webkit-scripts b/src/third_party/blink/Tools/Scripts/test-webkit-scripts
new file mode 100755
index 0000000..baba059
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/test-webkit-scripts
@@ -0,0 +1,86 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Run unit tests of WebKit's Perl, Python, and Ruby scripts."""
+
+# The docstring above is passed as the "description" to the OptionParser
+# used in this script's __main__ block.
+#
+# For the command options supported by this script, see the code below
+# that instantiates the OptionParser class, or else pass --help
+# while running this script (since argument help is auto-generated).
+
+import os
+import subprocess
+import sys
+from optparse import OptionParser
+
+class ScriptsTester(object):
+
+ """Supports running unit tests of WebKit scripts."""
+
+ def __init__(self, scripts_directory):
+ self.scripts_directory = scripts_directory
+
+ def script_path(self, script_file_name):
+ """Return an absolute path to the given script."""
+ return os.path.join(self.scripts_directory, script_file_name)
+
+ def run_test_script(self, script_title, script_path, args=None):
+ """Run the given test script."""
+ print('Testing %s:' % script_title)
+ call_args = [script_path]
+ if args:
+ call_args.extend(args)
+ subprocess.call(call_args)
+ print(70 * "*") # dividing line
+
+ def main(self):
+ parser = OptionParser(description=__doc__)
+ parser.add_option('-a', '--all', dest='all', action='store_true',
+ default=False, help='run all available tests, '
+ 'including those suppressed by default')
+ (options, args) = parser.parse_args()
+
+ self.run_test_script('Perl scripts', self.script_path('test-webkitperl'))
+ self.run_test_script('Python scripts', self.script_path('test-webkitpy'),
+ ['--all'] if options.all else None)
+ self.run_test_script('Ruby scripts', self.script_path('test-webkitruby'))
+
+ # FIXME: Display a cumulative indication of success or failure.
+ # In addition, call sys.exit() with 0 or 1 depending on that
+ # cumulative success or failure.
+ print('Note: Perl, Python, and Ruby results appear separately above.')
+
+
+if __name__ == '__main__':
+ # The scripts directory is the directory containing this file.
+ tester = ScriptsTester(os.path.dirname(__file__))
+ tester.main()
diff --git a/src/third_party/blink/Tools/Scripts/test-webkitperl b/src/third_party/blink/Tools/Scripts/test-webkitperl
new file mode 100755
index 0000000..4875843
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/test-webkitperl
@@ -0,0 +1,59 @@
+#!/usr/bin/perl
+#
+# Copyright (C) 2009 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Runs unit tests of WebKit Perl code.
+
+use strict;
+use warnings;
+
+use File::Spec;
+use FindBin;
+use Test::Harness;
+use lib $FindBin::Bin; # so this script can be run from any directory.
+use VCSUtils;
+
+# Change the working directory so that we can pass shorter, relative
+# paths to runtests(), rather than longer, absolute paths.
+#
+# We change to the source root so the paths can be relative to the
+# source root. These paths display on the screen, and their meaning
+# will be clearer to the user if relative to the root, rather than to
+# the Scripts directory, say.
+#
+# Source root is two levels up from the Scripts directory.
+my $sourceRootDir = File::Spec->catfile($FindBin::Bin, "../..");
+chdir($sourceRootDir);
+
+# Relative to root
+my $pattern = "Tools/Scripts/webkitperl/*_unittest/*.pl";
+
+my @files = <${pattern}>; # lists files alphabetically
+
+runtests(@files);
diff --git a/src/third_party/blink/Tools/Scripts/test-webkitpy b/src/third_party/blink/Tools/Scripts/test-webkitpy
new file mode 100755
index 0000000..d7b5d99
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/test-webkitpy
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import sys
+
+dirname = os.path.dirname
+scripts_dir = dirname(os.path.realpath(__file__))
+chromium_src_dir = dirname(dirname(dirname(dirname(scripts_dir))))
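+# Note: scripts_dir is .../src/third_party/blink/Tools/Scripts, so four
+# dirname() calls walk up to the Chromium src/ directory.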
+
+path_to_typ = os.path.join(chromium_src_dir, 'third_party', 'typ')
+if path_to_typ not in sys.path:
+ sys.path.append(path_to_typ)
+
+import typ
+
+if sys.platform == 'win32':
+    # These tests fail on win32. We could annotate some of these with
+    # class-level skips, but we don't support package/module-level skips.
+    # bugs.webkit.org/show_bug.cgi?id=54526.
+ skip = [
+ 'webkitpy.common.checkout.*',
+ 'webkitpy.common.config.*',
+ 'webkitpy.tool.*',
+ 'webkitpy.w3c.*',
+ 'webkitpy.layout_tests.layout_package.bot_test_expectations_unittest.*',
+ ]
+else:
+ # The scm tests are really slow, so we skip them by default.
+    # bugs.webkit.org/show_bug.cgi?id=31818.
+ skip = [
+ 'webkitpy.common.checkout.scm.scm_unittest.*',
+ ]
+
+sys.exit(typ.main(top_level_dir=scripts_dir,
+ skip=skip,
+ path=[os.path.join(scripts_dir, 'webkitpy', 'thirdparty')],
+ win_multiprocessing='spawn'))
diff --git a/src/third_party/blink/Tools/Scripts/test-webkitruby b/src/third_party/blink/Tools/Scripts/test-webkitruby
new file mode 100755
index 0000000..cd04a0a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/test-webkitruby
@@ -0,0 +1,34 @@
+#!/usr/bin/env ruby
+
+# Copyright (C) 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+$exit_code = 0;
+
+Dir.chdir File.dirname(__FILE__)
+Dir.glob("./webkitruby/*/*.rb").each do |test|
+ puts %x{ '#{test}' }
+ $exit_code = 1 if $?.exitstatus != 0
+end
+
+exit $exit_code
diff --git a/src/third_party/blink/Tools/Scripts/update-w3c-deps b/src/third_party/blink/Tools/Scripts/update-w3c-deps
new file mode 100755
index 0000000..42cf144
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/update-w3c-deps
@@ -0,0 +1,20 @@
+#!/usr/bin/python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Pull latest revisions of the W3C test repos and update our DEPS entries."""
+
+from webkitpy.common import version_check
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.w3c.deps_updater import DepsUpdater
+
+
+if __name__ == '__main__':
+ host = SystemHost()
+ updater = DepsUpdater(host)
+ try:
+ host.exit(updater.main())
+ except KeyboardInterrupt:
+ host.print_("Interrupted, exiting")
+ host.exit(130)
diff --git a/src/third_party/blink/Tools/Scripts/update-webgl-conformance-tests b/src/third_party/blink/Tools/Scripts/update-webgl-conformance-tests
new file mode 100755
index 0000000..cf9bd7f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/update-webgl-conformance-tests
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Wrapper around webkitpy/layout_tests/update-webgl-conformance-tests.py"""
+
+import webkitpy.webgl.update_webgl_conformance_tests
+import sys
+
+if __name__ == '__main__':
+ sys.exit(webkitpy.webgl.update_webgl_conformance_tests.main())
diff --git a/src/third_party/blink/Tools/Scripts/validate-committer-lists b/src/third_party/blink/Tools/Scripts/validate-committer-lists
new file mode 100755
index 0000000..8da4474
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/validate-committer-lists
@@ -0,0 +1,287 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Checks Python's known list of committers against lists.webkit.org and SVN history.
+
+
+import os
+import subprocess
+import re
+import sys
+import urllib2
+from datetime import date, datetime, timedelta
+from optparse import OptionParser
+
+from webkitpy.common.config.committers import CommitterList
+from webkitpy.common.system.deprecated_logging import log, error
+from webkitpy.common.checkout.scm import Git
+from webkitpy.common.net.bugzilla import Bugzilla
+
+# WebKit includes a bundled copy of BeautifulSoup in Scripts/webkitpy,
+# so this import should always succeed.
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
+
+
+def print_list_if_non_empty(title, list_to_print):
+ if not list_to_print:
+ return
+ print # Newline before the list
+ print title
+ for item in list_to_print:
+ print item
+
+
+class CommitterListFromMailingList(object):
+ committers_list_url = "http://lists.webkit.org/mailman/roster.cgi/webkit-committers"
+ reviewers_list_url = "http://lists.webkit.org/mailman/roster.cgi/webkit-reviewers"
+
+ def _fetch_emails_from_page(self, url):
+ page = urllib2.urlopen(url)
+ soup = BeautifulSoup(page)
+
+ emails = []
+        # Grab the list items; each one contains a link whose text is an
+        # obfuscated email address.
+ for email_item in soup('li'):
+ email_link = email_item.find("a")
+ email = email_link.string.replace(" at ", "@") # The email is obfuscated using " at " instead of "@".
+ emails.append(email)
+ return emails
+
+ @staticmethod
+    def _committers_not_found_in_email_list(committers, emails):
+ missing_from_mailing_list = []
+ for committer in committers:
+ for email in committer.emails:
+ if email in emails:
+ break
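+            # This "else" belongs to the "for" loop: it runs only when no
+            # email matched, i.e. the loop finished without hitting "break".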
+ else:
+ missing_from_mailing_list.append(committer)
+ return missing_from_mailing_list
+
+ @staticmethod
+ def _emails_not_found_in_committer_list(committers, emails):
+ email_to_committer_map = {}
+ for committer in committers:
+ for email in committer.emails:
+ email_to_committer_map[email] = committer
+
+ return filter(lambda email: not email_to_committer_map.get(email), emails)
+
+ def check_for_emails_missing_from_list(self, committer_list):
+ committer_emails = self._fetch_emails_from_page(self.committers_list_url)
+ list_name = "webkit-committers@lists.webkit.org"
+
+        missing_from_mailing_list = self._committers_not_found_in_email_list(committer_list.committers(), committer_emails)
+ print_list_if_non_empty("Committers missing from %s:" % list_name, missing_from_mailing_list)
+
+ users_missing_from_committers = self._emails_not_found_in_committer_list(committer_list.committers(), committer_emails)
+ print_list_if_non_empty("Subcribers to %s missing from committer.py:" % list_name, users_missing_from_committers)
+
+
+ reviewer_emails = self._fetch_emails_from_page(self.reviewers_list_url)
+ list_name = "webkit-reviewers@lists.webkit.org"
+
+        missing_from_mailing_list = self._committers_not_found_in_email_list(committer_list.reviewers(), reviewer_emails)
+ print_list_if_non_empty("Reviewers missing from %s:" % list_name, missing_from_mailing_list)
+
+ missing_from_reviewers = self._emails_not_found_in_committer_list(committer_list.reviewers(), reviewer_emails)
+ print_list_if_non_empty("Subcribers to %s missing from reviewers in committer.py:" % list_name, missing_from_reviewers)
+
+ missing_from_committers = self._emails_not_found_in_committer_list(committer_list.committers(), reviewer_emails)
+ print_list_if_non_empty("Subcribers to %s completely missing from committers.py" % list_name, missing_from_committers)
+
+
+class CommitterListFromGit(object):
+ login_to_email_address = {
+ 'aliceli1' : 'alice.liu@apple.com',
+ 'bdash' : 'mrowe@apple.com',
+ 'bdibello' : 'bdibello@apple.com', # Bruce DiBello, only 4 commits: r10023, r9548, r9538, r9535
+ 'cblu' : 'cblu@apple.com',
+ 'cpeterse' : 'cpetersen@apple.com',
+ 'eseidel' : 'eric@webkit.org',
+ 'gdennis' : 'gdennis@webkit.org',
+ 'goldsmit' : 'goldsmit@apple.com', # Debbie Goldsmith, only one commit r8839
+ 'gramps' : 'gramps@apple.com',
+ 'honeycutt' : 'jhoneycutt@apple.com',
+ 'jdevalk' : 'joost@webkit.org',
+ 'jens' : 'jens@apple.com',
+ 'justing' : 'justin.garcia@apple.com',
+ 'kali' : 'kali@apple.com', # Christy Warren, did BIDI work, 5 commits: r8815, r8802, r8801, r8791, r8773, r8603
+ 'kjk' : 'kkowalczyk@gmail.com',
+ 'kmccullo' : 'kmccullough@apple.com',
+ 'kocienda' : 'kocienda@apple.com',
+ 'lamadio' : 'lamadio@apple.com', # Lou Amadio, only 2 commits: r17949 and r17783
+ 'lars' : 'lars@kde.org',
+ 'lweintraub' : 'lweintraub@apple.com',
+ 'lypanov' : 'lypanov@kde.org',
+ 'mhay' : 'mhay@apple.com', # Mike Hay, 3 commits: r3813, r2552, r2548
+ 'ouch' : 'ouch@apple.com', # John Louch
+ 'pyeh' : 'patti@apple.com', # Patti Yeh, did VoiceOver work in WebKit
+ 'rjw' : 'rjw@apple.com',
+ 'seangies' : 'seangies@apple.com', # Sean Gies?, only 5 commits: r16600, r16592, r16511, r16489, r16484
+ 'sheridan' : 'sheridan@apple.com', # Shelly Sheridan
+ 'thatcher' : 'timothy@apple.com',
+ 'tomernic' : 'timo@apple.com',
+ 'trey' : 'trey@usa.net',
+ 'tristan' : 'tristan@apple.com',
+ 'vicki' : 'vicki@apple.com',
+ 'voas' : 'voas@apple.com', # Ed Voas, did some Carbon work in WebKit
+ 'zack' : 'zack@kde.org',
+ 'zimmermann' : 'zimmermann@webkit.org',
+ }
+
+ def __init__(self):
+ self._last_commit_time_by_author_cache = {}
+
+ def _fetch_authors_and_last_commit_time_from_git_log(self):
+ last_commit_dates = {}
+ git_log_args = ['git', 'log', '--reverse', '--pretty=format:%ae %at']
+ process = subprocess.Popen(git_log_args, stdout=subprocess.PIPE)
+
+ # eric@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc 1257090899
+ line_regexp = re.compile("^(?P<author>.+)@\S+ (?P<timestamp>\d+)$")
+ while True:
+ output_line = process.stdout.readline()
+            if output_line == '' and process.poll() is not None:
+ return last_commit_dates
+
+ match_result = line_regexp.match(output_line)
+ if not match_result:
+ error("Failed to match line: %s" % output_line)
+ last_commit_dates[match_result.group('author')] = float(match_result.group('timestamp'))
+
+ def _fill_in_emails_for_old_logins(self):
+ authors_missing_email = filter(lambda author: author.find('@') == -1, self._last_commit_time_by_author_cache)
+ authors_with_email = filter(lambda author: author.find('@') != -1, self._last_commit_time_by_author_cache)
+ prefixes_of_authors_with_email = map(lambda author: author.split('@')[0], authors_with_email)
+
+ for author in authors_missing_email:
+ # First check to see if we have a manual mapping from login to email.
+ author_email = self.login_to_email_address.get(author)
+
+ # Most old logins like 'darin' are now just 'darin@apple.com', so check for a prefix match if a manual mapping was not found.
+ if not author_email and author in prefixes_of_authors_with_email:
+ author_email_index = prefixes_of_authors_with_email.index(author)
+ author_email = authors_with_email[author_email_index]
+
+ if not author_email:
+ # No known email mapping, likely not an active committer. We could log here.
+ continue
+
+ # log("%s -> %s" % (author, author_email)) # For sanity checking.
+ no_email_commit_time = self._last_commit_time_by_author_cache.get(author)
+ email_commit_time = self._last_commit_time_by_author_cache.get(author_email)
+            # We compare the timestamps for extra sanity, even though commits made before an email address was used as the login should always be older.
+ if not email_commit_time or email_commit_time < no_email_commit_time:
+ self._last_commit_time_by_author_cache[author_email] = no_email_commit_time
+ del self._last_commit_time_by_author_cache[author]
+
+ def _last_commit_by_author(self):
+ if not self._last_commit_time_by_author_cache:
+ self._last_commit_time_by_author_cache = self._fetch_authors_and_last_commit_time_from_git_log()
+ self._fill_in_emails_for_old_logins()
+ del self._last_commit_time_by_author_cache['(no author)'] # The initial svn import isn't very useful.
+ return self._last_commit_time_by_author_cache
+
+ @staticmethod
+ def _print_three_column_row(widths, values):
+ print "%s%s%s" % (values[0].ljust(widths[0]), values[1].ljust(widths[1]), values[2])
+
+ def print_possibly_expired_committers(self, committer_list):
+ authors_and_last_commits = self._last_commit_by_author().items()
+ authors_and_last_commits.sort(lambda a,b: cmp(a[1], b[1]), reverse=True)
+        committer_cutoff = date.today() - timedelta(days=365)
+ column_widths = [13, 25]
+ print
+ print "Committers who have not committed within one year:"
+ self._print_three_column_row(column_widths, ("Last Commit", "Committer Email", "Committer Record"))
+ for (author, last_commit) in authors_and_last_commits:
+ last_commit_date = date.fromtimestamp(last_commit)
+            if committer_cutoff > last_commit_date:
+ committer_record = committer_list.committer_by_email(author)
+ self._print_three_column_row(column_widths, (str(last_commit_date), author, committer_record))
+
+ def print_committers_missing_from_committer_list(self, committer_list):
+ missing_from_committers_py = []
+ last_commit_time_by_author = self._last_commit_by_author()
+ for author in last_commit_time_by_author:
+ if not committer_list.committer_by_email(author):
+ missing_from_committers_py.append(author)
+
+ never_committed = []
+ for committer in committer_list.committers():
+ for email in committer.emails:
+ if last_commit_time_by_author.get(email):
+ break
+ else:
+ never_committed.append(committer)
+
+ print_list_if_non_empty("Historical committers missing from committer.py:", missing_from_committers_py)
+ print_list_if_non_empty("Committers in committer.py who have never committed:", never_committed)
+
+
+class CommitterListBugzillaChecker(object):
+ def __init__(self):
+ self._bugzilla = Bugzilla()
+
+ def _has_invalid_bugzilla_email(self, committer):
+ return not self._bugzilla.queries.fetch_logins_matching_substring(committer.bugzilla_email())
+
+ def print_committers_with_invalid_bugzilla_emails(self, committer_list):
+ print # Print a newline before we start hitting bugzilla (it logs about logging in).
+ print "Checking committer emails against bugzilla (this will take a long time)"
+ committers_with_invalid_bugzilla_email = filter(self._has_invalid_bugzilla_email, committer_list.committers())
+ print_list_if_non_empty("Committers with invalid bugzilla email:", committers_with_invalid_bugzilla_email)
+
+
+def main():
+ parser = OptionParser()
+ parser.add_option("-b", "--check-bugzilla-emails", action="store_true", help="Check the bugzilla_email for each committer against bugs.webkit.org")
+ (options, args) = parser.parse_args()
+
+ committer_list = CommitterList()
+ CommitterListFromMailingList().check_for_emails_missing_from_list(committer_list)
+
+ if not Git.in_working_directory("."):
+ print """\n\nWARNING: validate-committer-lists requires a git checkout.
+The following checks are disabled:
+ - List of committers ordered by last commit
+ - List of historical committers missing from committers.py
+"""
+ return 1
+    git_committer_list = CommitterListFromGit()
+    git_committer_list.print_possibly_expired_committers(committer_list)
+    git_committer_list.print_committers_missing_from_committer_list(committer_list)
+
+ if options.check_bugzilla_emails:
+ CommitterListBugzillaChecker().print_committers_with_invalid_bugzilla_emails(committer_list)
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/src/third_party/blink/Tools/Scripts/webkit-patch b/src/third_party/blink/Tools/Scripts/webkit-patch
new file mode 100755
index 0000000..2ed9d8d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkit-patch
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Code Aurora Forum. All rights reserved.
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# A tool for automating dealing with bugzilla, posting patches, committing patches, etc.
+
+import logging
+import os
+import signal
+import sys
+import codecs
+
+import webkitpy.common.version_check
+
+from webkitpy.common.system.logutils import configure_logging
+from webkitpy.tool.main import WebKitPatch
+
+# A StreamWriter will by default try to encode all objects passed
+# to write(), so when passed a raw string already encoded as utf8,
+# it will blow up with a UnicodeDecodeError. This does not match
+# the default behaviour of writing to sys.stdout, so we intercept
+# the case of writing raw strings and make sure StreamWriter gets
+# input that it can handle.
+class ForgivingUTF8Writer(codecs.lookup('utf-8')[-1]):
+ def write(self, object):
+ if isinstance(object, str):
+ # Assume raw strings are utf-8 encoded. If this line
+            # fails with a UnicodeDecodeError, our assumption was
+ # wrong, and the stacktrace should show you where we
+ # write non-Unicode/UTF-8 data (which we shouldn't).
+ object = object.decode('utf-8')
+ return codecs.StreamWriter.write(self, object)
+
+# By default, sys.stdout assumes ascii encoding. Since our messages can
+# contain unicode strings (as with some people's names) we need to apply
+# the utf-8 codec to prevent throwing an exception.
+# Not having this was the cause of https://bugs.webkit.org/show_bug.cgi?id=63452.
+sys.stdout = ForgivingUTF8Writer(sys.stdout)
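+# A quick illustration (Python 2 semantics): with the wrapper in place, both
+# of these succeed, whereas a bare StreamWriter would raise
+# UnicodeDecodeError on the second, already-encoded string:
+#   sys.stdout.write(u'caf\u00e9')
+#   sys.stdout.write('caf\xc3\xa9')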
+
+_log = logging.getLogger("webkit-patch")
+
+def main():
+ # This is a hack to let us enable DEBUG logging as early as possible.
+    # Note this can't be a ternary expression, as version_check
+    # hasn't run yet and this python might be older than 2.5.
+ if set(["-v", "--verbose"]).intersection(set(sys.argv)):
+ logging_level = logging.DEBUG
+ else:
+ logging_level = logging.INFO
+ configure_logging(logging_level=logging_level)
+ WebKitPatch(os.path.abspath(__file__)).main()
+
+
+if __name__ == "__main__":
+ try:
+ main()
+ except KeyboardInterrupt:
+ sys.exit(signal.SIGINT + 128)
diff --git a/src/third_party/blink/Tools/Scripts/webkit-tools-completion.sh b/src/third_party/blink/Tools/Scripts/webkit-tools-completion.sh
new file mode 100755
index 0000000..3d9ac2b
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkit-tools-completion.sh
@@ -0,0 +1,77 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Command line completion for common commands used in WebKit development.
+#
+# Set-up:
+# Add a line like this to your .bashrc:
+# source /path/to/WebKitCode/Tools/Scripts/webkit-tools-completion.sh
+
+__webkit-patch_generate_reply()
+{
+ COMPREPLY=( $(compgen -W "$1" -- "${COMP_WORDS[COMP_CWORD]}") )
+}
+
+__webkit-patch_upload_cc_generate_reply()
+{
+ # Note: This won't work well if hostname completion is enabled, disable it with: shopt -u hostcomplete
+ # Completion is done on tokens and our comma-separated list is one single token, so we have to do completion on the whole list each time.
+    # Return a \n-separated list of the possible bugzilla email completions of the substring following the last comma.
+    # Redirect stderr to /dev/null to prevent noise in the shell if this ever breaks somehow.
+ COMPREPLY=( $(PYTHONPATH=$(dirname "${BASH_SOURCE[0]}") python -c "
+import sys,re
+from webkitpy.common.config.committers import CommitterList
+m = re.match('((.*,)*)(.*)', sys.argv[1])
+untilLastComma = m.group(1)
+afterLastComma = m.group(3)
+print('\n'.join([untilLastComma + c.bugzilla_email() + ',' for c in CommitterList().contributors() if c.bugzilla_email().startswith(afterLastComma)]))" "${COMP_WORDS[COMP_CWORD]}" 2>/dev/null ) )
+}
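+# For example, if the word being completed is "eric@webkit.org,a", the
+# snippet above offers "eric@webkit.org,<email>," for every contributor
+# whose bugzilla email starts with "a" (emails here are illustrative).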
+
+_webkit-patch_complete()
+{
+ local command current_command="${COMP_WORDS[1]}"
+ case "$current_command" in
+ -h|--help)
+ command="help";
+ ;;
+ *)
+ command="$current_command"
+ ;;
+ esac
+
+ if [ $COMP_CWORD -eq 1 ]; then
+ __webkit-patch_generate_reply "--help"
+ return
+ fi
+}
+
+complete -F _webkit-patch_complete webkit-patch
+complete -o default -W "--continue --fix-merged --help --no-continue --no-warnings --warnings -c -f -h -w" resolve-ChangeLogs
+complete -o default -W "--bug --diff --git-commit --git-index --git-reviewer --help --no-update --no-write --open --update --write -d -h -o" prepare-ChangeLog
+complete -W "--clean --debug --help -h" build-webkit
+complete -o default -W "--add-platform-exceptions --complex-text --configuration --guard-malloc --help --http --ignore-tests --launch-safari --leaks --merge-leak-depth --new-test-results --no-http --no-show-results --no-new-test-results --no-sample-on-timeout --no-strip-editing-callbacks --pixel-tests --platform --port --quiet --random --reset-results --results-directory --reverse --root --sample-on-timeout --singly --skipped --slowest --strict --strip-editing-callbacks --threaded --timeout --tolerance --use-remote-links-to-tests --valgrind --verbose -1 -c -g -h -i -l -m -o -p -q -t -v" run-webkit-tests
diff --git a/src/third_party/blink/Tools/Scripts/webkitdirs.pm b/src/third_party/blink/Tools/Scripts/webkitdirs.pm
new file mode 100755
index 0000000..012e6f1
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitdirs.pm
@@ -0,0 +1,2863 @@
+# Copyright (C) 2005, 2006, 2007, 2010, 2011, 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2011 Research In Motion Limited. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Module to share code to get to WebKit directories.
+
+use strict;
+use version;
+use warnings;
+use Config;
+use Digest::MD5 qw(md5_hex);
+use FindBin;
+use File::Basename;
+use File::Path qw(mkpath rmtree);
+use File::Spec;
+use File::stat;
+use POSIX;
+use VCSUtils;
+
+BEGIN {
+ use Exporter ();
+ our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, %EXPORT_TAGS);
+ $VERSION = 1.00;
+ @ISA = qw(Exporter);
+ @EXPORT = qw(
+ &XcodeOptionString
+ &XcodeOptionStringNoConfig
+ &XcodeOptions
+ &baseProductDir
+ &chdirWebKit
+ &checkFrameworks
+ &cmakeBasedPortArguments
+ &cmakeBasedPortName
+ ¤tSVNRevision
+ &debugSafari
+ &nmPath
+ &passedConfiguration
+ &printHelpAndExitForRunAndDebugWebKitAppIfNeeded
+ &productDir
+ &runMacWebKitApp
+ &safariPath
+ &setConfiguration
+ USE_OPEN_COMMAND
+ );
+ %EXPORT_TAGS = ( );
+ @EXPORT_OK = ();
+}
+
+use constant USE_OPEN_COMMAND => 1; # Used in runMacWebKitApp().
+use constant INCLUDE_OPTIONS_FOR_DEBUGGING => 1;
+
+our @EXPORT_OK;
+
+my $architecture;
+my $numberOfCPUs;
+my $baseProductDir;
+my @baseProductDirOption;
+my $configuration;
+my $xcodeSDK;
+my $configurationForVisualStudio;
+my $configurationProductDir;
+my $sourceDir;
+my $currentSVNRevision;
+my $debugger;
+my $nmPath;
+my $osXVersion;
+my $generateDsym;
+my $isQt;
+my $qmakebin = "qmake"; # Allow override of the qmake binary from $PATH
+my $isGtk;
+my $isWinCE;
+my $isWinCairo;
+my $isWx;
+my $isEfl;
+my @wxArgs;
+my $isBlackBerry;
+my $isChromium;
+my $isChromiumAndroid;
+my $isChromiumMacMake;
+my $isChromiumNinja;
+my $forceChromiumUpdate;
+my $isInspectorFrontend;
+my $isWK2;
+my $shouldTargetWebProcess;
+my $shouldUseXPCServiceForWebProcess;
+my $shouldUseGuardMalloc;
+my $xcodeVersion;
+
+# Variables for Win32 support
+my $vcBuildPath;
+my $windowsSourceDir;
+my $winVersion;
+my $willUseVCExpressWhenBuilding = 0;
+
+# Defined in VCSUtils.
+sub exitStatus($);
+
+sub determineSourceDir
+{
+ return if $sourceDir;
+ $sourceDir = $FindBin::Bin;
+    $sourceDir =~ s|/Tools/Scripts/?$||; # Strip the /Tools/Scripts suffix (and any trailing '/').
+
+ die "Could not find top level Blink directory using FindBin.\n" unless -d "$sourceDir/Tools";
+}
+
+sub currentPerlPath()
+{
+ my $thisPerl = $^X;
+ if ($^O ne 'VMS') {
+ $thisPerl .= $Config{_exe} unless $thisPerl =~ m/$Config{_exe}$/i;
+ }
+ return $thisPerl;
+}
+
+sub setQmakeBinaryPath($)
+{
+ ($qmakebin) = @_;
+}
+
+# used for scripts which are stored in a non-standard location
+sub setSourceDir($)
+{
+ ($sourceDir) = @_;
+}
+
+sub determineXcodeVersion
+{
+ return if defined $xcodeVersion;
+ my $xcodebuildVersionOutput = `xcodebuild -version`;
+ $xcodeVersion = ($xcodebuildVersionOutput =~ /Xcode ([0-9](\.[0-9]+)*)/) ? $1 : "3.0";
+}
+
+sub readXcodeUserDefault($)
+{
+ my ($unprefixedKey) = @_;
+
+ determineXcodeVersion();
+
+ my $xcodeDefaultsDomain = (eval "v$xcodeVersion" lt v4) ? "com.apple.Xcode" : "com.apple.dt.Xcode";
+ my $xcodeDefaultsPrefix = (eval "v$xcodeVersion" lt v4) ? "PBX" : "IDE";
+ my $devnull = File::Spec->devnull();
+
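+    # For example, readXcodeUserDefault("BuildLocationStyle") on Xcode 4 or
+    # newer runs: defaults read com.apple.dt.Xcode IDEBuildLocationStyle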
+ my $value = `defaults read $xcodeDefaultsDomain ${xcodeDefaultsPrefix}${unprefixedKey} 2> ${devnull}`;
+ return if $?;
+
+ chomp $value;
+ return $value;
+}
+
+sub determineBaseProductDir
+{
+ return if defined $baseProductDir;
+ determineSourceDir();
+
+ my $setSharedPrecompsDir;
+ $baseProductDir = $ENV{"WEBKITOUTPUTDIR"}; # FIXME: Switch to WEBKIT_OUTPUTDIR as part of https://bugs.webkit.org/show_bug.cgi?id=109472
+
+ if (!defined($baseProductDir) and isAppleMacWebKit()) {
+ # Silently remove ~/Library/Preferences/xcodebuild.plist which can
+ # cause build failure. The presence of
+ # ~/Library/Preferences/xcodebuild.plist can prevent xcodebuild from
+ # respecting global settings such as a custom build products directory
+ # (<rdar://problem/5585899>).
+ my $personalPlistFile = $ENV{HOME} . "/Library/Preferences/xcodebuild.plist";
+ if (-e $personalPlistFile) {
+ unlink($personalPlistFile) || die "Could not delete $personalPlistFile: $!";
+ }
+
+ determineXcodeVersion();
+
+ if (eval "v$xcodeVersion" ge v4) {
+ my $buildLocationStyle = join '', readXcodeUserDefault("BuildLocationStyle");
+ if ($buildLocationStyle eq "Custom") {
+ my $buildLocationType = join '', readXcodeUserDefault("CustomBuildLocationType");
+ # FIXME: Read CustomBuildIntermediatesPath and set OBJROOT accordingly.
+ $baseProductDir = readXcodeUserDefault("CustomBuildProductsPath") if $buildLocationType eq "Absolute";
+ }
+
+ # DeterminedByTargets corresponds to a setting of "Legacy" in Xcode.
+ # It is the only build location style for which SHARED_PRECOMPS_DIR is not
+ # overridden when building from within Xcode.
+ $setSharedPrecompsDir = 1 if $buildLocationStyle ne "DeterminedByTargets";
+ }
+
+ if (!defined($baseProductDir)) {
+ $baseProductDir = join '', readXcodeUserDefault("ApplicationwideBuildSettings");
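+            # e.g. this extracts "/Build/Products" from a settings blob
+            # containing: SYMROOT = "/Build/Products"; (path illustrative)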
+ $baseProductDir = $1 if $baseProductDir =~ /SYMROOT\s*=\s*\"(.*?)\";/s;
+ }
+
+ undef $baseProductDir unless $baseProductDir =~ /^\//;
+ } elsif (isChromium()) {
+ if (isLinux() || isChromiumAndroid() || isChromiumMacMake() || isChromiumNinja()) {
+ $baseProductDir = "$sourceDir/out";
+ } elsif (isDarwin()) {
+ $baseProductDir = "$sourceDir/Source/WebKit/chromium/xcodebuild";
+ } elsif (isWindows() || isCygwin()) {
+ $baseProductDir = "$sourceDir/Source/WebKit/chromium/build";
+ }
+ }
+
+ if (!defined($baseProductDir)) { # Port-specific checks failed, use default
+ $baseProductDir = "$sourceDir/WebKitBuild";
+ }
+
+ if (isBlackBerry()) {
+ my %archInfo = blackberryTargetArchitecture();
+ $baseProductDir = "$baseProductDir/" . $archInfo{"cpuDir"};
+ }
+
+ if (isGit() && isGitBranchBuild() && !isChromium()) {
+ my $branch = gitBranch();
+ $baseProductDir = "$baseProductDir/$branch";
+ }
+
+ if (isAppleMacWebKit()) {
+ $baseProductDir =~ s|^\Q$(SRCROOT)/..\E$|$sourceDir|;
+ $baseProductDir =~ s|^\Q$(SRCROOT)/../|$sourceDir/|;
+ $baseProductDir =~ s|^~/|$ENV{HOME}/|;
+ die "Can't handle Xcode product directory with a ~ in it.\n" if $baseProductDir =~ /~/;
+ die "Can't handle Xcode product directory with a variable in it.\n" if $baseProductDir =~ /\$/;
+ @baseProductDirOption = ("SYMROOT=$baseProductDir", "OBJROOT=$baseProductDir");
+ push(@baseProductDirOption, "SHARED_PRECOMPS_DIR=${baseProductDir}/PrecompiledHeaders") if $setSharedPrecompsDir;
+ }
+
+ if (isCygwin()) {
+ my $dosBuildPath = `cygpath --windows \"$baseProductDir\"`;
+ chomp $dosBuildPath;
+ $ENV{"WEBKITOUTPUTDIR"} = $dosBuildPath;
+ $ENV{"WEBKIT_OUTPUTDIR"} = $dosBuildPath;
+ my $unixBuildPath = `cygpath --unix \"$baseProductDir\"`;
+ chomp $unixBuildPath;
+ $baseProductDir = $unixBuildPath;
+ }
+}
+
+sub setBaseProductDir($)
+{
+ ($baseProductDir) = @_;
+}
+
+sub determineConfiguration
+{
+ return if defined $configuration;
+ determineBaseProductDir();
+ if (open CONFIGURATION, "$baseProductDir/Configuration") {
+ $configuration = <CONFIGURATION>;
+ close CONFIGURATION;
+ }
+ if ($configuration) {
+ chomp $configuration;
+ # compatibility for people who have old Configuration files
+ $configuration = "Release" if $configuration eq "Deployment";
+ $configuration = "Debug" if $configuration eq "Development";
+ } else {
+ $configuration = "Release";
+ }
+
+ if ($configuration && isWinCairo()) {
+ unless ($configuration =~ /_Cairo_CFLite$/) {
+ $configuration .= "_Cairo_CFLite";
+ }
+ }
+}
+
+sub determineArchitecture
+{
+ return if defined $architecture;
+ # make sure $architecture is defined in all cases
+ $architecture = "";
+
+ determineBaseProductDir();
+ determineXcodeSDK();
+
+ if (isGtk()) {
+ determineConfigurationProductDir();
+ my $host_triple = `grep -E '^host = ' $configurationProductDir/GNUmakefile`;
+ if ($host_triple =~ m/^host = ([^-]+)-/) {
+ # We have a configured build tree; use it.
+ $architecture = $1;
+ }
+ } elsif (isAppleMacWebKit()) {
+ if (open ARCHITECTURE, "$baseProductDir/Architecture") {
+ $architecture = <ARCHITECTURE>;
+ close ARCHITECTURE;
+ }
+ if ($architecture) {
+ chomp $architecture;
+ } else {
+ if (not defined $xcodeSDK or $xcodeSDK =~ /^(\/$|macosx)/) {
+ my $supports64Bit = `sysctl -n hw.optional.x86_64`;
+ chomp $supports64Bit;
+ $architecture = 'x86_64' if $supports64Bit;
+ } elsif ($xcodeSDK =~ /^iphonesimulator/) {
+ $architecture = 'i386';
+ } elsif ($xcodeSDK =~ /^iphoneos/) {
+ $architecture = 'armv7';
+ }
+ }
+ } elsif (isEfl()) {
+ my $host_processor = "";
+ $host_processor = `cmake --system-information | grep CMAKE_SYSTEM_PROCESSOR`;
+ if ($host_processor =~ m/^CMAKE_SYSTEM_PROCESSOR \"([^"]+)\"/) {
+ # We have a configured build tree; use it.
+ $architecture = $1;
+ $architecture = 'x86_64' if $architecture eq 'amd64';
+ }
+ }
+
+ if (!$architecture && (isGtk() || isAppleMacWebKit() || isEfl())) {
+ # Fall back to output of `arch', if it is present.
+ $architecture = `arch`;
+ chomp $architecture;
+ }
+
+ if (!$architecture && (isGtk() || isAppleMacWebKit() || isEfl())) {
+ # Fall back to output of `uname -m', if it is present.
+ $architecture = `uname -m`;
+ chomp $architecture;
+ }
+}
+
+sub determineNumberOfCPUs
+{
+ return if defined $numberOfCPUs;
+ if (defined($ENV{NUMBER_OF_PROCESSORS})) {
+ $numberOfCPUs = $ENV{NUMBER_OF_PROCESSORS};
+ } elsif (isLinux()) {
+        # First try the nproc utility, if it exists. If we get no
+        # results, fall back to interpreting /proc directly.
+ chomp($numberOfCPUs = `nproc --all 2> /dev/null`);
+ if ($numberOfCPUs eq "") {
+ $numberOfCPUs = (grep /processor/, `cat /proc/cpuinfo`);
+ }
+ } elsif (isWindows() || isCygwin()) {
+ # Assumes cygwin
+ $numberOfCPUs = `ls /proc/registry/HKEY_LOCAL_MACHINE/HARDWARE/DESCRIPTION/System/CentralProcessor | wc -w`;
+ } elsif (isDarwin() || isFreeBSD()) {
+ chomp($numberOfCPUs = `sysctl -n hw.ncpu`);
+ }
+}
+
+sub jscPath($)
+{
+ my ($productDir) = @_;
+ my $jscName = "jsc";
+ $jscName .= "_debug" if configurationForVisualStudio() eq "Debug_All";
+ $jscName .= ".exe" if (isWindows() || isCygwin());
+ return "$productDir/$jscName" if -e "$productDir/$jscName";
+ return "$productDir/JavaScriptCore.framework/Resources/$jscName";
+}
+
+sub argumentsForConfiguration()
+{
+ determineConfiguration();
+ determineArchitecture();
+
+ my @args = ();
+ push(@args, '--debug') if $configuration eq "Debug";
+ push(@args, '--release') if $configuration eq "Release";
+ push(@args, '--32-bit') if $architecture ne "x86_64";
+ push(@args, '--qt') if isQt();
+ push(@args, '--gtk') if isGtk();
+ push(@args, '--efl') if isEfl();
+ push(@args, '--wincairo') if isWinCairo();
+ push(@args, '--wince') if isWinCE();
+ push(@args, '--wx') if isWx();
+ push(@args, '--blackberry') if isBlackBerry();
+ push(@args, '--chromium') if isChromium() && !isChromiumAndroid();
+ push(@args, '--chromium-android') if isChromiumAndroid();
+ push(@args, '--inspector-frontend') if isInspectorFrontend();
+ return @args;
+}
+
+sub determineXcodeSDK
+{
+ return if defined $xcodeSDK;
+ for (my $i = 0; $i <= $#ARGV; $i++) {
+ my $opt = $ARGV[$i];
+ if ($opt =~ /^--sdk$/i) {
+ splice(@ARGV, $i, 1);
+ $xcodeSDK = splice(@ARGV, $i, 1);
+ } elsif ($opt =~ /^--device$/i) {
+ splice(@ARGV, $i, 1);
+ $xcodeSDK = 'iphoneos.internal';
+ } elsif ($opt =~ /^--sim(ulator)?/i) {
+ splice(@ARGV, $i, 1);
+ $xcodeSDK = 'iphonesimulator';
+ }
+ }
+}
+
+sub xcodeSDK
+{
+ determineXcodeSDK();
+ return $xcodeSDK;
+}
+
+sub determineConfigurationForVisualStudio
+{
+ return if defined $configurationForVisualStudio;
+ determineConfiguration();
+ # FIXME: We should detect when Debug_All or Production has been chosen.
+ $configurationForVisualStudio = $configuration;
+}
+
+sub usesPerConfigurationBuildDirectory
+{
+ # [Gtk] We don't have Release/Debug configurations in straight
+    # autotool builds (non build-webkit). In that case, if
+    # WEBKITOUTPUTDIR exists, use it as our configuration dir. This
+    # allows us to run run-webkit-tests without using build-webkit.
+ return ($ENV{"WEBKITOUTPUTDIR"} && isGtk()) || isAppleWinWebKit();
+}
+
+sub determineConfigurationProductDir
+{
+ return if defined $configurationProductDir;
+ determineBaseProductDir();
+ determineConfiguration();
+ if (isAppleWinWebKit() && !isWx()) {
+ $configurationProductDir = File::Spec->catdir($baseProductDir, configurationForVisualStudio(), "bin");
+ } else {
+ if (usesPerConfigurationBuildDirectory()) {
+ $configurationProductDir = "$baseProductDir";
+ } else {
+ $configurationProductDir = "$baseProductDir/$configuration";
+ }
+ }
+}
+
+sub setConfigurationProductDir($)
+{
+ ($configurationProductDir) = @_;
+}
+
+sub determineCurrentSVNRevision
+{
+ # We always update the current SVN revision here, and leave the caching
+ # to currentSVNRevision(), so that changes to the SVN revision while the
+ # script is running can be picked up by calling this function again.
+ determineSourceDir();
+ $currentSVNRevision = svnRevisionForDirectory($sourceDir);
+ return $currentSVNRevision;
+}
+
+
+sub chdirWebKit
+{
+ determineSourceDir();
+ chdir $sourceDir or die;
+}
+
+sub baseProductDir
+{
+ determineBaseProductDir();
+ return $baseProductDir;
+}
+
+sub sourceDir
+{
+ determineSourceDir();
+ return $sourceDir;
+}
+
+sub productDir
+{
+ determineConfigurationProductDir();
+ return $configurationProductDir;
+}
+
+sub jscProductDir
+{
+ my $productDir = productDir();
+ $productDir .= "/bin" if (isQt() || isEfl());
+ $productDir .= "/Programs" if isGtk();
+
+ return $productDir;
+}
+
+sub configuration()
+{
+ determineConfiguration();
+ return $configuration;
+}
+
+sub configurationForVisualStudio()
+{
+ determineConfigurationForVisualStudio();
+ return $configurationForVisualStudio;
+}
+
+sub currentSVNRevision
+{
+ determineCurrentSVNRevision() if not defined $currentSVNRevision;
+ return $currentSVNRevision;
+}
+
+sub generateDsym()
+{
+ determineGenerateDsym();
+ return $generateDsym;
+}
+
+sub determineGenerateDsym()
+{
+ return if defined($generateDsym);
+ $generateDsym = checkForArgumentAndRemoveFromARGV("--dsym");
+}
+
+sub argumentsForXcode()
+{
+ my @args = ();
+ push @args, "DEBUG_INFORMATION_FORMAT=dwarf-with-dsym" if generateDsym();
+ return @args;
+}
+
+sub XcodeOptions
+{
+ determineBaseProductDir();
+ determineConfiguration();
+ determineArchitecture();
+ determineXcodeSDK();
+
+ my @sdkOption = ($xcodeSDK ? "SDKROOT=$xcodeSDK" : ());
+ my @architectureOption = ($architecture ? "ARCHS=$architecture" : ());
+
+ return (@baseProductDirOption, "-configuration", $configuration, @architectureOption, @sdkOption, argumentsForXcode());
+}
+
+sub XcodeOptionString
+{
+ return join " ", XcodeOptions();
+}
+
+sub XcodeOptionStringNoConfig
+{
+ return join " ", @baseProductDirOption;
+}
+
+sub XcodeCoverageSupportOptions()
+{
+ my @coverageSupportOptions = ();
+ push @coverageSupportOptions, "GCC_GENERATE_TEST_COVERAGE_FILES=YES";
+ push @coverageSupportOptions, "GCC_INSTRUMENT_PROGRAM_FLOW_ARCS=YES";
+ push @coverageSupportOptions, "EXTRA_LINK= \$(EXTRA_LINK) -ftest-coverage -fprofile-arcs";
+ push @coverageSupportOptions, "OTHER_CFLAGS= \$(OTHER_CFLAGS) -DCOVERAGE -MD";
+ push @coverageSupportOptions, "OTHER_LDFLAGS=\$(OTHER_LDFLAGS) -ftest-coverage -fprofile-arcs -lgcov";
+ return @coverageSupportOptions;
+}
+
+my $passedConfiguration;
+my $searchedForPassedConfiguration;
+sub determinePassedConfiguration
+{
+ return if $searchedForPassedConfiguration;
+ $searchedForPassedConfiguration = 1;
+
+ for my $i (0 .. $#ARGV) {
+ my $opt = $ARGV[$i];
+ if ($opt =~ /^--debug$/i) {
+ splice(@ARGV, $i, 1);
+ $passedConfiguration = "Debug";
+ $passedConfiguration .= "_Cairo_CFLite" if (isWinCairo() && isCygwin());
+ return;
+ }
+ if ($opt =~ /^--release$/i) {
+ splice(@ARGV, $i, 1);
+ $passedConfiguration = "Release";
+ $passedConfiguration .= "_Cairo_CFLite" if (isWinCairo() && isCygwin());
+ return;
+ }
+ if ($opt =~ /^--profil(e|ing)$/i) {
+ splice(@ARGV, $i, 1);
+ $passedConfiguration = "Profiling";
+ $passedConfiguration .= "_Cairo_CFLite" if (isWinCairo() && isCygwin());
+ return;
+ }
+ }
+ $passedConfiguration = undef;
+}
+
+sub passedConfiguration
+{
+ determinePassedConfiguration();
+ return $passedConfiguration;
+}
+
+sub setConfiguration
+{
+ setArchitecture();
+
+ if (my $config = shift @_) {
+ $configuration = $config;
+ return;
+ }
+
+ determinePassedConfiguration();
+ $configuration = $passedConfiguration if $passedConfiguration;
+}
+
+
+my $passedArchitecture;
+my $searchedForPassedArchitecture;
+sub determinePassedArchitecture
+{
+ return if $searchedForPassedArchitecture;
+ $searchedForPassedArchitecture = 1;
+
+ for my $i (0 .. $#ARGV) {
+ my $opt = $ARGV[$i];
+ if ($opt =~ /^--32-bit$/i) {
+ splice(@ARGV, $i, 1);
+ if (isAppleMacWebKit() || isWx()) {
+ $passedArchitecture = `arch`;
+ chomp $passedArchitecture;
+ }
+ return;
+ }
+ }
+ $passedArchitecture = undef;
+}
+
+sub passedArchitecture
+{
+ determinePassedArchitecture();
+ return $passedArchitecture;
+}
+
+sub architecture()
+{
+ determineArchitecture();
+ return $architecture;
+}
+
+sub numberOfCPUs()
+{
+ determineNumberOfCPUs();
+ return $numberOfCPUs;
+}
+
+sub setArchitecture
+{
+ if (my $arch = shift @_) {
+ $architecture = $arch;
+ return;
+ }
+
+ determinePassedArchitecture();
+ $architecture = $passedArchitecture if $passedArchitecture;
+}
+
+sub executableHasEntitlements
+{
+ my $executablePath = shift;
+ return (`codesign -d --entitlements - $executablePath 2>&1` =~ /<key>/);
+}
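+# Illustrative note on the check above: an executable signed with entitlements
+# makes `codesign -d --entitlements -` print an XML plist, so the output
+# contains "<key>" lines; an unentitled binary prints none. Hypothetical example:
+#   executableHasEntitlements("/Applications/Safari.app/Contents/MacOS/Safari")
+#   # true iff the output includes e.g. "<key>com.apple.security.app-sandbox</key>"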
+
+sub safariPathFromSafariBundle
+{
+ my ($safariBundle) = @_;
+
+ if (isAppleMacWebKit()) {
+ my $safariPath = "$safariBundle/Contents/MacOS/Safari";
+ my $safariForWebKitDevelopmentPath = "$safariBundle/Contents/MacOS/SafariForWebKitDevelopment";
+ return $safariForWebKitDevelopmentPath if -f $safariForWebKitDevelopmentPath && executableHasEntitlements($safariPath);
+ return $safariPath;
+ }
+ return $safariBundle if isAppleWinWebKit();
+}
+
+sub installedSafariPath
+{
+ my $safariBundle;
+
+ if (isAppleMacWebKit()) {
+ $safariBundle = "/Applications/Safari.app";
+ } elsif (isAppleWinWebKit()) {
+ $safariBundle = readRegistryString("/HKLM/SOFTWARE/Apple Computer, Inc./Safari/InstallDir");
+ $safariBundle =~ s/[\r\n]+$//;
+ $safariBundle = `cygpath -u '$safariBundle'` if isCygwin();
+ $safariBundle =~ s/[\r\n]+$//;
+ $safariBundle .= "Safari.exe";
+ }
+
+ return safariPathFromSafariBundle($safariBundle);
+}
+
+# Locate Safari.
+sub safariPath
+{
+ # Use WEBKIT_SAFARI environment variable if present.
+ my $safariBundle = $ENV{WEBKIT_SAFARI};
+ if (!$safariBundle) {
+ determineConfigurationProductDir();
+ # Use Safari.app in product directory if present (good for Safari development team).
+ if (isAppleMacWebKit() && -d "$configurationProductDir/Safari.app") {
+ $safariBundle = "$configurationProductDir/Safari.app";
+ } elsif (isAppleWinWebKit()) {
+ my $path = "$configurationProductDir/Safari.exe";
+ my $debugPath = "$configurationProductDir/Safari_debug.exe";
+
+ if (configurationForVisualStudio() eq "Debug_All" && -x $debugPath) {
+ $safariBundle = $debugPath;
+ } elsif (-x $path) {
+ $safariBundle = $path;
+ }
+ }
+ if (!$safariBundle) {
+ return installedSafariPath();
+ }
+ }
+ my $safariPath = safariPathFromSafariBundle($safariBundle);
+ die "Can't find executable at $safariPath.\n" if isAppleMacWebKit() && !-x $safariPath;
+ return $safariPath;
+}
+
+sub builtDylibPathForName
+{
+ my $libraryName = shift;
+ determineConfigurationProductDir();
+ if (isChromium()) {
+ return "$configurationProductDir/$libraryName";
+ }
+ if (isBlackBerry()) {
+ my $libraryExtension = $libraryName =~ /^WebKit$/i ? ".so" : ".a";
+ return "$configurationProductDir/$libraryName/lib" . lc($libraryName) . $libraryExtension;
+ }
+ if (isQt()) {
+    my $isSearchingForWebCore = $libraryName =~ /WebCore/;
+ if (isDarwin()) {
+ $libraryName = "QtWebKitWidgets";
+ } else {
+ $libraryName = "Qt5WebKitWidgets";
+ }
+ my $result;
+ if (isDarwin() and -d "$configurationProductDir/lib/$libraryName.framework") {
+ $result = "$configurationProductDir/lib/$libraryName.framework/$libraryName";
+ } elsif (isDarwin() and -d "$configurationProductDir/lib") {
+ $result = "$configurationProductDir/lib/lib$libraryName.dylib";
+ } elsif (isWindows()) {
+ if (configuration() eq "Debug") {
+ # On Windows, there is a "d" suffix to the library name. See <http://trac.webkit.org/changeset/53924/>.
+ $libraryName .= "d";
+ }
+
+ chomp(my $mkspec = `$qmakebin -query QT_HOST_DATA`);
+ $mkspec .= "/mkspecs";
+ my $qtMajorVersion = retrieveQMakespecVar("$mkspec/qconfig.pri", "QT_MAJOR_VERSION");
+ if (not $qtMajorVersion) {
+ $qtMajorVersion = "";
+ }
+
+ $result = "$configurationProductDir/lib/$libraryName$qtMajorVersion.dll";
+ } else {
+ $result = "$configurationProductDir/lib/lib$libraryName.so";
+ }
+
+ if ($isSearchingForWebCore) {
+ # With CONFIG+=force_static_libs_as_shared we have a shared library for each subdir.
+ # For feature detection to work it is necessary to return the path of the WebCore library here.
+ my $replacedWithWebCore = $result;
+ $replacedWithWebCore =~ s/$libraryName/WebCore/g;
+ if (-e $replacedWithWebCore) {
+ return $replacedWithWebCore;
+ }
+ }
+
+ return $result;
+ }
+ if (isWx()) {
+ return "$configurationProductDir/libwxwebkit.dylib";
+ }
+ if (isGtk()) {
+ # WebKitGTK+ for GTK2, WebKitGTK+ for GTK3, and WebKit2 respectively.
+ my @libraries = ("libwebkitgtk-1.0", "libwebkitgtk-3.0", "libwebkit2gtk-3.0");
+ my $extension = isDarwin() ? ".dylib" : ".so";
+
+ foreach $libraryName (@libraries) {
+ my $libraryPath = "$configurationProductDir/.libs/" . $libraryName . $extension;
+ return $libraryPath if -e $libraryPath;
+ }
+ return "NotFound";
+ }
+ if (isEfl()) {
+ if (isWK2()) {
+ return "$configurationProductDir/lib/libewebkit2.so";
+ }
+ return "$configurationProductDir/lib/libewebkit.so";
+ }
+ if (isWinCE()) {
+ return "$configurationProductDir/$libraryName";
+ }
+ if (isAppleMacWebKit()) {
+ return "$configurationProductDir/$libraryName.framework/Versions/A/$libraryName";
+ }
+ if (isAppleWinWebKit()) {
+ if ($libraryName eq "JavaScriptCore") {
+ return "$baseProductDir/lib/$libraryName.lib";
+ } else {
+ return "$baseProductDir/$libraryName.intermediate/$configuration/$libraryName.intermediate/$libraryName.lib";
+ }
+ }
+
+ die "Unsupported platform, can't determine built library locations.\nTry `build-webkit --help` for more information.\n";
+}
+
+# Check to see that all the frameworks are built.
+sub checkFrameworks # FIXME: This is a poor name since only the Mac calls built WebCore a Framework.
+{
+ return if isCygwin() || isWindows();
+ my @frameworks = ("JavaScriptCore", "WebCore");
+ push(@frameworks, "WebKit") if isAppleMacWebKit(); # FIXME: This seems wrong, all ports should have a WebKit these days.
+ for my $framework (@frameworks) {
+ my $path = builtDylibPathForName($framework);
+ die "Can't find built framework at \"$path\".\n" unless -e $path;
+ }
+}
+
+sub isInspectorFrontend()
+{
+ determineIsInspectorFrontend();
+ return $isInspectorFrontend;
+}
+
+sub determineIsInspectorFrontend()
+{
+ return if defined($isInspectorFrontend);
+ $isInspectorFrontend = checkForArgumentAndRemoveFromARGV("--inspector-frontend");
+}
+
+sub isQt()
+{
+ determineIsQt();
+ return $isQt;
+}
+
+sub getQtVersion()
+{
+ my $qtVersion = `$qmakebin --version`;
+    $qtVersion =~ s/^(.*)Qt version (\d+\.\d+)(.*)/$2/s;
+ return $qtVersion;
+}
+
+sub qtFeatureDefaults
+{
+ die "ERROR: qmake missing but required to build WebKit.\n" if not commandExists($qmakebin);
+
+ my $oldQmakeEval = $ENV{QMAKE_CACHE_EVAL};
+ $ENV{QMAKE_CACHE_EVAL} = "CONFIG+=print_defaults";
+
+ my $originalCwd = getcwd();
+ my $qmakepath = File::Spec->catfile(sourceDir(), "Tools", "qmake");
+ chdir $qmakepath or die "Failed to cd into " . $qmakepath . "\n";
+
+ my $file = File::Spec->catfile(sourceDir(), "WebKit.pro");
+
+ my @buildArgs;
+ @buildArgs = (@buildArgs, @{$_[0]}) if (@_);
+
+ my @defaults = `$qmakebin @buildArgs $file 2>&1`;
+
+ my %qtFeatureDefaults;
+ for (@defaults) {
+ if (/DEFINES: /) {
+        while (/(\S+?)=(\S+)/gi) {
+ $qtFeatureDefaults{$1}=$2;
+ }
+ } elsif (/Done computing defaults/) {
+ last;
+ } elsif (@_) {
+ print $_;
+ }
+ }
+
+ chdir $originalCwd;
+ $ENV{QMAKE_CACHE_EVAL} = $oldQmakeEval;
+
+ return %qtFeatureDefaults;
+}
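+# A minimal sketch of the qmake output parsed above, with hypothetical values:
+#   DEFINES: ENABLE_WEBGL=1 ENABLE_WEB_AUDIO=0
+#   Done computing defaults
+# would yield (ENABLE_WEBGL => 1, ENABLE_WEB_AUDIO => 0).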
+
+sub commandExists($)
+{
+ my $command = shift;
+ my $devnull = File::Spec->devnull();
+ return `$command --version 2> $devnull`;
+}
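+# Caveat: a command counts as present only if `--version` prints something to
+# stdout (stderr is discarded); a tool that exists but stays silent, or that
+# rejects --version, is misreported as missing. Illustrative usage:
+#   die "ERROR: gperf missing.\n" unless commandExists("gperf");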
+
+sub checkForArgumentAndRemoveFromARGV
+{
+ my $argToCheck = shift;
+ return checkForArgumentAndRemoveFromArrayRef($argToCheck, \@ARGV);
+}
+
+sub checkForArgumentAndRemoveFromArrayRef
+{
+ my ($argToCheck, $arrayRef) = @_;
+ my @indicesToRemove;
+ foreach my $index (0 .. $#$arrayRef) {
+ my $opt = $$arrayRef[$index];
+ if ($opt =~ /^$argToCheck$/i ) {
+ push(@indicesToRemove, $index);
+ }
+ }
+    # Splice from the end so earlier indices remain valid after each removal.
+    foreach my $index (reverse @indicesToRemove) {
+ splice(@$arrayRef, $index, 1);
+ }
+ return $#indicesToRemove > -1;
+}
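+# Illustrative usage (hypothetical values); the match is anchored and
+# case-insensitive, and all occurrences are removed:
+#   my @opts = ("--Foo", "bar", "--foo");
+#   checkForArgumentAndRemoveFromArrayRef("--foo", \@opts);  # returns true; @opts is now ("bar")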
+
+sub isWK2()
+{
+ if (defined($isWK2)) {
+ return $isWK2;
+ }
+ if (checkForArgumentAndRemoveFromARGV("-2")) {
+ $isWK2 = 1;
+ } else {
+ $isWK2 = 0;
+ }
+ return $isWK2;
+}
+
+sub determineIsQt()
+{
+ return if defined($isQt);
+
+ # Allow override in case QTDIR is not set.
+ if (checkForArgumentAndRemoveFromARGV("--qt")) {
+ $isQt = 1;
+ return;
+ }
+
+    # The presence of QTDIR implies Qt only if none of --gtk, --wx, --efl, --blackberry, --chromium or --wincairo is on the command line.
+ if (isGtk() || isWx() || isEfl() || isBlackBerry() || isChromium() || isWinCairo()) {
+ $isQt = 0;
+ return;
+ }
+
+ $isQt = defined($ENV{'QTDIR'});
+}
+
+sub isBlackBerry()
+{
+ determineIsBlackBerry();
+ return $isBlackBerry;
+}
+
+sub determineIsBlackBerry()
+{
+ return if defined($isBlackBerry);
+ $isBlackBerry = checkForArgumentAndRemoveFromARGV("--blackberry");
+}
+
+sub blackberryTargetArchitecture()
+{
+ my $arch = $ENV{"BLACKBERRY_ARCH_TYPE"} ? $ENV{"BLACKBERRY_ARCH_TYPE"} : "arm";
+ my $cpu = $ENV{"BLACKBERRY_ARCH_CPU"} ? $ENV{"BLACKBERRY_ARCH_CPU"} : "";
+ my $cpuDir;
+ my $buSuffix;
+ if (($cpu eq "v7le") || ($cpu eq "a9")) {
+ $cpuDir = $arch . "le-v7";
+ $buSuffix = $arch . "v7";
+ } else {
+ $cpu = $arch;
+ $cpuDir = $arch;
+ $buSuffix = $arch;
+ }
+ return ("arch" => $arch,
+ "cpu" => $cpu,
+ "cpuDir" => $cpuDir,
+ "buSuffix" => $buSuffix);
+}
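+# Illustrative result, assuming BLACKBERRY_ARCH_TYPE=arm and
+# BLACKBERRY_ARCH_CPU=v7le in the environment (hypothetical values):
+#   ("arch" => "arm", "cpu" => "v7le", "cpuDir" => "armle-v7", "buSuffix" => "armv7")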
+
+sub blackberryCMakeArguments()
+{
+ my %archInfo = blackberryTargetArchitecture();
+ my $arch = $archInfo{"arch"};
+ my $cpu = $archInfo{"cpu"};
+ my $cpuDir = $archInfo{"cpuDir"};
+ my $buSuffix = $archInfo{"buSuffix"};
+
+ my @cmakeExtraOptions;
+ if ($cpu eq "a9") {
+ $cpu = $arch . "v7le";
+ push @cmakeExtraOptions, '-DTARGETING_PLAYBOOK=1';
+ }
+
+ my $stageDir = $ENV{"STAGE_DIR"};
+ my $stageLib = File::Spec->catdir($stageDir, $cpuDir, "lib");
+ my $stageUsrLib = File::Spec->catdir($stageDir, $cpuDir, "usr", "lib");
+ my $stageInc = File::Spec->catdir($stageDir, "usr", "include");
+
+ my $qnxHost = $ENV{"QNX_HOST"};
+ my $ccCommand;
+ my $cxxCommand;
+ if ($ENV{"USE_ICECC"}) {
+ chomp($ccCommand = `which icecc`);
+ $cxxCommand = $ccCommand;
+ } else {
+ $ccCommand = File::Spec->catfile($qnxHost, "usr", "bin", "qcc");
+ $cxxCommand = $ccCommand;
+ }
+
+ if ($ENV{"CCWRAP"}) {
+ $ccCommand = $ENV{"CCWRAP"};
+ push @cmakeExtraOptions, "-DCMAKE_C_COMPILER_ARG1=qcc";
+ push @cmakeExtraOptions, "-DCMAKE_CXX_COMPILER_ARG1=qcc";
+ }
+
+ push @cmakeExtraOptions, "-DCMAKE_SKIP_RPATH='ON'" if isDarwin();
+ push @cmakeExtraOptions, "-DPUBLIC_BUILD=1" if $ENV{"PUBLIC_BUILD"};
+ push @cmakeExtraOptions, "-DENABLE_GLES2=1" unless $ENV{"DISABLE_GLES2"};
+
+ my @includeSystemDirectories;
+ push @includeSystemDirectories, File::Spec->catdir($stageInc, "grskia", "skia");
+ push @includeSystemDirectories, File::Spec->catdir($stageInc, "grskia");
+ push @includeSystemDirectories, File::Spec->catdir($stageInc, "harfbuzz");
+ push @includeSystemDirectories, File::Spec->catdir($stageInc, "imf");
+    # We only use jpeg-turbo for device builds.
+ push @includeSystemDirectories, File::Spec->catdir($stageInc, "jpeg-turbo") if $arch=~/arm/;
+ push @includeSystemDirectories, $stageInc;
+ push @includeSystemDirectories, File::Spec->catdir($stageInc, "browser", "platform");
+ push @includeSystemDirectories, File::Spec->catdir($stageInc, "browser", "platform", "graphics");
+ push @includeSystemDirectories, File::Spec->catdir($stageInc, "browser", "qsk");
+ push @includeSystemDirectories, File::Spec->catdir($stageInc, "ots");
+
+ my @cxxFlags;
+ push @cxxFlags, "-Wl,-rpath-link,$stageLib";
+ push @cxxFlags, "-Wl,-rpath-link," . File::Spec->catfile($stageUsrLib, "torch-webkit");
+ push @cxxFlags, "-Wl,-rpath-link,$stageUsrLib";
+ push @cxxFlags, "-L$stageLib";
+ push @cxxFlags, "-L$stageUsrLib";
+
+ if ($ENV{"PROFILE"}) {
+ push @cmakeExtraOptions, "-DPROFILING=1";
+ push @cxxFlags, "-p";
+ }
+
+ my @cmakeArgs;
+ push @cmakeArgs, '-DCMAKE_SYSTEM_NAME="QNX"';
+ push @cmakeArgs, "-DCMAKE_SYSTEM_PROCESSOR=\"$cpuDir\"";
+ push @cmakeArgs, '-DCMAKE_SYSTEM_VERSION="1"';
+ push @cmakeArgs, "-DCMAKE_C_COMPILER=\"$ccCommand\"";
+ push @cmakeArgs, "-DCMAKE_CXX_COMPILER=\"$cxxCommand\"";
+ push @cmakeArgs, "-DCMAKE_C_FLAGS=\"-Vgcc_nto${cpu} -g @cxxFlags\"";
+ push @cmakeArgs, "-DCMAKE_CXX_FLAGS=\"-Vgcc_nto${cpu}_cpp-ne -g -lang-c++ @cxxFlags\"";
+
+ # We cannot use CMAKE_INCLUDE_PATH since this describes the search path for header files in user directories.
+ # And the QNX system headers are in user directories on the host OS (i.e. they aren't installed in the host OS's
+ # system header search path). So, we need to inform g++ that these user directories (@includeSystemDirectories)
+ # are to be taken as the host OS's system header directories when building our port.
+ #
+ # Also, we cannot use CMAKE_SYSTEM_INCLUDE_PATH since that will override the entire system header path.
+ # So, we define the additional system include paths in ADDITIONAL_SYSTEM_INCLUDE_PATH. This list will
+ # be processed in OptionsBlackBerry.cmake.
+ push @cmakeArgs, '-DADDITIONAL_SYSTEM_INCLUDE_PATH="' . join(';', @includeSystemDirectories) . '"';
+
+ # FIXME: Make this more general purpose such that we can pass a list of directories and files.
+ push @cmakeArgs, '-DTHIRD_PARTY_ICU_DIR="' . File::Spec->catdir($stageInc, "unicode") . '"';
+ push @cmakeArgs, '-DTHIRD_PARTY_UNICODE_FILE="' . File::Spec->catfile($stageInc, "unicode.h") . '"';
+
+ push @cmakeArgs, "-DCMAKE_LIBRARY_PATH=\"$stageLib;$stageUsrLib\"";
+ push @cmakeArgs, '-DCMAKE_AR="' . File::Spec->catfile($qnxHost, "usr", "bin", "nto${buSuffix}-ar") . '"';
+ push @cmakeArgs, '-DCMAKE_RANLIB="' . File::Spec->catfile($qnxHost, "usr", "bin", "nto${buSuffix}-ranlib") . '"';
+ push @cmakeArgs, '-DCMAKE_LD="'. File::Spec->catfile($qnxHost, "usr", "bin", "nto${buSuffix}-ld") . '"';
+ push @cmakeArgs, '-DCMAKE_LINKER="' . File::Spec->catfile($qnxHost, "usr", "bin", "nto${buSuffix}-ld") . '"';
+ push @cmakeArgs, "-DECLIPSE_CDT4_GENERATE_SOURCE_PROJECT=TRUE";
+ push @cmakeArgs, '-G"Eclipse CDT4 - Unix Makefiles"';
+ push @cmakeArgs, @cmakeExtraOptions;
+ return @cmakeArgs;
+}
+
+sub determineIsEfl()
+{
+ return if defined($isEfl);
+ $isEfl = checkForArgumentAndRemoveFromARGV("--efl");
+}
+
+sub isEfl()
+{
+ determineIsEfl();
+ return $isEfl;
+}
+
+sub isGtk()
+{
+ determineIsGtk();
+ return $isGtk;
+}
+
+sub determineIsGtk()
+{
+ return if defined($isGtk);
+ $isGtk = checkForArgumentAndRemoveFromARGV("--gtk");
+}
+
+sub isWinCE()
+{
+ determineIsWinCE();
+ return $isWinCE;
+}
+
+sub determineIsWinCE()
+{
+ return if defined($isWinCE);
+ $isWinCE = checkForArgumentAndRemoveFromARGV("--wince");
+}
+
+sub isWx()
+{
+ determineIsWx();
+ return $isWx;
+}
+
+sub determineIsWx()
+{
+ return if defined($isWx);
+ $isWx = checkForArgumentAndRemoveFromARGV("--wx");
+}
+
+sub getWxArgs()
+{
+ if (!@wxArgs) {
+ @wxArgs = ("");
+ my $rawWxArgs = "";
+ foreach my $opt (@ARGV) {
+ if ($opt =~ /^--wx-args/i ) {
+ @ARGV = grep(!/^--wx-args/i, @ARGV);
+ $rawWxArgs = $opt;
+ $rawWxArgs =~ s/--wx-args=//i;
+ }
+ }
+ @wxArgs = split(/,/, $rawWxArgs);
+ }
+ return @wxArgs;
+}
+
+# Determine if this is Debian, Ubuntu, Linspire, or something similar.
+sub isDebianBased()
+{
+ return -e "/etc/debian_version";
+}
+
+sub isFedoraBased()
+{
+ return -e "/etc/fedora-release";
+}
+
+sub isChromium()
+{
+ determineIsChromium();
+ determineIsChromiumAndroid();
+ return $isChromium || $isChromiumAndroid;
+}
+
+sub determineIsChromium()
+{
+ return if defined($isChromium);
+ $isChromium = checkForArgumentAndRemoveFromARGV("--chromium");
+ if ($isChromium) {
+ $forceChromiumUpdate = checkForArgumentAndRemoveFromARGV("--force-update");
+ }
+}
+
+sub isChromiumAndroid()
+{
+ determineIsChromiumAndroid();
+ return $isChromiumAndroid;
+}
+
+sub determineIsChromiumAndroid()
+{
+ return if defined($isChromiumAndroid);
+ $isChromiumAndroid = checkForArgumentAndRemoveFromARGV("--chromium-android");
+}
+
+sub isChromiumMacMake()
+{
+ determineIsChromiumMacMake();
+ return $isChromiumMacMake;
+}
+
+sub determineIsChromiumMacMake()
+{
+ return if defined($isChromiumMacMake);
+
+ my $hasUpToDateMakefile = 0;
+ if (-e 'Makefile.chromium') {
+ unless (-e 'Source/WebKit/chromium/WebKit.xcodeproj') {
+ $hasUpToDateMakefile = 1;
+ } else {
+ $hasUpToDateMakefile = stat('Makefile.chromium')->mtime > stat('Source/WebKit/chromium/WebKit.xcodeproj')->mtime;
+ }
+ }
+ $isChromiumMacMake = isDarwin() && $hasUpToDateMakefile;
+}
+
+sub isChromiumNinja()
+{
+ determineIsChromiumNinja();
+ return $isChromiumNinja;
+}
+
+sub determineIsChromiumNinja()
+{
+ return if defined($isChromiumNinja);
+
+ # This function can be called from baseProductDir(), which in turn is
+ # called by configuration(). So calling configuration() here leads to
+ # infinite recursion. Gyp writes both Debug and Release at the same time
+ # by default, so just check the timestamp on the Release build.ninja file.
+ my $config = "Release";
+
+ my $hasUpToDateNinjabuild = 0;
+ if (-e "out/$config/build.ninja") {
+ my $statNinja = stat("out/$config/build.ninja")->mtime;
+
+ my $statXcode = 0;
+ if (-e 'Source/WebKit/chromium/WebKit.xcodeproj') {
+ $statXcode = stat('Source/WebKit/chromium/WebKit.xcodeproj')->mtime;
+ }
+
+ my $statMake = 0;
+ if (-e 'Makefile.chromium') {
+ $statMake = stat('Makefile.chromium')->mtime;
+ }
+
+ my $statVisualStudio = 0;
+ if (-e 'Source/WebKit/chromium/webkit.vcxproj') {
+ $statVisualStudio = stat('Source/WebKit/chromium/webkit.vcxproj')->mtime;
+ }
+
+ $hasUpToDateNinjabuild = $statNinja > $statXcode && $statNinja > $statMake && $statNinja > $statVisualStudio;
+ }
+ $isChromiumNinja = $hasUpToDateNinjabuild;
+}
+
+sub forceChromiumUpdate()
+{
+ determineIsChromium();
+ return $forceChromiumUpdate;
+}
+
+sub isWinCairo()
+{
+ determineIsWinCairo();
+ return $isWinCairo;
+}
+
+sub determineIsWinCairo()
+{
+ return if defined($isWinCairo);
+ $isWinCairo = checkForArgumentAndRemoveFromARGV("--wincairo");
+}
+
+sub isCygwin()
+{
+ return ($^O eq "cygwin") || 0;
+}
+
+sub isAnyWindows()
+{
+ return isWindows() || isCygwin();
+}
+
+sub determineWinVersion()
+{
+ return if $winVersion;
+
+ if (!isAnyWindows()) {
+ $winVersion = -1;
+ return;
+ }
+
+ my $versionString = `cmd /c ver`;
+ $versionString =~ /(\d)\.(\d)\.(\d+)/;
+
+ $winVersion = {
+ major => $1,
+ minor => $2,
+ build => $3,
+ };
+}
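+# Sketch of the parse above: a `ver` output of, say,
+# "Microsoft Windows [Version 6.1.7601]" (hypothetical) produces
+#   $winVersion = { major => 6, minor => 1, build => 7601 }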
+
+sub winVersion()
+{
+ determineWinVersion();
+ return $winVersion;
+}
+
+sub isWindows7SP0()
+{
+ return isAnyWindows() && winVersion()->{major} == 6 && winVersion()->{minor} == 1 && winVersion()->{build} == 7600;
+}
+
+sub isWindowsVista()
+{
+ return isAnyWindows() && winVersion()->{major} == 6 && winVersion()->{minor} == 0;
+}
+
+sub isWindowsXP()
+{
+ return isAnyWindows() && winVersion()->{major} == 5 && winVersion()->{minor} == 1;
+}
+
+sub isDarwin()
+{
+ return ($^O eq "darwin") || 0;
+}
+
+sub isWindows()
+{
+ return ($^O eq "MSWin32") || 0;
+}
+
+sub isLinux()
+{
+ return ($^O eq "linux") || 0;
+}
+
+sub isFreeBSD()
+{
+ return ($^O eq "freebsd") || 0;
+}
+
+sub isARM()
+{
+ return $Config{archname} =~ /^arm[v\-]/;
+}
+
+sub isCrossCompilation()
+{
+ my $compiler = "";
+ $compiler = $ENV{'CC'} if (defined($ENV{'CC'}));
+ if ($compiler =~ /gcc/) {
+ my $compiler_options = `$compiler -v 2>&1`;
+ my @host = $compiler_options =~ m/--host=(.*?)\s/;
+ my @target = $compiler_options =~ m/--target=(.*?)\s/;
+
+ return ($host[0] ne "" && $target[0] ne "" && $host[0] ne $target[0]);
+ }
+ return 0;
+}
+
+sub isAppleWebKit()
+{
+ return !(isQt() or isGtk() or isWx() or isChromium() or isEfl() or isWinCE() or isBlackBerry());
+}
+
+sub isAppleMacWebKit()
+{
+ return isAppleWebKit() && isDarwin();
+}
+
+sub isAppleWinWebKit()
+{
+ return isAppleWebKit() && (isCygwin() || isWindows());
+}
+
+sub isPerianInstalled()
+{
+ if (!isAppleWebKit()) {
+ return 0;
+ }
+
+ if (-d "/Library/QuickTime/Perian.component") {
+ return 1;
+ }
+
+ if (-d "$ENV{HOME}/Library/QuickTime/Perian.component") {
+ return 1;
+ }
+
+ return 0;
+}
+
+sub determineNmPath()
+{
+ return if $nmPath;
+
+ if (isAppleMacWebKit()) {
+ $nmPath = `xcrun -find nm`;
+ chomp $nmPath;
+ }
+ $nmPath = "nm" if !$nmPath;
+}
+
+sub nmPath()
+{
+ determineNmPath();
+ return $nmPath;
+}
+
+sub determineOSXVersion()
+{
+ return if $osXVersion;
+
+ if (!isDarwin()) {
+ $osXVersion = -1;
+ return;
+ }
+
+ my $version = `sw_vers -productVersion`;
+ my @splitVersion = split(/\./, $version);
+ @splitVersion >= 2 or die "Invalid version $version";
+ $osXVersion = {
+ "major" => $splitVersion[0],
+ "minor" => $splitVersion[1],
+ "subminor" => (defined($splitVersion[2]) ? $splitVersion[2] : 0),
+ };
+}
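+# For example, a `sw_vers -productVersion` of 10.7.4 yields major 10, minor 7,
+# subminor 4; a two-component version such as 10.8 gets subminor 0. The fields
+# are compared numerically (e.g. in isLion() below).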
+
+sub osXVersion()
+{
+ determineOSXVersion();
+ return $osXVersion;
+}
+
+sub isSnowLeopard()
+{
+ return isDarwin() && osXVersion()->{"minor"} == 6;
+}
+
+sub isLion()
+{
+ return isDarwin() && osXVersion()->{"minor"} == 7;
+}
+
+sub isWindowsNT()
+{
+ return $ENV{'OS'} eq 'Windows_NT';
+}
+
+sub shouldTargetWebProcess
+{
+ determineShouldTargetWebProcess();
+ return $shouldTargetWebProcess;
+}
+
+sub determineShouldTargetWebProcess
+{
+ return if defined($shouldTargetWebProcess);
+ $shouldTargetWebProcess = checkForArgumentAndRemoveFromARGV("--target-web-process");
+}
+
+sub shouldUseXPCServiceForWebProcess
+{
+ determineShouldUseXPCServiceForWebProcess();
+ return $shouldUseXPCServiceForWebProcess;
+}
+
+sub determineShouldUseXPCServiceForWebProcess
+{
+ return if defined($shouldUseXPCServiceForWebProcess);
+ $shouldUseXPCServiceForWebProcess = checkForArgumentAndRemoveFromARGV("--use-web-process-xpc-service");
+}
+
+sub debugger
+{
+ determineDebugger();
+ return $debugger;
+}
+
+sub determineDebugger
+{
+ return if defined($debugger);
+
+ determineXcodeVersion();
+ if (eval "v$xcodeVersion" ge v4.5) {
+ $debugger = "lldb";
+ } else {
+ $debugger = "gdb";
+ }
+
+ if (checkForArgumentAndRemoveFromARGV("--use-lldb")) {
+ $debugger = "lldb";
+ }
+
+ if (checkForArgumentAndRemoveFromARGV("--use-gdb")) {
+ $debugger = "gdb";
+ }
+}
+
+sub appendToEnvironmentVariableList
+{
+ my ($environmentVariableName, $value) = @_;
+
+ if (defined($ENV{$environmentVariableName})) {
+ $ENV{$environmentVariableName} .= ":" . $value;
+ } else {
+ $ENV{$environmentVariableName} = $value;
+ }
+}
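+# Illustrative usage: values accumulate colon-separated, e.g.
+#   appendToEnvironmentVariableList("DYLD_INSERT_LIBRARIES", "/usr/lib/libgmalloc.dylib");
+# extends any existing $ENV{DYLD_INSERT_LIBRARIES} with
+# ":/usr/lib/libgmalloc.dylib", or sets it outright when unset.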
+
+sub setUpGuardMallocIfNeeded
+{
+ if (!isDarwin()) {
+ return;
+ }
+
+ if (!defined($shouldUseGuardMalloc)) {
+ $shouldUseGuardMalloc = checkForArgumentAndRemoveFromARGV("--guard-malloc");
+ }
+
+ if ($shouldUseGuardMalloc) {
+ appendToEnvironmentVariableList("DYLD_INSERT_LIBRARIES", "/usr/lib/libgmalloc.dylib");
+ }
+}
+
+sub relativeScriptsDir()
+{
+ my $scriptDir = File::Spec->catpath("", File::Spec->abs2rel($FindBin::Bin, getcwd()), "");
+ if ($scriptDir eq "") {
+ $scriptDir = ".";
+ }
+ return $scriptDir;
+}
+
+sub launcherPath()
+{
+ my $relativeScriptsPath = relativeScriptsDir();
+ if (isGtk() || isQt() || isWx() || isEfl() || isWinCE()) {
+ return "$relativeScriptsPath/run-launcher";
+ } elsif (isAppleWebKit()) {
+ return "$relativeScriptsPath/run-safari";
+ }
+}
+
+sub launcherName()
+{
+ if (isGtk()) {
+ return "GtkLauncher";
+ } elsif (isQt()) {
+ return "QtTestBrowser";
+ } elsif (isWx()) {
+ return "wxBrowser";
+ } elsif (isAppleWebKit()) {
+ return "Safari";
+ } elsif (isEfl()) {
+ return "EWebLauncher/MiniBrowser";
+ } elsif (isWinCE()) {
+ return "WinCELauncher";
+ }
+}
+
+sub checkRequiredSystemConfig
+{
+ if (isDarwin()) {
+ chomp(my $productVersion = `sw_vers -productVersion`);
+ if (eval "v$productVersion" lt v10.4) {
+ print "*************************************************************\n";
+ print "Mac OS X Version 10.4.0 or later is required to build WebKit.\n";
+ print "You have " . $productVersion . ", thus the build will most likely fail.\n";
+ print "*************************************************************\n";
+ }
+ my $xcodebuildVersionOutput = `xcodebuild -version`;
+ my $devToolsCoreVersion = ($xcodebuildVersionOutput =~ /DevToolsCore-(\d+)/) ? $1 : undef;
+ my $xcodeVersion = ($xcodebuildVersionOutput =~ /Xcode ([0-9](\.[0-9]+)*)/) ? $1 : undef;
+ if (!$devToolsCoreVersion && !$xcodeVersion
+ || $devToolsCoreVersion && $devToolsCoreVersion < 747
+ || $xcodeVersion && eval "v$xcodeVersion" lt v2.3) {
+ print "*************************************************************\n";
+ print "Xcode Version 2.3 or later is required to build WebKit.\n";
+ print "You have an earlier version of Xcode, thus the build will\n";
+ print "most likely fail. The latest Xcode is available from the web:\n";
+ print "http://developer.apple.com/tools/xcode\n";
+ print "*************************************************************\n";
+ }
+ } elsif (isGtk() or isQt() or isWx() or isEfl()) {
+ my @cmds = qw(bison gperf);
+ if (isQt() and isWindows()) {
+ push @cmds, "win_flex";
+ } else {
+ push @cmds, "flex";
+ }
+ my @missing = ();
+ my $oldPath = $ENV{PATH};
+ if (isQt() and isWindows()) {
+ chomp(my $gnuWin32Dir = `$qmakebin -query QT_HOST_DATA`);
+ $gnuWin32Dir = File::Spec->catfile($gnuWin32Dir, "..", "gnuwin32", "bin");
+ if (-d "$gnuWin32Dir") {
+ $ENV{PATH} = $gnuWin32Dir . ";" . $ENV{PATH};
+ }
+ }
+ foreach my $cmd (@cmds) {
+ push @missing, $cmd if not commandExists($cmd);
+ }
+
+ if (@missing) {
+ my $list = join ", ", @missing;
+ die "ERROR: $list missing but required to build WebKit.\n";
+ }
+ if (isQt() and isWindows()) {
+ $ENV{PATH} = $oldPath;
+ }
+ }
+ # Win32 and other platforms may want to check for minimum config
+}
+
+sub determineWindowsSourceDir()
+{
+ return if $windowsSourceDir;
+ $windowsSourceDir = sourceDir();
+ chomp($windowsSourceDir = `cygpath -w '$windowsSourceDir'`) if isCygwin();
+}
+
+sub windowsSourceDir()
+{
+ determineWindowsSourceDir();
+ return $windowsSourceDir;
+}
+
+sub windowsSourceSourceDir()
+{
+ return windowsSourceDir() . "\\Source";
+}
+
+sub windowsLibrariesDir()
+{
+ return windowsSourceDir() . "\\WebKitLibraries\\win";
+}
+
+sub windowsOutputDir()
+{
+ return windowsSourceDir() . "\\WebKitBuild";
+}
+
+sub setupAppleWinEnv()
+{
+ return unless isAppleWinWebKit();
+
+ if (isWindowsNT()) {
+ my $restartNeeded = 0;
+ my %variablesToSet = ();
+
+ # FIXME: We should remove this explicit version check for cygwin once we stop supporting Cygwin 1.7.9 or older versions.
+ # https://bugs.webkit.org/show_bug.cgi?id=85791
+ my $uname_version = (POSIX::uname())[2];
+ $uname_version =~ s/\(.*\)//; # Remove the trailing cygwin version, if any.
+ if (version->parse($uname_version) < version->parse("1.7.10")) {
+ # Setting the environment variable 'CYGWIN' to 'tty' makes cygwin enable extra support (i.e., termios)
+ # for UNIX-like ttys in the Windows console
+ $variablesToSet{CYGWIN} = "tty" unless $ENV{CYGWIN};
+ }
+
+ # Those environment variables must be set to be able to build inside Visual Studio.
+ $variablesToSet{WEBKITLIBRARIESDIR} = windowsLibrariesDir() unless $ENV{WEBKITLIBRARIESDIR};
+ $variablesToSet{WEBKIT_LIBRARIES} = windowsLibrariesDir() unless $ENV{WEBKIT_LIBRARIES};
+ $variablesToSet{WEBKITOUTPUTDIR} = windowsOutputDir() unless $ENV{WEBKITOUTPUTDIR};
+ $variablesToSet{WEBKIT_OUTPUTDIR} = windowsOutputDir() unless $ENV{WEBKIT_OUTPUTDIR};
+ $variablesToSet{WEBKIT_SOURCE} = windowsSourceSourceDir() unless $ENV{WEBKIT_SOURCE};
+
+ foreach my $variable (keys %variablesToSet) {
+ print "Setting the Environment Variable '" . $variable . "' to '" . $variablesToSet{$variable} . "'\n\n";
+ system qw(regtool -s set), '\\HKEY_CURRENT_USER\\Environment\\' . $variable, $variablesToSet{$variable};
+ $restartNeeded ||= $variable eq "WEBKITLIBRARIESDIR" || $variable eq "WEBKITOUTPUTDIR" || $variable eq "WEBKIT_LIBRARIES" || $variable eq "WEBKIT_OUTPUTDIR" || $variable eq "WEBKIT_SOURCE";
+ }
+
+ if ($restartNeeded) {
+ print "Please restart your computer before attempting to build inside Visual Studio.\n\n";
+ }
+ } else {
+ if (!$ENV{'WEBKITLIBRARIESDIR'}) {
+ # VS2005 version. This will be removed as part of https://bugs.webkit.org/show_bug.cgi?id=109472.
+ print "Warning: You must set the 'WebKitLibrariesDir' environment variable\n";
+ print " to be able build WebKit from within Visual Studio 2005.\n";
+ print " Make sure that 'WebKitLibrariesDir' points to the\n";
+ print " 'WebKitLibraries/win' directory, not the 'WebKitLibraries/' directory.\n\n";
+ }
+ if (!$ENV{'WEBKIT_LIBRARIES'}) {
+ # VS2010 (and newer) version. This will replace the VS2005 version as part of
+ # https://bugs.webkit.org/show_bug.cgi?id=109472.
+ print "Warning: You must set the 'WebKit_Libraries' environment variable\n";
+ print " to be able build WebKit from within Visual Studio 2010 and newer.\n";
+ print " Make sure that 'WebKit_Libraries' points to the\n";
+ print " 'WebKitLibraries/win' directory, not the 'WebKitLibraries/' directory.\n\n";
+ }
+ if (!$ENV{'WEBKITOUTPUTDIR'}) {
+ # VS2005 version. This will be removed as part of https://bugs.webkit.org/show_bug.cgi?id=109472.
+ print "Warning: You must set the 'WebKitOutputDir' environment variable\n";
+ print " to be able build WebKit from within Visual Studio 2005.\n\n";
+ }
+ if (!$ENV{'WEBKIT_OUTPUTDIR'}) {
+ # VS2010 (and newer) version. This will replace the VS2005 version as part of
+ # https://bugs.webkit.org/show_bug.cgi?id=109472.
+ print "Warning: You must set the 'WebKit_OutputDir' environment variable\n";
+ print " to be able build WebKit from within Visual Studio 2010 and newer.\n\n";
+ }
+ if (!$ENV{'WEBKIT_SOURCE'}) {
+ print "Warning: You must set the 'WebKit_Source' environment variable\n";
+ print " to be able build WebKit from within Visual Studio 2010 and newer.\n\n";
+ }
+ }
+}
+
+sub setupCygwinEnv()
+{
+ return if !isCygwin() && !isWindows();
+ return if $vcBuildPath;
+
+ my $vsInstallDir;
+ my $programFilesPath = $ENV{'PROGRAMFILES(X86)'} || $ENV{'PROGRAMFILES'} || "C:\\Program Files";
+ if ($ENV{'VSINSTALLDIR'}) {
+ $vsInstallDir = $ENV{'VSINSTALLDIR'};
+ } else {
+ $vsInstallDir = File::Spec->catdir($programFilesPath, "Microsoft Visual Studio 8");
+ }
+ chomp($vsInstallDir = `cygpath "$vsInstallDir"`) if isCygwin();
+ $vcBuildPath = File::Spec->catfile($vsInstallDir, qw(Common7 IDE devenv.com));
+ if (-e $vcBuildPath) {
+ # Visual Studio is installed; we can use pdevenv to build.
+ # FIXME: Make pdevenv work with non-Cygwin Perl.
+ $vcBuildPath = File::Spec->catfile(sourceDir(), qw(Tools Scripts pdevenv)) if isCygwin();
+ } else {
+ # Visual Studio not found, try VC++ Express
+ $vcBuildPath = File::Spec->catfile($vsInstallDir, qw(Common7 IDE VCExpress.exe));
+ if (! -e $vcBuildPath) {
+ print "*************************************************************\n";
+ print "Cannot find '$vcBuildPath'\n";
+ print "Please execute the file 'vcvars32.bat' from\n";
+ print "'$programFilesPath\\Microsoft Visual Studio 8\\VC\\bin\\'\n";
+ print "to setup the necessary environment variables.\n";
+ print "*************************************************************\n";
+ die;
+ }
+ $willUseVCExpressWhenBuilding = 1;
+ }
+
+ my $qtSDKPath = File::Spec->catdir($programFilesPath, "QuickTime SDK");
+    # Note: this QuickTime SDK check is intentionally disabled by the leading "0 &&".
+    if (0 && ! -e $qtSDKPath) {
+ print "*************************************************************\n";
+ print "Cannot find '$qtSDKPath'\n";
+ print "Please download the QuickTime SDK for Windows from\n";
+ print "http://developer.apple.com/quicktime/download/\n";
+ print "*************************************************************\n";
+ die;
+ }
+
+ unless ($ENV{WEBKITLIBRARIESDIR}) {
+ $ENV{'WEBKITLIBRARIESDIR'} = File::Spec->catdir($sourceDir, "WebKitLibraries", "win");
+ chomp($ENV{WEBKITLIBRARIESDIR} = `cygpath -wa '$ENV{WEBKITLIBRARIESDIR}'`) if isCygwin();
+ }
+ unless ($ENV{WEBKIT_LIBRARIES}) {
+ $ENV{'WEBKIT_LIBRARIES'} = File::Spec->catdir($sourceDir, "WebKitLibraries", "win");
+ chomp($ENV{WEBKIT_LIBRARIES} = `cygpath -wa '$ENV{WEBKIT_LIBRARIES}'`) if isCygwin();
+ }
+
+ print "Building results into: ", baseProductDir(), "\n";
+ print "WEBKITOUTPUTDIR is set to: ", $ENV{"WEBKITOUTPUTDIR"}, "\n";
+ print "WEBKIT_OUTPUTDIR is set to: ", $ENV{"WEBKIT_OUTPUTDIR"}, "\n";
+ print "WEBKITLIBRARIESDIR is set to: ", $ENV{"WEBKITLIBRARIESDIR"}, "\n";
+ print "WEBKIT_LIBRARIES is set to: ", $ENV{"WEBKIT_LIBRARIES"}, "\n";
+}
+
+sub dieIfWindowsPlatformSDKNotInstalled
+{
+ my $registry32Path = "/proc/registry/";
+ my $registry64Path = "/proc/registry64/";
+ my $windowsPlatformSDKRegistryEntry = "HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/MicrosoftSDK/InstalledSDKs/D2FF9F89-8AA2-4373-8A31-C838BF4DBBE1";
+
+ # FIXME: It would be better to detect whether we are using 32- or 64-bit Windows
+ # and only check the appropriate entry. But for now we just blindly check both.
+ return if (-e $registry32Path . $windowsPlatformSDKRegistryEntry) || (-e $registry64Path . $windowsPlatformSDKRegistryEntry);
+
+ print "*************************************************************\n";
+ print "Cannot find registry entry '$windowsPlatformSDKRegistryEntry'.\n";
+ print "Please download and install the Microsoft Windows Server 2003 R2\n";
+ print "Platform SDK from <http://www.microsoft.com/downloads/details.aspx?\n";
+ print "familyid=0baf2b35-c656-4969-ace8-e4c0c0716adb&displaylang=en>.\n\n";
+ print "Then follow step 2 in the Windows section of the \"Installing Developer\n";
+ print "Tools\" instructions at <http://www.webkit.org/building/tools.html>.\n";
+ print "*************************************************************\n";
+ die;
+}
+
+sub copyInspectorFrontendFiles
+{
+ my $productDir = productDir();
+ my $sourceInspectorPath = sourceDir() . "/Source/WebCore/inspector/front-end/";
+ my $inspectorResourcesDirPath = $ENV{"WEBKITINSPECTORRESOURCESDIR"};
+
+ if (!defined($inspectorResourcesDirPath)) {
+ $inspectorResourcesDirPath = "";
+ }
+
+ if (isAppleMacWebKit()) {
+ $inspectorResourcesDirPath = $productDir . "/WebCore.framework/Resources/inspector";
+ } elsif (isAppleWinWebKit()) {
+ $inspectorResourcesDirPath = $productDir . "/WebKit.resources/inspector";
+ } elsif (isQt() || isGtk()) {
+ my $prefix = $ENV{"WebKitInstallationPrefix"};
+ $inspectorResourcesDirPath = (defined($prefix) ? $prefix : "/usr/share") . "/webkit-1.0/webinspector";
+ } elsif (isEfl()) {
+ my $prefix = $ENV{"WebKitInstallationPrefix"};
+ $inspectorResourcesDirPath = (defined($prefix) ? $prefix : "/usr/share") . "/ewebkit/webinspector";
+ }
+
+ if (! -d $inspectorResourcesDirPath) {
+ print "*************************************************************\n";
+ print "Cannot find '$inspectorResourcesDirPath'.\n" if (defined($inspectorResourcesDirPath));
+ print "Make sure that you have built WebKit first.\n" if (! -d $productDir || defined($inspectorResourcesDirPath));
+ print "Optionally, set the environment variable 'WebKitInspectorResourcesDir'\n";
+ print "to point to the directory that contains the WebKit Inspector front-end\n";
+ print "files for the built WebCore framework.\n";
+ print "*************************************************************\n";
+ die;
+ }
+
+ if (isAppleMacWebKit()) {
+ my $sourceLocalizedStrings = sourceDir() . "/Source/WebCore/English.lproj/localizedStrings.js";
+ my $destinationLocalizedStrings = $productDir . "/WebCore.framework/Resources/English.lproj/localizedStrings.js";
+ system "ditto", $sourceLocalizedStrings, $destinationLocalizedStrings;
+ }
+
+ return system "rsync", "-aut", "--exclude=/.DS_Store", "--exclude=*.re2js", "--exclude=.svn/", !isQt() ? "--exclude=/WebKit.qrc" : "", $sourceInspectorPath, $inspectorResourcesDirPath;
+}
+
+sub buildXCodeProject($$@)
+{
+ my ($project, $clean, @extraOptions) = @_;
+
+ if ($clean) {
+ push(@extraOptions, "-alltargets");
+ push(@extraOptions, "clean");
+ }
+
+ return system "xcodebuild", "-project", "$project.xcodeproj", @extraOptions;
+}
+
+sub usingVisualStudioExpress()
+{
+ setupCygwinEnv();
+ return $willUseVCExpressWhenBuilding;
+}
+
+sub buildVisualStudioProject
+{
+ my ($project, $clean) = @_;
+ setupCygwinEnv();
+
+ my $config = configurationForVisualStudio();
+
+ dieIfWindowsPlatformSDKNotInstalled() if $willUseVCExpressWhenBuilding;
+
+ chomp($project = `cygpath -w "$project"`) if isCygwin();
+
+ my $action = "/build";
+ if ($clean) {
+ $action = "/clean";
+ }
+
+ my @command = ($vcBuildPath, $project, $action, $config);
+
+ print join(" ", @command), "\n";
+ return system @command;
+}
+
+sub downloadWafIfNeeded
+{
+ # get / update waf if needed
+ my $waf = "$sourceDir/Tools/waf/waf";
+ my $wafURL = 'http://wxwebkit.kosoftworks.com/downloads/deps/waf';
+ if (!-f $waf) {
+ my $result = system "curl -o $waf $wafURL";
+ chmod 0755, $waf;
+ }
+}
+
+sub buildWafProject
+{
+ my ($project, $shouldClean, @options) = @_;
+
+ # set the PYTHONPATH for waf
+ my $pythonPath = $ENV{'PYTHONPATH'};
+ if (!defined($pythonPath)) {
+ $pythonPath = '';
+ }
+ my $sourceDir = sourceDir();
+ my $newPythonPath = "$sourceDir/Tools/waf/build:$pythonPath";
+ if (isCygwin()) {
+ $newPythonPath = `cygpath --mixed --path $newPythonPath`;
+ }
+ $ENV{'PYTHONPATH'} = $newPythonPath;
+
+ print "Building $project\n";
+
+ my $wafCommand = "$sourceDir/Tools/waf/waf";
+ if ($ENV{'WXWEBKIT_WAF'}) {
+ $wafCommand = $ENV{'WXWEBKIT_WAF'};
+ }
+ if (isCygwin()) {
+ $wafCommand = `cygpath --windows "$wafCommand"`;
+ chomp($wafCommand);
+ }
+ if ($shouldClean) {
+ return system $wafCommand, "uninstall", "clean", "distclean";
+ }
+
+ return system $wafCommand, 'configure', 'build', 'install', @options;
+}
+
+sub retrieveQMakespecVar
+{
+ my $mkspec = $_[0];
+ my $varname = $_[1];
+
+ my $varvalue = undef;
+ #print "retrieveMakespecVar " . $mkspec . ", " . $varname . "\n";
+
+ local *SPEC;
+ open SPEC, "<$mkspec" or return $varvalue;
+ while (<SPEC>) {
+ if ($_ =~ /\s*include\((.+)\)/) {
+ # open the included mkspec
+ my $oldcwd = getcwd();
+ (my $volume, my $directories, my $file) = File::Spec->splitpath($mkspec);
+ my $newcwd = "$volume$directories";
+ chdir $newcwd if $newcwd;
+ $varvalue = retrieveQMakespecVar($1, $varname);
+ chdir $oldcwd;
+ } elsif ($_ =~ /$varname\s*=\s*([^\s]+)/) {
+ $varvalue = $1;
+ last;
+ }
+ }
+ close SPEC;
+ return $varvalue;
+}
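+# A minimal sketch of the mkspec lines this understands (hypothetical file):
+#   include(../common/g++.conf)
+#   QMAKE_CC = gcc
+# retrieveQMakespecVar("qmake.conf", "QMAKE_CC") would descend into the
+# include() and return "gcc" from the first matching assignment.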
+
+sub qtMakeCommand($)
+{
+ my ($qmakebin) = @_;
+ chomp(my $hostDataPath = `$qmakebin -query QT_HOST_DATA`);
+ my $mkspecPath = $hostDataPath . "/mkspecs/default/qmake.conf";
+ if (! -e $mkspecPath) {
+ chomp(my $mkspec= `$qmakebin -query QMAKE_XSPEC`);
+ $mkspecPath = $hostDataPath . "/mkspecs/" . $mkspec . "/qmake.conf";
+ }
+ my $compiler = retrieveQMakespecVar($mkspecPath, "QMAKE_CC");
+
+ #print "default spec: " . $mkspec . "\n";
+ #print "compiler found: " . $compiler . "\n";
+
+ if ($compiler && $compiler eq "cl") {
+ return "nmake";
+ }
+
+ return "make";
+}
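+# For example, a Windows MSVC mkspec defining QMAKE_CC = cl selects "nmake";
+# any other compiler (gcc, clang, ...) falls through to "make".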
+
+sub autotoolsFlag($$)
+{
+ my ($flag, $feature) = @_;
+ my $prefix = $flag ? "--enable" : "--disable";
+
+ return $prefix . '-' . $feature;
+}
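+# e.g. autotoolsFlag(1, "webgl") yields "--enable-webgl" and
+# autotoolsFlag(0, "webgl") yields "--disable-webgl".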
+
+sub runAutogenForAutotoolsProjectIfNecessary($@)
+{
+ my ($dir, $prefix, $sourceDir, $project, $joinedOverridableFeatures, @buildArgs) = @_;
+
+ my $joinedBuildArgs = join(" ", @buildArgs);
+
+ if (-e "GNUmakefile") {
+ # Just assume that build-jsc will never be used to reconfigure JSC. Later
+ # we can go back and make this more complicated if the demand is there.
+ if ($project ne "WebKit") {
+ return;
+ }
+
+        # Run autogen.sh again if either the features overridden by build-webkit or the build arguments have changed.
+ if (!mustReRunAutogen($sourceDir, "WebKitFeatureOverrides.txt", $joinedOverridableFeatures)
+ && !mustReRunAutogen($sourceDir, "previous-autogen-arguments.txt", $joinedBuildArgs)) {
+ return;
+ }
+ }
+
+ print "Calling autogen.sh in " . $dir . "\n\n";
+ print "Installation prefix directory: $prefix\n" if(defined($prefix));
+
+ # Only for WebKit, write the autogen.sh arguments to a file so that we can detect
+ # when they change and automatically re-run it.
+ if ($project eq 'WebKit') {
+ open(OVERRIDABLE_FEATURES, ">WebKitFeatureOverrides.txt");
+ print OVERRIDABLE_FEATURES $joinedOverridableFeatures;
+ close(OVERRIDABLE_FEATURES);
+
+ open(AUTOTOOLS_ARGUMENTS, ">previous-autogen-arguments.txt");
+ print AUTOTOOLS_ARGUMENTS $joinedBuildArgs;
+ close(AUTOTOOLS_ARGUMENTS);
+ }
+
+ # Make the path relative since it will appear in all -I compiler flags.
+ # Long argument lists cause bizarre slowdowns in libtool.
+ my $relSourceDir = File::Spec->abs2rel($sourceDir) || ".";
+
+    # Compiler options to keep floating-point values consistent
+    # between 32-bit and 64-bit architectures. The same options are
+    # also used for the Chromium build.
+ determineArchitecture();
+ if ($architecture ne "x86_64" && !isARM()) {
+ $ENV{'CXXFLAGS'} = "-march=pentium4 -msse2 -mfpmath=sse " . ($ENV{'CXXFLAGS'} || "");
+ }
+
+ # Prefix the command with jhbuild run.
+ unshift(@buildArgs, "$relSourceDir/autogen.sh");
+ unshift(@buildArgs, jhbuildWrapperPrefixIfNeeded());
+ if (system(@buildArgs) ne 0) {
+ die "Calling autogen.sh failed!\n";
+ }
+}
+
+sub getJhbuildPath()
+{
+ return join('/', baseProductDir(), "Dependencies");
+}
+
+sub mustReRunAutogen($@)
+{
+ my ($sourceDir, $filename, $currentContents) = @_;
+
+ if (! -e $filename) {
+ return 1;
+ }
+
+ open(CONTENTS_FILE, $filename);
+ chomp(my $previousContents = <CONTENTS_FILE>);
+ close(CONTENTS_FILE);
+
+ # We only care about the WebKit2 argument when we are building WebKit itself.
+ # build-jsc never passes --enable-webkit2, so if we didn't do this, autogen.sh
+ # would run for every single build on the bots, since it runs both build-webkit
+ # and build-jsc.
+ if ($previousContents ne $currentContents) {
+ print "Contents for file $filename have changed.\n";
+ print "Previous contents were: $previousContents\n\n";
+ print "New contents are: $currentContents\n";
+ return 1;
+ }
+
+ return 0;
+}
+
+sub buildAutotoolsProject($@)
+{
+ my ($project, $clean, $prefix, $makeArgs, $noWebKit1, $noWebKit2, @features) = @_;
+
+ my $make = 'make';
+ my $dir = productDir();
+ my $config = passedConfiguration() || configuration();
+
+ # Use rm to clean the build directory since distclean may miss files
+ if ($clean && -d $dir) {
+ system "rm", "-rf", "$dir";
+ }
+
+ if (! -d $dir) {
+        File::Path::mkpath($dir) or die "Failed to create build directory " . $dir . "\n"
+ }
+ chdir $dir or die "Failed to cd into " . $dir . "\n";
+
+ if ($clean) {
+ return 0;
+ }
+
+ my @buildArgs = @ARGV;
+ if ($noWebKit1) {
+ unshift(@buildArgs, "--disable-webkit1");
+ }
+ if ($noWebKit2) {
+ unshift(@buildArgs, "--disable-webkit2");
+ }
+
+ # Configurable features listed here should be kept in sync with the
+ # features for which there exists a configuration option in configure.ac.
+ my %configurableFeatures = (
+ "gamepad" => 1,
+ "geolocation" => 1,
+ "media-stream" => 1,
+ "svg" => 1,
+ "svg-fonts" => 1,
+ "video" => 1,
+ "webgl" => 1,
+ "web-audio" => 1,
+ "xslt" => 1,
+ );
+ my @overridableFeatures = ();
+ foreach (@features) {
+ if ($configurableFeatures{$_->{option}}) {
+            push @buildArgs, autotoolsFlag(${$_->{value}}, $_->{option});
+ } else {
+ push @overridableFeatures, $_->{define} . "=" . (${$_->{value}} ? "1" : "0");
+ }
+ }
+
+ $makeArgs = $makeArgs || "";
+ $makeArgs = $makeArgs . " " . $ENV{"WebKitMakeArguments"} if $ENV{"WebKitMakeArguments"};
+
+ # Automatically determine the number of CPUs for make only
+ # if make arguments haven't already been specified.
+ if ($makeArgs eq "") {
+ $makeArgs = "-j" . numberOfCPUs();
+ }
+
+ # WebKit is the default target, so we don't need to specify anything.
+ if ($project eq "JavaScriptCore") {
+ $makeArgs .= " jsc";
+ } elsif ($project eq "WTF") {
+ $makeArgs .= " libWTF.la";
+ }
+
+ $prefix = $ENV{"WebKitInstallationPrefix"} if !defined($prefix);
+ push @buildArgs, "--prefix=" . $prefix if defined($prefix);
+
+ # Check if configuration is Debug.
+ my $debug = $config =~ m/debug/i;
+ if ($debug) {
+ push @buildArgs, "--enable-debug";
+ } else {
+ push @buildArgs, "--disable-debug";
+ }
+
+ if (checkForArgumentAndRemoveFromArrayRef("--update-gtk", \@buildArgs)) {
+ # Force autogen to run, to catch the possibly updated libraries.
+ system("rm -f previous-autogen-arguments.txt");
+
+ system("perl", "$sourceDir/Tools/Scripts/update-webkitgtk-libs") == 0 or die $!;
+ }
+
+ # If GNUmakefile exists, don't run autogen.sh unless its arguments
+ # have changed. The makefile should be smart enough to track autotools
+ # dependencies and re-run autogen.sh when build files change.
+ my $joinedOverridableFeatures = join(" ", @overridableFeatures);
+ runAutogenForAutotoolsProjectIfNecessary($dir, $prefix, $sourceDir, $project, $joinedOverridableFeatures, @buildArgs);
+
+ my $runWithJhbuild = join(" ", jhbuildWrapperPrefixIfNeeded());
+ if (system("$runWithJhbuild $make $makeArgs") ne 0) {
+ die "\nFailed to build WebKit using '$make'!\n";
+ }
+
+ chdir ".." or die;
+
+ if ($project eq 'WebKit' && !isCrossCompilation() && !($noWebKit1 && $noWebKit2)) {
+ my @docGenerationOptions = ("$sourceDir/Tools/gtk/generate-gtkdoc", "--skip-html");
+ push(@docGenerationOptions, productDir());
+
+ unshift(@docGenerationOptions, jhbuildWrapperPrefixIfNeeded());
+
+ if (system(@docGenerationOptions)) {
+ die "\n gtkdoc did not build without warnings\n";
+ }
+ }
+
+ return 0;
+}
+
+sub jhbuildWrapperPrefixIfNeeded()
+{
+ if (-e getJhbuildPath()) {
+ my @prefix = (File::Spec->catfile(sourceDir(), "Tools", "jhbuild", "jhbuild-wrapper"));
+ if (isEfl()) {
+ push(@prefix, "--efl");
+ } elsif (isGtk()) {
+ push(@prefix, "--gtk");
+ }
+ push(@prefix, "run");
+
+ return @prefix;
+ }
+
+ return ();
+}
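+# Illustrative expansion: with a GTK jhbuild checkout in place this returns
+# ("<sourceDir>/Tools/jhbuild/jhbuild-wrapper", "--gtk", "run"), so callers can
+# prepend it to a command list; without one it returns () and commands run bare.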
+
+sub removeCMakeCache()
+{
+ my $cacheFilePath = File::Spec->catdir(baseProductDir(), configuration(), "CMakeCache.txt");
+ unlink($cacheFilePath) if -e $cacheFilePath;
+}
+
+sub generateBuildSystemFromCMakeProject
+{
+    # The trailing array slurps all remaining arguments; a scalar placed after
+    # it would never be assigned, so the parameter list ends with @cmakeArgs.
+    my ($port, $prefixPath, @cmakeArgs) = @_;
+ my $config = configuration();
+ my $buildPath = File::Spec->catdir(baseProductDir(), $config);
+ File::Path::mkpath($buildPath) unless -d $buildPath;
+ my $originalWorkingDirectory = getcwd();
+ chdir($buildPath) or die;
+
+ my @args;
+ push @args, "-DPORT=\"$port\"";
+ push @args, "-DCMAKE_INSTALL_PREFIX=\"$prefixPath\"" if $prefixPath;
+ push @args, "-DSHARED_CORE=ON" if isEfl() && $ENV{"ENABLE_DRT"};
+ if ($config =~ /release/i) {
+ push @args, "-DCMAKE_BUILD_TYPE=Release";
+ } elsif ($config =~ /debug/i) {
+ push @args, "-DCMAKE_BUILD_TYPE=Debug";
+ }
+    # Don't warn about variables that aren't used by the CMake ports.
+ push @args, "--no-warn-unused-cli";
+ push @args, @cmakeArgs if @cmakeArgs;
+
+ push @args, '"' . sourceDir() . '"';
+
+ # Compiler options to keep floating point values consistent
+ # between 32-bit and 64-bit architectures.
+ determineArchitecture();
+ if ($architecture ne "x86_64" && !isARM()) {
+ $ENV{'CXXFLAGS'} = "-march=pentium4 -msse2 -mfpmath=sse " . ($ENV{'CXXFLAGS'} || "");
+ }
+
+ # We call system("cmake @args") instead of system("cmake", @args) so that @args is
+ # parsed for shell metacharacters.
+ my $wrapper = join(" ", jhbuildWrapperPrefixIfNeeded()) . " ";
+ my $returnCode = system($wrapper . "cmake @args");
+
+ chdir($originalWorkingDirectory);
+ return $returnCode;
+}
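+# Sketch of a resulting invocation for a hypothetical EFL release build:
+#   cmake -DPORT="Efl" -DCMAKE_INSTALL_PREFIX="/usr/local" -DCMAKE_BUILD_TYPE=Release \
+#         --no-warn-unused-cli "<sourceDir>"
+# run from <baseProductDir>/Release, under the jhbuild wrapper when present.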
+
+sub buildCMakeGeneratedProject($)
+{
+ my ($makeArgs) = @_;
+ my $config = configuration();
+ my $buildPath = File::Spec->catdir(baseProductDir(), $config);
+ if (! -d $buildPath) {
+ die "Must call generateBuildSystemFromCMakeProject() before building CMake project.";
+ }
+ my @args = ("--build", $buildPath, "--config", $config);
+ push @args, ("--", $makeArgs) if $makeArgs;
+
+ # We call system("cmake @args") instead of system("cmake", @args) so that @args is
+ # parsed for shell metacharacters. In particular, $makeArgs may contain such metacharacters.
+ my $wrapper = join(" ", jhbuildWrapperPrefixIfNeeded()) . " ";
+ return system($wrapper . "cmake @args");
+}
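+# e.g. with $config = "Release" and --makeargs="-j8" the command above becomes
+# `cmake --build <baseProductDir>/Release --config Release -- -j8`
+# (prefixed by the jhbuild wrapper when one is present).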
+
+sub cleanCMakeGeneratedProject()
+{
+ my $config = configuration();
+ my $buildPath = File::Spec->catdir(baseProductDir(), $config);
+ if (-d $buildPath) {
+ return system("cmake", "--build", $buildPath, "--config", $config, "--target", "clean");
+ }
+ return 0;
+}
+
+sub buildCMakeProjectOrExit($$$$@)
+{
+ my ($clean, $port, $prefixPath, $makeArgs, @cmakeArgs) = @_;
+ my $returnCode;
+
+ exit(exitStatus(cleanCMakeGeneratedProject())) if $clean;
+
+ if (isEfl() && checkForArgumentAndRemoveFromARGV("--update-efl")) {
+ system("perl", "$sourceDir/Tools/Scripts/update-webkitefl-libs") == 0 or die $!;
+ }
+
+ $returnCode = exitStatus(generateBuildSystemFromCMakeProject($port, $prefixPath, @cmakeArgs));
+ exit($returnCode) if $returnCode;
+ if (isBlackBerry()) {
+        return 0 if defined($ENV{"GENERATE_CMAKE_PROJECT_ONLY"}) && $ENV{"GENERATE_CMAKE_PROJECT_ONLY"} eq '1';
+ }
+ $returnCode = exitStatus(buildCMakeGeneratedProject($makeArgs));
+ exit($returnCode) if $returnCode;
+ return 0;
+}
+
+sub cmakeBasedPortArguments()
+{
+ return blackberryCMakeArguments() if isBlackBerry();
+ return ('-G "Visual Studio 8 2005 STANDARDSDK_500 (ARMV4I)"') if isWinCE();
+ return ();
+}
+
+sub cmakeBasedPortName()
+{
+ return "BlackBerry" if isBlackBerry();
+ return "Efl" if isEfl();
+ return "WinCE" if isWinCE();
+ return "";
+}
+
+sub promptUser
+{
+ my ($prompt, $default) = @_;
+ my $defaultValue = $default ? "[$default]" : "";
+ print "$prompt $defaultValue: ";
+ chomp(my $input = <STDIN>);
+ return $input ? $input : $default;
+}
+
+sub buildQMakeProjects
+{
+ my ($projects, $clean, @buildParams) = @_;
+
+ my @buildArgs = ();
+ my $qconfigs = "";
+
+ my $make = qtMakeCommand($qmakebin);
+ my $makeargs = "";
+ my $command;
+ my $installHeaders;
+ my $installLibs;
+ for my $i (0 .. $#buildParams) {
+ my $opt = $buildParams[$i];
+ if ($opt =~ /^--qmake=(.*)/i ) {
+ $qmakebin = $1;
+ } elsif ($opt =~ /^--qmakearg=(.*)/i ) {
+ push @buildArgs, $1;
+ } elsif ($opt =~ /^--makeargs=(.*)/i ) {
+ $makeargs = $1;
+ } elsif ($opt =~ /^--install-headers=(.*)/i ) {
+ $installHeaders = $1;
+ } elsif ($opt =~ /^--install-libs=(.*)/i ) {
+ $installLibs = $1;
+ } else {
+ push @buildArgs, $opt;
+ }
+ }
+
+    # Automatically determine the number of CPUs for make, but only if a -j make argument hasn't already been specified.
+ if ($make eq "make" && $makeargs !~ /-j\s*\d+/i && (!defined $ENV{"MAKEFLAGS"} || ($ENV{"MAKEFLAGS"} !~ /-j\s*\d+/i ))) {
+ $makeargs .= " -j" . numberOfCPUs();
+ }
+
+ $make = "$make $makeargs";
+ $make =~ s/\s+$//;
+
+ my $originalCwd = getcwd();
+ my $dir = File::Spec->canonpath(productDir());
+ File::Path::mkpath($dir);
+ chdir $dir or die "Failed to cd into " . $dir . "\n";
+
+ if ($clean) {
+ $command = "$make distclean";
+ print "\nCalling '$command' in " . $dir . "\n\n";
+ return system $command;
+ }
+
+ my $qmakepath = File::Spec->catfile(sourceDir(), "Tools", "qmake");
+ my $qmakecommand = $qmakebin;
+
+ my $config = configuration();
+ push @buildArgs, "INSTALL_HEADERS=" . $installHeaders if defined($installHeaders);
+ push @buildArgs, "INSTALL_LIBS=" . $installLibs if defined($installLibs);
+
+ my $passedConfig = passedConfiguration() || "";
+ if ($passedConfig =~ m/debug/i) {
+ push @buildArgs, "CONFIG-=release";
+ push @buildArgs, "CONFIG+=debug";
+ } elsif ($passedConfig =~ m/release/i) {
+ push @buildArgs, "CONFIG+=release";
+ push @buildArgs, "CONFIG-=debug";
+ } elsif ($passedConfig) {
+ die "Build type $passedConfig is not supported with --qt.\n";
+ }
+
+ # Using build-webkit to build assumes you want a developer-build
+ push @buildArgs, "CONFIG-=production_build";
+
+ my $svnRevision = currentSVNRevision();
+ my $previousSvnRevision = "unknown";
+
+ my $buildHint = "";
+
+ my $pathToBuiltRevisions = File::Spec->catfile($dir, ".builtRevisions.cache");
+ if (-e $pathToBuiltRevisions && open(BUILTREVISIONS, $pathToBuiltRevisions)) {
+ while (<BUILTREVISIONS>) {
+ if ($_ =~ m/^SVN_REVISION\s=\s(\d+)$/) {
+ $previousSvnRevision = $1;
+ }
+ }
+ close(BUILTREVISIONS);
+ }
+
+ my $result = 0;
+
+    # Run qmake regardless of whether a makefile exists, so that qmake can
+    # detect changes to the configuration.
+
+ push @buildArgs, "-after OVERRIDE_SUBDIRS=\"@{$projects}\"" if @{$projects};
+ unshift @buildArgs, File::Spec->catfile(sourceDir(), "WebKit.pro");
+ $command = "$qmakecommand @buildArgs";
+ print "Calling '$command' in " . $dir . "\n\n";
+ print "Installation headers directory: $installHeaders\n" if(defined($installHeaders));
+ print "Installation libraries directory: $installLibs\n" if(defined($installLibs));
+
+ my $configChanged = 0;
+ open(QMAKE, "$command 2>&1 |") || die "Could not execute qmake";
+ while (<QMAKE>) {
+ $configChanged = 1 if $_ =~ m/The configuration was changed since the last build/;
+ print $_;
+ }
+
+ close(QMAKE);
+ $result = $?;
+
+ if ($result ne 0) {
+ die "\nFailed to set up build environment using $qmakebin!\n";
+ }
+
+ my $maybeNeedsCleanBuild = 0;
+ my $needsIncrementalBuild = 0;
+
+    # A full incremental build (re-running qmake) is always needed on buildbots and EWS bots.
+ if (grep(/CONFIG\+=buildbot/,@buildParams)) {
+ $needsIncrementalBuild = 1;
+ }
+
+ if ($svnRevision ne $previousSvnRevision) {
+ print "Last built revision was " . $previousSvnRevision .
+ ", now at revision $svnRevision. Full incremental build needed.\n";
+ $needsIncrementalBuild = 1;
+
+ my @fileList = listOfChangedFilesBetweenRevisions(sourceDir(), $previousSvnRevision, $svnRevision);
+
+ foreach (@fileList) {
+ if (m/\.pr[oif]$/ or
+ m/\.qmake.conf$/ or
+ m/^Tools\/qmake\//
+ ) {
+ print "Change to $_ detected, clean build may be needed.\n";
+ $maybeNeedsCleanBuild = 1;
+ last;
+ }
+ }
+ }
+
+ if ($configChanged) {
+ print "Calling '$make wipeclean' in " . $dir . "\n\n";
+ $result = system "$make wipeclean";
+ }
+
+ $command = "$make";
+ if ($needsIncrementalBuild) {
+ $command .= " incremental";
+ }
+
+ print "\nCalling '$command' in " . $dir . "\n\n";
+ $result = system $command;
+
+ chdir ".." or die;
+
+ if ($result eq 0) {
+ # Now that the build completed successfully we can save the SVN revision
+ open(BUILTREVISIONS, ">>$pathToBuiltRevisions");
+ print BUILTREVISIONS "SVN_REVISION = $svnRevision\n";
+ close(BUILTREVISIONS);
+    } elsif ($command !~ /incremental/ && exitStatus($result)) {
+ my $exitCode = exitStatus($result);
+ my $failMessage = <<EOF;
+
+===== BUILD FAILED ======
+
+The build failed with exit code $exitCode. This may have been because you
+
+ - added an #include to a source/header
+ - added a Q_OBJECT macro to a class
+ - added a new resource to a qrc file
+
+as dependencies are not automatically re-computed for local developer builds.
+You may try computing dependencies manually by running 'make qmake_all' in:
+
+ $dir
+
+or passing --makeargs="qmake_all" to build-webkit.
+
+=========================
+
+EOF
+ print "$failMessage";
+ } elsif ($maybeNeedsCleanBuild) {
+ print "\nIncremental build failed, clean build needed. \n";
+ print "Calling '$make wipeclean' in " . $dir . "\n\n";
+ chdir $dir or die;
+ system "$make wipeclean";
+
+ print "\nCalling '$make' in " . $dir . "\n\n";
+ $result = system $make;
+ }
+
+ return $result;
+}
+
+sub buildGtkProject
+{
+ my ($project, $clean, $prefix, $makeArgs, $noWebKit1, $noWebKit2, @features) = @_;
+
+ if ($project ne "WebKit" and $project ne "JavaScriptCore" and $project ne "WTF") {
+ die "Unsupported project: $project. Supported projects: WebKit, JavaScriptCore, WTF\n";
+ }
+
+ return buildAutotoolsProject($project, $clean, $prefix, $makeArgs, $noWebKit1, $noWebKit2, @features);
+}
+
+sub buildChromiumMakefile($$@)
+{
+ my ($target, $clean, @options) = @_;
+ if ($clean) {
+ return system qw(rm -rf out);
+ }
+ my $config = configuration();
+ my $numCpus = numberOfCPUs();
+ my $makeArgs;
+ for (@options) {
+ $makeArgs = $1 if /^--makeargs=(.*)/i;
+ }
+ $makeArgs = "-j$numCpus" if not $makeArgs;
+    my $command = "make -fMakefile.chromium $makeArgs BUILDTYPE=$config $target";
+
+ print "$command\n";
+ return system $command;
+}
+
+sub buildChromiumNinja($$@)
+{
+    # "rm -rf out" would require re-running gyp, so --clean is not supported
+    # for now; the $clean flag passed by callers is accepted but ignored.
+    my ($target, $clean, @options) = @_;
+ my $config = configuration();
+ my $makeArgs = "";
+ for (@options) {
+ $makeArgs = $1 if /^--makeargs=(.*)/i;
+ }
+ my $command = "";
+
+ # Find ninja.
+ my $ninjaPath;
+ if (commandExists('ninja')) {
+ $ninjaPath = 'ninja';
+ } elsif (-e 'Source/WebKit/chromium/depot_tools/ninja') {
+ $ninjaPath = 'Source/WebKit/chromium/depot_tools/ninja';
+ } else {
+ die "ninja not found. Install chromium's depot_tools by running update-webkit first\n";
+ }
+
+ $command .= "$ninjaPath -C out/$config $target $makeArgs";
+
+ print "$command\n";
+ return system $command;
+}
+
+sub buildChromiumVisualStudioProject($$)
+{
+ my ($projectPath, $clean) = @_;
+
+ my $config = configuration();
+ my $action = "/build";
+ $action = "/clean" if $clean;
+
+ # Find Visual Studio installation.
+ my $vsInstallDir;
+ my $programFilesPath = $ENV{'PROGRAMFILES'} || "C:\\Program Files";
+ if ($ENV{'VSINSTALLDIR'}) {
+ $vsInstallDir = $ENV{'VSINSTALLDIR'};
+ } else {
+ $vsInstallDir = "$programFilesPath/Microsoft Visual Studio 8";
+ }
+ $vsInstallDir =~ s,\\,/,g;
+ $vsInstallDir = `cygpath "$vsInstallDir"` if isCygwin();
+ chomp $vsInstallDir;
+ $vcBuildPath = "$vsInstallDir/Common7/IDE/devenv.com";
+ if (! -e $vcBuildPath) {
+ # Visual Studio not found, try VC++ Express
+ $vcBuildPath = "$vsInstallDir/Common7/IDE/VCExpress.exe";
+ if (! -e $vcBuildPath) {
+ print "*************************************************************\n";
+ print "Cannot find '$vcBuildPath'\n";
+ print "Please execute the file 'vcvars32.bat' from\n";
+ print "'$programFilesPath\\Microsoft Visual Studio 8\\VC\\bin\\'\n";
+ print "to setup the necessary environment variables.\n";
+ print "*************************************************************\n";
+ die;
+ }
+ }
+
+ # Create command line and execute it.
+ my @command = ($vcBuildPath, $projectPath, $action, $config);
+ print "Building results into: ", baseProductDir(), "\n";
+ print join(" ", @command), "\n";
+ return system @command;
+}
+
+sub buildChromium($@)
+{
+ my ($clean, @options) = @_;
+
+ # We might need to update DEPS or re-run GYP if things have changed.
+ if (checkForArgumentAndRemoveFromArrayRef("--update-chromium", \@options)) {
+ my @updateCommand = ("perl", "Tools/Scripts/update-webkit-chromium", "--force");
+ push @updateCommand, "--chromium-android" if isChromiumAndroid();
+ system(@updateCommand) == 0 or die $!;
+ }
+
+ my $result = 1;
+ if (isDarwin() && !isChromiumAndroid() && !isChromiumMacMake() && !isChromiumNinja()) {
+ # Mac build - builds the root xcode project.
+ $result = buildXCodeProject("Source/WebKit/chromium/All", $clean, "-configuration", configuration(), @options);
+ } elsif ((isCygwin() || isWindows()) && !isChromiumNinja()) {
+ # Windows build - builds the root visual studio solution.
+ $result = buildChromiumVisualStudioProject("Source/WebKit/chromium/All.sln", $clean);
+ } elsif (isChromiumNinja()) {
+ $result = buildChromiumNinja("all", $clean, @options);
+ } elsif (isLinux() || isChromiumAndroid() || isChromiumMacMake()) {
+ # Linux build - build using make.
+ $result = buildChromiumMakefile("all", $clean, @options);
+ } else {
+ print STDERR "This platform is not supported by chromium.\n";
+ }
+ return $result;
+}
+
+sub appleApplicationSupportPath
+{
+ open INSTALL_DIR, "</proc/registry/HKEY_LOCAL_MACHINE/SOFTWARE/Apple\ Inc./Apple\ Application\ Support/InstallDir";
+ my $path = <INSTALL_DIR>;
+ $path =~ s/[\r\n\x00].*//;
+ close INSTALL_DIR;
+
+ my $unixPath = `cygpath -u '$path'`;
+ chomp $unixPath;
+ return $unixPath;
+}
+
+sub setPathForRunningWebKitApp
+{
+ my ($env) = @_;
+
+ if (isAppleWinWebKit()) {
+ $env->{PATH} = join(':', productDir(), dirname(installedSafariPath()), appleApplicationSupportPath(), $env->{PATH} || "");
+ } elsif (isQt()) {
+ my $qtLibs = `$qmakebin -query QT_INSTALL_LIBS`;
+ $qtLibs =~ s/[\r\n]+$//;
+ $env->{PATH} = join(';', $qtLibs, productDir() . "/lib", $env->{PATH} || "");
+ }
+}
+
+sub printHelpAndExitForRunAndDebugWebKitAppIfNeeded
+{
+ return unless checkForArgumentAndRemoveFromARGV("--help");
+
+ my ($includeOptionsForDebugging) = @_;
+
+ print STDERR <<EOF;
+Usage: @{[basename($0)]} [options] [args ...]
+ --help Show this help message
+ --no-saved-state Launch the application without state restoration (OS X 10.7 and later)
+ --guard-malloc Enable Guard Malloc (OS X only)
+ --use-web-process-xpc-service Launch the Web Process as an XPC Service (OS X only)
+EOF
+
+ if ($includeOptionsForDebugging) {
+ print STDERR <<EOF;
+ --target-web-process Debug the web process
+ --use-gdb Use GDB (this is the default when using Xcode 4.4 or earlier)
+ --use-lldb Use LLDB (this is the default when using Xcode 4.5 or later)
+EOF
+ }
+
+ exit(1);
+}
+
+sub argumentsForRunAndDebugMacWebKitApp()
+{
+ my @args = ();
+ push @args, ("-ApplePersistenceIgnoreState", "YES") if !isSnowLeopard() && checkForArgumentAndRemoveFromArrayRef("--no-saved-state", \@args);
+ push @args, ("-WebKit2UseXPCServiceForWebProcess", "YES") if shouldUseXPCServiceForWebProcess();
+ unshift @args, @ARGV;
+
+ return @args;
+}
+
+sub runMacWebKitApp($;$)
+{
+ my ($appPath, $useOpenCommand) = @_;
+ my $productDir = productDir();
+ print "Starting @{[basename($appPath)]} with DYLD_FRAMEWORK_PATH set to point to built WebKit in $productDir.\n";
+ $ENV{DYLD_FRAMEWORK_PATH} = $productDir;
+ $ENV{WEBKIT_UNSET_DYLD_FRAMEWORK_PATH} = "YES";
+
+ setUpGuardMallocIfNeeded();
+
+ if (defined($useOpenCommand) && $useOpenCommand == USE_OPEN_COMMAND) {
+ return system("open", "-W", "-a", $appPath, "--args", argumentsForRunAndDebugMacWebKitApp());
+ }
+ if (architecture()) {
+ return system "arch", "-" . architecture(), $appPath, argumentsForRunAndDebugMacWebKitApp();
+ }
+ return system { $appPath } $appPath, argumentsForRunAndDebugMacWebKitApp();
+}
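+
+# Hedged note (not from the original patch) on the "system { $appPath }"
+# form used above: the block supplies the program to execute and the list
+# supplies its argv directly, so no shell ever re-parses the arguments.
+# A commented, hypothetical illustration:
+#
+# my $app = "/Applications/Some App.app/Contents/MacOS/Some App";
+# system { $app } $app, "--flag"; # spaces in $app are preserved as one argv[0]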
+
+sub execMacWebKitAppForDebugging($)
+{
+ my ($appPath) = @_;
+ my $architectureSwitch;
+ my $argumentsSeparator;
+
+ if (debugger() eq "lldb") {
+ $architectureSwitch = "--arch";
+ $argumentsSeparator = "--";
+ } elsif (debugger() eq "gdb") {
+ $architectureSwitch = "-arch";
+ $argumentsSeparator = "--args";
+ } else {
+ die "Unknown debugger $debugger.\n";
+ }
+
+ my $debuggerPath = `xcrun -find $debugger`;
+ chomp $debuggerPath;
+ die "Can't find the $debugger executable.\n" unless -x $debuggerPath;
+
+ my $productDir = productDir();
+ $ENV{DYLD_FRAMEWORK_PATH} = $productDir;
+ $ENV{WEBKIT_UNSET_DYLD_FRAMEWORK_PATH} = "YES";
+
+ setUpGuardMallocIfNeeded();
+
+ my @architectureFlags = ($architectureSwitch, architecture());
+ if (!shouldTargetWebProcess()) {
+ print "Starting @{[basename($appPath)]} under $debugger with DYLD_FRAMEWORK_PATH set to point to built WebKit in $productDir.\n";
+ exec { $debuggerPath } $debuggerPath, @architectureFlags, $argumentsSeparator, $appPath, argumentsForRunAndDebugMacWebKitApp() or die;
+ } else {
+ if (shouldUseXPCServiceForWebProcess()) {
+ die "Targetting the Web Process is not compatible with using an XPC Service for the Web Process at this time.";
+ }
+
+ my $webProcessShimPath = File::Spec->catfile($productDir, "SecItemShim.dylib");
+ my $webProcessPath = File::Spec->catdir($productDir, "WebProcess.app");
+ my $webKit2ExecutablePath = File::Spec->catfile($productDir, "WebKit2.framework", "WebKit2");
+
+ appendToEnvironmentVariableList("DYLD_INSERT_LIBRARIES", $webProcessShimPath);
+
+ print "Starting WebProcess under $debugger with DYLD_FRAMEWORK_PATH set to point to built WebKit in $productDir.\n";
+ exec { $debuggerPath } $debuggerPath, @architectureFlags, $argumentsSeparator, $webProcessPath, $webKit2ExecutablePath, "-type", "webprocess", "-client-executable", $appPath or die;
+ }
+}
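+
+# Hedged note (not from the original patch): the web-process branch above
+# relies on dyld preloading the shim into the debugged process.
+# Conceptually, the exec amounts to a hypothetical command line like:
+#
+# DYLD_INSERT_LIBRARIES=<productDir>/SecItemShim.dylib \
+# DYLD_FRAMEWORK_PATH=<productDir> \
+# lldb --arch <arch> -- <productDir>/WebProcess.app <WebKit2 binary> \
+# -type webprocess -client-executable <appPath>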
+
+sub debugSafari
+{
+ if (isAppleMacWebKit()) {
+ checkFrameworks();
+ execMacWebKitAppForDebugging(safariPath());
+ }
+
+ if (isAppleWinWebKit()) {
+ setupCygwinEnv();
+ my $productDir = productDir();
+ chomp($ENV{WEBKITNIGHTLY} = `cygpath -wa "$productDir"`);
+ my $safariPath = safariPath();
+ chomp($safariPath = `cygpath -wa "$safariPath"`);
+ return system { $vcBuildPath } $vcBuildPath, "/debugexe", "\"$safariPath\"", @ARGV;
+ }
+
+ return 1; # Unsupported platform; can't debug Safari on this platform.
+}
+
+sub runSafari
+{
+ if (isAppleMacWebKit()) {
+ return runMacWebKitApp(safariPath());
+ }
+
+ if (isAppleWinWebKit()) {
+ my $webKitLauncherPath = File::Spec->catfile(productDir(), "WebKit.exe");
+ return system { $webKitLauncherPath } $webKitLauncherPath, @ARGV;
+ }
+
+ return 1; # Unsupported platform; can't run Safari on this platform.
+}
+
+sub runMiniBrowser
+{
+ if (isAppleMacWebKit()) {
+ return runMacWebKitApp(File::Spec->catfile(productDir(), "MiniBrowser.app", "Contents", "MacOS", "MiniBrowser"));
+ }
+
+ return 1;
+}
+
+sub debugMiniBrowser
+{
+ if (isAppleMacWebKit()) {
+ execMacWebKitAppForDebugging(File::Spec->catfile(productDir(), "MiniBrowser.app", "Contents", "MacOS", "MiniBrowser"));
+ }
+
+ return 1;
+}
+
+sub runWebKitTestRunner
+{
+ if (isAppleMacWebKit()) {
+ return runMacWebKitApp(File::Spec->catfile(productDir(), "WebKitTestRunner"));
+ } elsif (isGtk()) {
+ my $productDir = productDir();
+ my $injectedBundlePath = "$productDir/Libraries/.libs/libTestRunnerInjectedBundle";
+ print "Starting WebKitTestRunner with TEST_RUNNER_INJECTED_BUNDLE_FILENAME set to point to $injectedBundlePath.\n";
+ $ENV{TEST_RUNNER_INJECTED_BUNDLE_FILENAME} = $injectedBundlePath;
+ my @args = ("$productDir/Programs/WebKitTestRunner", @ARGV);
+ return system { $args[0] } @args;
+ }
+
+ return 1;
+}
+
+sub debugWebKitTestRunner
+{
+ if (isAppleMacWebKit()) {
+ execMacWebKitAppForDebugging(File::Spec->catfile(productDir(), "WebKitTestRunner"));
+ }
+
+ return 1;
+}
+
+sub readRegistryString
+{
+ my ($valueName) = @_;
+ chomp(my $string = `regtool --wow32 get "$valueName"`);
+ return $string;
+}
+
+sub writeRegistryString
+{
+ my ($valueName, $string) = @_;
+
+ my $error = system "regtool", "--wow32", "set", "-s", $valueName, $string;
+
+ # On Windows Vista/7 with UAC enabled, regtool will fail to modify the registry, but will still
+ # return a successful exit code. So we double-check here that the value we tried to write to the
+ # registry was really written.
+ return !$error && readRegistryString($valueName) eq $string;
+}
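+
+# Hedged usage sketch (hypothetical key path, not from the original
+# patch): because regtool can report success even when UAC blocked the
+# write, the return value of writeRegistryString() is the only reliable
+# success signal:
+#
+# writeRegistryString("/HKLM/SOFTWARE/Example/InstallDir", "C:\\Example")
+# or die "Registry write did not stick (UAC?)";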
+
+1;
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/LoadAsModule.pm b/src/third_party/blink/Tools/Scripts/webkitperl/LoadAsModule.pm
new file mode 100644
index 0000000..5c78c0e
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/LoadAsModule.pm
@@ -0,0 +1,80 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Imports Perl scripts into a package for easy unit testing.
+
+package LoadAsModule;
+
+use strict;
+use warnings;
+
+use File::Spec;
+use FindBin;
+use lib File::Spec->catdir($FindBin::Bin, "..", "..");
+use webkitdirs;
+
+use base 'Exporter';
+use vars qw(@EXPORT @EXPORT_OK %EXPORT_TAGS $VERSION);
+
+@EXPORT = ();
+@EXPORT_OK = ();
+%EXPORT_TAGS = ();
+$VERSION = '1.0';
+
+sub readFile($);
+
+sub import
+{
+ my ($self, $package, $script) = @_;
+ my $scriptPath = File::Spec->catfile(sourceDir(), "Tools", "Scripts", $script);
+ eval "
+ package $package;
+
+ use strict;
+ use warnings;
+
+ use base 'Exporter';
+ use vars qw(\@EXPORT \@EXPORT_OK \%EXPORT_TAGS \$VERSION);
+
+ \@EXPORT = ();
+ \@EXPORT_OK = ();
+ \%EXPORT_TAGS = ();
+ \$VERSION = '1.0';
+
+ sub {" . readFile($scriptPath) . "}
+ ";
+}
+
+sub readFile($)
+{
+ my $path = shift;
+ local $/ = undef; # Read in the whole file at once.
+ open FILE, "<", $path or die "Cannot open $path: $!";
+ my $contents = <FILE>;
+ close FILE;
+ return $contents;
+}
+
+1;
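+
+# Hedged usage sketch (hypothetical test file, not part of this module):
+#
+# use LoadAsModule ("SvnApply", "svn-apply");
+#
+# This compiles Tools/Scripts/svn-apply into package SvnApply without
+# executing it (the eval above wraps the script's body in an anonymous
+# sub), so a unit test can then call SvnApply::someFunction() directly.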
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/fixChangeLogPatch.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/fixChangeLogPatch.pl
new file mode 100644
index 0000000..4ee54fb
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/fixChangeLogPatch.pl
@@ -0,0 +1,525 @@
+#!/usr/bin/perl
+#
+# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+# Copyright (C) Research In Motion 2010. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of VCSUtils::fixChangeLogPatch().
+
+use strict;
+use warnings;
+
+use Test::More;
+use VCSUtils;
+
+# The source ChangeLog for these tests is the following:
+#
+# 2009-12-22 Alice <alice@email.address>
+#
+# Reviewed by Ray.
+#
+# Changed some code on 2009-12-22.
+#
+# * File:
+# * File2:
+#
+# 2009-12-21 Alice <alice@email.address>
+#
+# Reviewed by Ray.
+#
+# Changed some code on 2009-12-21.
+#
+# * File:
+# * File2:
+
+my @testCaseHashRefs = (
+{ # New test
+ diffName => "fixChangeLogPatch: [no change] In-place change.",
+ inputText => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -1,5 +1,5 @@
+ 2010-12-22 Bob <bob@email.address>
+
+- Reviewed by Sue.
++ Reviewed by Ray.
+
+ Changed some code on 2010-12-22.
+END
+ expectedReturn => {
+ patch => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -1,5 +1,5 @@
+ 2010-12-22 Bob <bob@email.address>
+
+- Reviewed by Sue.
++ Reviewed by Ray.
+
+ Changed some code on 2010-12-22.
+END
+ }
+},
+{ # New test
+ diffName => "fixChangeLogPatch: [no change] Remove first entry.",
+ inputText => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -1,11 +1,3 @@
+-2010-12-22 Bob <bob@email.address>
+-
+- Reviewed by Ray.
+-
+- Changed some code on 2010-12-22.
+-
+- * File:
+-
+ 2010-12-22 Alice <alice@email.address>
+
+ Reviewed by Ray.
+END
+ expectedReturn => {
+ patch => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -1,11 +1,3 @@
+-2010-12-22 Bob <bob@email.address>
+-
+- Reviewed by Ray.
+-
+- Changed some code on 2010-12-22.
+-
+- * File:
+-
+ 2010-12-22 Alice <alice@email.address>
+
+ Reviewed by Ray.
+END
+ }
+},
+{ # New test
+ diffName => "fixChangeLogPatch: [no change] Remove entry in the middle.",
+ inputText => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@@ -7,10 +7,6 @@
+
+ * File:
+
+-2010-12-22 Bob <bob@email.address>
+-
+- Changed some code on 2010-12-22.
+-
+ 2010-12-22 Alice <alice@email.address>
+
+ Reviewed by Ray.
+END
+ expectedReturn => {
+ patch => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@@ -7,10 +7,6 @@
+
+ * File:
+
+-2010-12-22 Bob <bob@email.address>
+-
+- Changed some code on 2010-12-22.
+-
+ 2010-12-22 Alice <alice@email.address>
+
+ Reviewed by Ray.
+END
+ }
+},
+{ # New test
+ diffName => "fixChangeLogPatch: [no change] Far apart changes (i.e. more than one chunk).",
+ inputText => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -7,7 +7,7 @@
+
+ * File:
+
+-2010-12-22 Bob <bob@email.address>
++2010-12-22 Bobby <bob@email.address>
+
+ Changed some code on 2010-12-22.
+
+@@ -21,7 +21,7 @@
+
+ * File2:
+
+-2010-12-21 Bob <bob@email.address>
++2010-12-21 Bobby <bob@email.address>
+
+ Changed some code on 2010-12-21.
+END
+ expectedReturn => {
+ patch => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -7,7 +7,7 @@
+
+ * File:
+
+-2010-12-22 Bob <bob@email.address>
++2010-12-22 Bobby <bob@email.address>
+
+ Changed some code on 2010-12-22.
+
+@@ -21,7 +21,7 @@
+
+ * File2:
+
+-2010-12-21 Bob <bob@email.address>
++2010-12-21 Bobby <bob@email.address>
+
+ Changed some code on 2010-12-21.
+END
+ }
+},
+{ # New test
+ diffName => "fixChangeLogPatch: [no change] First line is new line.",
+ inputText => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -1,3 +1,11 @@
++2009-12-22 Bob <bob@email.address>
++
++ Reviewed by Ray.
++
++ Changed some more code on 2009-12-22.
++
++ * File:
++
+ 2009-12-22 Alice <alice@email.address>
+
+ Reviewed by Ray.
+END
+ expectedReturn => {
+ patch => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -1,3 +1,11 @@
++2009-12-22 Bob <bob@email.address>
++
++ Reviewed by Ray.
++
++ Changed some more code on 2009-12-22.
++
++ * File:
++
+ 2009-12-22 Alice <alice@email.address>
+
+ Reviewed by Ray.
+END
+ }
+},
+{ # New test
+ diffName => "fixChangeLogPatch: [no change] No date string.",
+ inputText => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -6,6 +6,7 @@
+
+ * File:
+ * File2:
++ * File3:
+
+ 2009-12-21 Alice <alice@email.address>
+
+END
+ expectedReturn => {
+ patch => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -6,6 +6,7 @@
+
+ * File:
+ * File2:
++ * File3:
+
+ 2009-12-21 Alice <alice@email.address>
+
+END
+ }
+},
+{ # New test
+ diffName => "fixChangeLogPatch: New entry inserted in middle.",
+ inputText => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -11,6 +11,14 @@
+
+ Reviewed by Ray.
+
++ Changed some more code on 2009-12-21.
++
++ * File:
++
++2009-12-21 Alice <alice@email.address>
++
++ Reviewed by Ray.
++
+ Changed some code on 2009-12-21.
+
+ * File:
+END
+ expectedReturn => {
+ patch => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -1,3 +1,11 @@
++2009-12-21 Alice <alice@email.address>
++
++ Reviewed by Ray.
++
++ Changed some more code on 2009-12-21.
++
++ * File:
++
+ 2009-12-21 Alice <alice@email.address>
+
+ Reviewed by Ray.
+END
+ }
+},
+{ # New test
+ diffName => "fixChangeLogPatch: New entry inserted earlier in the file, but after an entry with the same author and date.",
+ inputText => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -70,6 +70,14 @@
+
+ 2009-12-22 Alice <alice@email.address>
+
++ Reviewed by Sue.
++
++ Changed some more code on 2009-12-22.
++
++ * File:
++
++2009-12-22 Alice <alice@email.address>
++
+ Reviewed by Ray.
+
+ Changed some code on 2009-12-22.
+END
+ expectedReturn => {
+ patch => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -1,3 +1,11 @@
++2009-12-22 Alice <alice@email.address>
++
++ Reviewed by Sue.
++
++ Changed some more code on 2009-12-22.
++
++ * File:
++
+ 2009-12-22 Alice <alice@email.address>
+
+ Reviewed by Ray.
+END
+ }
+},
+{ # New test
+ diffName => "fixChangeLogPatch: Leading context includes first line.",
+ inputText => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -1,5 +1,13 @@
+ 2009-12-22 Alice <alice@email.address>
+
++ Reviewed by Sue.
++
++ Changed some more code on 2009-12-22.
++
++ * File:
++
++2009-12-22 Alice <alice@email.address>
++
+ Reviewed by Ray.
+
+ Changed some code on 2009-12-22.
+END
+ expectedReturn => {
+ patch => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -1,3 +1,11 @@
++2009-12-22 Alice <alice@email.address>
++
++ Reviewed by Sue.
++
++ Changed some more code on 2009-12-22.
++
++ * File:
++
+ 2009-12-22 Alice <alice@email.address>
+
+ Reviewed by Ray.
+END
+ }
+},
+{ # New test
+ diffName => "fixChangeLogPatch: Leading context does not include first line.",
+ inputText => <<'END',
+@@ -2,6 +2,14 @@
+
+ Reviewed by Ray.
+
++ Changed some more code on 2009-12-22.
++
++ * File:
++
++2009-12-22 Alice <alice@email.address>
++
++ Reviewed by Ray.
++
+ Changed some code on 2009-12-22.
+
+ * File:
+END
+ expectedReturn => {
+ patch => <<'END',
+@@ -1,3 +1,11 @@
++2009-12-22 Alice <alice@email.address>
++
++ Reviewed by Ray.
++
++ Changed some more code on 2009-12-22.
++
++ * File:
++
+ 2009-12-22 Alice <alice@email.address>
+
+ Reviewed by Ray.
+END
+ }
+},
+{ # New test
+ diffName => "fixChangeLogPatch: Non-consecutive line additions.",
+
+# This can occur, for example, if the new ChangeLog entry includes
+# trailing white space in the first blank line but not the second.
+# A diff command can then match the second blank line of the new
+# ChangeLog entry with the first blank line of the old.
+# The svn diff command with the default --diff-cmd has been observed to do this.
+ inputText => <<'END',
+@@ -1,5 +1,11 @@
+ 2009-12-22 Alice <alice@email.address>
++ <pretend-whitespace>
++ Reviewed by Ray.
+
++ Changed some more code on 2009-12-22.
++
++2009-12-22 Alice <alice@email.address>
++
+ Reviewed by Ray.
+
+ Changed some code on 2009-12-22.
+END
+ expectedReturn => {
+ patch => <<'END',
+@@ -1,3 +1,9 @@
++2009-12-22 Alice <alice@email.address>
++ <pretend-whitespace>
++ Reviewed by Ray.
++
++ Changed some more code on 2009-12-22.
++
+ 2009-12-22 Alice <alice@email.address>
+
+ Reviewed by Ray.
+END
+ }
+},
+{ # New test
+ diffName => "fixChangeLogPatch: Additional edits after new entry.",
+ inputText => <<'END',
+@@ -2,10 +2,17 @@
+
+ Reviewed by Ray.
+
++ Changed some more code on 2009-12-22.
++
++ * File:
++
++2009-12-22 Alice <alice@email.address>
++
++ Reviewed by Ray.
++
+ Changed some code on 2009-12-22.
+
+ * File:
+- * File2:
+
+ 2009-12-21 Alice <alice@email.address>
+
+END
+ expectedReturn => {
+ patch => <<'END',
+@@ -1,11 +1,18 @@
++2009-12-22 Alice <alice@email.address>
++
++ Reviewed by Ray.
++
++ Changed some more code on 2009-12-22.
++
++ * File:
++
+ 2009-12-22 Alice <alice@email.address>
+
+ Reviewed by Ray.
+
+ Changed some code on 2009-12-22.
+
+ * File:
+- * File2:
+
+ 2009-12-21 Alice <alice@email.address>
+
+END
+ }
+},
+);
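+
+# Hedged note (inferred from the is_deeply() comparison below):
+# fixChangeLogPatch() returns a hash reference, and these cases exercise
+# only its "patch" key, e.g. { patch => "<fixed diff text>" }.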
+
+my $testCasesCount = @testCaseHashRefs;
+plan(tests => $testCasesCount); # Total number of assertions.
+
+foreach my $testCase (@testCaseHashRefs) {
+ my $testNameStart = "fixChangeLogPatch(): $testCase->{diffName}: comparing";
+
+ my $got = VCSUtils::fixChangeLogPatch($testCase->{inputText});
+ my $expectedReturn = $testCase->{expectedReturn};
+
+ is_deeply($got, $expectedReturn, "$testNameStart return value.");
+}
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/fixChangeLogPatchThenSetChangeLogDateAndReviewer.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/fixChangeLogPatchThenSetChangeLogDateAndReviewer.pl
new file mode 100644
index 0000000..41d9522
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/fixChangeLogPatchThenSetChangeLogDateAndReviewer.pl
@@ -0,0 +1,92 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+# Copyright (C) 2010 Research In Motion Limited. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests for setChangeLogDateAndReviewer(fixChangeLogPatch()).
+
+use strict;
+use warnings;
+
+use Test::More;
+use VCSUtils;
+
+my @testCaseHashRefs = (
+{
+ testName => "New entry inserted earlier in the file, but after an entry with the same author and date, patch applied a day later.",
+ reviewer => "Sue",
+ epochTime => 1273414321,
+ patch => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -70,6 +70,14 @@
+
+ 2010-05-08 Alice <alice@email.address>
+
++ Reviewed by NOBODY (OOPS!).
++
++ Changed some more code on 2010-05-08.
++
++ * File:
++
++2010-05-08 Alice <alice@email.address>
++
+ Reviewed by Ray.
+
+ Changed some code on 2010-05-08.
+END
+ expectedReturn => <<'END',
+--- ChangeLog
++++ ChangeLog
+@@ -1,3 +1,11 @@
++2010-05-09 Alice <alice@email.address>
++
++ Reviewed by Sue.
++
++ Changed some more code on 2010-05-08.
++
++ * File:
++
+ 2010-05-08 Alice <alice@email.address>
+
+ Reviewed by Ray.
+END
+},
+);
+
+my $testCasesCount = @testCaseHashRefs;
+plan(tests => 1 * $testCasesCount); # Total number of assertions.
+
+foreach my $testCase (@testCaseHashRefs) {
+ my $testNameStart = "setChangeLogDateAndReviewer(fixChangeLogPatch()): $testCase->{testName}: comparing";
+
+ my $patch = $testCase->{patch};
+ my $reviewer = $testCase->{reviewer};
+ my $epochTime = $testCase->{epochTime};
+
+ my $fixedChangeLog = VCSUtils::fixChangeLogPatch($patch);
+ my $got = VCSUtils::setChangeLogDateAndReviewer($fixedChangeLog->{patch}, $reviewer, $epochTime);
+ my $expectedReturn = $testCase->{expectedReturn};
+
+ is($got, $expectedReturn, "$testNameStart return value.");
+}
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/generatePatchCommand.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/generatePatchCommand.pl
new file mode 100644
index 0000000..ef38c7a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/generatePatchCommand.pl
@@ -0,0 +1,87 @@
+#!/usr/bin/perl
+#
+# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of VCSUtils::generatePatchCommand().
+
+use strict;
+use warnings;
+
+use Test::Simple tests => 10;
+use VCSUtils;
+
+# New test
+my $title = "generatePatchCommand: Undefined optional arguments.";
+
+my $argsHashRef;
+my ($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef);
+
+ok($patchCommand eq "patch -p0", $title);
+ok($isForcing == 0, $title);
+
+# New test
+$title = "generatePatchCommand: Undefined options.";
+
+my $options;
+$argsHashRef = {options => $options};
+($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef);
+
+ok($patchCommand eq "patch -p0", $title);
+ok($isForcing == 0, $title);
+
+# New test
+$title = "generatePatchCommand: --force and no \"ensure force\".";
+
+$argsHashRef = {options => ["--force"]};
+($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef);
+
+ok($patchCommand eq "patch -p0 --force", $title);
+ok($isForcing == 1, $title);
+
+# New test
+$title = "generatePatchCommand: no --force and \"ensure force\".";
+
+$argsHashRef = {ensureForce => 1};
+($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef);
+
+ok($patchCommand eq "patch -p0 --force", $title);
+ok($isForcing == 1, $title);
+
+# New test
+$title = "generatePatchCommand: \"should reverse\".";
+
+$argsHashRef = {shouldReverse => 1};
+($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef);
+
+ok($patchCommand eq "patch -p0 --reverse", $title);
+
+# New test
+$title = "generatePatchCommand: --fuzz=3, --force.";
+
+$argsHashRef = {options => ["--fuzz=3", "--force"]};
+($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef);
+
+ok($patchCommand eq "patch -p0 --force --fuzz=3", $title);
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/mergeChangeLogs.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/mergeChangeLogs.pl
new file mode 100644
index 0000000..8bdd2bd
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/mergeChangeLogs.pl
@@ -0,0 +1,336 @@
+#!/usr/bin/perl
+#
+# Copyright (C) 2010 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of VCSUtils::mergeChangeLogs().
+
+use strict;
+use warnings;
+
+use Test::Simple tests => 16;
+use File::Temp qw(tempfile);
+use VCSUtils;
+
+# Read contents of a file and return it.
+sub readFile($)
+{
+ my ($fileName) = @_;
+
+ local $/;
+ open(FH, "<", $fileName);
+ my $content = <FH>;
+ close(FH);
+
+ return $content;
+}
+
+# Write a temporary file and return the filename.
+sub writeTempFile($$$)
+{
+ my ($name, $extension, $content) = @_;
+
+ my ($FH, $fileName) = tempfile(
+ $name . "-XXXXXXXX",
+ DIR => ($ENV{'TMPDIR'} || $ENV{'TEMP'} || "/tmp"),
+ UNLINK => 0,
+ );
+ print $FH $content;
+ close $FH;
+
+ if ($extension) {
+ my $newFileName = $fileName . $extension;
+ rename($fileName, $newFileName);
+ $fileName = $newFileName;
+ }
+
+ return $fileName;
+}
+
+# --------------------------------------------------------------------------------
+
+{
+ # New test
+ my $title = "mergeChangeLogs: traditional rejected patch success";
+
+ my $fileNewerContent = <<'EOF';
+2010-01-29 Mark Rowe <mrowe@apple.com>
+
+ Fix the Mac build.
+
+ Disable ENABLE_INDEXED_DATABASE since it is "completely non-functional".
+
+2010-01-29 Simon Hausmann <simon.hausmann@nokia.com>
+
+ Rubber-stamped by Maciej Stachowiak.
+
+ Fix the ARM build.
+EOF
+ my $fileNewer = writeTempFile("file", "", $fileNewerContent);
+
+ my $fileMineContent = <<'EOF';
+***************
+*** 1,3 ****
+ 2010-01-29 Simon Hausmann <simon.hausmann@nokia.com>
+
+ Rubber-stamped by Maciej Stachowiak.
+--- 1,9 ----
++ 2010-01-29 Oliver Hunt <oliver@apple.com>
++
++ Reviewed by Darin Adler.
++
++ JSC is failing to propagate anonymous slot count on some transitions
++
+ 2010-01-29 Simon Hausmann <simon.hausmann@nokia.com>
+
+ Rubber-stamped by Maciej Stachowiak.
+EOF
+ my $fileMine = writeTempFile("file", ".rej", $fileMineContent);
+ rename($fileMine, $fileNewer . ".rej");
+ $fileMine = $fileNewer . ".rej";
+
+ my $fileOlderContent = $fileNewerContent;
+ my $fileOlder = writeTempFile("file", ".orig", $fileOlderContent);
+ rename($fileOlder, $fileNewer . ".orig");
+ $fileOlder = $fileNewer . ".orig";
+
+ my $exitStatus = mergeChangeLogs($fileMine, $fileOlder, $fileNewer);
+
+ # mergeChangeLogs() should return 1 since the patch succeeded.
+ ok($exitStatus == 1, "$title: should return 1 for success");
+
+ ok(readFile($fileMine) eq $fileMineContent, "$title: \$fileMine should be unchanged");
+ ok(readFile($fileOlder) eq $fileOlderContent, "$title: \$fileOlder should be unchanged");
+
+ my $expectedContent = <<'EOF';
+2010-01-29 Oliver Hunt <oliver@apple.com>
+
+ Reviewed by Darin Adler.
+
+ JSC is failing to propagate anonymous slot count on some transitions
+
+EOF
+ $expectedContent .= $fileNewerContent;
+ ok(readFile($fileNewer) eq $expectedContent, "$title: \$fileNewer should be updated to include patch");
+
+ unlink($fileMine, $fileOlder, $fileNewer);
+}
+
+# --------------------------------------------------------------------------------
+
+{
+ # New test
+ my $title = "mergeChangeLogs: traditional rejected patch failure";
+
+ my $fileNewerContent = <<'EOF';
+2010-01-29 Mark Rowe <mrowe@apple.com>
+
+ Fix the Mac build.
+
+ Disable ENABLE_INDEXED_DATABASE since it is "completely non-functional".
+
+2010-01-29 Simon Hausmann <simon.hausmann@nokia.com>
+
+ Rubber-stamped by Maciej Stachowiak.
+
+ Fix the ARM build.
+EOF
+ my $fileNewer = writeTempFile("file", "", $fileNewerContent);
+
+ my $fileMineContent = <<'EOF';
+***************
+*** 1,9 ****
+- 2010-01-29 Oliver Hunt <oliver@apple.com>
+-
+- Reviewed by Darin Adler.
+-
+- JSC is failing to propagate anonymous slot count on some transitions
+-
+ 2010-01-29 Simon Hausmann <simon.hausmann@nokia.com>
+
+ Rubber-stamped by Maciej Stachowiak.
+--- 1,3 ----
+ 2010-01-29 Simon Hausmann <simon.hausmann@nokia.com>
+
+ Rubber-stamped by Maciej Stachowiak.
+EOF
+ my $fileMine = writeTempFile("file", ".rej", $fileMineContent);
+ rename($fileMine, $fileNewer . ".rej");
+ $fileMine = $fileNewer . ".rej";
+
+ my $fileOlderContent = $fileNewerContent;
+ my $fileOlder = writeTempFile("file", ".orig", $fileOlderContent);
+ rename($fileOlder, $fileNewer . ".orig");
+ $fileOlder = $fileNewer . ".orig";
+
+ my $exitStatus = mergeChangeLogs($fileMine, $fileOlder, $fileNewer);
+
+ # mergeChangeLogs() should return 0 since the patch failed.
+ ok($exitStatus == 0, "$title: should return 0 for failure");
+
+ ok(readFile($fileMine) eq $fileMineContent, "$title: \$fileMine should be unchanged");
+ ok(readFile($fileOlder) eq $fileOlderContent, "$title: \$fileOlder should be unchanged");
+ ok(readFile($fileNewer) eq $fileNewerContent, "$title: \$fileNewer should be unchanged");
+
+ unlink($fileMine, $fileOlder, $fileNewer);
+}
+
+# --------------------------------------------------------------------------------
+
+{
+ # New test
+ my $title = "mergeChangeLogs: patch succeeds";
+
+ my $fileMineContent = <<'EOF';
+2010-01-29 Oliver Hunt <oliver@apple.com>
+
+ Reviewed by Darin Adler.
+
+ JSC is failing to propagate anonymous slot count on some transitions
+
+2010-01-29 Simon Hausmann <simon.hausmann@nokia.com>
+
+ Rubber-stamped by Maciej Stachowiak.
+
+ Fix the ARM build.
+EOF
+ my $fileMine = writeTempFile("fileMine", "", $fileMineContent);
+
+ my $fileOlderContent = <<'EOF';
+2010-01-29 Simon Hausmann <simon.hausmann@nokia.com>
+
+ Rubber-stamped by Maciej Stachowiak.
+
+ Fix the ARM build.
+EOF
+ my $fileOlder = writeTempFile("fileOlder", "", $fileOlderContent);
+
+ my $fileNewerContent = <<'EOF';
+2010-01-29 Mark Rowe <mrowe@apple.com>
+
+ Fix the Mac build.
+
+ Disable ENABLE_INDEXED_DATABASE since it is "completely non-functional".
+
+2010-01-29 Simon Hausmann <simon.hausmann@nokia.com>
+
+ Rubber-stamped by Maciej Stachowiak.
+
+ Fix the ARM build.
+EOF
+ my $fileNewer = writeTempFile("fileNewer", "", $fileNewerContent);
+
+ my $exitStatus = mergeChangeLogs($fileMine, $fileOlder, $fileNewer);
+
+ # mergeChangeLogs() should return 1 since the patch succeeded.
+ ok($exitStatus == 1, "$title: should return 1 for success");
+
+ ok(readFile($fileMine) eq $fileMineContent, "$title: \$fileMine should be unchanged");
+ ok(readFile($fileOlder) eq $fileOlderContent, "$title: \$fileOlder should be unchanged");
+
+ my $expectedContent = <<'EOF';
+2010-01-29 Oliver Hunt <oliver@apple.com>
+
+ Reviewed by Darin Adler.
+
+ JSC is failing to propagate anonymous slot count on some transitions
+
+EOF
+ $expectedContent .= $fileNewerContent;
+
+ ok(readFile($fileNewer) eq $expectedContent, "$title: \$fileNewer should be patched");
+
+ unlink($fileMine, $fileOlder, $fileNewer);
+}
+
+# --------------------------------------------------------------------------------
+
+{
+ # New test
+ my $title = "mergeChangeLogs: patch fails";
+
+ my $fileMineContent = <<'EOF';
+2010-01-29 Mark Rowe <mrowe@apple.com>
+
+ Fix the Mac build.
+
+ Disable ENABLE_INDEXED_DATABASE since it is "completely non-functional".
+
+2010-01-29 Simon Hausmann <simon.hausmann@nokia.com>
+
+ Rubber-stamped by Maciej Stachowiak.
+
+ Fix the ARM build.
+EOF
+ my $fileMine = writeTempFile("fileMine", "", $fileMineContent);
+
+ my $fileOlderContent = <<'EOF';
+2010-01-29 Mark Rowe <mrowe@apple.com>
+
+ Fix the Mac build.
+
+ Disable ENABLE_INDEXED_DATABASE since it is "completely non-functional".
+
+2010-01-29 Oliver Hunt <oliver@apple.com>
+
+ Reviewed by Darin Adler.
+
+ JSC is failing to propagate anonymous slot count on some transitions
+
+2010-01-29 Simon Hausmann <simon.hausmann@nokia.com>
+
+ Rubber-stamped by Maciej Stachowiak.
+
+ Fix the ARM build.
+EOF
+ my $fileOlder = writeTempFile("fileOlder", "", $fileOlderContent);
+
+ my $fileNewerContent = <<'EOF';
+2010-01-29 Oliver Hunt <oliver@apple.com>
+
+ Reviewed by Darin Adler.
+
+ JSC is failing to propagate anonymous slot count on some transitions
+
+2010-01-29 Simon Hausmann <simon.hausmann@nokia.com>
+
+ Rubber-stamped by Maciej Stachowiak.
+
+ Fix the ARM build.
+EOF
+ my $fileNewer = writeTempFile("fileNewer", "", $fileNewerContent);
+
+ my $exitStatus = mergeChangeLogs($fileMine, $fileOlder, $fileNewer);
+
+ # mergeChangeLogs() should return 0 since the patch failed.
+ ok($exitStatus == 0, "$title: should return 0 for failure");
+
+ ok(readFile($fileMine) eq $fileMineContent, "$title: \$fileMine should be unchanged");
+ ok(readFile($fileOlder) eq $fileOlderContent, "$title: \$fileOlder should be unchanged");
+
+ # $fileNewer should still exist unchanged because the patch failed
+ ok(readFile($fileNewer) eq $fileNewerContent, "$title: \$fileNewer should be unchanged");
+
+ unlink($fileMine, $fileOlder, $fileNewer);
+}
+
+# --------------------------------------------------------------------------------
+
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseChunkRange.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseChunkRange.pl
new file mode 100644
index 0000000..caee50b
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseChunkRange.pl
@@ -0,0 +1,267 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) 2011 Research In Motion Limited. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+# Copyright (C) 2012 Daniel Bates (dbates@intudata.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of VCSUtils::parseChunkRange().
+
+use strict;
+use warnings;
+
+use Test::More;
+use VCSUtils;
+
+my @testCaseHashRefs = (
+###
+# Invalid and malformed chunk range
+##
+# FIXME: We should make this set of tests more comprehensive.
+{ # New test
+ testName => "[invalid] Empty string",
+ inputText => "",
+ expectedReturn => []
+},
+{ # New test
+ testName => "[invalid] Bogus chunk range",
+ inputText => "@@ this is not valid @@",
+ expectedReturn => []
+},
+{ # New test
+ testName => "[invalid] Chunk range missing -/+ prefix",
+ inputText => "@@ 0,0 1,4 @@",
+ expectedReturn => []
+},
+{ # New test
+ testName => "[invalid] Chunk range missing commas",
+ inputText => "@@ -0 0 +1 4 @@",
+ expectedReturn => []
+},
+{ # New test
+ testName => "[invalid] Chunk range with swapped old and rew ranges",
+ inputText => "@@ +0,0 -1,4 @@",
+ expectedReturn => []
+},
+{ # New test
+ testName => "[invalid] Chunk range with leading junk",
+ inputText => "leading junk @@ -0,0 +1,4 @@",
+ expectedReturn => []
+},
+###
+# Simple test cases
+##
+{ # New test
+ testName => "Line count is 0",
+ inputText => "@@ -0,0 +1,4 @@",
+ expectedReturn => [
+{
+ startingLine => 0,
+ lineCount => 0,
+ newStartingLine => 1,
+ newLineCount => 4,
+}
+]
+},
+{ # New test
+ testName => "Line count is 1",
+ inputText => "@@ -1 +1,4 @@",
+ expectedReturn => [
+{
+ startingLine => 1,
+ lineCount => 1,
+ newStartingLine => 1,
+ newLineCount => 4,
+}
+]
+},
+{ # New test
+ testName => "Both original and new line count is 1",
+ inputText => "@@ -1 +1 @@",
+ expectedReturn => [
+{
+ startingLine => 1,
+ lineCount => 1,
+ newStartingLine => 1,
+ newLineCount => 1,
+}
+]
+},
+{ # New test
+ testName => "Line count and new line count > 1",
+ inputText => "@@ -1,2 +1,4 @@",
+ expectedReturn => [
+{
+ startingLine => 1,
+ lineCount => 2,
+ newStartingLine => 1,
+ newLineCount => 4,
+}
+]
+},
+{ # New test
+ testName => "New line count is 0",
+ inputText => "@@ -1,4 +0,0 @@",
+ expectedReturn => [
+{
+ startingLine => 1,
+ lineCount => 4,
+ newStartingLine => 0,
+ newLineCount => 0,
+}
+]
+},
+{ # New test
+ testName => "New line count is 1",
+ inputText => "@@ -1,4 +1 @@",
+ expectedReturn => [
+{
+ startingLine => 1,
+ lineCount => 4,
+ newStartingLine => 1,
+ newLineCount => 1,
+}
+]
+},
+###
+# Simple SVN 1.7 property diff chunk range tests
+##
+{ # New test
+ testName => "Line count is 0",
+ inputText => "## -0,0 +1,4 ##",
+ chunkSentinel => "##",
+ expectedReturn => [
+{
+ startingLine => 0,
+ lineCount => 0,
+ newStartingLine => 1,
+ newLineCount => 4,
+}
+]
+},
+{ # New test
+ testName => "New line count is 1",
+ inputText => "## -0,0 +1 ##",
+ chunkSentinel => "##",
+ expectedReturn => [
+{
+ startingLine => 0,
+ lineCount => 0,
+ newStartingLine => 1,
+ newLineCount => 1,
+}
+]
+},
+###
+# Chunk range followed by ending junk
+##
+{ # New test
+ testName => "Line count is 0 and chunk range has ending junk",
+ inputText => "@@ -0,0 +1,4 @@ foo()",
+ expectedReturn => [
+{
+ startingLine => 0,
+ lineCount => 0,
+ newStartingLine => 1,
+ newLineCount => 4,
+}
+]
+},
+{ # New test
+ testName => "Line count is 1 and chunk range has ending junk",
+ inputText => "@@ -1 +1,4 @@ foo()",
+ expectedReturn => [
+{
+ startingLine => 1,
+ lineCount => 1,
+ newStartingLine => 1,
+ newLineCount => 4,
+}
+]
+},
+{ # New test
+ testName => "Both original and new line count is 1 and chunk range has ending junk",
+ inputText => "@@ -1 +1 @@ foo()",
+ expectedReturn => [
+{
+ startingLine => 1,
+ lineCount => 1,
+ newStartingLine => 1,
+ newLineCount => 1,
+}
+]
+},
+{ # New test
+ testName => "Line count and new line count > 1 and chunk range has ending junk",
+ inputText => "@@ -1,2 +1,4 @@ foo()",
+ expectedReturn => [
+{
+ startingLine => 1,
+ lineCount => 2,
+ newStartingLine => 1,
+ newLineCount => 4,
+}
+]
+},
+{ # New test
+ testName => "New line count is 0 and chunk range has ending junk",
+ inputText => "@@ -1,4 +0,0 @@ foo()",
+ expectedReturn => [
+{
+ startingLine => 1,
+ lineCount => 4,
+ newStartingLine => 0,
+ newLineCount => 0,
+}
+]
+},
+{ # New test
+ testName => "New line count is 1 and chunk range has ending junk",
+ inputText => "@@ -1,4 +1 @@ foo()",
+ expectedReturn => [
+{
+ startingLine => 1,
+ lineCount => 4,
+ newStartingLine => 1,
+ newLineCount => 1,
+}
+]
+},
+);
+
+my $testCasesCount = @testCaseHashRefs;
+plan(tests => $testCasesCount);
+
+foreach my $testCase (@testCaseHashRefs) {
+ my $testNameStart = "parseChunkRange(): $testCase->{testName}: comparing";
+
+ my @got = VCSUtils::parseChunkRange($testCase->{inputText}, $testCase->{chunkSentinel});
+ my $expectedReturn = $testCase->{expectedReturn};
+
+ is_deeply(\@got, $expectedReturn, "$testNameStart return value.");
+}
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl
new file mode 100644
index 0000000..aad1da9
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl
@@ -0,0 +1,1277 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of parseDiff().
+
+use strict;
+use warnings;
+
+use Test::More;
+use VCSUtils;
+
+# The array of test cases.
+my @testCaseHashRefs = (
+{
+ # New test
+ diffName => "SVN: simple",
+ inputText => <<'END',
+Index: Makefile
+===================================================================
+--- Makefile (revision 53052)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+
+ all:
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END', # Same as input text
+Index: Makefile
+===================================================================
+--- Makefile (revision 53052)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+
+ all:
+END
+ indexPath => "Makefile",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => "53052",
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: binary file (isBinary true)",
+ inputText => <<'END',
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+Property changes on: test_file.swf
+___________________________________________________________________
+Name: svn:mime-type
+ + application/octet-stream
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END', # Same as input text
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+ indexPath => "test_file.swf",
+ isBinary => 1,
+ isSvn => 1,
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: binary file (isBinary true) using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+Property changes on: test_file.swf
+___________________________________________________________________
+Name: svn:mime-type
+ + application/octet-stream
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+),
+ expectedReturn => [
+[{
+ svnConvertedText => toWindowsLineEndings(<<'END', # Same as input text
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+),
+ indexPath => "test_file.swf",
+ isBinary => 1,
+ isSvn => 1,
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: leading junk",
+ inputText => <<'END',
+
+LEADING JUNK
+
+Index: Makefile
+===================================================================
+--- Makefile (revision 53052)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+
+ all:
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END', # Same as input text
+
+LEADING JUNK
+
+Index: Makefile
+===================================================================
+--- Makefile (revision 53052)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+
+ all:
+END
+ indexPath => "Makefile",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => "53052",
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: copied file",
+ inputText => <<'END',
+Index: Makefile_new
+===================================================================
+--- Makefile_new (revision 53131) (from Makefile:53131)
++++ Makefile_new (working copy)
+@@ -0,0 +1,1 @@
++MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+END
+ expectedReturn => [
+[{
+ copiedFromPath => "Makefile",
+ indexPath => "Makefile_new",
+ sourceRevision => "53131",
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: two diffs",
+ inputText => <<'END',
+Index: Makefile
+===================================================================
+--- Makefile (revision 53131)
++++ Makefile (working copy)
+@@ -1,1 +0,0 @@
+-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+Index: Makefile_new
+===================================================================
+--- Makefile_new (revision 53131) (from Makefile:53131)
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END',
+Index: Makefile
+===================================================================
+--- Makefile (revision 53131)
++++ Makefile (working copy)
+@@ -1,1 +0,0 @@
+-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+END
+ indexPath => "Makefile",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => "53131",
+}],
+"Index: Makefile_new\n"],
+ expectedNextLine => "===================================================================\n",
+},
+{
+ # New test
+ diffName => "SVN: SVN diff followed by Git diff", # Should not recognize Git start
+ inputText => <<'END',
+Index: Makefile
+===================================================================
+--- Makefile (revision 53131)
++++ Makefile (working copy)
+@@ -1,1 +0,0 @@
+-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+diff --git a/Makefile b/Makefile
+index f5d5e74..3b6aa92 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,1 1,1 @@ public:
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END', # Same as input text
+Index: Makefile
+===================================================================
+--- Makefile (revision 53131)
++++ Makefile (working copy)
+@@ -1,1 +0,0 @@
+-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+diff --git a/Makefile b/Makefile
+index f5d5e74..3b6aa92 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,1 1,1 @@ public:
+END
+ indexPath => "Makefile",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => "53131",
+}],
+undef],
+ expectedNextLine => undef,
+},
+####
+# Property Changes: Simple
+##
+{
+ # New test
+ diffName => "SVN: file change diff with property change diff",
+ inputText => <<'END',
+Index: Makefile
+===================================================================
+--- Makefile (revision 60021)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools
+
+ all:
+
+Property changes on: Makefile
+___________________________________________________________________
+Name: svn:executable
+ + *
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END', # Same as input text
+Index: Makefile
+===================================================================
+--- Makefile (revision 60021)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools
+
+ all:
+
+END
+ executableBitDelta => 1,
+ indexPath => "Makefile",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => "60021",
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: file change diff, followed by property change diff on different file",
+ inputText => <<'END',
+Index: Makefile
+===================================================================
+--- Makefile (revision 60021)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools
+
+ all:
+
+Property changes on: Makefile.shared
+___________________________________________________________________
+Name: svn:executable
+ + *
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END', # Same as input text
+Index: Makefile
+===================================================================
+--- Makefile (revision 60021)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools
+
+ all:
+
+END
+ indexPath => "Makefile",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => "60021",
+}],
+"Property changes on: Makefile.shared\n"],
+ expectedNextLine => "___________________________________________________________________\n",
+},
+{
+ # New test
+ diffName => "SVN: property diff, followed by file change diff",
+ inputText => <<'END',
+Property changes on: Makefile
+___________________________________________________________________
+Deleted: svn:executable
+ - *
+
+Index: Makefile.shared
+===================================================================
+--- Makefile.shared (revision 60021)
++++ Makefile.shared (working copy)
+@@ -1,3 +1,4 @@
++
+SCRIPTS_PATH ?= ../WebKitTools/Scripts
+XCODE_OPTIONS = `perl -I$(SCRIPTS_PATH) -Mwebkitdirs -e 'print XcodeOptionString()'` $(ARGS)
+END
+ expectedReturn => [
+[{
+ executableBitDelta => -1,
+ indexPath => "Makefile",
+ isSvn => 1,
+}],
+"Index: Makefile.shared\n"],
+ expectedNextLine => "===================================================================\n",
+},
+{
+ # New test
+ diffName => "SVN: property diff, followed by file change diff using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Property changes on: Makefile
+___________________________________________________________________
+Deleted: svn:executable
+ - *
+
+Index: Makefile.shared
+===================================================================
+--- Makefile.shared (revision 60021)
++++ Makefile.shared (working copy)
+@@ -1,3 +1,4 @@
++
+SCRIPTS_PATH ?= ../WebKitTools/Scripts
+XCODE_OPTIONS = `perl -I$(SCRIPTS_PATH) -Mwebkitdirs -e 'print XcodeOptionString()'` $(ARGS)
+END
+),
+ expectedReturn => [
+[{
+ executableBitDelta => -1,
+ indexPath => "Makefile",
+ isSvn => 1,
+}],
+"Index: Makefile.shared\r\n"],
+ expectedNextLine => "===================================================================\r\n",
+},
+{
+ # New test
+ diffName => "SVN: copied file with property change",
+ inputText => <<'END',
+Index: NMakefile
+===================================================================
+--- NMakefile (revision 60021) (from Makefile:60021)
++++ NMakefile (working copy)
+@@ -0,0 +1,1 @@
++MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools
+
+Property changes on: NMakefile
+___________________________________________________________________
+Added: svn:executable
+ + *
+END
+ expectedReturn => [
+[{
+ copiedFromPath => "Makefile",
+ executableBitDelta => 1,
+ indexPath => "NMakefile",
+ sourceRevision => "60021",
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: two consecutive property diffs",
+ inputText => <<'END',
+Property changes on: Makefile
+___________________________________________________________________
+Added: svn:executable
+ + *
+
+
+Property changes on: Makefile.shared
+___________________________________________________________________
+Added: svn:executable
+ + *
+END
+ expectedReturn => [
+[{
+ executableBitDelta => 1,
+ indexPath => "Makefile",
+ isSvn => 1,
+}],
+"Property changes on: Makefile.shared\n"],
+ expectedNextLine => "___________________________________________________________________\n",
+},
+{
+ # New test
+ diffName => "SVN: two consecutive property diffs using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Property changes on: Makefile
+___________________________________________________________________
+Added: svn:executable
+ + *
+
+
+Property changes on: Makefile.shared
+___________________________________________________________________
+Added: svn:executable
+ + *
+END
+),
+ expectedReturn => [
+[{
+ executableBitDelta => 1,
+ indexPath => "Makefile",
+ isSvn => 1,
+}],
+"Property changes on: Makefile.shared\r\n"],
+ expectedNextLine => "___________________________________________________________________\r\n",
+},
+####
+# Property Changes: Binary files
+##
+{
+ # New test
+ diffName => "SVN: binary file with executable bit change",
+ inputText => <<'END',
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+Property changes on: test_file.swf
+___________________________________________________________________
+Name: svn:mime-type
+ + application/octet-stream
+Name: svn:executable
+ + *
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END', # Same as input text
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+ executableBitDelta => 1,
+ indexPath => "test_file.swf",
+ isBinary => 1,
+ isSvn => 1,
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: binary file with executable bit change usng Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+Property changes on: test_file.swf
+___________________________________________________________________
+Name: svn:mime-type
+ + application/octet-stream
+Name: svn:executable
+ + *
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+),
+ expectedReturn => [
+[{
+ svnConvertedText => toWindowsLineEndings(<<'END', # Same as input text
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+),
+ executableBitDelta => 1,
+ indexPath => "test_file.swf",
+ isBinary => 1,
+ isSvn => 1,
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: binary file followed by property change on different file",
+ inputText => <<'END',
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+Property changes on: test_file.swf
+___________________________________________________________________
+Name: svn:mime-type
+ + application/octet-stream
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+
+Property changes on: Makefile
+___________________________________________________________________
+Added: svn:executable
+ + *
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END', # Same as input text
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+
+END
+ indexPath => "test_file.swf",
+ isBinary => 1,
+ isSvn => 1,
+}],
+"Property changes on: Makefile\n"],
+ expectedNextLine => "___________________________________________________________________\n",
+},
+{
+ # New test
+ diffName => "SVN: binary file followed by property change on different file using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+Property changes on: test_file.swf
+___________________________________________________________________
+Name: svn:mime-type
+ + application/octet-stream
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+
+Property changes on: Makefile
+___________________________________________________________________
+Added: svn:executable
+ + *
+END
+),
+ expectedReturn => [
+[{
+ svnConvertedText => toWindowsLineEndings(<<'END', # Same as input text
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+
+END
+),
+ indexPath => "test_file.swf",
+ isBinary => 1,
+ isSvn => 1,
+}],
+"Property changes on: Makefile\r\n"],
+ expectedNextLine => "___________________________________________________________________\r\n",
+},
+{
+ # New test
+ diffName => "SVN: binary file followed by file change on different file",
+ inputText => <<'END',
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+Property changes on: test_file.swf
+___________________________________________________________________
+Name: svn:mime-type
+ + application/octet-stream
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+
+Index: Makefile
+===================================================================
+--- Makefile (revision 60021)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools
+
+ all:
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END', # Same as input text
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+
+END
+ indexPath => "test_file.swf",
+ isBinary => 1,
+ isSvn => 1,
+}],
+"Index: Makefile\n"],
+ expectedNextLine => "===================================================================\n",
+},
+{
+ # New test
+ diffName => "SVN: binary file followed by file change on different file using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+Property changes on: test_file.swf
+___________________________________________________________________
+Name: svn:mime-type
+ + application/octet-stream
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+
+Index: Makefile
+===================================================================
+--- Makefile (revision 60021)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools
+
+ all:
+END
+),
+ expectedReturn => [
+[{
+ svnConvertedText => toWindowsLineEndings(<<'END', # Same as input text
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+
+END
+),
+ indexPath => "test_file.swf",
+ isBinary => 1,
+ isSvn => 1,
+}],
+"Index: Makefile\r\n"],
+ expectedNextLine => "===================================================================\r\n",
+},
+####
+# Property Changes: File change with property change
+##
+{
+ # New test
+ diffName => "SVN: file change diff with property change, followed by property change diff",
+ inputText => <<'END',
+Index: Makefile
+===================================================================
+--- Makefile (revision 60021)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools
+
+ all:
+
+Property changes on: Makefile
+___________________________________________________________________
+Added: svn:executable
+ + *
+
+
+Property changes on: Makefile.shared
+___________________________________________________________________
+Deleted: svn:executable
+ - *
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END', # Same as input text
+Index: Makefile
+===================================================================
+--- Makefile (revision 60021)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools
+
+ all:
+
+
+
+END
+ executableBitDelta => 1,
+ indexPath => "Makefile",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => "60021",
+}],
+"Property changes on: Makefile.shared\n"],
+ expectedNextLine => "___________________________________________________________________\n",
+},
+{
+ # New test
+ diffName => "SVN: file change diff with property change, followed by property change diff using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Index: Makefile
+===================================================================
+--- Makefile (revision 60021)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools
+
+ all:
+
+Property changes on: Makefile
+___________________________________________________________________
+Added: svn:executable
+ + *
+
+
+Property changes on: Makefile.shared
+___________________________________________________________________
+Deleted: svn:executable
+ - *
+END
+),
+ expectedReturn => [
+[{
+ svnConvertedText => toWindowsLineEndings(<<'END', # Same as input text
+Index: Makefile
+===================================================================
+--- Makefile (revision 60021)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools
+
+ all:
+
+
+
+END
+),
+ executableBitDelta => 1,
+ indexPath => "Makefile",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => "60021",
+}],
+"Property changes on: Makefile.shared\r\n"],
+ expectedNextLine => "___________________________________________________________________\r\n",
+},
+{
+ # New test
+ diffName => "SVN: file change diff with property change, followed by file change diff",
+ inputText => <<'END',
+Index: Makefile
+===================================================================
+--- Makefile (revision 60021)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools
+
+ all:
+
+Property changes on: Makefile
+___________________________________________________________________
+Name: svn:executable
+ - *
+
+Index: Makefile.shared
+===================================================================
+--- Makefile.shared (revision 60021)
++++ Makefile.shared (working copy)
+@@ -1,3 +1,4 @@
++
+SCRIPTS_PATH ?= ../WebKitTools/Scripts
+XCODE_OPTIONS = `perl -I$(SCRIPTS_PATH) -Mwebkitdirs -e 'print XcodeOptionString()'` $(ARGS)
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END', # Same as input text
+Index: Makefile
+===================================================================
+--- Makefile (revision 60021)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools
+
+ all:
+
+
+END
+ executableBitDelta => -1,
+ indexPath => "Makefile",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => "60021",
+}],
+"Index: Makefile.shared\n"],
+ expectedNextLine => "===================================================================\n",
+},
+{
+ # New test
+ diffName => "SVN: file change diff with property change, followed by file change diff using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Index: Makefile
+===================================================================
+--- Makefile (revision 60021)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools
+
+ all:
+
+Property changes on: Makefile
+___________________________________________________________________
+Name: svn:executable
+ - *
+
+Index: Makefile.shared
+===================================================================
+--- Makefile.shared (revision 60021)
++++ Makefile.shared (working copy)
+@@ -1,3 +1,4 @@
++
+SCRIPTS_PATH ?= ../WebKitTools/Scripts
+XCODE_OPTIONS = `perl -I$(SCRIPTS_PATH) -Mwebkitdirs -e 'print XcodeOptionString()'` $(ARGS)
+END
+),
+ expectedReturn => [
+[{
+ svnConvertedText => toWindowsLineEndings(<<'END', # Same as input text
+Index: Makefile
+===================================================================
+--- Makefile (revision 60021)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools
+
+ all:
+
+
+END
+),
+ executableBitDelta => -1,
+ indexPath => "Makefile",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => "60021",
+}],
+"Index: Makefile.shared\r\n"],
+ expectedNextLine => "===================================================================\r\n",
+},
+####
+# Git test cases
+##
+{
+ # New test
+ diffName => "Git: simple",
+ inputText => <<'END',
+diff --git a/Makefile b/Makefile
+index f5d5e74..3b6aa92 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,1 +1,1 @@ public:
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END',
+Index: Makefile
+index f5d5e74..3b6aa92 100644
+--- Makefile
++++ Makefile
+@@ -1,1 +1,1 @@ public:
+END
+ indexPath => "Makefile",
+ isGit => 1,
+ numTextChunks => 1,
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "Git: Append new line to the end of an existing file",
+ inputText => <<'END',
+diff --git a/foo b/foo
+index 863339f..db418b2 100644
+--- a/foo
++++ b/foo
+@@ -1 +1,2 @@
+ Passed
++
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END',
+Index: foo
+index 863339f..db418b2 100644
+--- foo
++++ foo
+@@ -1 +1,2 @@
+ Passed
++
+END
+ indexPath => "foo",
+ isGit => 1,
+ numTextChunks => 1,
+}],
+undef],
+ expectedNextLine => undef,
+},
+{ # New test
+ diffName => "Git: new file",
+ inputText => <<'END',
+diff --git a/foo.h b/foo.h
+new file mode 100644
+index 0000000..3c9f114
+--- /dev/null
++++ b/foo.h
+@@ -0,0 +1,34 @@
++<html>
+diff --git a/bar b/bar
+index d45dd40..3494526 100644
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END',
+Index: foo.h
+new file mode 100644
+index 0000000..3c9f114
+--- foo.h
++++ foo.h
+@@ -0,0 +1,34 @@
++<html>
+END
+ indexPath => "foo.h",
+ isGit => 1,
+ isNew => 1,
+ numTextChunks => 1,
+}],
+"diff --git a/bar b/bar\n"],
+ expectedNextLine => "index d45dd40..3494526 100644\n",
+},
+{ # New test
+ diffName => "Git: file deletion",
+ inputText => <<'END',
+diff --git a/foo b/foo
+deleted file mode 100644
+index 1e50d1d..0000000
+--- a/foo
++++ /dev/null
+@@ -1,1 +0,0 @@
+-line1
+diff --git a/bar b/bar
+index d45dd40..3494526 100644
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END',
+Index: foo
+deleted file mode 100644
+index 1e50d1d..0000000
+--- foo
++++ foo
+@@ -1,1 +0,0 @@
+-line1
+END
+ indexPath => "foo",
+ isDeletion => 1,
+ isGit => 1,
+ numTextChunks => 1,
+}],
+"diff --git a/bar b/bar\n"],
+ expectedNextLine => "index d45dd40..3494526 100644\n",
+},
+{
+ # New test
+ diffName => "Git: Git diff followed by SVN diff", # Should not recognize SVN start
+ inputText => <<'END',
+diff --git a/Makefile b/Makefile
+index f5d5e74..3b6aa92 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,1 +1,1 @@ public:
+Index: Makefile_new
+===================================================================
+--- Makefile_new (revision 53131) (from Makefile:53131)
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END',
+Index: Makefile
+index f5d5e74..3b6aa92 100644
+--- Makefile
++++ Makefile
+@@ -1,1 +1,1 @@ public:
+Index: Makefile_new
+===================================================================
+--- Makefile_new (revision 53131) (from Makefile:53131)
+END
+ indexPath => "Makefile",
+ isGit => 1,
+ numTextChunks => 1,
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "Git: file that only has an executable bit change",
+ inputText => <<'END',
+diff --git a/foo b/foo
+old mode 100644
+new mode 100755
+END
+ expectedReturn => [
+[{
+ svnConvertedText => <<'END',
+Index: foo
+old mode 100644
+new mode 100755
+END
+ executableBitDelta => 1,
+ indexPath => "foo",
+ isGit => 1,
+ numTextChunks => 0,
+}],
+undef],
+ expectedNextLine => undef,
+},
+####
+# Git test cases: file moves (multiple return values)
+##
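+# parseDiff() represents a Git rename as several diff hash references: a
+# deletion of the source path, a copy to the destination path, and -- when
+# the similarity index is below 100% or a mode bit changes -- a third hash
+# carrying the remaining changes (see the expected return values below).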
+{
+ diffName => "Git: rename (with similarity index 100%)",
+ inputText => <<'END',
+diff --git a/foo b/foo_new
+similarity index 100%
+rename from foo
+rename to foo_new
+diff --git a/bar b/bar
+index d45dd40..3494526 100644
+END
+ expectedReturn => [
+[{
+ indexPath => "foo",
+ isDeletion => 1,
+},
+{
+ copiedFromPath => "foo",
+ indexPath => "foo_new",
+}],
+"diff --git a/bar b/bar\n"],
+ expectedNextLine => "index d45dd40..3494526 100644\n",
+},
+{
+ diffName => "rename (with similarity index < 100%)",
+ inputText => <<'END',
+diff --git a/foo b/foo_new
+similarity index 99%
+rename from foo
+rename to foo_new
+index 1e50d1d..1459d21 100644
+--- a/foo
++++ b/foo_new
+@@ -15,3 +15,4 @@ release r deployment dep deploy:
+ line1
+ line2
+ line3
++line4
+diff --git a/bar b/bar
+index d45dd40..3494526 100644
+END
+ expectedReturn => [
+[{
+ indexPath => "foo",
+ isDeletion => 1,
+},
+{
+ copiedFromPath => "foo",
+ indexPath => "foo_new",
+},
+{
+ indexPath => "foo_new",
+ isGit => 1,
+ numTextChunks => 1,
+ svnConvertedText => <<'END',
+Index: foo_new
+similarity index 99%
+rename from foo
+rename to foo_new
+index 1e50d1d..1459d21 100644
+--- foo_new
++++ foo_new
+@@ -15,3 +15,4 @@ release r deployment dep deploy:
+ line1
+ line2
+ line3
++line4
+END
+}],
+"diff --git a/bar b/bar\n"],
+ expectedNextLine => "index d45dd40..3494526 100644\n",
+},
+{
+ diffName => "rename (with executable bit change)",
+ inputText => <<'END',
+diff --git a/foo b/foo_new
+old mode 100644
+new mode 100755
+similarity index 100%
+rename from foo
+rename to foo_new
+diff --git a/bar b/bar
+index d45dd40..3494526 100644
+END
+ expectedReturn => [
+[{
+ indexPath => "foo",
+ isDeletion => 1,
+},
+{
+ copiedFromPath => "foo",
+ indexPath => "foo_new",
+},
+{
+ executableBitDelta => 1,
+ indexPath => "foo_new",
+ isGit => 1,
+ numTextChunks => 0,
+ svnConvertedText => <<'END',
+Index: foo_new
+old mode 100644
+new mode 100755
+similarity index 100%
+rename from foo
+rename to foo_new
+END
+}],
+"diff --git a/bar b/bar\n"],
+ expectedNextLine => "index d45dd40..3494526 100644\n",
+},
+);
+
+my $testCasesCount = @testCaseHashRefs;
+plan(tests => 2 * $testCasesCount); # Total number of assertions.
+
+foreach my $testCase (@testCaseHashRefs) {
+ my $testNameStart = "parseDiff(): $testCase->{diffName}: comparing";
+
+ my $fileHandle;
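+ # Open a read-only handle over the in-memory test input; parseDiff()
+ # consumes the handle line by line, starting from the first line read here.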
+ open($fileHandle, "<", \$testCase->{inputText});
+ my $line = <$fileHandle>;
+
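+ # The shouldNotUseIndexPathEOL option presumably suppresses the target-file
+ # EOL override exercised in parseDiffWithMockFiles.pl, so svnConvertedText
+ # is compared verbatim against the input (an inference from the option name).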
+ my @got = VCSUtils::parseDiff($fileHandle, $line, {"shouldNotUseIndexPathEOL" => 1});
+ my $expectedReturn = $testCase->{expectedReturn};
+
+ is_deeply(\@got, $expectedReturn, "$testNameStart return value.");
+
+ my $gotNextLine = <$fileHandle>;
+ is($gotNextLine, $testCase->{expectedNextLine}, "$testNameStart next read line.");
+}
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl
new file mode 100644
index 0000000..8c20f65
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl
@@ -0,0 +1,121 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of parseDiffHeader().
+
+use strict;
+use warnings;
+
+use Test::More;
+use VCSUtils;
+
+# The unit tests for parseGitDiffHeader() and parseSvnDiffHeader()
+# already thoroughly test parsing each format.
+#
+# For parseDiffHeader(), it should suffice to verify that -- (1) for each
+# format, the method can return non-trivial values back for each key
+# supported by that format (e.g. "sourceRevision" for SVN), (2) the method
+# correctly sets default values when specific key-values are not set
+# (e.g. undef for "sourceRevision" for Git), and (3) key-values unique to
+# this method are set correctly (e.g. "scmFormat").
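+#
+# Each case below follows the same shape: parseDiffHeader() takes the open
+# file handle plus the first line already read from it, and returns the
+# parsed header hash reference together with the last line it read, e.g.:
+#
+#   my ($diffHashRef, $lastReadLine) = VCSUtils::parseDiffHeader($fileHandle, $line);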
+my @testCaseHashRefs = (
+####
+# SVN test cases
+##
+{ # New test
+ diffName => "SVN: non-trivial copiedFromPath and sourceRevision values",
+ inputText => <<'END',
+Index: index_path.py
+===================================================================
+--- index_path.py (revision 53048) (from copied_from_path.py:53048)
++++ index_path.py (working copy)
+@@ -0,0 +1,7 @@
++# Python file...
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: index_path.py
+===================================================================
+--- index_path.py (revision 53048) (from copied_from_path.py:53048)
++++ index_path.py (working copy)
+END
+ copiedFromPath => "copied_from_path.py",
+ indexPath => "index_path.py",
+ isSvn => 1,
+ sourceRevision => 53048,
+},
+"@@ -0,0 +1,7 @@\n"],
+ expectedNextLine => "+# Python file...\n",
+},
+####
+# Git test cases
+##
+{ # New test case
+ diffName => "Git: Non-zero executable bit",
+ inputText => <<'END',
+diff --git a/foo.exe b/foo.exe
+old mode 100644
+new mode 100755
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo.exe
+old mode 100644
+new mode 100755
+END
+ executableBitDelta => 1,
+ indexPath => "foo.exe",
+ isGit => 1,
+},
+undef],
+ expectedNextLine => undef,
+},
+);
+
+my $testCasesCount = @testCaseHashRefs;
+plan(tests => 2 * $testCasesCount); # Total number of assertions.
+
+foreach my $testCase (@testCaseHashRefs) {
+ my $testNameStart = "parseDiffHeader(): $testCase->{diffName}: comparing";
+
+ my $fileHandle;
+ open($fileHandle, "<", \$testCase->{inputText});
+ my $line = <$fileHandle>;
+
+ my @got = VCSUtils::parseDiffHeader($fileHandle, $line);
+ my $expectedReturn = $testCase->{expectedReturn};
+
+ is_deeply(\@got, $expectedReturn, "$testNameStart return value.");
+
+ my $gotNextLine = <$fileHandle>;
+ is($gotNextLine, $testCase->{expectedNextLine}, "$testNameStart next read line.");
+}
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffWithMockFiles.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffWithMockFiles.pl
new file mode 100644
index 0000000..589f53b
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffWithMockFiles.pl
@@ -0,0 +1,486 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) 2011 Research In Motion Limited. All rights reserved.
+# Copyright (C) 2013 Apple Inc. All rights reserved.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Unit tests of parseDiff() with mock files; test override of patch EOL with EOL of target file.
+
+use strict;
+use warnings;
+
+use File::Temp;
+use POSIX qw/getcwd/;
+use Test::More;
+use VCSUtils;
+
+# We should consider moving escapeNewLineCharacters() and toMacLineEndings()
+# to VCSUtils.pm if they're useful in other places.
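+#
+# escapeNewLineCharacters() makes line endings visible in test-failure output:
+# each "\r" is replaced by the two-character sequence '\r' (the character
+# itself is dropped), while each "\n" is prefixed with a literal '\n' but kept,
+# so the escaped text preserves its line structure.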
+sub escapeNewLineCharacters($)
+{
+ my ($text) = @_;
+ my @characters = split(//, $text);
+ my $result = "";
+ foreach (@characters) {
+ if (/^\r$/) {
+ $result .= '\r';
+ next;
+ }
+ if (/^\n$/) {
+ $result .= '\n';
+ }
+ $result .= $_;
+ }
+ return $result;
+}
+
+sub toMacLineEndings($)
+{
+ my ($text) = @_;
+ $text =~ s/\n/\r/g;
+ return $text;
+}
+
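+# The behavior under test in the cases below: when the file named by the
+# patch's index path exists on disk, parseDiff() rewrites the patch body to
+# use that file's line endings. Schematically, with a hypothetical one-line
+# hunk body "+line\n" and a target file whose lines end in "\r\n", the
+# returned svnConvertedText body would end in "\r\n" as well.
+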
+my $gitDiffHeaderForNewFile = <<EOF;
+diff --git a/Makefile b/Makefile
+new file mode 100644
+index 0000000..756e864
+--- /dev/null
++++ b/Makefile
+@@ -0,0 +1,17 @@
+EOF
+
+my $gitDiffHeader = <<EOF;
+diff --git a/Makefile b/Makefile
+index 756e864..04d2ae1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,3 +1,4 @@
+EOF
+
+my $svnConvertedGitDiffHeader = <<EOF;
+Index: Makefile
+index 756e864..04d2ae1 100644
+--- Makefile
++++ Makefile
+@@ -1,3 +1,4 @@
+EOF
+
+my $svnConvertedGitDiffHeaderForNewFile = <<EOF;
+Index: Makefile
+new file mode 100644
+index 0000000..756e864
+--- Makefile
++++ Makefile
+@@ -0,0 +1,17 @@
+EOF
+
+my $svnDiffHeaderForNewFile = <<EOF;
+Index: Makefile
+===================================================================
+--- Makefile (revision 0)
++++ Makefile (revision 0)
+@@ -0,0 +1,17 @@
+EOF
+
+my $svnDiffHeader = <<EOF;
+Index: Makefile
+===================================================================
+--- Makefile (revision 53052)
++++ Makefile (working copy)
+@@ -1,3 +1,4 @@
+EOF
+
+my $diffBody = <<EOF;
++
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+
+ all:
+EOF
+
+my $MakefileContents = <<EOF;
+MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+
+all:
+EOF
+
+my $mockDir = File::Temp->tempdir("parseDiffXXXX", CLEANUP => 1);
+writeToFile(File::Spec->catfile($mockDir, "MakefileWithUnixEOL"), $MakefileContents);
+writeToFile(File::Spec->catfile($mockDir, "MakefileWithWindowsEOL"), toWindowsLineEndings($MakefileContents));
+writeToFile(File::Spec->catfile($mockDir, "MakefileWithMacEOL"), toMacLineEndings($MakefileContents));
+
+# The array of test cases.
+my @testCaseHashRefs = (
+###
+# SVN test cases
+##
+{
+ # New test
+ diffName => "SVN: Patch with Unix line endings and IndexPath has Unix line endings",
+ inputText => substituteString($svnDiffHeader, "Makefile", "MakefileWithUnixEOL") . $diffBody,
+ expectedReturn => [
+[{
+ svnConvertedText => substituteString($svnDiffHeader, "Makefile", "MakefileWithUnixEOL") . $diffBody, # Same as input text
+ indexPath => "MakefileWithUnixEOL",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => "53052",
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: Patch with Windows line endings and IndexPath has Unix line endings",
+ inputText => substituteString($svnDiffHeader, "Makefile", "MakefileWithUnixEOL") . toWindowsLineEndings($diffBody),
+ expectedReturn => [
+[{
+ svnConvertedText => substituteString($svnDiffHeader, "Makefile", "MakefileWithUnixEOL") . $diffBody,
+ indexPath => "MakefileWithUnixEOL",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => "53052",
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: Patch with Windows line endings and IndexPath has Windows line endings",
+ inputText => substituteString($svnDiffHeader, "Makefile", "MakefileWithWindowsEOL") . toWindowsLineEndings($diffBody),
+ expectedReturn => [
+[{
+ svnConvertedText => substituteString($svnDiffHeader, "Makefile", "MakefileWithWindowsEOL") . toWindowsLineEndings($diffBody), # Same as input text
+ indexPath => "MakefileWithWindowsEOL",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => "53052",
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: Patch adds Windows newline to EOF and IndexPath has Windows line endings",
+ inputText => <<"EOF",
+Index: MakefileWithWindowsEOL
+===================================================================
+--- MakefileWithWindowsEOL (revision 53052)
++++ MakefileWithWindowsEOL (working copy)
+@@ -1,3 +1,4 @@\r
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools\r
+ \r
+-all:
+\\ No newline at end of file
++all:\r
++\r
+EOF
+ expectedReturn => [
+[{
+ # Same as input text
+ svnConvertedText => <<"EOF",
+Index: MakefileWithWindowsEOL
+===================================================================
+--- MakefileWithWindowsEOL (revision 53052)
++++ MakefileWithWindowsEOL (working copy)
+@@ -1,3 +1,4 @@\r
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools\r
+ \r
+-all:
+\\ No newline at end of file
++all:\r
++\r
+EOF
+ indexPath => "MakefileWithWindowsEOL",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => 53052
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: Patch adds Mac newline to EOF and IndexPath has Mac line endings",
+ inputText => <<"EOF",
+Index: MakefileWithMacEOL
+===================================================================
+--- MakefileWithMacEOL (revision 53052)
++++ MakefileWithMacEOL (working copy)
+@@ -1,3 +1,4 @@\r MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools\r \r-all:
+\\ No newline at end of file
++all:\r+\r
+EOF
+ expectedReturn => [
+[{
+ # Same as input text
+ svnConvertedText => q(Index: MakefileWithMacEOL
+===================================================================
+--- MakefileWithMacEOL (revision 53052)
++++ MakefileWithMacEOL (working copy)
+@@ -1,3 +1,4 @@\r MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools\r \r-all:
+\\ No newline at end of file
++all:\r+\r),
+ indexPath => "MakefileWithMacEOL",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => 53052
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: Patch with Unix line endings and IndexPath has Windows line endings",
+ inputText => substituteString($svnDiffHeader, "Makefile", "MakefileWithWindowsEOL") . $diffBody,
+ expectedReturn => [
+[{
+ svnConvertedText => substituteString($svnDiffHeader, "Makefile", "MakefileWithWindowsEOL") . toWindowsLineEndings($diffBody),
+ indexPath => "MakefileWithWindowsEOL",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => "53052",
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: Patch with Unix line endings and nonexistent IndexPath",
+ inputText => substituteString($svnDiffHeaderForNewFile, "Makefile", "NonexistentFile") . $diffBody,
+ expectedReturn => [
+[{
+ svnConvertedText => substituteString($svnDiffHeaderForNewFile, "Makefile", "NonexistentFile") . $diffBody, # Same as input text
+ indexPath => "NonexistentFile",
+ isSvn => 1,
+ isNew => 1,
+ numTextChunks => 1,
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: Patch with Windows line endings and nonexistent IndexPath",
+ inputText => substituteString($svnDiffHeaderForNewFile, "Makefile", "NonexistentFile") . toWindowsLineEndings($diffBody),
+ expectedReturn => [
+[{
+ svnConvertedText => substituteString($svnDiffHeaderForNewFile, "Makefile", "NonexistentFile") . toWindowsLineEndings($diffBody), # Same as input text
+ indexPath => "NonexistentFile",
+ isSvn => 1,
+ isNew => 1,
+ numTextChunks => 1,
+}],
+undef],
+ expectedNextLine => undef,
+},
+###
+# Git test cases
+##
+{
+ # New test
+ diffName => "Git: Patch with Unix line endings and IndexPath has Unix line endings",
+ inputText => substituteString($gitDiffHeader, "Makefile", "MakefileWithUnixEOL") . $diffBody,
+ expectedReturn => [
+[{
+ svnConvertedText => substituteString($svnConvertedGitDiffHeader, "Makefile", "MakefileWithUnixEOL") . $diffBody, # Same as input text
+ indexPath => "MakefileWithUnixEOL",
+ isGit => 1,
+ numTextChunks => 1,
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "Git: Patch with Windows line endings and IndexPath has Unix line endings",
+ inputText => substituteString($gitDiffHeader, "Makefile", "MakefileWithUnixEOL") . toWindowsLineEndings($diffBody),
+ expectedReturn => [
+[{
+ svnConvertedText => substituteString($svnConvertedGitDiffHeader, "Makefile", "MakefileWithUnixEOL") . $diffBody,
+ indexPath => "MakefileWithUnixEOL",
+ isGit => 1,
+ numTextChunks => 1,
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "Git: Patch with Windows line endings and IndexPath has Windows line endings",
+ inputText => substituteString($gitDiffHeader, "Makefile", "MakefileWithWindowsEOL") . toWindowsLineEndings($diffBody),
+ expectedReturn => [
+[{
+ svnConvertedText => substituteString($svnConvertedGitDiffHeader, "Makefile", "MakefileWithWindowsEOL") . toWindowsLineEndings($diffBody), # Same as input text
+ indexPath => "MakefileWithWindowsEOL",
+ isGit => 1,
+ numTextChunks => 1,
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "Git: Patch adds newline to EOF with Windows line endings and IndexPath has Windows line endings",
+ inputText => <<"EOF",
+diff --git a/MakefileWithWindowsEOL b/MakefileWithWindowsEOL
+index e7e8475..ae16fc3 100644
+--- a/MakefileWithWindowsEOL
++++ b/MakefileWithWindowsEOL
+@@ -1,3 +1,4 @@\r
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools\r
+ \r
+-all:
+\\ No newline at end of file
++all:\r
++\r
+EOF
+ expectedReturn => [
+[{
+ # Same as input text
+ svnConvertedText => <<"EOF",
+Index: MakefileWithWindowsEOL
+index e7e8475..ae16fc3 100644
+--- MakefileWithWindowsEOL
++++ MakefileWithWindowsEOL
+@@ -1,3 +1,4 @@\r
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools\r
+ \r
+-all:
+\\ No newline at end of file
++all:\r
++\r
+EOF
+ indexPath => "MakefileWithWindowsEOL",
+ isGit => 1,
+ numTextChunks => 1
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "Git: Patch adds Mac newline to EOF and IndexPath has Mac line endings",
+ inputText => <<"EOF",
+diff --git a/MakefileWithMacEOL b/MakefileWithMacEOL
+index e7e8475..ae16fc3 100644
+--- a/MakefileWithMacEOL
++++ b/MakefileWithMacEOL
+@@ -1,3 +1,4 @@\r MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools\r \r-all:
+\\ No newline at end of file
++all:\r+\r
+EOF
+ expectedReturn => [
+[{
+ # Same as input text
+ svnConvertedText => q(Index: MakefileWithMacEOL
+index e7e8475..ae16fc3 100644
+--- MakefileWithMacEOL
++++ MakefileWithMacEOL
+@@ -1,3 +1,4 @@\r MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools\r \r-all:
+\\ No newline at end of file
++all:\r+\r),
+ indexPath => "MakefileWithMacEOL",
+ isGit => 1,
+ numTextChunks => 1
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "Git: Patch with Unix line endings and IndexPath has Windows line endings",
+ inputText => substituteString($gitDiffHeader, "Makefile", "MakefileWithWindowsEOL") . $diffBody,
+ expectedReturn => [
+[{
+ svnConvertedText => substituteString($svnConvertedGitDiffHeader, "Makefile", "MakefileWithWindowsEOL") . toWindowsLineEndings($diffBody),
+ indexPath => "MakefileWithWindowsEOL",
+ isGit => 1,
+ numTextChunks => 1,
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "Git: Patch with Unix line endings and nonexistent IndexPath",
+ inputText => substituteString($gitDiffHeaderForNewFile, "Makefile", "NonexistentFile") . $diffBody,
+ expectedReturn => [
+[{
+ svnConvertedText => substituteString($svnConvertedGitDiffHeaderForNewFile, "Makefile", "NonexistentFile") . $diffBody, # Same as input text
+ indexPath => "NonexistentFile",
+ isGit => 1,
+ isNew => 1,
+ numTextChunks => 1,
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "Git: Patch with Windows line endings and nonexistent IndexPath",
+ inputText => substituteString($gitDiffHeaderForNewFile, "Makefile", "NonexistentFile") . toWindowsLineEndings($diffBody),
+ expectedReturn => [
+[{
+ svnConvertedText => substituteString($svnConvertedGitDiffHeaderForNewFile, "Makefile", "NonexistentFile") . toWindowsLineEndings($diffBody), # Same as input text
+ indexPath => "NonexistentFile",
+ isGit => 1,
+ isNew => 1,
+ numTextChunks => 1,
+}],
+undef],
+ expectedNextLine => undef,
+},
+);
+
+my $testCasesCount = @testCaseHashRefs;
+plan(tests => 2 * $testCasesCount); # Total number of assertions.
+
+my $savedCWD = getcwd();
+chdir($mockDir) or die;
+foreach my $testCase (@testCaseHashRefs) {
+ my $testNameStart = "parseDiff(): $testCase->{diffName}: comparing";
+
+ my $fileHandle;
+ open($fileHandle, "<", \$testCase->{inputText});
+ my $line = <$fileHandle>;
+
+ my @got = VCSUtils::parseDiff($fileHandle, $line);
+ my $expectedReturn = $testCase->{expectedReturn};
+
+ $got[0][0]->{svnConvertedText} = escapeNewLineCharacters($got[0][0]->{svnConvertedText});
+ $expectedReturn->[0][0]->{svnConvertedText} = escapeNewLineCharacters($expectedReturn->[0][0]->{svnConvertedText});
+ is_deeply(\@got, $expectedReturn, "$testNameStart return value.");
+
+ my $gotNextLine = <$fileHandle>;
+ is($gotNextLine, $testCase->{expectedNextLine}, "$testNameStart next read line.");
+}
+chdir($savedCWD);
+
+sub substituteString
+{
+ my ($string, $searchString, $replacementString) = @_;
+ $string =~ s/$searchString/$replacementString/g;
+ return $string;
+}
+
+sub writeToFile
+{
+ my ($file, $text) = @_;
+ open(FILE, ">$file") or die;
+ print FILE $text;
+ close(FILE);
+}
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseFirstEOL.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseFirstEOL.pl
new file mode 100644
index 0000000..dc364a5
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseFirstEOL.pl
@@ -0,0 +1,63 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) 2011 Research In Motion Limited. All rights reserved.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Unit tests of VCSUtils::parseFirstEOL().
+
+use strict;
+use warnings;
+
+use Test::Simple tests => 7;
+use VCSUtils;
+
+my $title;
+
+# New test
+$title = "parseFirstEOL: Empty string.";
+ok(!defined(firstEOLInString("")), $title);
+
+# New test
+$title = "parseFirstEOL: Line without a line ending character";
+ok(!defined(firstEOLInString("This line doesn't have a line ending character.")), $title);
+
+# New test
+$title = "parseFirstEOL: Line with Windows line ending.";
+ok(firstEOLInString("This line ends with a Windows line ending.\r\n") eq "\r\n", $title);
+
+# New test
+$title = "parseFirstEOL: Line with Unix line ending.";
+ok(firstEOLInString("This line ends with a Unix line ending.\n") eq "\n", $title);
+
+# New test
+$title = "parseFirstEOL: Line with Mac line ending.";
+ok(firstEOLInString("This line ends with a Mac line ending.\r") eq "\r", $title);
+
+# New test
+$title = "parseFirstEOL: Line with Mac line ending followed by line without a line ending.";
+ok(firstEOLInString("This line ends with a Mac line ending.\rThis line doesn't have a line ending.") eq "\r", $title);
+
+# New test
+$title = "parseFirstEOL: Line with a mix of line endings.";
+ok(firstEOLInString("This line contains a mix of line endings.\r\n\r\n\r\r\n\n\n\n") eq "\r\n", $title);
+
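+# Helper: parseFirstEOL() reads from a file handle, so wrap the string under
+# test in an in-memory handle before calling it.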
+sub firstEOLInString
+{
+ my ($string) = @_;
+ my $fileHandle;
+ open($fileHandle, "<", \$string);
+ return parseFirstEOL($fileHandle);
+}
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseGitDiffHeader.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseGitDiffHeader.pl
new file mode 100644
index 0000000..bc0d4d4
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseGitDiffHeader.pl
@@ -0,0 +1,494 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of parseGitDiffHeader().
+
+use strict;
+use warnings;
+
+use Test::More;
+use VCSUtils;
+
+# The array of test cases.
+my @testCaseHashRefs = (
+{ # New test
+ diffName => "Modified file",
+ inputText => <<'END',
+diff --git a/foo.h b/foo.h
+index f5d5e74..3b6aa92 100644
+--- a/foo.h
++++ b/foo.h
+@@ -1 +1 @@
+-file contents
++new file contents
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo.h
+index f5d5e74..3b6aa92 100644
+--- foo.h
++++ foo.h
+END
+ indexPath => "foo.h",
+},
+"@@ -1 +1 @@\n"],
+ expectedNextLine => "-file contents\n",
+},
+{ # New test
+ diffName => "new file",
+ inputText => <<'END',
+diff --git a/foo.h b/foo.h
+new file mode 100644
+index 0000000..3c9f114
+--- /dev/null
++++ b/foo.h
+@@ -0,0 +1,34 @@
++<html>
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo.h
+new file mode 100644
+index 0000000..3c9f114
+--- foo.h
++++ foo.h
+END
+ indexPath => "foo.h",
+ isNew => 1,
+},
+"@@ -0,0 +1,34 @@\n"],
+ expectedNextLine => "+<html>\n",
+},
+{ # New test
+ diffName => "file deletion",
+ inputText => <<'END',
+diff --git a/foo b/foo
+deleted file mode 100644
+index 1e50d1d..0000000
+--- a/foo
++++ /dev/null
+@@ -1,1 +0,0 @@
+-line1
+diff --git a/configure.ac b/configure.ac
+index d45dd40..3494526 100644
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo
+deleted file mode 100644
+index 1e50d1d..0000000
+--- foo
++++ foo
+END
+ indexPath => "foo",
+ isDeletion => 1,
+},
+"@@ -1,1 +0,0 @@\n"],
+ expectedNextLine => "-line1\n",
+},
+{ # New test
+ diffName => "using --no-prefix",
+ inputText => <<'END',
+diff --git foo.h foo.h
+index c925780..9e65c43 100644
+--- foo.h
++++ foo.h
+@@ -1,3 +1,17 @@
++contents
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo.h
+index c925780..9e65c43 100644
+--- foo.h
++++ foo.h
+END
+ indexPath => "foo.h",
+},
+"@@ -1,3 +1,17 @@\n"],
+ expectedNextLine => "+contents\n",
+},
+####
+# Copy operations
+##
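+# A similarity index of 100% means the content is unchanged, so no text
+# chunks follow the header; below 100% (or with a mode change) the returned
+# hash is marked isCopyWithChanges. A rename additionally sets
+# shouldDeleteSource.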
+{ # New test
+ diffName => "copy (with similarity index 100%)",
+ inputText => <<'END',
+diff --git a/foo b/foo_new
+similarity index 100%
+copy from foo
+copy to foo_new
+diff --git a/bar b/bar
+index d45dd40..3494526 100644
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo_new
+similarity index 100%
+copy from foo
+copy to foo_new
+END
+ copiedFromPath => "foo",
+ indexPath => "foo_new",
+},
+"diff --git a/bar b/bar\n"],
+ expectedNextLine => "index d45dd40..3494526 100644\n",
+},
+{ # New test
+ diffName => "copy (with similarity index < 100%)",
+ inputText => <<'END',
+diff --git a/foo b/foo_new
+similarity index 99%
+copy from foo
+copy to foo_new
+diff --git a/bar b/bar
+index d45dd40..3494526 100644
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo_new
+similarity index 99%
+copy from foo
+copy to foo_new
+END
+ copiedFromPath => "foo",
+ indexPath => "foo_new",
+ isCopyWithChanges => 1,
+},
+"diff --git a/bar b/bar\n"],
+ expectedNextLine => "index d45dd40..3494526 100644\n",
+},
+{ # New test
+ diffName => "rename (with similarity index 100%)",
+ inputText => <<'END',
+diff --git a/foo b/foo_new
+similarity index 100%
+rename from foo
+rename to foo_new
+diff --git a/bar b/bar
+index d45dd40..3494526 100644
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo_new
+similarity index 100%
+rename from foo
+rename to foo_new
+END
+ copiedFromPath => "foo",
+ indexPath => "foo_new",
+ shouldDeleteSource => 1,
+},
+"diff --git a/bar b/bar\n"],
+ expectedNextLine => "index d45dd40..3494526 100644\n",
+},
+{ # New test
+ diffName => "rename (with similarity index < 100%)",
+ inputText => <<'END',
+diff --git a/foo b/foo_new
+similarity index 99%
+rename from foo
+rename to foo_new
+index 1e50d1d..1459d21 100644
+--- a/foo
++++ b/foo_new
+@@ -15,3 +15,4 @@ release r deployment dep deploy:
+ line1
+ line2
+ line3
++line4
+diff --git a/bar b/bar
+index d45dd40..3494526 100644
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo_new
+similarity index 99%
+rename from foo
+rename to foo_new
+index 1e50d1d..1459d21 100644
+--- foo_new
++++ foo_new
+END
+ copiedFromPath => "foo",
+ indexPath => "foo_new",
+ isCopyWithChanges => 1,
+ shouldDeleteSource => 1,
+},
+"@@ -15,3 +15,4 @@ release r deployment dep deploy:\n"],
+ expectedNextLine => " line1\n",
+},
+{ # New test
+ diffName => "rename (with executable bit change)",
+ inputText => <<'END',
+diff --git a/foo b/foo_new
+old mode 100644
+new mode 100755
+similarity index 100%
+rename from foo
+rename to foo_new
+diff --git a/bar b/bar
+index d45dd40..3494526 100644
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo_new
+old mode 100644
+new mode 100755
+similarity index 100%
+rename from foo
+rename to foo_new
+END
+ copiedFromPath => "foo",
+ executableBitDelta => 1,
+ indexPath => "foo_new",
+ isCopyWithChanges => 1,
+ shouldDeleteSource => 1,
+},
+"diff --git a/bar b/bar\n"],
+ expectedNextLine => "index d45dd40..3494526 100644\n",
+},
+####
+# Binary file test cases
+##
+{
+ # New test case
+ diffName => "New binary file",
+ inputText => <<'END',
+diff --git a/foo.gif b/foo.gif
+new file mode 100644
+index 0000000000000000000000000000000000000000..64a9532e7794fcd791f6f12157406d9060151690
+GIT binary patch
+literal 7
+OcmYex&reDa;sO8*F9L)B
+
+literal 0
+HcmV?d00001
+
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo.gif
+new file mode 100644
+index 0000000000000000000000000000000000000000..64a9532e7794fcd791f6f12157406d9060151690
+GIT binary patch
+END
+ indexPath => "foo.gif",
+ isBinary => 1,
+ isNew => 1,
+},
+"literal 7\n"],
+ expectedNextLine => "OcmYex&reDa;sO8*F9L)B\n",
+},
+{
+ # New test case
+ diffName => "Deleted binary file",
+ inputText => <<'END',
+diff --git a/foo.gif b/foo.gif
+deleted file mode 100644
+index 323fae0..0000000
+GIT binary patch
+literal 0
+HcmV?d00001
+
+literal 7
+OcmYex&reDa;sO8*F9L)B
+
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo.gif
+deleted file mode 100644
+index 323fae0..0000000
+GIT binary patch
+END
+ indexPath => "foo.gif",
+ isBinary => 1,
+ isDeletion => 1,
+},
+"literal 0\n"],
+ expectedNextLine => "HcmV?d00001\n",
+},
+####
+# Executable bit test cases
+##
+{
+ # New test case
+ diffName => "Modified executable file",
+ inputText => <<'END',
+diff --git a/foo b/foo
+index d03e242..435ad3a 100755
+--- a/foo
++++ b/foo
+@@ -1 +1 @@
+-file contents
++new file contents
+
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo
+index d03e242..435ad3a 100755
+--- foo
++++ foo
+END
+ indexPath => "foo",
+},
+"@@ -1 +1 @@\n"],
+ expectedNextLine => "-file contents\n",
+},
+{
+ # New test case
+ diffName => "Making file executable (last diff)",
+ inputText => <<'END',
+diff --git a/foo.exe b/foo.exe
+old mode 100644
+new mode 100755
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo.exe
+old mode 100644
+new mode 100755
+END
+ executableBitDelta => 1,
+ indexPath => "foo.exe",
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test case
+ diffName => "Making file executable (not last diff)",
+ inputText => <<'END',
+diff --git a/foo.exe b/foo.exe
+old mode 100644
+new mode 100755
+diff --git a/another_file.txt b/another_file.txt
+index d03e242..435ad3a 100755
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo.exe
+old mode 100644
+new mode 100755
+END
+ executableBitDelta => 1,
+ indexPath => "foo.exe",
+},
+"diff --git a/another_file.txt b/another_file.txt\n"],
+ expectedNextLine => "index d03e242..435ad3a 100755\n",
+},
+{
+ # New test case
+ diffName => "New executable file",
+ inputText => <<'END',
+diff --git a/foo b/foo
+new file mode 100755
+index 0000000..d03e242
+--- /dev/null
++++ b/foo
+@@ -0,0 +1 @@
++file contents
+
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo
+new file mode 100755
+index 0000000..d03e242
+--- foo
++++ foo
+END
+ executableBitDelta => 1,
+ indexPath => "foo",
+ isNew => 1,
+},
+"@@ -0,0 +1 @@\n"],
+ expectedNextLine => "+file contents\n",
+},
+{
+ # New test case
+ diffName => "Deleted executable file",
+ inputText => <<'END',
+diff --git a/foo b/foo
+deleted file mode 100755
+index d03e242..0000000
+--- a/foo
++++ /dev/null
+@@ -1 +0,0 @@
+-file contents
+
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: foo
+deleted file mode 100755
+index d03e242..0000000
+--- foo
++++ foo
+END
+ executableBitDelta => -1,
+ indexPath => "foo",
+ isDeletion => 1,
+},
+"@@ -1 +0,0 @@\n"],
+ expectedNextLine => "-file contents\n",
+},
+);
+
+my $testCasesCount = @testCaseHashRefs;
+plan(tests => 2 * $testCasesCount); # Total number of assertions.
+
+foreach my $testCase (@testCaseHashRefs) {
+ my $testNameStart = "parseGitDiffHeader(): $testCase->{diffName}: comparing";
+
+ my $fileHandle;
+ open($fileHandle, "<", \$testCase->{inputText});
+ my $line = <$fileHandle>;
+
+ my @got = VCSUtils::parseGitDiffHeader($fileHandle, $line);
+ my $expectedReturn = $testCase->{expectedReturn};
+
+ is_deeply(\@got, $expectedReturn, "$testNameStart return value.");
+
+ my $gotNextLine = <$fileHandle>;
+ is($gotNextLine, $testCase->{expectedNextLine}, "$testNameStart next read line.");
+}
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parsePatch.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parsePatch.pl
new file mode 100644
index 0000000..6a46c5b
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parsePatch.pl
@@ -0,0 +1,94 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of parsePatch().
+
+use strict;
+use warnings;
+
+use Test::More;
+use VCSUtils;
+
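+# parsePatch() returns one diff hash reference per diff in the patch. The
+# loop at the bottom does not compare whole hashes; it checks only the keys
+# listed below on each returned hash, e.g.:
+#
+#   is($gotDiffHashRef->{indexPath}, $expectedDiffHashRef->{indexPath}, ...);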
+my @diffHashRefKeys = ( # The hash reference keys to check per diff.
+ "copiedFromPath",
+ "indexPath",
+ "sourceRevision",
+ "svnConvertedText",
+);
+
+# New test
+my $testNameStart = "parsePatch(): [SVN: Rename] ";
+my $patch = <<'END';
+Index: Makefile
+===================================================================
+--- Makefile (revision 53131)
++++ Makefile (working copy)
+@@ -1,1 +0,0 @@
+-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+Index: Makefile_new
+===================================================================
+--- Makefile_new (revision 53131) (from Makefile:53131)
++++ Makefile_new (working copy)
+@@ -0,0 +1,1 @@
++MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+END
+
+my @expectedDiffHashRefs = (
+{
+ svnConvertedText => <<'END',
+Index: Makefile
+===================================================================
+--- Makefile (revision 53131)
++++ Makefile (working copy)
+@@ -1,1 +0,0 @@
+-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+END
+ copiedFromPath => undef,
+ indexPath => "Makefile",
+ sourceRevision => "53131",
+},
+{
+ copiedFromPath => "Makefile",
+ indexPath => "Makefile_new",
+ sourceRevision => "53131",
+},
+);
+
+plan(tests => @expectedDiffHashRefs * @diffHashRefKeys);
+
+my $fileHandle;
+open($fileHandle, "<", \$patch);
+
+my @gotDiffHashRefs = parsePatch($fileHandle);
+
+my $i = 0;
+foreach my $expectedDiffHashRef (@expectedDiffHashRefs) {
+
+ my $gotDiffHashRef = $gotDiffHashRefs[$i++];
+
+ foreach my $diffHashRefKey (@diffHashRefKeys) {
+ my $testName = "${testNameStart}[diff $i] key=\"$diffHashRefKey\"";
+ is($gotDiffHashRef->{$diffHashRefKey}, $expectedDiffHashRef->{$diffHashRefKey}, $testName);
+ }
+}
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnDiffFooter.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnDiffFooter.pl
new file mode 100644
index 0000000..7c3d98c
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnDiffFooter.pl
@@ -0,0 +1,443 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) Research in Motion Limited 2010. All Rights Reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+# Copyright (C) 2012 Daniel Bates (dbates@intudata.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of parseSvnDiffProperties().
+
+use strict;
+use warnings;
+
+use Test::More;
+use VCSUtils;
+
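+# Shape of each test case, as consumed by the driver at the bottom:
+#   inputText        -- a property-change block as printed by "svn diff"
+#   expectedReturn   -- [ $propertyHashRef, $lastReadLine ], i.e. what
+#                       parseSvnDiffProperties($fileHandle, $line) returns
+#   expectedNextLine -- the line left on the handle afterwards (undef at EOF)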
+my @testCaseHashRefs = (
+####
+# Simple test cases
+##
+{
+ # New test
+ diffName => "simple: add svn:executable",
+ inputText => <<'END',
+Property changes on: FileA
+___________________________________________________________________
+Added: svn:executable
+ + *
+END
+ expectedReturn => [
+{
+ propertyPath => "FileA",
+ executableBitDelta => 1,
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "simple: add svn:mergeinfo",
+ inputText => <<'END',
+Property changes on: Makefile
+___________________________________________________________________
+Added: svn:mergeinfo
+ Merged /trunk/Makefile:r33020
+END
+ expectedReturn => [
+{
+ propertyPath => "Makefile",
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "simple: delete svn:mergeinfo",
+ inputText => <<'END',
+Property changes on: Makefile
+___________________________________________________________________
+Deleted: svn:mergeinfo
+ Reverse-merged /trunk/Makefile:r33020
+END
+ expectedReturn => [
+{
+ propertyPath => "Makefile",
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "simple: modified svn:mergeinfo",
+ inputText => <<'END',
+Property changes on: Makefile
+___________________________________________________________________
+Modified: svn:mergeinfo
+ Reverse-merged /trunk/Makefile:r33020
+ Merged /trunk/Makefile:r41697
+END
+ expectedReturn => [
+{
+ propertyPath => "Makefile",
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "simple: delete svn:executable",
+ inputText => <<'END',
+Property changes on: FileA
+___________________________________________________________________
+Deleted: svn:executable
+ - *
+END
+ expectedReturn => [
+{
+ propertyPath => "FileA",
+ executableBitDelta => -1,
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "simple: delete svn:executable using SVN 1.4 syntax",
+ inputText => <<'END',
+Property changes on: FileA
+___________________________________________________________________
+Name: svn:executable
+ - *
+END
+ expectedReturn => [
+{
+ propertyPath => "FileA",
+ executableBitDelta => -1,
+},
+undef],
+ expectedNextLine => undef,
+},
+####
+# Property value followed by empty line and start of next diff
+##
+{
+ # New test
+ diffName => "add svn:executable, followed by empty line and start of next diff",
+ inputText => <<'END',
+Property changes on: FileA
+___________________________________________________________________
+Added: svn:executable
+ + *
+
+Index: Makefile.shared
+END
+ expectedReturn => [
+{
+ propertyPath => "FileA",
+ executableBitDelta => 1,
+},
+"\n"],
+ expectedNextLine => "Index: Makefile.shared\n",
+},
+{
+ # New test
+ diffName => "add svn:executable, followed by empty line and start of next property diff",
+ inputText => <<'END',
+Property changes on: FileA
+___________________________________________________________________
+Added: svn:executable
+ + *
+
+Property changes on: Makefile.shared
+END
+ expectedReturn => [
+{
+ propertyPath => "FileA",
+ executableBitDelta => 1,
+},
+"\n"],
+ expectedNextLine => "Property changes on: Makefile.shared\n",
+},
+####
+# Property value followed by empty line and start of the binary contents
+##
+{
+ # New test
+ diffName => "add svn:executable, followed by empty line and start of binary contents",
+ inputText => <<'END',
+Property changes on: FileA
+___________________________________________________________________
+Added: svn:executable
+ + *
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+ expectedReturn => [
+{
+ propertyPath => "FileA",
+ executableBitDelta => 1,
+},
+"\n"],
+ expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\n",
+},
+{
+ # New test
+ diffName => "custom property followed by svn:executable, empty line and start of binary contents",
+ inputText => <<'END',
+Property changes on: FileA
+___________________________________________________________________
+Added: documentation
+ + This is an example sentence.
+Added: svn:executable
+ + *
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+ expectedReturn => [
+{
+ propertyPath => "FileA",
+ executableBitDelta => 1,
+},
+"\n"],
+ expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\n",
+},
+####
+# Successive properties
+##
+{
+ # New test
+ diffName => "svn:executable followed by custom property",
+ inputText => <<'END',
+Property changes on: FileA
+___________________________________________________________________
+Added: svn:executable
+ + *
+Added: documentation
+ + This is an example sentence.
+END
+ expectedReturn => [
+{
+ propertyPath => "FileA",
+ executableBitDelta => 1,
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "svn:executable followed by custom property using SVN 1.7 syntax",
+ inputText => <<'END',
+Property changes on: FileA
+___________________________________________________________________
+Added: svn:executable
+## -0,0 +1 ##
++*
+\ No newline at end of property
+Added: documentation
+## -0,0 +1 ##
++This is an example sentence.
+END
+ expectedReturn => [
+{
+ propertyPath => "FileA",
+ executableBitDelta => 1,
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "svn:executable followed by custom property without newline using SVN 1.7 syntax",
+ inputText => <<'END',
+Property changes on: FileA
+___________________________________________________________________
+Added: svn:executable
+## -0,0 +1 ##
++*
+\ No newline at end of property
+Added: documentation
+## -0,0 +1 ##
++This is an example sentence.
+\ No newline at end of property
+END
+ expectedReturn => [
+{
+ propertyPath => "FileA",
+ executableBitDelta => 1,
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "custom property followed by svn:executable",
+ inputText => <<'END',
+Property changes on: FileA
+___________________________________________________________________
+Added: documentation
+ + This is an example sentence.
+Added: svn:executable
+ + *
+END
+ expectedReturn => [
+{
+ propertyPath => "FileA",
+ executableBitDelta => 1,
+},
+undef],
+ expectedNextLine => undef,
+},
+####
+# Successive properties followed by empty line and start of next diff
+##
+{
+ # New test
+ diffName => "custom property followed by svn:executable, empty line and start of next property diff",
+ inputText => <<'END',
+Property changes on: FileA
+___________________________________________________________________
+Added: documentation
+ + This is an example sentence.
+Added: svn:executable
+ + *
+
+Property changes on: Makefile.shared
+END
+ expectedReturn => [
+{
+ propertyPath => "FileA",
+ executableBitDelta => 1,
+},
+"\n"],
+ expectedNextLine => "Property changes on: Makefile.shared\n",
+},
+{
+ # New test
+ diffName => "custom property followed by svn:executable, empty line and start of next index diff",
+ inputText => <<'END',
+Property changes on: FileA
+___________________________________________________________________
+Added: documentation
+ + This is an example sentence.
+Added: svn:executable
+ + *
+
+Index: Makefile.shared
+END
+ expectedReturn => [
+{
+ propertyPath => "FileA",
+ executableBitDelta => 1,
+},
+"\n"],
+ expectedNextLine => "Index: Makefile.shared\n",
+},
+####
+# Custom properties
+##
+# FIXME: We do not support anything other than the svn:executable property.
+# We should add support for handling other properties.
+{
+ # New test
+ diffName => "simple: custom property",
+ inputText => <<'END',
+Property changes on: FileA
+___________________________________________________________________
+Name: documentation
+ + This is an example sentence.
+END
+ expectedReturn => [
+{
+ propertyPath => "FileA",
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "custom property followed by custom property",
+ inputText => <<'END',
+Property changes on: FileA
+___________________________________________________________________
+Added: copyright
+ + Copyright (C) Research in Motion Limited 2010. All Rights Reserved.
+Added: documentation
+ + This is an example sentence.
+END
+ expectedReturn => [
+{
+ propertyPath => "FileA",
+},
+undef],
+ expectedNextLine => undef,
+},
+####
+# Malformed property diffs
+##
+# We shouldn't encounter such diffs in practice.
+{
+ # New test
+ diffName => "svn:executable followed by custom property and svn:executable",
+ inputText => <<'END',
+Property changes on: FileA
+___________________________________________________________________
+Added: svn:executable
+ + *
+Added: documentation
+ + This is an example sentence.
+Deleted: svn:executable
+ - *
+END
+ expectedReturn => [
+{
+ propertyPath => "FileA",
+ executableBitDelta => -1,
+},
+undef],
+ expectedNextLine => undef,
+},
+);
+
+my $testCasesCount = @testCaseHashRefs;
+plan(tests => 2 * $testCasesCount); # Total number of assertions.
+
+foreach my $testCase (@testCaseHashRefs) {
+ my $testNameStart = "parseSvnDiffProperties(): $testCase->{diffName}: comparing";
+
+ my $fileHandle;
+ open($fileHandle, "<", \$testCase->{inputText});
+ my $line = <$fileHandle>;
+
+ my @got = VCSUtils::parseSvnDiffProperties($fileHandle, $line);
+ my $expectedReturn = $testCase->{expectedReturn};
+
+ is_deeply(\@got, $expectedReturn, "$testNameStart return value.");
+
+ my $gotNextLine = <$fileHandle>;
+ is($gotNextLine, $testCase->{expectedNextLine}, "$testNameStart next read line.");
+}
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnDiffHeader.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnDiffHeader.pl
new file mode 100644
index 0000000..fc357c9
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnDiffHeader.pl
@@ -0,0 +1,288 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+# Copyright (C) 2012 Daniel Bates (dbates@intudata.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of parseSvnDiffHeader().
+
+use strict;
+use warnings;
+
+use Test::More;
+use VCSUtils;
+
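+# Note on svnConvertedText: it is the diff header as parseSvnDiffHeader()
+# re-emits it. The "contains path corrections" case below depends on the
+# "---" and "+++" paths being rewritten to match the "Index:" path.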
+# The array of test cases.
+my @testCaseHashRefs = (
+{
+ # New test
+ diffName => "simple diff",
+ inputText => <<'END',
+Index: WebKitTools/Scripts/VCSUtils.pm
+===================================================================
+--- WebKitTools/Scripts/VCSUtils.pm (revision 53004)
++++ WebKitTools/Scripts/VCSUtils.pm (working copy)
+@@ -32,6 +32,7 @@ use strict;
+ use warnings;
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: WebKitTools/Scripts/VCSUtils.pm
+===================================================================
+--- WebKitTools/Scripts/VCSUtils.pm (revision 53004)
++++ WebKitTools/Scripts/VCSUtils.pm (working copy)
+END
+ indexPath => "WebKitTools/Scripts/VCSUtils.pm",
+ sourceRevision => "53004",
+},
+"@@ -32,6 +32,7 @@ use strict;\n"],
+ expectedNextLine => " use warnings;\n",
+},
+{
+ # New test
+ diffName => "new file",
+ inputText => <<'END',
+Index: WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl
+===================================================================
+--- WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl (revision 0)
++++ WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl (revision 0)
+@@ -0,0 +1,262 @@
++#!/usr/bin/perl -w
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl
+===================================================================
+--- WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl (revision 0)
++++ WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl (revision 0)
+END
+ indexPath => "WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl",
+ isNew => 1,
+},
+"@@ -0,0 +1,262 @@\n"],
+ expectedNextLine => "+#!/usr/bin/perl -w\n",
+},
+{
+ # New test
+ diffName => "new file with spaces in its name",
+ inputText => <<'END',
+Index: WebKit.xcworkspace/xcshareddata/xcschemes/All Source (target WebProcess).xcscheme
+===================================================================
+--- WebKit.xcworkspace/xcshareddata/xcschemes/All Source (target WebProcess).xcscheme (revision 0)
++++ WebKit.xcworkspace/xcshareddata/xcschemes/All Source (target WebProcess).xcscheme (revision 0)
+@@ -0,0 +1,8 @@
++<?xml version="1.0" encoding="UTF-8"?>
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: WebKit.xcworkspace/xcshareddata/xcschemes/All Source (target WebProcess).xcscheme
+===================================================================
+--- WebKit.xcworkspace/xcshareddata/xcschemes/All Source (target WebProcess).xcscheme (revision 0)
++++ WebKit.xcworkspace/xcshareddata/xcschemes/All Source (target WebProcess).xcscheme (revision 0)
+END
+ indexPath => "WebKit.xcworkspace/xcshareddata/xcschemes/All Source (target WebProcess).xcscheme",
+ isNew => 1,
+},
+"@@ -0,0 +1,8 @@\n"],
+ expectedNextLine => "+<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n",
+},
+{
+ # New test
+ diffName => "copied file",
+ inputText => <<'END',
+Index: index_path.py
+===================================================================
+--- index_path.py (revision 53048) (from copied_from_path.py:53048)
++++ index_path.py (working copy)
+@@ -0,0 +1,7 @@
++# Python file...
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: index_path.py
+===================================================================
+--- index_path.py (revision 53048) (from copied_from_path.py:53048)
++++ index_path.py (working copy)
+END
+ copiedFromPath => "copied_from_path.py",
+ indexPath => "index_path.py",
+ sourceRevision => 53048,
+},
+"@@ -0,0 +1,7 @@\n"],
+ expectedNextLine => "+# Python file...\n",
+},
+{
+ # New test
+ diffName => "contains \\r\\n lines",
+ inputText => <<END, # No single quotes to allow interpolation of "\r"
+Index: index_path.py\r
+===================================================================\r
+--- index_path.py (revision 53048)\r
++++ index_path.py (working copy)\r
+@@ -0,0 +1,7 @@\r
++# Python file...\r
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<END, # No single quotes to allow interpolation of "\r"
+Index: index_path.py\r
+===================================================================\r
+--- index_path.py (revision 53048)\r
++++ index_path.py (working copy)\r
+END
+ indexPath => "index_path.py",
+ sourceRevision => 53048,
+},
+"@@ -0,0 +1,7 @@\r\n"],
+ expectedNextLine => "+# Python file...\r\n",
+},
+{
+ # New test
+ diffName => "contains path corrections",
+ inputText => <<'END',
+Index: index_path.py
+===================================================================
+--- bad_path (revision 53048) (from copied_from_path.py:53048)
++++ bad_path (working copy)
+@@ -0,0 +1,7 @@
++# Python file...
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: index_path.py
+===================================================================
+--- index_path.py (revision 53048) (from copied_from_path.py:53048)
++++ index_path.py (working copy)
+END
+ copiedFromPath => "copied_from_path.py",
+ indexPath => "index_path.py",
+ sourceRevision => 53048,
+},
+"@@ -0,0 +1,7 @@\n"],
+ expectedNextLine => "+# Python file...\n",
+},
+####
+# Binary test cases
+##
+{
+ # New test
+ diffName => "binary file",
+ inputText => <<'END',
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+Property changes on: test_file.swf
+___________________________________________________________________
+Name: svn:mime-type
+ + application/octet-stream
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+END
+ indexPath => "test_file.swf",
+ isBinary => 1,
+},
+"Property changes on: test_file.swf\n"],
+ expectedNextLine => "___________________________________________________________________\n",
+},
+{
+ # New test
+ diffName => "binary file using SVN 1.7 syntax",
+ inputText => <<'END',
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+Index: test_file.swf
+===================================================================
+--- test_file.swf
++++ test_file.swf
+
+Property changes on: test_file.swf
+___________________________________________________________________
+Added: svn:mime-type
+## -0,0 +1 ##
++application/octet-stream
+\ No newline at end of property
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+Index: test_file.swf
+===================================================================
+--- test_file.swf
++++ test_file.swf
+END
+ indexPath => "test_file.swf",
+ isBinary => 1,
+},
+"\n"],
+ expectedNextLine => "Property changes on: test_file.swf\n",
+},
+);
+
+my $testCasesCount = @testCaseHashRefs;
+plan(tests => 2 * $testCasesCount); # Total number of assertions.
+
+foreach my $testCase (@testCaseHashRefs) {
+ my $testNameStart = "parseSvnDiffHeader(): $testCase->{diffName}: comparing";
+
+ my $fileHandle;
+ open($fileHandle, "<", \$testCase->{inputText});
+ my $line = <$fileHandle>;
+
+ my @got = VCSUtils::parseSvnDiffHeader($fileHandle, $line);
+ my $expectedReturn = $testCase->{expectedReturn};
+
+ is_deeply(\@got, $expectedReturn, "$testNameStart return value.");
+
+ my $gotNextLine = <$fileHandle>;
+ is($gotNextLine, $testCase->{expectedNextLine}, "$testNameStart next read line.");
+}
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnProperty.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnProperty.pl
new file mode 100644
index 0000000..a613bde
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnProperty.pl
@@ -0,0 +1,805 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) Research in Motion Limited 2010. All Rights Reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+# Copyright (C) 2012 Daniel Bates (dbates@intudata.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of parseSvnProperty().
+
+use strict;
+use warnings;
+
+use Test::More;
+use VCSUtils;
+
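+# parseSvnProperty() returns [ { name, propertyChangeDelta, value }, $lastReadLine ].
+# From the cases below, propertyChangeDelta is +1 for an added or "Merged"
+# value and -1 for a deleted or "Reverse-merged" one. toWindowsLineEndings()
+# (presumably exported by VCSUtils alongside the parsers) rewrites the
+# heredocs' "\n" endings as "\r\n" for the Windows-line-ending cases.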
+my @testCaseHashRefs = (
+####
+# Simple test cases
+##
+{
+ # New test
+ diffName => "simple: add svn:executable",
+ inputText => <<'END',
+Added: svn:executable
+ + *
+END
+ expectedReturn => [
+{
+ name => "svn:executable",
+ propertyChangeDelta => 1,
+ value => "*",
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "simple: delete svn:executable",
+ inputText => <<'END',
+Deleted: svn:executable
+ - *
+END
+ expectedReturn => [
+{
+ name => "svn:executable",
+ propertyChangeDelta => -1,
+ value => "*",
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "simple: add svn:mergeinfo",
+ inputText => <<'END',
+Added: svn:mergeinfo
+ Merged /trunk/Makefile:r33020
+END
+ expectedReturn => [
+{
+ name => "svn:mergeinfo",
+ propertyChangeDelta => 1,
+ value => "/trunk/Makefile:r33020",
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "simple: delete svn:mergeinfo",
+ inputText => <<'END',
+Deleted: svn:mergeinfo
+ Reverse-merged /trunk/Makefile:r33020
+END
+ expectedReturn => [
+{
+ name => "svn:mergeinfo",
+ propertyChangeDelta => -1,
+ value => "/trunk/Makefile:r33020",
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "simple: modified svn:mergeinfo",
+ inputText => <<'END',
+Modified: svn:mergeinfo
+ Reverse-merged /trunk/Makefile:r33020
+ Merged /trunk/Makefile:r41697
+END
+ expectedReturn => [
+{
+ name => "svn:mergeinfo",
+ propertyChangeDelta => 1,
+ value => "/trunk/Makefile:r41697",
+},
+undef],
+ expectedNextLine => undef,
+},
+####
+# Using SVN 1.4 syntax
+##
+{
+ # New test
+ diffName => "simple: modified svn:mergeinfo using SVN 1.4 syntax",
+ inputText => <<'END',
+Name: svn:mergeinfo
+ Reverse-merged /trunk/Makefile:r33020
+ Merged /trunk/Makefile:r41697
+END
+ expectedReturn => [
+{
+ name => "svn:mergeinfo",
+ propertyChangeDelta => 1,
+ value => "/trunk/Makefile:r41697",
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "simple: delete svn:executable using SVN 1.4 syntax",
+ inputText => <<'END',
+Name: svn:executable
+ - *
+END
+ expectedReturn => [
+{
+ name => "svn:executable",
+ propertyChangeDelta => -1,
+ value => "*",
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "simple: add svn:executable using SVN 1.4 syntax",
+ inputText => <<'END',
+Name: svn:executable
+ + *
+END
+ expectedReturn => [
+{
+ name => "svn:executable",
+ propertyChangeDelta => 1,
+ value => "*",
+},
+undef],
+ expectedNextLine => undef,
+},
+####
+# Using SVN 1.7 syntax
+##
+{
+ # New test
+ diffName => "simple: add svn:executable using SVN 1.7 syntax",
+ inputText => <<'END',
+Added: svn:executable
+## -0,0 +1 ##
++*
+\ No newline at end of property
+END
+ expectedReturn => [
+{
+ name => "svn:executable",
+ propertyChangeDelta => 1,
+ value => "*",
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "simple: delete svn:executable using SVN 1.7 syntax",
+ inputText => <<'END',
+Deleted: svn:executable
+## -1 +0,0 ##
+-*
+\ No newline at end of property
+END
+ expectedReturn => [
+{
+ name => "svn:executable",
+ propertyChangeDelta => -1,
+ value => "*",
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "add svn:mime-type and add svn:executable using SVN 1.7 syntax",
+ inputText => <<'END',
+Added: svn:mime-type
+## -0,0 +1 ##
++image/png
+\ No newline at end of property
+Added: svn:executable
+## -0,0 +1 ##
++*
+\ No newline at end of property
+END
+ expectedReturn => [
+{
+ name => "svn:mime-type",
+ propertyChangeDelta => 1,
+ value => "image/png",
+},
+"Added: svn:executable\n"],
+ expectedNextLine => "## -0,0 +1 ##\n",
+},
+####
+# Property value followed by empty line and start of next diff
+##
+{
+ # New test
+ diffName => "add svn:executable, followed by empty line and start of next diff",
+ inputText => <<'END',
+Added: svn:executable
+ + *
+
+Index: Makefile.shared
+END
+ expectedReturn => [
+{
+ name => "svn:executable",
+ propertyChangeDelta => 1,
+ value => "*",
+},
+"\n"],
+ expectedNextLine => "Index: Makefile.shared\n",
+},
+{
+ # New test
+ diffName => "add svn:executable, followed by empty line and start of next diff using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Added: svn:executable
+ + *
+
+Index: Makefile.shared
+END
+),
+ expectedReturn => [
+{
+ name => "svn:executable",
+ propertyChangeDelta => 1,
+ value => "*",
+},
+"\r\n"],
+ expectedNextLine => "Index: Makefile.shared\r\n",
+},
+{
+ # New test
+ diffName => "add svn:executable, followed by empty line and start of next property diff",
+ inputText => <<'END',
+Added: svn:executable
+ + *
+
+Property changes on: Makefile.shared
+END
+ expectedReturn => [
+{
+ name => "svn:executable",
+ propertyChangeDelta => 1,
+ value => "*",
+},
+"\n"],
+ expectedNextLine => "Property changes on: Makefile.shared\n",
+},
+{
+ # New test
+ diffName => "add svn:executable, followed by empty line and start of next property diff using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Added: svn:executable
+ + *
+
+Property changes on: Makefile.shared
+END
+),
+ expectedReturn => [
+{
+ name => "svn:executable",
+ propertyChangeDelta => 1,
+ value => "*",
+},
+"\r\n"],
+ expectedNextLine => "Property changes on: Makefile.shared\r\n",
+},
+{
+ # New test
+ diffName => "multi-line '+' change, followed by empty line and start of next diff",
+ inputText => <<'END',
+Name: documentation
+ + A
+long sentence that spans
+multiple lines.
+
+Index: Makefile.shared
+END
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => 1,
+ value => "A\nlong sentence that spans\nmultiple lines.",
+},
+"\n"],
+ expectedNextLine => "Index: Makefile.shared\n",
+},
+{
+ # New test
+ diffName => "multi-line '+' change, followed by empty line and start of next diff using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Name: documentation
+ + A
+long sentence that spans
+multiple lines.
+
+Index: Makefile.shared
+END
+),
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => 1,
+ value => "A\r\nlong sentence that spans\r\nmultiple lines.",
+},
+"\r\n"],
+ expectedNextLine => "Index: Makefile.shared\r\n",
+},
+{
+ # New test
+ diffName => "multi-line '+' change, followed by empty line and start of next property diff",
+ inputText => <<'END',
+Name: documentation
+ + A
+long sentence that spans
+multiple lines.
+
+Property changes on: Makefile.shared
+END
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => 1,
+ value => "A\nlong sentence that spans\nmultiple lines.",
+},
+"\n"],
+ expectedNextLine => "Property changes on: Makefile.shared\n",
+},
+{
+ # New test
+ diffName => "multi-line '+' change, followed by empty line and start of next property diff using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Name: documentation
+ + A
+long sentence that spans
+multiple lines.
+
+Property changes on: Makefile.shared
+END
+),
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => 1,
+ value => "A\r\nlong sentence that spans\r\nmultiple lines.",
+},
+"\r\n"],
+ expectedNextLine => "Property changes on: Makefile.shared\r\n",
+},
+####
+# Property value followed by empty line and start of binary patch
+##
+{
+ # New test
+ diffName => "add svn:executable, followed by empty line and start of binary patch",
+ inputText => <<'END',
+Added: svn:executable
+ + *
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+ expectedReturn => [
+{
+ name => "svn:executable",
+ propertyChangeDelta => 1,
+ value => "*",
+},
+"\n"],
+ expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\n",
+},
+{
+ # New test
+ diffName => "add svn:executable, followed by empty line and start of binary patch using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Added: svn:executable
+ + *
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+),
+ expectedReturn => [
+{
+ name => "svn:executable",
+ propertyChangeDelta => 1,
+ value => "*",
+},
+"\r\n"],
+ expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\r\n",
+},
+{
+ # New test
+ diffName => "multi-line '+' change, followed by empty line and start of binary patch",
+ inputText => <<'END',
+Name: documentation
+ + A
+long sentence that spans
+multiple lines.
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => 1,
+ value => "A\nlong sentence that spans\nmultiple lines.",
+},
+"\n"],
+ expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\n",
+},
+{
+ # New test
+ diffName => "multi-line '+' change, followed by empty line and start of binary patch using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Name: documentation
+ + A
+long sentence that spans
+multiple lines.
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+),
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => 1,
+ value => "A\r\nlong sentence that spans\r\nmultiple lines.",
+},
+"\r\n"],
+ expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\r\n",
+},
+{
+ # New test
+ diffName => "multi-line '-' change, followed by multi-line '+' change, empty line, and start of binary patch",
+ inputText => <<'END',
+Modified: documentation
+ - A
+long sentence that spans
+multiple lines.
+ + Another
+long sentence that spans
+multiple lines.
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => 1,
+ value => "Another\nlong sentence that spans\nmultiple lines.",
+},
+"\n"],
+ expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\n",
+},
+{
+ # New test
+ diffName => "multi-line '-' change, followed by multi-line '+' change, empty line, and start of binary patch using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Modified: documentation
+ - A
+long sentence that spans
+multiple lines.
+ + Another
+long sentence that spans
+multiple lines.
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+),
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => 1,
+ value => "Another\r\nlong sentence that spans\r\nmultiple lines.",
+},
+"\r\n"],
+ expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\r\n",
+},
+####
+# Successive properties
+##
+{
+ # New test
+ diffName => "single-line '+' change followed by custom property with single-line '+' change",
+ inputText => <<'END',
+Added: svn:executable
+ + *
+Added: documentation
+ + A sentence.
+END
+ expectedReturn => [
+{
+ name => "svn:executable",
+ propertyChangeDelta => 1,
+ value => "*",
+},
+"Added: documentation\n"],
+ expectedNextLine => " + A sentence.\n",
+},
+{
+ # New test
+ diffName => "multi-line '+' change, followed by svn:executable",
+ inputText => <<'END',
+Name: documentation
+ + A
+long sentence that spans
+multiple lines.
+Name: svn:executable
+ + *
+END
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => 1,
+ value => "A\nlong sentence that spans\nmultiple lines.",
+},
+"Name: svn:executable\n"],
+ expectedNextLine => " + *\n",
+},
+{
+ # New test
+ diffName => "multi-line '-' change, followed by multi-line '+' change and add svn:executable",
+ inputText => <<'END',
+Modified: documentation
+ - A
+long sentence that spans
+multiple lines.
+ + Another
+long sentence that spans
+multiple lines.
+Added: svn:executable
+ + *
+END
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => 1,
+ value => "Another\nlong sentence that spans\nmultiple lines.",
+},
+"Added: svn:executable\n"],
+ expectedNextLine => " + *\n",
+},
+{
+ # New test
+ diffName => "'Merged' change followed by 'Merged' change",
+ inputText => <<'END',
+Added: svn:mergeinfo
+ Merged /trunk/Makefile:r33020
+ Merged /trunk/Makefile.shared:r58350
+END
+ expectedReturn => [
+{
+ name => "svn:mergeinfo",
+ propertyChangeDelta => 1,
+ value => "/trunk/Makefile.shared:r58350",
+},
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "'Reverse-merged' change followed by 'Reverse-merged' change",
+ inputText => <<'END',
+Deleted: svn:mergeinfo
+ Reverse-merged /trunk/Makefile:r33020
+ Reverse-merged /trunk/Makefile.shared:r58350
+END
+ expectedReturn => [
+{
+ name => "svn:mergeinfo",
+ propertyChangeDelta => -1,
+ value => "/trunk/Makefile.shared:r58350",
+},
+undef],
+ expectedNextLine => undef,
+},
+####
+# Property values with trailing new lines.
+##
+# FIXME: We do not support property values with trailing new lines, since it is difficult to
+# disambiguate them from the empty line that precedes the contents of a binary patch, as
+# in the test case (above): "multi-line '+' change, followed by empty line and start of binary patch".
+{
+ # New test
+ diffName => "single-line '+' with trailing new line",
+ inputText => <<'END',
+Added: documentation
+ + A sentence.
+
+END
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => 1,
+ value => "A sentence.",
+},
+"\n"],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "single-line '+' with trailing new line using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Added: documentation
+ + A sentence.
+
+END
+),
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => 1,
+ value => "A sentence.",
+},
+"\r\n"],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "single-line '+' with trailing new line, followed by empty line and start of binary patch",
+ inputText => <<'END',
+Added: documentation
+ + A sentence.
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => 1,
+ value => "A sentence.",
+},
+"\n"],
+ expectedNextLine => "\n",
+},
+{
+ # New test
+ diffName => "single-line '+' with trailing new line, followed by empty line and start of binary patch using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Added: documentation
+ + A sentence.
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+),
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => 1,
+ value => "A sentence.",
+},
+"\r\n"],
+ expectedNextLine => "\r\n",
+},
+{
+ # New test
+ diffName => "single-line '-' change with trailing new line, and single-line '+' change",
+ inputText => <<'END',
+Modified: documentation
+ - A long sentence.
+
+ + A sentence.
+END
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => -1, # Since we only interpret the '-' property.
+ value => "A long sentence.",
+},
+"\n"],
+ expectedNextLine => " + A sentence.\n",
+},
+{
+ # New test
+ diffName => "single-line '-' change with trailing new line, and single-line '+' change using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Modified: documentation
+ - A long sentence.
+
+ + A sentence.
+END
+),
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => -1, # Since we only interpret the '-' property.
+ value => "A long sentence.",
+},
+"\r\n"],
+ expectedNextLine => " + A sentence.\r\n",
+},
+{
+ # New test
+ diffName => "multi-line '-' change with trailing new line, and multi-line '+' change",
+ inputText => <<'END',
+Modified: documentation
+ - A
+long sentence that spans
+multiple lines.
+
+ + Another
+long sentence that spans
+multiple lines.
+END
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => -1, # Since we only interpret the '-' property.
+ value => "A\nlong sentence that spans\nmultiple lines.",
+},
+"\n"],
+ expectedNextLine => " + Another\n",
+},
+{
+ # New test
+ diffName => "multi-line '-' change with trailing new line, and multi-line '+' change using Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+Modified: documentation
+ - A
+long sentence that spans
+multiple lines.
+
+ + Another
+long sentence that spans
+multiple lines.
+END
+),
+ expectedReturn => [
+{
+ name => "documentation",
+ propertyChangeDelta => -1, # Since we only interpret the '-' property.
+ value => "A\r\nlong sentence that spans\r\nmultiple lines.",
+},
+"\r\n"],
+ expectedNextLine => " + Another\r\n",
+},
+);
+
+my $testCasesCount = @testCaseHashRefs;
+plan(tests => 2 * $testCasesCount); # Total number of assertions.
+
+foreach my $testCase (@testCaseHashRefs) {
+ my $testNameStart = "parseSvnProperty(): $testCase->{diffName}: comparing";
+
+ my $fileHandle;
+ open($fileHandle, "<", \$testCase->{inputText});
+ my $line = <$fileHandle>;
+
+ my @got = VCSUtils::parseSvnProperty($fileHandle, $line);
+ my $expectedReturn = $testCase->{expectedReturn};
+
+ is_deeply(\@got, $expectedReturn, "$testNameStart return value.");
+
+ my $gotNextLine = <$fileHandle>;
+ is($gotNextLine, $testCase->{expectedNextLine}, "$testNameStart next read line.");
+}
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnPropertyValue.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnPropertyValue.pl
new file mode 100644
index 0000000..33da14a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnPropertyValue.pl
@@ -0,0 +1,257 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) Research in Motion Limited 2010. All Rights Reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+# Copyright (C) 2012 Daniel Bates (dbates@intudata.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of parseSvnPropertyValue().
+
+use strict;
+use warnings;
+
+use Test::More;
+use VCSUtils;
+
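+# parseSvnPropertyValue() returns ($value, $lastReadLine): the value with its
+# trailing end-of-line stripped (internal line breaks survive, as the
+# multi-line cases show) and the first line that is not part of the value,
+# or undef once the input is exhausted.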
+my @testCaseHashRefs = (
+{
+ # New test
+ diffName => "single-line '+' change",
+ inputText => <<'END',
+ + *
+END
+ expectedReturn => ["*", undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "single-line '-' change",
+ inputText => <<'END',
+ - *
+END
+ expectedReturn => ["*", undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "'Merged' change",
+ inputText => <<'END',
+ Merged /trunk/Makefile:r33020
+END
+ expectedReturn => ["/trunk/Makefile:r33020", undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "'Reverse-merged' change",
+ inputText => <<'END',
+ Reverse-merged /trunk/Makefile:r33020
+END
+ expectedReturn => ["/trunk/Makefile:r33020", undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "single-line '-' change followed by empty line with Unix line endings",
+ inputText => <<'END',
+ - *
+
+END
+ expectedReturn => ["*", "\n"],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "single-line '-' change followed by empty line with Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+ - *
+
+END
+),
+ expectedReturn => ["*", "\r\n"],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "single-line '-' change followed by the next property",
+ inputText => <<'END',
+ - *
+Deleted: svn:executable
+END
+ expectedReturn => ["*", "Deleted: svn:executable\n"],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "multi-line '+' change and start of binary patch",
+ inputText => <<'END',
+ + A
+long sentence that spans
+multiple lines.
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+ expectedReturn => ["A\nlong sentence that spans\nmultiple lines.", "\n"],
+ expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\n",
+},
+{
+ # New test
+ diffName => "multi-line '+' change and start of binary patch with Windows line endings",
+ inputText => toWindowsLineEndings(<<'END',
+ + A
+long sentence that spans
+multiple lines.
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+END
+),
+ expectedReturn => ["A\r\nlong sentence that spans\r\nmultiple lines.", "\r\n"],
+ expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\r\n",
+},
+{
+ # New test
+ diffName => "multi-line '-' change followed by '+' single-line change",
+ inputText => <<'END',
+ - A
+long sentence that spans
+multiple lines.
+ + A single-line.
+END
+ expectedReturn => ["A\nlong sentence that spans\nmultiple lines.", " + A single-line.\n"],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "multi-line '-' change followed by the next property",
+ inputText => <<'END',
+ - A
+long sentence that spans
+multiple lines.
+Added: svn:executable
+END
+ expectedReturn => ["A\nlong sentence that spans\nmultiple lines.", "Added: svn:executable\n"],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "multi-line '-' change followed by '+' multi-line change",
+ inputText => <<'END',
+ - A
+long sentence that spans
+multiple lines.
+ + Another
+long sentence that spans
+multiple lines.
+END
+ expectedReturn => ["A\nlong sentence that spans\nmultiple lines.", " + Another\n"],
+ expectedNextLine => "long sentence that spans\n",
+},
+{
+ # New test
+ diffName => "'Reverse-merged' change followed by 'Merged' change",
+ inputText => <<'END',
+ Reverse-merged /trunk/Makefile:r33020
+ Merged /trunk/Makefile:r41697
+END
+ expectedReturn => ["/trunk/Makefile:r33020", " Merged /trunk/Makefile:r41697\n"],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "'Merged' change followed by 'Merged' change",
+ inputText => <<'END',
+ Merged /trunk/Makefile:r33020
+ Merged /trunk/Makefile.shared:r58350
+END
+ expectedReturn => ["/trunk/Makefile:r33020", " Merged /trunk/Makefile.shared:r58350\n"],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "'Reverse-merged' change followed by 'Reverse-merged' change",
+ inputText => <<'END',
+ Reverse-merged /trunk/Makefile:r33020
+ Reverse-merged /trunk/Makefile.shared:r58350
+END
+ expectedReturn => ["/trunk/Makefile:r33020", " Reverse-merged /trunk/Makefile.shared:r58350\n"],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "'Reverse-merged' change followed by 'Reverse-merged' change followed by 'Merged' change",
+ inputText => <<'END',
+ Reverse-merged /trunk/Makefile:r33020
+ Reverse-merged /trunk/Makefile.shared:r58350
+ Merged /trunk/ChangeLog:r64190
+END
+ expectedReturn => ["/trunk/Makefile:r33020", " Reverse-merged /trunk/Makefile.shared:r58350\n"],
+ expectedNextLine => " Merged /trunk/ChangeLog:r64190\n",
+},
+##
+# Using SVN 1.7 syntax
+##
+{
+ # New test
+ diffName => "single-line '+' change using SVN 1.7 syntax",
+ inputText => <<'END',
++*
+\ No newline at end of property
+END
+ expectedReturn => ["*", "\\ No newline at end of property\n"],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "single-line '-' change using SVN 1.7 syntax",
+ inputText => <<'END',
+-*
+\ No newline at end of property
+END
+ expectedReturn => ["*", "\\ No newline at end of property\n"],
+ expectedNextLine => undef,
+},
+);
+
+my $testCasesCount = @testCaseHashRefs;
+plan(tests => 2 * $testCasesCount); # Total number of assertions.
+
+foreach my $testCase (@testCaseHashRefs) {
+ my $testNameStart = "parseSvnPropertyValue(): $testCase->{diffName}: comparing";
+
+ my $fileHandle;
+ open($fileHandle, "<", \$testCase->{inputText});
+ my $line = <$fileHandle>;
+
+ my @got = VCSUtils::parseSvnPropertyValue($fileHandle, $line);
+ my $expectedReturn = $testCase->{expectedReturn};
+
+ is_deeply(\@got, $expectedReturn, "$testNameStart return value.");
+
+ my $gotNextLine = <$fileHandle>;
+ is($gotNextLine, $testCase->{expectedNextLine}, "$testNameStart next read line.");
+}
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/prepareParsedPatch.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/prepareParsedPatch.pl
new file mode 100644
index 0000000..a7ae807
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/prepareParsedPatch.pl
@@ -0,0 +1,136 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of prepareParsedPatch().
+
+use strict;
+use warnings;
+
+use Test::More;
+use VCSUtils;
+
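+# prepareParsedPatch(0, @diffHashRefs) partitions the parsed diffs. A sketch
+# of the returned hash reference, mirrored by the expected values below:
+#
+#   {
+#       copyDiffHashRefs    => [...],  # diffs whose copiedFromPath is set
+#       nonCopyDiffHashRefs => [...],  # all other diffs
+#       sourceRevisionHash  => {...},  # path => sourceRevision; keyed by
+#   }                                  # copiedFromPath for copies, indexPath otherwise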
+my $diffHashRef1 = { # not a copy, no source revision
+ copiedFromPath => undef,
+ indexPath => "indexPath1",
+ sourceRevision => undef,
+ svnConvertedText => "diff1",
+};
+my $diffHashRef2 = { # not a copy, has source revision
+ copiedFromPath => undef,
+ indexPath => "indexPath2",
+ sourceRevision => 20,
+ svnConvertedText => "diff2",
+};
+my $diffHashRef3 = { # a copy (copies always have source revision)
+ copiedFromPath => "sourcePath3",
+ indexPath => "indexPath2", # Deliberately choosing same as $diffHashRef2
+ sourceRevision => 3,
+ svnConvertedText => "diff3",
+};
+
+my @testCases = (
+{
+ # New test
+ testName => "zero diffs: empty array",
+ diffHashRefsInput => [],
+ expected => {
+ copyDiffHashRefs => [],
+ nonCopyDiffHashRefs => [],
+ sourceRevisionHash => {},
+ },
+},
+{
+ # New test
+ testName => "one diff: non-copy, no revision",
+ diffHashRefsInput => [$diffHashRef1],
+ expected => {
+ copyDiffHashRefs => [],
+ nonCopyDiffHashRefs => [$diffHashRef1],
+ sourceRevisionHash => {},
+ },
+},
+{
+ # New test
+ testName => "one diff: non-copy, has revision",
+ diffHashRefsInput => [$diffHashRef2],
+ expected => {
+ copyDiffHashRefs => [],
+ nonCopyDiffHashRefs => [$diffHashRef2],
+ sourceRevisionHash => {
+ "indexPath2" => 20,
+ }
+ },
+},
+{
+ # New test
+ testName => "one diff: copy (has revision)",
+ diffHashRefsInput => [$diffHashRef3],
+ expected => {
+ copyDiffHashRefs => [$diffHashRef3],
+ nonCopyDiffHashRefs => [],
+ sourceRevisionHash => {
+ "sourcePath3" => 3,
+ }
+ },
+},
+{
+ # New test
+ testName => "two diffs: two non-copies",
+ diffHashRefsInput => [$diffHashRef1, $diffHashRef2],
+ expected => {
+ copyDiffHashRefs => [],
+ nonCopyDiffHashRefs => [$diffHashRef1, $diffHashRef2],
+ sourceRevisionHash => {
+ "indexPath2" => 20,
+ }
+ },
+},
+{
+ # New test
+ testName => "two diffs: non-copy and copy",
+ diffHashRefsInput => [$diffHashRef2, $diffHashRef3],
+ expected => {
+ copyDiffHashRefs => [$diffHashRef3],
+ nonCopyDiffHashRefs => [$diffHashRef2],
+ sourceRevisionHash => {
+ "sourcePath3" => 3,
+ "indexPath2" => 20,
+ }
+ },
+},
+);
+
+my $testCasesCount = @testCases;
+plan(tests => $testCasesCount);
+
+foreach my $testCase (@testCases) {
+ my $testName = $testCase->{testName};
+ my @diffHashRefs = @{$testCase->{diffHashRefsInput}};
+ my $expected = $testCase->{expected};
+
+ my $got = prepareParsedPatch(0, @diffHashRefs);
+
+ is_deeply($got, $expected, $testName);
+}
+
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/removeEOL.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/removeEOL.pl
new file mode 100644
index 0000000..93a4cfb
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/removeEOL.pl
@@ -0,0 +1,56 @@
+#!/usr/bin/perl
+#
+# Copyright (C) Research In Motion Limited 2010. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Research In Motion Limited nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of VCSUtils::removeEOL().
+
+use Test::Simple tests => 5;
+use VCSUtils;
+
+my $title;
+
+# New test
+$title = "removeEOL: Undefined argument.";
+ok(removeEOL(undef) eq "", $title);
+
+# New test
+$title = "removeEOL: Line with Windows line ending.";
+ok(removeEOL("This line ends with a Windows line ending.\r\n") eq "This line ends with a Windows line ending.", $title);
+
+# New test
+$title = "removeEOL: Line with Unix line ending.";
+ok(removeEOL("This line ends with a Unix line ending.\n") eq "This line ends with a Unix line ending.", $title);
+
+# New test
+$title = "removeEOL: Line with Mac line ending.";
+ok(removeEOL("This line ends with a Mac line ending.\r") eq "This line ends with a Mac line ending.", $title);
+
+# New test
+$title = "removeEOL: Line with a mix of line endings.";
+ok(removeEOL("This line contains a mix of line endings.\r\n\r\n\r\r\n\n\n\n") eq "This line contains a mix of line endings.", $title);
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/runCommand.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/runCommand.pl
new file mode 100644
index 0000000..4514074
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/runCommand.pl
@@ -0,0 +1,75 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) 2012 Daniel Bates (dbates@intudata.com). All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of VCSUtils::runCommand().
+
+use strict;
+use warnings;
+
+use Test::More;
+use VCSUtils;
+
+use constant ENOENT => 2; # See <errno.h>
+
+# The array of test cases.
+my @testCaseHashRefs = (
+{
+ # New test
+ testName => "Simple",
+ inputArgs => ["echo", "hello"],
+ expectedReturn => {
+ exitStatus => 0,
+ stdout => "hello\n"
+ }
+},
+{
+ # New test
+ testName => "Multiple commands",
+ inputArgs => ["echo", "first-command;echo second-command"],
+ expectedReturn => {
+ exitStatus => 0,
+ stdout => "first-command;echo second-command\n"
+ }
+},
+{
+ # New test
+ testName => "Non-existent command",
+ inputArgs => ["/usr/bin/non-existent-command"],
+ expectedReturn => {
+ exitStatus => ENOENT
+ }
+}
+);
+
+my $testCasesCount = @testCaseHashRefs;
+plan(tests => $testCasesCount); # Total number of assertions.
+
+foreach my $testCase (@testCaseHashRefs) {
+ my $testNameStart = "runCommand(): $testCase->{testName}: comparing";
+
+ my $got = VCSUtils::runCommand(@{$testCase->{inputArgs}});
+ my $expectedReturn = $testCase->{expectedReturn};
+
+ is_deeply($got, $expectedReturn, "$testNameStart return value.");
+}
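+
+# Note on the fixtures above: runCommand() executes its argument list
+# directly rather than through a shell, which is why
+# "first-command;echo second-command" is echoed literally instead of
+# being split into two commands, and why a missing binary surfaces as
+# ENOENT rather than a shell "command not found" status.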
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/runPatchCommand.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/runPatchCommand.pl
new file mode 100644
index 0000000..1893255
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/runPatchCommand.pl
@@ -0,0 +1,92 @@
+#!/usr/bin/perl
+#
+# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of VCSUtils::runPatchCommand().
+
+use Test::Simple tests => 4;
+use VCSUtils;
+
+# New test
+$title = "runPatchCommand: Unsuccessful patch, forcing.";
+
+# Since $patch has no "Index:" path, passing this to runPatchCommand
+# should not affect any files.
+my $patch = <<'END';
+Garbage patch contents
+END
+
+# We call via callSilently() to avoid output like the following to STDERR:
+# patch: **** Only garbage was found in the patch input.
+$argsHashRef = {ensureForce => 1};
+$exitStatus = callSilently(\&runPatchCommand, $patch, ".", "file_to_patch.txt", $argsHashRef);
+
+ok($exitStatus != 0, $title);
+
+# New test
+$title = "runPatchCommand: New file, --dry-run.";
+
+# This file should not exist after the tests, but we take care with the
+# file name and contents just in case.
+my $fileToPatch = "temp_OK_TO_ERASE__README_FOR_MORE.txt";
+$patch = <<END;
+Index: $fileToPatch
+===================================================================
+--- $fileToPatch (revision 0)
++++ $fileToPatch (revision 0)
+@@ -0,0 +1,5 @@
++This is a test file for WebKitTools/Scripts/VCSUtils_unittest.pl.
++This file should not have gotten created on your system.
++If it did, some unit tests don't seem to be working quite right:
++It would be great if you could file a bug report. Thanks!
++---------------------------------------------------------------------
+END
+
+# --dry-run prevents creating any files.
+# --silent suppresses the success message to STDOUT.
+$argsHashRef = {options => ["--dry-run", "--silent"]};
+$exitStatus = runPatchCommand($patch, ".", $fileToPatch, $argsHashRef);
+
+ok($exitStatus == 0, $title);
+
+# New test
+$title = "runPatchCommand: New file: \"$fileToPatch\".";
+
+$argsHashRef = {options => ["--silent"]};
+$exitStatus = runPatchCommand($patch, ".", $fileToPatch, $argsHashRef);
+
+ok($exitStatus == 0, $title);
+
+# New test
+$title = "runPatchCommand: Reverse new file (clean up previous).";
+
+$argsHashRef = {shouldReverse => 1,
+ options => ["--silent", "--remove-empty-files"]}; # To clean up.
+$exitStatus = runPatchCommand($patch, ".", $fileToPatch, $argsHashRef);
+ok($exitStatus == 0, $title);
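+
+# Taken together, these cases document the runPatchCommand() argument
+# hash as exercised here: ensureForce applies a patch even when patch(1)
+# considers it garbage, shouldReverse un-applies a previously applied
+# patch, and options is forwarded verbatim to patch(1) (e.g. --dry-run,
+# --silent, --remove-empty-files).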
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/setChangeLogDateAndReviewer.pl b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/setChangeLogDateAndReviewer.pl
new file mode 100644
index 0000000..01f6b26
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/VCSUtils_unittest/setChangeLogDateAndReviewer.pl
@@ -0,0 +1,128 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of setChangeLogDateAndReviewer().
+
+use strict;
+use warnings;
+
+use Test::More;
+use VCSUtils;
+
+my @testCaseHashRefs = (
+{
+ testName => "reviewer defined and \"NOBODY (OOPS!)\" in leading junk",
+ reviewer => "John Doe",
+ epochTime => 1273414321,
+ patch => <<'END',
+Subject: [PATCH]
+
+Reviewed by NOBODY (OOPS!).
+
+diff --git a/WebCore/ChangeLog b/WebCore/ChangeLog
+--- a/WebCore/ChangeLog
++++ b/WebCore/ChangeLog
+@@ -1,3 +1,15 @@
++2010-05-08 Chris Jerdonek <cjerdonek@webkit.org>
++
++ Reviewed by NOBODY (OOPS!).
++
+ 2010-05-08 Chris Jerdonek <cjerdonek@webkit.org>
+
+ Reviewed by Jane Doe.
+END
+ expectedReturn => <<'END',
+Subject: [PATCH]
+
+Reviewed by NOBODY (OOPS!).
+
+diff --git a/WebCore/ChangeLog b/WebCore/ChangeLog
+--- a/WebCore/ChangeLog
++++ b/WebCore/ChangeLog
+@@ -1,3 +1,15 @@
++2010-05-09 Chris Jerdonek <cjerdonek@webkit.org>
++
++ Reviewed by John Doe.
++
+ 2010-05-08 Chris Jerdonek <cjerdonek@webkit.org>
+
+ Reviewed by Jane Doe.
+END
+},
+{
+ testName => "reviewer not defined and \"NOBODY (OOPS!)\" in leading junk",
+ reviewer => undef,
+ epochTime => 1273414321,
+ patch => <<'END',
+Subject: [PATCH]
+
+Reviewed by NOBODY (OOPS!).
+
+diff --git a/WebCore/ChangeLog b/WebCore/ChangeLog
+--- a/WebCore/ChangeLog
++++ b/WebCore/ChangeLog
+@@ -1,3 +1,15 @@
++2010-05-08 Chris Jerdonek <cjerdonek@webkit.org>
++
++ Reviewed by NOBODY (OOPS!).
++
+ 2010-05-08 Chris Jerdonek <cjerdonek@webkit.org>
+
+ Reviewed by Jane Doe.
+END
+ expectedReturn => <<'END',
+Subject: [PATCH]
+
+Reviewed by NOBODY (OOPS!).
+
+diff --git a/WebCore/ChangeLog b/WebCore/ChangeLog
+--- a/WebCore/ChangeLog
++++ b/WebCore/ChangeLog
+@@ -1,3 +1,15 @@
++2010-05-09 Chris Jerdonek <cjerdonek@webkit.org>
++
++ Reviewed by NOBODY (OOPS!).
++
+ 2010-05-08 Chris Jerdonek <cjerdonek@webkit.org>
+
+ Reviewed by Jane Doe.
+END
+},
+);
+
+my $testCasesCount = @testCaseHashRefs;
+plan(tests => 1 * $testCasesCount); # Total number of assertions.
+
+foreach my $testCase (@testCaseHashRefs) {
+ my $testNameStart = "setChangeLogDateAndReviewer(): $testCase->{testName}: comparing";
+
+ my $patch = $testCase->{patch};
+ my $reviewer = $testCase->{reviewer};
+ my $epochTime = $testCase->{epochTime};
+
+ my $got = VCSUtils::setChangeLogDateAndReviewer($patch, $reviewer, $epochTime);
+ my $expectedReturn = $testCase->{expectedReturn};
+
+ is($got, $expectedReturn, "$testNameStart return value.");
+}
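+
+# A note on the fixtures above: epochTime 1273414321 falls on
+# 2010-05-09 (UTC), which is why the topmost ChangeLog date flips from
+# 2010-05-08 to 2010-05-09 in both expected results, while the
+# "Reviewed by" line inside the ChangeLog hunk is rewritten only when a
+# reviewer is defined; the "NOBODY (OOPS!)" in the leading junk is left
+# alone in both cases.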
diff --git a/src/third_party/blink/Tools/Scripts/webkitperl/httpd.pm b/src/third_party/blink/Tools/Scripts/webkitperl/httpd.pm
new file mode 100644
index 0000000..f61dfa0
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitperl/httpd.pm
@@ -0,0 +1,347 @@
+# Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved
+# Copyright (C) 2006 Alexey Proskuryakov (ap@nypop.com)
+# Copyright (C) 2010 Andras Becsi (abecsi@inf.u-szeged.hu), University of Szeged
+# Copyright (C) 2011 Research In Motion Limited. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Module to share code to start and stop the Apache daemon.
+
+use strict;
+use warnings;
+
+use File::Copy;
+use File::Path;
+use File::Spec;
+use File::Spec::Functions;
+use Fcntl ':flock';
+use IPC::Open2;
+
+use webkitdirs;
+
+BEGIN {
+ use Exporter ();
+ our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, %EXPORT_TAGS);
+ $VERSION = 1.00;
+ @ISA = qw(Exporter);
+ @EXPORT = qw(&getHTTPDPath
+ &hasHTTPD
+ &getHTTPDConfigPathForTestDirectory
+ &getDefaultConfigForTestDirectory
+ &openHTTPD
+ &closeHTTPD
+ &setShouldWaitForUserInterrupt
+ &waitForHTTPDLock
+ &getWaitTime);
+ %EXPORT_TAGS = ( );
+ @EXPORT_OK = ();
+}
+
+my $tmpDir = "/tmp";
+my $httpdLockPrefix = "WebKitHttpd.lock.";
+my $myLockFile;
+my $exclusiveLockFile = File::Spec->catfile($tmpDir, "WebKit.lock");
+my $httpdPidDir = File::Spec->catfile($tmpDir, "WebKit");
+my $httpdPidFile = File::Spec->catfile($httpdPidDir, "httpd.pid");
+my $httpdPid;
+my $waitForUserInterrupt = 0;
+my $waitBeginTime;
+my $waitEndTime;
+
+$SIG{'INT'} = 'handleInterrupt';
+$SIG{'TERM'} = 'handleInterrupt';
+
+sub getHTTPDPath
+{
+ my $httpdPath;
+ if (isDebianBased()) {
+ $httpdPath = "/usr/sbin/apache2";
+ } else {
+ $httpdPath = "/usr/sbin/httpd";
+ }
+ return $httpdPath;
+}
+
+sub hasHTTPD
+{
+ my @command = (getHTTPDPath(), "-v");
+ return system(@command) == 0;
+}
+
+sub getApacheVersion
+{
+ my $httpdPath = getHTTPDPath();
+ my $version = `$httpdPath -v`;
+ $version =~ s/.*Server version: Apache\/(\d+\.\d+).*/$1/s;
+ return $version;
+}
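+
+# Example (illustrative): `httpd -v` typically prints a line such as
+# "Server version: Apache/2.2.22 (Unix)", from which getApacheVersion()
+# extracts "2.2".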
+
+sub getDefaultConfigForTestDirectory
+{
+ my ($testDirectory) = @_;
+ die "No test directory has been specified." unless ($testDirectory);
+
+ my $httpdConfig = getHTTPDConfigPathForTestDirectory($testDirectory);
+ my $documentRoot = "$testDirectory/http/tests";
+ my $jsTestResourcesDirectory = $testDirectory . "/fast/js/resources";
+ my $mediaResourcesDirectory = $testDirectory . "/media";
+ my $typesConfig = "$testDirectory/http/conf/mime.types";
+ my $httpdLockFile = File::Spec->catfile($httpdPidDir, "httpd.lock");
+ my $httpdScoreBoardFile = File::Spec->catfile($httpdPidDir, "httpd.scoreboard");
+
+ my @httpdArgs = (
+ "-f", "$httpdConfig",
+ "-C", "DocumentRoot \"$documentRoot\"",
+ # Set up a link to where the js test templates are stored; use -c so that mod_alias will already be loaded.
+ "-c", "Alias /js-test-resources \"$jsTestResourcesDirectory\"",
+ "-c", "Alias /media-resources \"$mediaResourcesDirectory\"",
+ "-c", "TypesConfig \"$typesConfig\"",
+ # Apache wouldn't run CGIs with permissions==700 otherwise
+ "-c", "User \"#$<\"",
+ "-c", "PidFile \"$httpdPidFile\"",
+ "-c", "ScoreBoardFile \"$httpdScoreBoardFile\"",
+ );
+
+ if (getApacheVersion() eq "2.2") {
+ push(@httpdArgs, "-c", "LockFile \"$httpdLockFile\"");
+ }
+
+ # FIXME: Enable this on Windows once <rdar://problem/5345985> is fixed
+ # The version of Apache we use with Cygwin does not support SSL
+ my $sslCertificate = "$testDirectory/http/conf/webkit-httpd.pem";
+ push(@httpdArgs, "-c", "SSLCertificateFile \"$sslCertificate\"") unless isCygwin();
+
+ return @httpdArgs;
+}
+
+sub getHTTPDConfigPathForTestDirectory
+{
+ my ($testDirectory) = @_;
+ die "No test directory has been specified." unless ($testDirectory);
+
+ my $httpdConfig;
+ my $httpdPath = getHTTPDPath();
+ my $httpdConfDirectory = "$testDirectory/http/conf/";
+ my $apacheVersion = getApacheVersion();
+
+ if (isCygwin()) {
+ my $libPHP4DllPath = "/usr/lib/apache/libphp4.dll";
+ # FIXME: run-webkit-tests should not modify the user's system, especially not in this method!
+ unless (-x $libPHP4DllPath) {
+ copy("$httpdConfDirectory/libphp4.dll", $libPHP4DllPath);
+ chmod(0755, $libPHP4DllPath);
+ }
+ $httpdConfig = "cygwin-httpd.conf"; # This is an apache 1.3 config.
+ } elsif (isDebianBased()) {
+ $httpdConfig = "debian-httpd-$apacheVersion.conf";
+ } elsif (isFedoraBased()) {
+ $httpdConfig = "fedora-httpd-$apacheVersion.conf";
+ } else {
+ # All other ports use apache2, so just use our default apache2 config.
+ $httpdConfig = "apache2-httpd.conf";
+ }
+ return "$httpdConfDirectory/$httpdConfig";
+}
+
+sub openHTTPD(@)
+{
+ my (@args) = @_;
+ die "No HTTPD configuration has been specified" unless (@args);
+ mkdir($httpdPidDir, 0755);
+ die "No write permissions to $httpdPidDir" unless (-w $httpdPidDir);
+
+ if (-f $httpdPidFile) {
+ open (PIDFILE, $httpdPidFile);
+ my $oldPid = <PIDFILE>;
+ chomp $oldPid;
+ close PIDFILE;
+ if (0 != kill 0, $oldPid) {
+ print "\nhttpd is already running: pid $oldPid, killing...\n";
+ if (!killHTTPD($oldPid)) {
+ cleanUp();
+ die "Timed out waiting for httpd to quit";
+ }
+ }
+ unlink $httpdPidFile;
+ }
+
+ my $httpdPath = getHTTPDPath();
+
+ open2(">&1", \*HTTPDIN, $httpdPath, @args);
+
+ my $retryCount = 20;
+ while (!-f $httpdPidFile && $retryCount) {
+ sleep 1;
+ --$retryCount;
+ }
+
+ if (!$retryCount) {
+ cleanUp();
+ die "Timed out waiting for httpd to start";
+ }
+
+ if (open(PIDFILE, $httpdPidFile)) {
+ $httpdPid = <PIDFILE>;
+ chomp $httpdPid if $httpdPid;
+ close PIDFILE;
+ }
+
+ waitpid($httpdPid, 0) if ($waitForUserInterrupt && $httpdPid);
+
+ return 1;
+}
+
+sub closeHTTPD
+{
+ close HTTPDIN;
+ my $succeeded = killHTTPD($httpdPid);
+ cleanUp();
+ unless ($succeeded) {
+ print STDERR "Timed out waiting for httpd to terminate!\n" unless $succeeded;
+ return 0;
+ }
+ return 1;
+}
+
+sub killHTTPD
+{
+ my ($pid) = @_;
+
+ return 1 unless $pid;
+
+ kill 15, $pid;
+
+ my $retryCount = 20;
+ while (kill(0, $pid) && $retryCount) {
+ sleep 1;
+ --$retryCount;
+ }
+ return $retryCount != 0;
+}
+
+sub setShouldWaitForUserInterrupt
+{
+ $waitForUserInterrupt = 1;
+}
+
+sub handleInterrupt
+{
+ # On Cygwin, when we receive a signal, Apache is still running, so we need
+ # to kill it. On other platforms (at least Mac OS X), Apache will have
+ # already been killed, and trying to kill it again will cause us to hang.
+ # All we need to do in this case is clean up our own files.
+ if (isCygwin()) {
+ closeHTTPD();
+ } else {
+ cleanUp();
+ }
+
+ print "\n";
+ exit(1);
+}
+
+sub cleanUp
+{
+ rmdir $httpdPidDir;
+ unlink $exclusiveLockFile;
+ unlink $myLockFile if $myLockFile;
+}
+
+sub extractLockNumber
+{
+ my ($lockFile) = @_;
+ return -1 unless $lockFile;
+ return substr($lockFile, length($httpdLockPrefix));
+}
+
+sub getLockFiles
+{
+ opendir(TMPDIR, $tmpDir) or die "Could not open " . $tmpDir . ".";
+ my @lockFiles = grep {m/^\Q$httpdLockPrefix\E\d+$/} readdir(TMPDIR);
+ @lockFiles = sort { extractLockNumber($a) <=> extractLockNumber($b) } @lockFiles;
+ closedir(TMPDIR);
+ return @lockFiles;
+}
+
+sub getNextAvailableLockNumber
+{
+ my @lockFiles = getLockFiles();
+ return 0 unless @lockFiles;
+ return extractLockNumber($lockFiles[-1]) + 1;
+}
+
+sub getLockNumberForCurrentRunning
+{
+ my @lockFiles = getLockFiles();
+ return 0 unless @lockFiles;
+ return extractLockNumber($lockFiles[0]);
+}
+
+sub waitForHTTPDLock
+{
+ $waitBeginTime = time;
+ scheduleHttpTesting();
+ # If we are the only one waiting for Apache just run the tests without any further checking
+ if (scalar getLockFiles() > 1) {
+ my $currentLockFile = File::Spec->catfile($tmpDir, "$httpdLockPrefix" . getLockNumberForCurrentRunning());
+ my $currentLockPid;
+ $currentLockPid = <SCHEDULER_LOCK> if (-f $currentLockFile && open(SCHEDULER_LOCK, "<$currentLockFile"));
+ # Wait until we are allowed to run the http tests
+ while ($currentLockPid && $currentLockPid != $$) {
+ $currentLockFile = File::Spec->catfile($tmpDir, "$httpdLockPrefix" . getLockNumberForCurrentRunning());
+ if ($currentLockFile eq $myLockFile) {
+ $currentLockPid = <SCHEDULER_LOCK> if open(SCHEDULER_LOCK, "<$currentLockFile");
+ if ($currentLockPid != $$) {
+ print STDERR "\nPID mismatch.\n";
+ last;
+ }
+ } else {
+ sleep 1;
+ }
+ }
+ }
+ $waitEndTime = time;
+}
+
+sub scheduleHttpTesting
+{
+ # We need an exclusive lock file to avoid deadlocks and starvation and ensure that the scheduler lock numbers are sequential.
+ # The scheduler locks are used to schedule the running test sessions in first come first served order.
+ while (!(open(SEQUENTIAL_GUARD_LOCK, ">$exclusiveLockFile") && flock(SEQUENTIAL_GUARD_LOCK, LOCK_EX|LOCK_NB))) {}
+ $myLockFile = File::Spec->catfile($tmpDir, "$httpdLockPrefix" . getNextAvailableLockNumber());
+ open(SCHEDULER_LOCK, ">$myLockFile");
+ print SCHEDULER_LOCK "$$";
+ print SEQUENTIAL_GUARD_LOCK "$$";
+ close(SCHEDULER_LOCK);
+ close(SEQUENTIAL_GUARD_LOCK);
+ unlink $exclusiveLockFile;
+}
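+
+# Illustrative picture of the protocol above (hypothetical /tmp state):
+#   /tmp/WebKit.lock          - short-lived guard held while picking a number
+#   /tmp/WebKitHttpd.lock.0   - first waiting test session (its PID inside)
+#   /tmp/WebKitHttpd.lock.1   - second waiting test session
+# Sessions run in ascending lock-number order; waitForHTTPDLock() polls
+# getLockNumberForCurrentRunning() until its own lock file holds the
+# lowest number.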
+
+sub getWaitTime
+{
+ my $waitTime = 0;
+ if ($waitBeginTime && $waitEndTime) {
+ $waitTime = $waitEndTime - $waitBeginTime;
+ }
+ return $waitTime;
+}
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/__init__.py
new file mode 100644
index 0000000..b376bf2
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/__init__.py
@@ -0,0 +1,13 @@
+# Required for Python to search this directory for module files
+
+# Keep this file free of any code or import statements that could
+# cause either an error to occur or a log message to be logged.
+# This ensures that calling code can import initialization code from
+# webkitpy before any errors occur or log messages are emitted due to code in this file.
+# Initialization code can include things like version-checking code and
+# logging configuration code.
+#
+# We do not execute any version-checking code or logging configuration
+# code in this file so that callers can opt-in as they want. This also
+# allows different callers to choose different initialization code,
+# as necessary.
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/bindings/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/bindings/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/bindings/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/bindings/main.py b/src/third_party/blink/Tools/Scripts/webkitpy/bindings/main.py
new file mode 100644
index 0000000..30a6119
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/bindings/main.py
@@ -0,0 +1,370 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from contextlib import contextmanager
+import filecmp
+import fnmatch
+import os
+import re
+import shutil
+import sys
+import tempfile
+
+from webkitpy.common.system.executive import Executive
+
+# Source/ path is needed both to find input IDL files, and to import other
+# Python modules.
+module_path = os.path.dirname(__file__)
+source_path = os.path.normpath(os.path.join(module_path, os.pardir, os.pardir,
+ os.pardir, os.pardir, 'Source'))
+sys.path.append(source_path) # for Source/bindings imports
+
+from bindings.scripts.code_generator_v8 import CodeGeneratorUnionType
+import bindings.scripts.compute_interfaces_info_individual
+from bindings.scripts.compute_interfaces_info_individual import InterfaceInfoCollector
+import bindings.scripts.compute_interfaces_info_overall
+from bindings.scripts.compute_interfaces_info_overall import compute_interfaces_info_overall, interfaces_info
+from bindings.scripts.idl_compiler import IdlCompilerDictionaryImpl, IdlCompilerV8
+from bindings.scripts.idl_reader import IdlReader
+from bindings.scripts.utilities import idl_filename_to_component, write_file
+
+
+PASS_MESSAGE = 'All tests PASS!'
+FAIL_MESSAGE = """Some tests FAIL!
+To update the reference files, execute:
+ run-bindings-tests --reset-results
+
+If the failures are not due to your changes, test results may be out of sync;
+please rebaseline them in a separate CL, after checking that tests fail in ToT.
+In CL, please set:
+NOTRY=true
+TBR=(someone in Source/bindings/OWNERS or WATCHLISTS:bindings)
+"""
+
+DEPENDENCY_IDL_FILES = frozenset([
+ 'TestImplements.idl',
+ 'TestImplements2.idl',
+ 'TestImplements3.idl',
+ 'TestPartialInterface.idl',
+ 'TestPartialInterface2.idl',
+ 'TestPartialInterface3.idl',
+])
+
+# core/inspector/InspectorInstrumentation.idl is not a valid Blink IDL.
+NON_BLINK_IDL_FILES = frozenset([
+ 'InspectorInstrumentation.idl',
+])
+
+COMPONENT_DIRECTORY = frozenset(['core', 'modules'])
+
+test_input_directory = os.path.join(source_path, 'bindings', 'tests', 'idls')
+reference_directory = os.path.join(source_path, 'bindings', 'tests', 'results')
+
+# component -> set of union types
+union_types = {}
+
+@contextmanager
+def TemporaryDirectory():
+ """Wrapper for tempfile.mkdtemp() so it's usable with 'with' statement.
+
+ Simple backport of tempfile.TemporaryDirectory from Python 3.2.
+ """
+ name = tempfile.mkdtemp()
+ try:
+ yield name
+ finally:
+ shutil.rmtree(name)
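+
+# Illustrative usage (mirroring run_bindings_tests() below; this is
+# Python 2 era code, hence the print statement):
+#
+#   with TemporaryDirectory() as temp_dir:
+#       print 'generating into %s' % temp_dir
+#   # the directory tree is removed once the block exits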
+
+
+def generate_interface_dependencies(output_directory, test_input_directory, component_directories,
+ ignore_idl_files, root_directory, extended_attributes_path):
+ def idl_paths_recursive(directory):
+ # This is slow, especially on Windows, due to os.walk making
+ # excess stat() calls. Faster versions may appear in Python 3.5 or
+ # later:
+ # https://github.com/benhoyt/scandir
+ # http://bugs.python.org/issue11406
+ idl_paths = []
+ for dirpath, _, files in os.walk(directory):
+ idl_paths.extend(os.path.join(dirpath, filename)
+ for filename in fnmatch.filter(files, '*.idl'))
+ return idl_paths
+
+ def collect_blink_idl_paths():
+ """Returns IDL file paths which blink actually uses."""
+ idl_paths = []
+ for component in component_directories:
+ directory = os.path.join(source_path, component)
+ idl_paths.extend(idl_paths_recursive(directory))
+ return idl_paths
+
+ def collect_interfaces_info(idl_path_list):
+ info_collector = InterfaceInfoCollector(root_directory, extended_attributes_path)
+ for idl_path in idl_path_list:
+ if os.path.basename(idl_path) in ignore_idl_files:
+ continue
+ info_collector.collect_info(idl_path)
+ info = info_collector.get_info_as_dict()
+ # TestDictionary.{h,cpp} are placed under
+ # Source/bindings/tests/idls/core. However, IdlCompiler generates
+ # TestDictionary.{h,cpp} by using relative_dir.
+ # So the files will be generated under
+ # output_dir/core/bindings/tests/idls/core.
+ # To avoid this issue, we need to clear relative_dir here.
+ for value in info['interfaces_info'].itervalues():
+ value['relative_dir'] = ''
+ # Merge component-wide information.
+ component_info = info_collector.get_component_info_as_dict()
+ info.update(component_info)
+ return info
+
+ # We compute interfaces info for *all* IDL files, not just test IDL
+ # files, as code generator output depends on inheritance (both ancestor
+ # chain and inherited extended attributes), and some real interfaces
+ # are special-cased, such as Node.
+ #
+ # For example, when testing the behavior of interfaces that inherit
+ # from Node, we also need to know that these inherit from EventTarget,
+ # since this is also special-cased and Node inherits from EventTarget,
+ # but this inheritance information requires computing dependencies for
+ # the real Node.idl file.
+ non_test_idl_paths = collect_blink_idl_paths()
+ # For bindings test IDL files, we collect interfaces info for each
+ # component so that we can generate union type containers separately.
+ test_idl_paths = {}
+ for component in component_directories:
+ test_idl_paths[component] = idl_paths_recursive(
+ os.path.join(test_input_directory, component))
+ # 2nd-stage computation: individual, then overall
+ #
+ # Properly should compute separately by component (currently test
+ # includes are invalid), but that's brittle (would need to update this file
+ # for each new component) and doesn't test the code generator any better
+ # than using a single component.
+ non_test_interfaces_info = collect_interfaces_info(non_test_idl_paths)
+ test_interfaces_info = {}
+ for component, paths in test_idl_paths.iteritems():
+ test_interfaces_info[component] = collect_interfaces_info(paths)
+ # In order to allow test IDL files to override the production IDL files if
+ # they have the same interface name, process the test IDL files after the
+ # non-test IDL files.
+ info_individuals = [non_test_interfaces_info] + test_interfaces_info.values()
+ compute_interfaces_info_overall(info_individuals)
+ # 3rd-stage: union types
+ # We only process union types which are defined under
+ # Source/bindings/tests/idls. Otherwise, the result of union type
+ # container classes will be affected by non-test IDL files.
+ for component, interfaces_info in test_interfaces_info.iteritems():
+ union_types[component] = interfaces_info['union_types']
+
+
+def bindings_tests(output_directory, verbose, reference_directory,
+ test_input_directory, idl_compiler_constructor,
+ code_generator_constructor,
+ component_directories, ignore_idl_files,
+ dependency_idl_files, root_directory,
+ extended_attributes_path, generate_union_containers):
+ executive = Executive()
+
+ def list_files(directory):
+ files = []
+ for component in os.listdir(directory):
+ if component not in component_directories:
+ continue
+ directory_with_component = os.path.join(directory, component)
+ for filename in os.listdir(directory_with_component):
+ files.append(os.path.join(directory_with_component, filename))
+ return files
+
+ def diff(filename1, filename2):
+ # Python's difflib module is too slow, especially on long output, so
+ # run external diff(1) command
+ cmd = ['diff',
+ '-u', # unified format
+ '-N', # treat absent files as empty
+ filename1,
+ filename2]
+ # Return the output and don't raise an exception, even though diff(1)
+ # exits non-zero when the files differ.
+ return executive.run_command(cmd, error_handler=lambda x: None)
+
+ def is_cache_file(filename):
+ return filename.endswith('.cache')
+
+ def delete_cache_files():
+ # FIXME: Instead of deleting cache files, don't generate them.
+ cache_files = [path for path in list_files(output_directory)
+ if is_cache_file(os.path.basename(path))]
+ for cache_file in cache_files:
+ os.remove(cache_file)
+
+ def identical_file(reference_filename, output_filename):
+ reference_basename = os.path.basename(reference_filename)
+
+ if not os.path.isfile(reference_filename):
+ print 'Missing reference file!'
+ print '(if adding new test, update reference files)'
+ print reference_basename
+ print
+ return False
+
+ if not filecmp.cmp(reference_filename, output_filename):
+ # cmp is much faster than diff, and the usual case is "no difference",
+ # so only run diff if cmp detects a difference.
+ print 'FAIL: %s' % reference_basename
+ print diff(reference_filename, output_filename)
+ return False
+
+ if verbose:
+ print 'PASS: %s' % reference_basename
+ return True
+
+ def identical_output_files(output_files):
+ reference_files = [os.path.join(reference_directory,
+ os.path.relpath(path, output_directory))
+ for path in output_files]
+ return all([identical_file(reference_filename, output_filename)
+ for (reference_filename, output_filename) in zip(reference_files, output_files)])
+
+ def no_excess_files(output_files):
+ generated_files = set([os.path.relpath(path, output_directory)
+ for path in output_files])
+ # Add subversion working copy directories in core and modules.
+ for component in component_directories:
+ generated_files.add(os.path.join(component, '.svn'))
+
+ excess_files = []
+ for path in list_files(reference_directory):
+ relpath = os.path.relpath(path, reference_directory)
+ if relpath not in generated_files:
+ excess_files.append(relpath)
+ if excess_files:
+ print ('Excess reference files! '
+ '(probably cruft from renaming or deleting):\n' +
+ '\n'.join(excess_files))
+ return False
+ return True
+
+ def generate_union_type_containers(output_directory, component):
+ generator = CodeGeneratorUnionType(
+ interfaces_info, cache_dir=None, output_dir=output_directory,
+ target_component=component)
+ outputs = generator.generate_code(union_types[component])
+ for output_path, output_code in outputs:
+ write_file(output_code, output_path, only_if_changed=True)
+
+ try:
+ generate_interface_dependencies(output_directory, test_input_directory, component_directories,
+ ignore_idl_files, root_directory, extended_attributes_path)
+ for component in component_directories:
+ output_dir = os.path.join(output_directory, component)
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+
+ if generate_union_containers:
+ generate_union_type_containers(output_dir, component)
+ idl_compiler = idl_compiler_constructor(
+ code_generator_constructor,
+ output_dir,
+ interfaces_info=interfaces_info,
+ only_if_changed=True,
+ extended_attributes_filepath=extended_attributes_path)
+ if component == 'core':
+ partial_interface_output_dir = os.path.join(output_directory,
+ 'modules')
+ if not os.path.exists(partial_interface_output_dir):
+ os.makedirs(partial_interface_output_dir)
+ idl_partial_interface_compiler = idl_compiler_constructor(
+ partial_interface_output_dir,
+ interfaces_info=interfaces_info,
+ only_if_changed=True,
+ target_component='modules',
+ extended_attributes_filepath=extended_attributes_path)
+ else:
+ idl_partial_interface_compiler = None
+
+ dictionary_impl_compiler = IdlCompilerDictionaryImpl(
+ output_dir, interfaces_info=interfaces_info,
+ only_if_changed=True,
+ extended_attributes_filepath=extended_attributes_path)
+
+ idl_filenames = []
+ input_directory = os.path.join(test_input_directory, component)
+ for filename in os.listdir(input_directory):
+ if (filename.endswith('.idl') and
+ # Dependencies aren't built
+ # (they are used by the dependent)
+ filename not in dependency_idl_files):
+ idl_filenames.append(
+ os.path.realpath(
+ os.path.join(input_directory, filename)))
+ for idl_path in idl_filenames:
+ idl_basename = os.path.basename(idl_path)
+ idl_compiler.compile_file(idl_path)
+ definition_name, _ = os.path.splitext(idl_basename)
+ if definition_name in interfaces_info:
+ interface_info = interfaces_info[definition_name]
+ if interface_info['is_dictionary']:
+ dictionary_impl_compiler.compile_file(idl_path)
+ if component == 'core' and interface_info['dependencies_other_component_full_paths']:
+ idl_partial_interface_compiler.compile_file(idl_path)
+ if verbose:
+ print 'Compiled: %s' % idl_path
+ finally:
+ delete_cache_files()
+
+ # Detect all changes
+ output_files = list_files(output_directory)
+ passed = identical_output_files(output_files)
+ passed &= no_excess_files(output_files)
+
+ if passed:
+ if verbose:
+ print
+ print PASS_MESSAGE
+ return 0
+ print
+ print FAIL_MESSAGE
+ return 1
+
+
+def run_bindings_tests(reset_results, verbose, args=None):
+ # Generate output into the reference directory if resetting results, or
+ # a temp directory if not.
+ if not args:
+ # Default args for blink
+ args = {
+ 'idl_compiler_constructor': IdlCompilerV8,
+ 'test_input_directory': test_input_directory,
+ 'reference_directory': reference_directory,
+ 'component_directories': COMPONENT_DIRECTORY,
+ 'ignore_idl_files': NON_BLINK_IDL_FILES,
+ 'dependency_idl_files': DEPENDENCY_IDL_FILES,
+ 'generate_union_containers': True,
+ }
+ if reset_results:
+ print 'Resetting results'
+ return bindings_tests(args['reference_directory'], verbose, **args)
+ with TemporaryDirectory() as temp_dir:
+ return bindings_tests(temp_dir, verbose, **args)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py
new file mode 100644
index 0000000..3133133
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py
@@ -0,0 +1,358 @@
+# Copyright (C) 2011, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import copy
+import logging
+
+from webkitpy.common.memoized import memoized
+
+_log = logging.getLogger(__name__)
+
+
+# FIXME: Should this function be somewhere more general?
+def _invert_dictionary(dictionary):
+ inverted_dictionary = {}
+ for key, value in dictionary.items():
+ if inverted_dictionary.get(value):
+ inverted_dictionary[value].append(key)
+ else:
+ inverted_dictionary[value] = [key]
+ return inverted_dictionary
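+
+# Example (illustrative; list order follows dict iteration order):
+#   _invert_dictionary({'win': 'aaa', 'mac': 'aaa', 'linux': 'bbb'})
+#   => {'aaa': ['win', 'mac'], 'bbb': ['linux']}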
+
+
+class BaselineOptimizer(object):
+ ROOT_LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
+
+ def __init__(self, host, port, port_names, skip_scm_commands):
+ self._filesystem = host.filesystem
+ self._skip_scm_commands = skip_scm_commands
+ self._files_to_delete = []
+ self._files_to_add = []
+ self._scm = host.scm()
+ self._default_port = port
+ self._ports = {}
+ for port_name in port_names:
+ self._ports[port_name] = host.port_factory.get(port_name)
+
+ self._webkit_base = port.webkit_base()
+ self._layout_tests_dir = port.layout_tests_dir()
+
+ # Only used by unittests.
+ self.new_results_by_directory = []
+
+ def _baseline_root(self, baseline_name):
+ virtual_suite = self._virtual_suite(baseline_name)
+ if virtual_suite:
+ return self._filesystem.join(self.ROOT_LAYOUT_TESTS_DIRECTORY, virtual_suite.name)
+ return self.ROOT_LAYOUT_TESTS_DIRECTORY
+
+ def _baseline_search_path(self, port, baseline_name):
+ virtual_suite = self._virtual_suite(baseline_name)
+ if virtual_suite:
+ return port.virtual_baseline_search_path(baseline_name)
+ return port.baseline_search_path()
+
+ def _virtual_suite(self, baseline_name):
+ return self._default_port.lookup_virtual_suite(baseline_name)
+
+ def _virtual_base(self, baseline_name):
+ return self._default_port.lookup_virtual_test_base(baseline_name)
+
+ def _relative_baseline_search_paths(self, port, baseline_name):
+ baseline_search_path = self._baseline_search_path(port, baseline_name)
+ baseline_root = self._baseline_root(baseline_name)
+ relative_paths = [self._filesystem.relpath(path, self._webkit_base) for path in baseline_search_path]
+ return relative_paths + [baseline_root]
+
+ def _join_directory(self, directory, baseline_name):
+ # This code is complicated because both the directory name and the baseline_name have the virtual
+ # test suite in the name and the virtual baseline name is not a strict superset of the non-virtual name.
+ # For example, virtual/gpu/fast/canvas/foo-expected.png corresponds to fast/canvas/foo-expected.png and
+ # the baseline directories are like platform/mac/virtual/gpu/fast/canvas. So, to get the path
+ # to the baseline in the platform directory, we need to append just foo-expected.png to the directory.
+ virtual_suite = self._virtual_suite(baseline_name)
+ if virtual_suite:
+ baseline_name_without_virtual = baseline_name[len(virtual_suite.name) + 1:]
+ else:
+ baseline_name_without_virtual = baseline_name
+ return self._filesystem.join(self._scm.checkout_root, directory, baseline_name_without_virtual)
+
+ def read_results_by_directory(self, baseline_name):
+ results_by_directory = {}
+ directories = reduce(set.union, map(set, [self._relative_baseline_search_paths(port, baseline_name) for port in self._ports.values()]))
+
+ for directory in directories:
+ path = self._join_directory(directory, baseline_name)
+ if self._filesystem.exists(path):
+ results_by_directory[directory] = self._filesystem.sha1(path)
+ return results_by_directory
+
+ def _results_by_port_name(self, results_by_directory, baseline_name):
+ results_by_port_name = {}
+ for port_name, port in self._ports.items():
+ for directory in self._relative_baseline_search_paths(port, baseline_name):
+ if directory in results_by_directory:
+ results_by_port_name[port_name] = results_by_directory[directory]
+ break
+ return results_by_port_name
+
+ @memoized
+ def _directories_immediately_preceding_root(self, baseline_name):
+ directories = set()
+ for port in self._ports.values():
+ directory = self._filesystem.relpath(self._baseline_search_path(port, baseline_name)[-1], self._webkit_base)
+ directories.add(directory)
+ return directories
+
+ def _optimize_result_for_root(self, new_results_by_directory, baseline_name):
+ # The root directory (i.e. LayoutTests) is the only one that doesn't correspond
+ # to a specific platform. As such, it's the only one where the baseline in fallback directories
+ # immediately before it can be promoted up, i.e. if win and mac
+ # have the same baseline, then it can be promoted up to be the LayoutTests baseline.
+ # All other baselines can only be removed if they're redundant with a baseline earlier
+ # in the fallback order. They can never be promoted up.
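+ # Example (illustrative): if platform/win and platform/mac are the
+ # only directories immediately preceding the root and both hold the
+ # sha1 "aaa", the LayoutTests baseline becomes "aaa" and both platform
+ # copies are dropped; if their sha1s differ, the root baseline is
+ # unreachable and is deleted instead.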
+ directories_immediately_preceding_root = self._directories_immediately_preceding_root(baseline_name)
+
+ shared_result = None
+ root_baseline_unused = False
+ for directory in directories_immediately_preceding_root:
+ this_result = new_results_by_directory.get(directory)
+
+ # If any of these directories don't have a baseline, there's no optimization we can do.
+ if not this_result:
+ return
+
+ if not shared_result:
+ shared_result = this_result
+ elif shared_result != this_result:
+ root_baseline_unused = True
+
+ baseline_root = self._baseline_root(baseline_name)
+
+ # The root baseline is unused if all the directories immediately preceding the root
+ # have a baseline, but have different baselines, so the baselines can't be promoted up.
+ if root_baseline_unused:
+ if baseline_root in new_results_by_directory:
+ del new_results_by_directory[baseline_root]
+ return
+
+ new_results_by_directory[baseline_root] = shared_result
+ for directory in directories_immediately_preceding_root:
+ del new_results_by_directory[directory]
+
+ def _find_optimal_result_placement(self, baseline_name):
+ results_by_directory = self.read_results_by_directory(baseline_name)
+ results_by_port_name = self._results_by_port_name(results_by_directory, baseline_name)
+ port_names_by_result = _invert_dictionary(results_by_port_name)
+
+ new_results_by_directory = self._remove_redundant_results(results_by_directory, results_by_port_name, port_names_by_result, baseline_name)
+ self._optimize_result_for_root(new_results_by_directory, baseline_name)
+
+ return results_by_directory, new_results_by_directory
+
+ def _remove_redundant_results(self, results_by_directory, results_by_port_name, port_names_by_result, baseline_name):
+ new_results_by_directory = copy.copy(results_by_directory)
+ for port_name, port in self._ports.items():
+ current_result = results_by_port_name.get(port_name)
+
+ # This happens if we're missing baselines for a port.
+ if not current_result:
+ continue
+
+ fallback_path = self._relative_baseline_search_paths(port, baseline_name)
+ current_index, current_directory = self._find_in_fallbackpath(fallback_path, current_result, new_results_by_directory)
+ for index in range(current_index + 1, len(fallback_path)):
+ new_directory = fallback_path[index]
+ if new_directory not in new_results_by_directory:
+ # No result for this baseline in this directory.
+ continue
+ elif new_results_by_directory[new_directory] == current_result:
+ # The result for new_directory is redundant with the result earlier in the fallback order.
+ if current_directory in new_results_by_directory:
+ del new_results_by_directory[current_directory]
+ else:
+ # The new_directory contains a different result, so stop trying to push results up.
+ break
+
+ return new_results_by_directory
+
+ def _find_in_fallbackpath(self, fallback_path, current_result, results_by_directory):
+ for index, directory in enumerate(fallback_path):
+ if directory in results_by_directory and (results_by_directory[directory] == current_result):
+ return index, directory
+ assert False, "result %s not found in fallback_path %s, %s" % (current_result, fallback_path, results_by_directory)
+
+ def _platform(self, filename):
+ platform_dir = self.ROOT_LAYOUT_TESTS_DIRECTORY + self._filesystem.sep + 'platform' + self._filesystem.sep
+ if filename.startswith(platform_dir):
+ return filename.replace(platform_dir, '').split(self._filesystem.sep)[0]
+ platform_dir = self._filesystem.join(self._scm.checkout_root, platform_dir)
+ if filename.startswith(platform_dir):
+ return filename.replace(platform_dir, '').split(self._filesystem.sep)[0]
+ return '(generic)'
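+
+ # Example (illustrative): _platform() maps
+ # 'LayoutTests/platform/mac/another/test-expected.txt' to 'mac' and
+ # any path outside platform/ to '(generic)'.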
+
+ def _move_baselines(self, baseline_name, results_by_directory, new_results_by_directory):
+ data_for_result = {}
+ for directory, result in results_by_directory.items():
+ if result not in data_for_result:
+ source = self._join_directory(directory, baseline_name)
+ data_for_result[result] = self._filesystem.read_binary_file(source)
+
+ scm_files = []
+ fs_files = []
+ for directory, result in results_by_directory.items():
+ if new_results_by_directory.get(directory) != result:
+ file_name = self._join_directory(directory, baseline_name)
+ if self._scm.exists(file_name):
+ scm_files.append(file_name)
+ else:
+ fs_files.append(file_name)
+
+ if scm_files or fs_files:
+ if scm_files:
+ _log.debug(" Deleting (SCM):")
+ for platform_dir in sorted(self._platform(filename) for filename in scm_files):
+ _log.debug(" " + platform_dir)
+ if self._skip_scm_commands:
+ self._files_to_delete.extend(scm_files)
+ else:
+ self._scm.delete_list(scm_files)
+ if fs_files:
+ _log.debug(" Deleting (file system):")
+ for platform_dir in sorted(self._platform(filename) for filename in fs_files):
+ _log.debug(" " + platform_dir)
+ for filename in fs_files:
+ self._filesystem.remove(filename)
+ else:
+ _log.debug(" (Nothing to delete)")
+
+ file_names = []
+ for directory, result in new_results_by_directory.items():
+ if results_by_directory.get(directory) != result:
+ destination = self._join_directory(directory, baseline_name)
+ self._filesystem.maybe_make_directory(self._filesystem.split(destination)[0])
+ self._filesystem.write_binary_file(destination, data_for_result[result])
+ file_names.append(destination)
+
+ if file_names:
+ _log.debug(" Adding:")
+ for platform_dir in sorted(self._platform(filename) for filename in file_names):
+ _log.debug(" " + platform_dir)
+ if self._skip_scm_commands:
+ # Have adds win over deletes.
+ self._files_to_delete = list(set(self._files_to_delete) - set(file_names))
+ self._files_to_add.extend(file_names)
+ else:
+ self._scm.add_list(file_names)
+ else:
+ _log.debug(" (Nothing to add)")
+
+ def write_by_directory(self, results_by_directory, writer, indent):
+ for path in sorted(results_by_directory):
+ writer("%s%s: %s" % (indent, self._platform(path), results_by_directory[path][0:6]))
+
+ def _optimize_subtree(self, baseline_name):
+ basename = self._filesystem.basename(baseline_name)
+ results_by_directory, new_results_by_directory = self._find_optimal_result_placement(baseline_name)
+
+ if new_results_by_directory == results_by_directory:
+ if new_results_by_directory:
+ _log.debug(" %s: (already optimal)" % basename)
+ self.write_by_directory(results_by_directory, _log.debug, " ")
+ else:
+ _log.debug(" %s: (no baselines found)" % basename)
+ # This is just used for unittests. Intentionally set it to the old data if we don't modify anything.
+ self.new_results_by_directory.append(results_by_directory)
+ return True
+
+ if self._results_by_port_name(results_by_directory, baseline_name) != self._results_by_port_name(new_results_by_directory, baseline_name):
+ # This really should never happen. Just a sanity check to make sure the script fails in the case of bugs
+ # instead of committing incorrect baselines.
+ _log.error(" %s: optimization failed" % basename)
+ self.write_by_directory(results_by_directory, _log.warning, " ")
+ return False
+
+ _log.debug(" %s:" % basename)
+ _log.debug(" Before: ")
+ self.write_by_directory(results_by_directory, _log.debug, " ")
+ _log.debug(" After: ")
+ self.write_by_directory(new_results_by_directory, _log.debug, " ")
+
+ self._move_baselines(baseline_name, results_by_directory, new_results_by_directory)
+ return True
+
+ def _optimize_virtual_root(self, baseline_name, non_virtual_baseline_name):
+ virtual_root_expected_baseline_path = self._filesystem.join(self._layout_tests_dir, baseline_name)
+ if not self._filesystem.exists(virtual_root_expected_baseline_path):
+ return
+ root_sha1 = self._filesystem.sha1(virtual_root_expected_baseline_path)
+
+ results_by_directory = self.read_results_by_directory(non_virtual_baseline_name)
+ # See if all the immediate predecessors of the virtual root have the same expected result.
+ for port in self._ports.values():
+ directories = self._relative_baseline_search_paths(port, non_virtual_baseline_name)
+ for directory in directories:
+ if directory not in results_by_directory:
+ continue
+ if results_by_directory[directory] != root_sha1:
+ return
+ break
+
+ _log.debug("Deleting redundant virtual root expected result.")
+ if self._skip_scm_commands and virtual_root_expected_baseline_path in self._files_to_add:
+ self._files_to_add.remove(virtual_root_expected_baseline_path)
+ if self._scm.exists(virtual_root_expected_baseline_path):
+ _log.debug(" Deleting (SCM): " + virtual_root_expected_baseline_path)
+ if self._skip_scm_commands:
+ self._files_to_delete.append(virtual_root_expected_baseline_path)
+ else:
+ self._scm.delete(virtual_root_expected_baseline_path)
+ else:
+ _log.debug(" Deleting (file system): " + virtual_root_expected_baseline_path)
+ self._filesystem.remove(virtual_root_expected_baseline_path)
+
+ def optimize(self, baseline_name):
+ # The virtual fallback path is the virtual-suite directories with the non-virtual fallback path tacked on to the bottom.
+ # See https://docs.google.com/a/chromium.org/drawings/d/1eGdsIKzJ2dxDDBbUaIABrN4aMLD1bqJTfyxNGZsTdmg/edit for
+ # a visual representation of this.
+ #
+ # So, we can optimize the virtual path, then the virtual root and then the regular path.
+
+ self._files_to_delete = []
+ self._files_to_add = []
+ _log.debug("Optimizing regular fallback path.")
+ result = self._optimize_subtree(baseline_name)
+ non_virtual_baseline_name = self._virtual_base(baseline_name)
+ if not non_virtual_baseline_name:
+ return result, self._files_to_delete, self._files_to_add
+
+ self._optimize_virtual_root(baseline_name, non_virtual_baseline_name)
+
+ _log.debug("Optimizing non-virtual fallback path.")
+ result |= self._optimize_subtree(non_virtual_baseline_name)
+ return result, self._files_to_delete, self._files_to_add
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py
new file mode 100644
index 0000000..b0e951b
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py
@@ -0,0 +1,284 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
+from webkitpy.common.checkout.scm.scm_mock import MockSCM
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.webkit_finder import WebKitFinder
+
+
+class ExcludingMockSCM(MockSCM):
+ def __init__(self, exclusion_list, filesystem=None, executive=None):
+ MockSCM.__init__(self, filesystem, executive)
+ self._exclusion_list = exclusion_list
+
+ def exists(self, path):
+ if path in self._exclusion_list:
+ return False
+ return MockSCM.exists(self, path)
+
+ def delete(self, path):
+ return self.delete_list([path])
+
+ def delete_list(self, paths):
+ for path in paths:
+ if path in self._exclusion_list:
+ raise Exception("File is not SCM managed: " + path)
+ return MockSCM.delete_list(self, paths)
+
+ def move(self, origin, destination):
+ if origin in self._exclusion_list:
+ raise Exception("File is not SCM managed: " + origin)
+ return MockSCM.move(self, origin, destination)
+
+
+class BaselineOptimizerTest(unittest.TestCase):
+ def test_move_baselines(self):
+ host = MockHost(scm=ExcludingMockSCM(['/mock-checkout/third_party/WebKit/LayoutTests/platform/mac/another/test-expected.txt']))
+ host.filesystem.write_text_file('/mock-checkout/third_party/WebKit/LayoutTests/VirtualTestSuites', '[]')
+ host.filesystem.write_binary_file('/mock-checkout/third_party/WebKit/LayoutTests/platform/win/another/test-expected.txt', 'result A')
+ host.filesystem.write_binary_file('/mock-checkout/third_party/WebKit/LayoutTests/platform/mac/another/test-expected.txt', 'result A')
+ host.filesystem.write_binary_file('/mock-checkout/third_party/WebKit/LayoutTests/another/test-expected.txt', 'result B')
+ baseline_optimizer = BaselineOptimizer(host, host.port_factory.get(), host.port_factory.all_port_names(), skip_scm_commands=False)
+ baseline_optimizer._move_baselines('another/test-expected.txt', {
+ '/mock-checkout/third_party/WebKit/LayoutTests/platform/win': 'aaa',
+ '/mock-checkout/third_party/WebKit/LayoutTests/platform/mac': 'aaa',
+ '/mock-checkout/third_party/WebKit/LayoutTests': 'bbb',
+ }, {
+ '/mock-checkout/third_party/WebKit/LayoutTests': 'aaa',
+ })
+ self.assertEqual(host.filesystem.read_binary_file('/mock-checkout/third_party/WebKit/LayoutTests/another/test-expected.txt'), 'result A')
+
+ def test_move_baselines_skip_scm_commands(self):
+ host = MockHost(scm=ExcludingMockSCM(['/mock-checkout/third_party/WebKit/LayoutTests/platform/mac/another/test-expected.txt']))
+ host.filesystem.write_text_file('/mock-checkout/third_party/WebKit/LayoutTests/VirtualTestSuites', '[]')
+ host.filesystem.write_binary_file('/mock-checkout/third_party/WebKit/LayoutTests/platform/win/another/test-expected.txt', 'result A')
+ host.filesystem.write_binary_file('/mock-checkout/third_party/WebKit/LayoutTests/platform/mac/another/test-expected.txt', 'result A')
+ host.filesystem.write_binary_file('/mock-checkout/third_party/WebKit/LayoutTests/another/test-expected.txt', 'result B')
+ baseline_optimizer = BaselineOptimizer(host, host.port_factory.get(), host.port_factory.all_port_names(), skip_scm_commands=True)
+ baseline_optimizer._move_baselines('another/test-expected.txt', {
+ '/mock-checkout/third_party/WebKit/LayoutTests/platform/win': 'aaa',
+ '/mock-checkout/third_party/WebKit/LayoutTests/platform/mac': 'aaa',
+ '/mock-checkout/third_party/WebKit/LayoutTests': 'bbb',
+ }, {
+ '/mock-checkout/third_party/WebKit/LayoutTests/platform/linux': 'bbb',
+ '/mock-checkout/third_party/WebKit/LayoutTests': 'aaa',
+ })
+ self.assertEqual(host.filesystem.read_binary_file('/mock-checkout/third_party/WebKit/LayoutTests/another/test-expected.txt'), 'result A')
+
+ self.assertEqual(baseline_optimizer._files_to_delete, [
+ '/mock-checkout/third_party/WebKit/LayoutTests/platform/win/another/test-expected.txt',
+ ])
+
+ self.assertEqual(baseline_optimizer._files_to_add, [
+ '/mock-checkout/third_party/WebKit/LayoutTests/another/test-expected.txt',
+ '/mock-checkout/third_party/WebKit/LayoutTests/platform/linux/another/test-expected.txt',
+ ])
+
+ def _assertOptimization(self, results_by_directory, expected_new_results_by_directory, baseline_dirname='', expected_files_to_delete=None, host=None):
+ if not host:
+ host = MockHost()
+ fs = host.filesystem
+ webkit_base = WebKitFinder(fs).webkit_base()
+ baseline_name = 'mock-baseline-expected.txt'
+ fs.write_text_file(fs.join(webkit_base, 'LayoutTests', 'VirtualTestSuites'),
+ '[{"prefix": "gpu", "base": "fast/canvas", "args": ["--foo"]}]')
+
+ for dirname, contents in results_by_directory.items():
+ path = fs.join(webkit_base, 'LayoutTests', dirname, baseline_name)
+ fs.write_binary_file(path, contents)
+
+ baseline_optimizer = BaselineOptimizer(host, host.port_factory.get(), host.port_factory.all_port_names(), skip_scm_commands=expected_files_to_delete is not None)
+ self.assertTrue(baseline_optimizer.optimize(fs.join(baseline_dirname, baseline_name)))
+
+ for dirname, contents in expected_new_results_by_directory.items():
+ path = fs.join(webkit_base, 'LayoutTests', dirname, baseline_name)
+ if contents is None:
+ self.assertTrue(not fs.exists(path) or path in baseline_optimizer._files_to_delete)
+ else:
+ self.assertEqual(fs.read_binary_file(path), contents)
+
+ # Check that the files that were in the original set have been deleted where necessary.
+ for dirname in results_by_directory:
+ path = fs.join(webkit_base, 'LayoutTests', dirname, baseline_name)
+            if dirname not in expected_new_results_by_directory:
+ self.assertTrue(not fs.exists(path) or path in baseline_optimizer._files_to_delete)
+
+ if expected_files_to_delete:
+ self.assertEqual(sorted(baseline_optimizer._files_to_delete), sorted(expected_files_to_delete))
+
+ def test_linux_redundant_with_win(self):
+ self._assertOptimization({
+ 'platform/win': '1',
+ 'platform/linux': '1',
+ }, {
+ 'platform/win': '1',
+ })
+
+ def test_covers_mac_win_linux(self):
+ self._assertOptimization({
+ 'platform/mac': '1',
+ 'platform/win': '1',
+ 'platform/linux': '1',
+ '': None,
+ }, {
+ '': '1',
+ })
+
+ def test_overwrites_root(self):
+ self._assertOptimization({
+ 'platform/mac': '1',
+ 'platform/win': '1',
+ 'platform/linux': '1',
+ '': '2',
+ }, {
+ '': '1',
+ })
+
+ def test_no_new_common_directory(self):
+ self._assertOptimization({
+ 'platform/mac': '1',
+ 'platform/linux': '1',
+ '': '2',
+ }, {
+ 'platform/mac': '1',
+ 'platform/linux': '1',
+ '': '2',
+ })
+
+ def test_local_optimization(self):
+ self._assertOptimization({
+ 'platform/mac': '1',
+ 'platform/linux': '1',
+ 'platform/linux-x86': '1',
+ }, {
+ 'platform/mac': '1',
+ 'platform/linux': '1',
+ })
+
+ def test_local_optimization_skipping_a_port_in_the_middle(self):
+ self._assertOptimization({
+ 'platform/mac-snowleopard': '1',
+ 'platform/win': '1',
+ 'platform/linux-x86': '1',
+ }, {
+ 'platform/mac-snowleopard': '1',
+ 'platform/win': '1',
+ })
+
+ def test_baseline_redundant_with_root(self):
+ self._assertOptimization({
+ 'platform/mac': '1',
+ 'platform/win': '2',
+ '': '2',
+ }, {
+ 'platform/mac': '1',
+ '': '2',
+ })
+
+ def test_root_baseline_unused(self):
+ self._assertOptimization({
+ 'platform/mac': '1',
+ 'platform/win': '2',
+ '': '3',
+ }, {
+ 'platform/mac': '1',
+ 'platform/win': '2',
+ })
+
+    def test_root_baseline_unused_and_non_existent(self):
+ self._assertOptimization({
+ 'platform/mac': '1',
+ 'platform/win': '2',
+ }, {
+ 'platform/mac': '1',
+ 'platform/win': '2',
+ })
+
+ def test_virtual_root_redundant_with_actual_root(self):
+ self._assertOptimization({
+ 'virtual/gpu/fast/canvas': '2',
+ 'fast/canvas': '2',
+ }, {
+ 'virtual/gpu/fast/canvas': None,
+ 'fast/canvas': '2',
+ }, baseline_dirname='virtual/gpu/fast/canvas')
+
+ def test_virtual_root_redundant_with_ancestors(self):
+ self._assertOptimization({
+ 'virtual/gpu/fast/canvas': '2',
+ 'platform/mac/fast/canvas': '2',
+ 'platform/win/fast/canvas': '2',
+ }, {
+ 'virtual/gpu/fast/canvas': None,
+ 'fast/canvas': '2',
+ }, baseline_dirname='virtual/gpu/fast/canvas')
+
+ def test_virtual_root_redundant_with_ancestors_skip_scm_commands(self):
+ self._assertOptimization({
+ 'virtual/gpu/fast/canvas': '2',
+ 'platform/mac/fast/canvas': '2',
+ 'platform/win/fast/canvas': '2',
+ }, {
+ 'virtual/gpu/fast/canvas': None,
+ 'fast/canvas': '2',
+ },
+ baseline_dirname='virtual/gpu/fast/canvas',
+ expected_files_to_delete=[
+ '/mock-checkout/third_party/WebKit/LayoutTests/virtual/gpu/fast/canvas/mock-baseline-expected.txt',
+ '/mock-checkout/third_party/WebKit/LayoutTests/platform/mac/fast/canvas/mock-baseline-expected.txt',
+ '/mock-checkout/third_party/WebKit/LayoutTests/platform/win/fast/canvas/mock-baseline-expected.txt',
+ ])
+
+ def test_virtual_root_redundant_with_ancestors_skip_scm_commands_with_file_not_in_scm(self):
+ self._assertOptimization({
+ 'virtual/gpu/fast/canvas': '2',
+ 'platform/mac/fast/canvas': '2',
+ 'platform/win/fast/canvas': '2',
+ }, {
+ 'virtual/gpu/fast/canvas': None,
+ 'fast/canvas': '2',
+ },
+ baseline_dirname='virtual/gpu/fast/canvas',
+ expected_files_to_delete=[
+ '/mock-checkout/third_party/WebKit/LayoutTests/platform/mac/fast/canvas/mock-baseline-expected.txt',
+ '/mock-checkout/third_party/WebKit/LayoutTests/platform/win/fast/canvas/mock-baseline-expected.txt',
+ ],
+ host=MockHost(scm=ExcludingMockSCM(['/mock-checkout/third_party/WebKit/LayoutTests/virtual/gpu/fast/canvas/mock-baseline-expected.txt'])))
+
+ def test_virtual_root_not_redundant_with_ancestors(self):
+ self._assertOptimization({
+ 'virtual/gpu/fast/canvas': '2',
+ 'platform/mac/fast/canvas': '1',
+ }, {
+ 'virtual/gpu/fast/canvas': '2',
+ 'platform/mac/fast/canvas': '1',
+ }, baseline_dirname='virtual/gpu/fast/canvas')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
new file mode 100644
index 0000000..0f4c713
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
@@ -0,0 +1,182 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""WebKit's Python module for interacting with patches."""
+
+import logging
+import re
+
+_log = logging.getLogger(__name__)
+
+conversion_patterns = (
+ (re.compile("^diff --git \w/(.+) \w/(?P<FilePath>.+)"), lambda matched: "Index: " + matched.group('FilePath') + "\n"),
+ (re.compile("^new file.*"), lambda matched: "\n"),
+ (re.compile("^index (([0-9a-f]{7}\.\.[0-9a-f]{7})|([0-9a-f]{40}\.\.[0-9a-f]{40})) [0-9]{6}"), lambda matched: ("=" * 67) + "\n"),
+ (re.compile("^--- \w/(?P<FilePath>.+)"), lambda matched: "--- " + matched.group('FilePath') + "\n"),
+ (re.compile("^\+\+\+ \w/(?P<FilePath>.+)"), lambda matched: "+++ " + matched.group('FilePath') + "\n"),
+)
+
+index_pattern = re.compile(r"^Index: (?P<FilePath>.+)")
+lines_changed_pattern = re.compile(r"^@@ -(?P<OldStartLine>\d+)(,\d+)? \+(?P<NewStartLine>\d+)(,\d+)? @@")
+diff_git_pattern = re.compile(r"^diff --git \w/")
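+
+# An illustrative hunk header matched by lines_changed_pattern:
+#   "@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):"
+# captures OldStartLine=59 and NewStartLine=59; the ",6"/",7" lengths are optional.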
+
+
+def git_diff_to_svn_diff(line):
+ """Converts a git formatted diff line to a svn formatted line.
+
+ Args:
+ line: A string representing a line of the diff.
+ """
+ for pattern, conversion in conversion_patterns:
+ matched = pattern.match(line)
+ if matched:
+ return conversion(matched)
+ return line
+
+
+# This function exists so we can unit-test the get_diff_converter function.
+def svn_diff_to_svn_diff(line):
+ return line
+
+
+def get_diff_converter(lines):
+ """Gets a converter function of diff lines.
+
+ Args:
+ lines: The lines of a diff file.
+ If this line is git formatted, we'll return a
+ converter from git to SVN.
+ """
+ for i, line in enumerate(lines[:-1]):
+ # Stop when we find the first patch
+ if line[:3] == "+++" and lines[i + 1] == "---":
+ break
+ if diff_git_pattern.match(line):
+ return git_diff_to_svn_diff
+ return svn_diff_to_svn_diff
+
+_INITIAL_STATE = 1
+_DECLARED_FILE_PATH = 2
+_PROCESSING_CHUNK = 3
+
+
+class DiffFile(object):
+ """Contains the information for one file in a patch.
+
+ The field "lines" is a list which contains tuples in this format:
+ (deleted_line_number, new_line_number, line_string)
+ If deleted_line_number is zero, it means this line is newly added.
+ If new_line_number is zero, it means this line is deleted.
+ """
+ # FIXME: Tuples generally grow into classes. We should consider
+ # adding a DiffLine object.
+
+    def __init__(self, filename):
+        self.filename = filename
+        self.lines = []
+
+    def added_or_modified_line_numbers(self):
+        # This logic was moved from patchreader.py, but may not be
+        # the right API for this object long-term.
+        return [line[1] for line in self.lines if not line[0]]
+
+ def add_new_line(self, line_number, line):
+ self.lines.append((0, line_number, line))
+
+ def add_deleted_line(self, line_number, line):
+ self.lines.append((line_number, 0, line))
+
+ def add_unchanged_line(self, deleted_line_number, new_line_number, line):
+ self.lines.append((deleted_line_number, new_line_number, line))
+
+
+# If this is going to be called DiffParser, it should be a reusable parser.
+# Otherwise we should rename it to ParsedDiff or just Diff.
+class DiffParser(object):
+ """A parser for a patch file.
+
+ The field "files" is a dict whose key is the filename and value is
+ a DiffFile object.
+ """
+
+ def __init__(self, diff_input):
+ """Parses a diff.
+
+ Args:
+ diff_input: An iterable object.
+ """
+ self.files = self._parse_into_diff_files(diff_input)
+
+ # FIXME: This function is way too long and needs to be broken up.
+ def _parse_into_diff_files(self, diff_input):
+ files = {}
+ state = _INITIAL_STATE
+ current_file = None
+ old_diff_line = None
+ new_diff_line = None
+ transform_line = get_diff_converter(diff_input)
+ for line in diff_input:
+ line = line.rstrip("\n")
+ line = transform_line(line)
+
+ file_declaration = index_pattern.match(line)
+ if file_declaration:
+ filename = file_declaration.group('FilePath')
+ current_file = DiffFile(filename)
+ files[filename] = current_file
+ state = _DECLARED_FILE_PATH
+ continue
+
+ lines_changed = lines_changed_pattern.match(line)
+ if lines_changed:
+ if state != _DECLARED_FILE_PATH and state != _PROCESSING_CHUNK:
+ _log.error('Unexpected line change without file path '
+ 'declaration: %r' % line)
+ old_diff_line = int(lines_changed.group('OldStartLine'))
+ new_diff_line = int(lines_changed.group('NewStartLine'))
+ state = _PROCESSING_CHUNK
+ continue
+
+ if state == _PROCESSING_CHUNK:
+ if line.startswith('+'):
+ current_file.add_new_line(new_diff_line, line[1:])
+ new_diff_line += 1
+ elif line.startswith('-'):
+ current_file.add_deleted_line(old_diff_line, line[1:])
+ old_diff_line += 1
+ elif line.startswith(' '):
+ current_file.add_unchanged_line(old_diff_line, new_diff_line, line[1:])
+ old_diff_line += 1
+ new_diff_line += 1
+ elif line == '\\ No newline at end of file':
+ # Nothing to do. We may still have some added lines.
+ pass
+ else:
+ _log.error('Unexpected diff format when parsing a '
+ 'chunk: %r' % line)
+ return files
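+
+
+# A minimal usage sketch (illustrative; see diff_parser_unittest.py for real
+# fixtures):
+#
+#   parser = DiffParser(diff_text.splitlines())
+#   for filename, diff_file in parser.files.items():
+#       print filename, diff_file.added_or_modified_line_numbers()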
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py
new file mode 100644
index 0000000..7c4ee08
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py
@@ -0,0 +1,175 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import cStringIO as StringIO
+import diff_parser
+import re
+import unittest
+
+from webkitpy.common.checkout.diff_test_data import DIFF_TEST_DATA
+
+class DiffParserTest(unittest.TestCase):
+ maxDiff = None
+
+    def test_diff_parser(self, parser=None):
+ if not parser:
+ parser = diff_parser.DiffParser(DIFF_TEST_DATA.splitlines())
+ self.assertEqual(3, len(parser.files))
+
+ self.assertTrue('WebCore/rendering/style/StyleFlexibleBoxData.h' in parser.files)
+ diff = parser.files['WebCore/rendering/style/StyleFlexibleBoxData.h']
+ self.assertEqual(7, len(diff.lines))
+        # The first two unchanged lines.
+ self.assertEqual((47, 47), diff.lines[0][0:2])
+ self.assertEqual('', diff.lines[0][2])
+ self.assertEqual((48, 48), diff.lines[1][0:2])
+ self.assertEqual(' unsigned align : 3; // EBoxAlignment', diff.lines[1][2])
+ # The deleted line
+ self.assertEqual((50, 0), diff.lines[3][0:2])
+ self.assertEqual(' unsigned orient: 1; // EBoxOrient', diff.lines[3][2])
+
+ # The first file looks OK. Let's check the next, more complicated file.
+ self.assertTrue('WebCore/rendering/style/StyleRareInheritedData.cpp' in parser.files)
+ diff = parser.files['WebCore/rendering/style/StyleRareInheritedData.cpp']
+ # There are 3 chunks.
+ self.assertEqual(7 + 7 + 9, len(diff.lines))
+ # Around an added line.
+ self.assertEqual((60, 61), diff.lines[9][0:2])
+ self.assertEqual((0, 62), diff.lines[10][0:2])
+ self.assertEqual((61, 63), diff.lines[11][0:2])
+        # Look through the last chunk, which contains both adds and deletes.
+ self.assertEqual((81, 83), diff.lines[14][0:2])
+ self.assertEqual((82, 84), diff.lines[15][0:2])
+ self.assertEqual((83, 85), diff.lines[16][0:2])
+ self.assertEqual((84, 0), diff.lines[17][0:2])
+ self.assertEqual((0, 86), diff.lines[18][0:2])
+ self.assertEqual((0, 87), diff.lines[19][0:2])
+ self.assertEqual((85, 88), diff.lines[20][0:2])
+ self.assertEqual((86, 89), diff.lines[21][0:2])
+ self.assertEqual((87, 90), diff.lines[22][0:2])
+
+ # Check if a newly added file is correctly handled.
+ diff = parser.files['LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum']
+ self.assertEqual(1, len(diff.lines))
+ self.assertEqual((0, 1), diff.lines[0][0:2])
+
+ def test_diff_converter(self):
+ comment_lines = [
+ "Hey guys,\n",
+ "\n",
+ "See my awesome patch below!\n",
+ "\n",
+ " - Cool Hacker\n",
+ "\n",
+ ]
+
+ revision_lines = [
+ "Subversion Revision 289799\n",
+ ]
+
+ svn_diff_lines = [
+ "Index: Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
+ "===================================================================\n",
+ "--- Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
+ "+++ Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
+ "@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):\n",
+ ]
+ self.assertEqual(diff_parser.get_diff_converter(svn_diff_lines), diff_parser.svn_diff_to_svn_diff)
+ self.assertEqual(diff_parser.get_diff_converter(comment_lines + svn_diff_lines), diff_parser.svn_diff_to_svn_diff)
+ self.assertEqual(diff_parser.get_diff_converter(revision_lines + svn_diff_lines), diff_parser.svn_diff_to_svn_diff)
+
+ git_diff_lines = [
+ "diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
+ "index 3c5b45b..0197ead 100644\n",
+ "--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
+ "+++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
+ "@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):\n",
+ ]
+ self.assertEqual(diff_parser.get_diff_converter(git_diff_lines), diff_parser.git_diff_to_svn_diff)
+ self.assertEqual(diff_parser.get_diff_converter(comment_lines + git_diff_lines), diff_parser.git_diff_to_svn_diff)
+ self.assertEqual(diff_parser.get_diff_converter(revision_lines + git_diff_lines), diff_parser.git_diff_to_svn_diff)
+
+ def test_git_mnemonicprefix(self):
+        p = re.compile(r' ([ab])/')
+
+ prefixes = [
+ { 'a' : 'i', 'b' : 'w' }, # git-diff (compares the (i)ndex and the (w)ork tree)
+ { 'a' : 'c', 'b' : 'w' }, # git-diff HEAD (compares a (c)ommit and the (w)ork tree)
+ { 'a' : 'c', 'b' : 'i' }, # git diff --cached (compares a (c)ommit and the (i)ndex)
+ { 'a' : 'o', 'b' : 'w' }, # git-diff HEAD:file1 file2 (compares an (o)bject and a (w)ork tree entity)
+ { 'a' : '1', 'b' : '2' }, # git diff --no-index a b (compares two non-git things (1) and (2))
+ ]
+
+ for prefix in prefixes:
+ patch = p.sub(lambda x: " %s/" % prefix[x.group(1)], DIFF_TEST_DATA)
+ self.test_diff_parser(diff_parser.DiffParser(patch.splitlines()))
+
+ def test_git_diff_to_svn_diff(self):
+ output = """\
+Index: Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+===================================================================
+--- Tools/Scripts/webkitpy/common/checkout/diff_parser.py
++++ Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):
+ A
+ B
+ C
++D
+ E
+ F
+"""
+
+ inputfmt = StringIO.StringIO("""\
+diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+index 2ed552c4555db72df16b212547f2c125ae301a04..72870482000c0dba64ce4300ed782c03ee79b74f 100644
+--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
++++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):
+ A
+ B
+ C
++D
+ E
+ F
+""")
+ shortfmt = StringIO.StringIO("""\
+diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+index b48b162..f300960 100644
+--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
++++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):
+ A
+ B
+ C
++D
+ E
+ F
+""")
+
+ self.assertMultiLineEqual(output, ''.join(diff_parser.git_diff_to_svn_diff(x) for x in shortfmt.readlines()))
+ self.assertMultiLineEqual(output, ''.join(diff_parser.git_diff_to_svn_diff(x) for x in inputfmt.readlines()))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/diff_test_data.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/diff_test_data.py
new file mode 100644
index 0000000..5f1719d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/diff_test_data.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# FIXME: Store this as a .patch file in some new fixtures directory or similar.
+DIFF_TEST_DATA = '''diff --git a/WebCore/rendering/style/StyleFlexibleBoxData.h b/WebCore/rendering/style/StyleFlexibleBoxData.h
+index f5d5e74..3b6aa92 100644
+--- a/WebCore/rendering/style/StyleFlexibleBoxData.h
++++ b/WebCore/rendering/style/StyleFlexibleBoxData.h
+@@ -47,7 +47,6 @@ public:
+
+ unsigned align : 3; // EBoxAlignment
+ unsigned pack: 3; // EBoxAlignment
+- unsigned orient: 1; // EBoxOrient
+ unsigned lines : 1; // EBoxLines
+
+ private:
+diff --git a/WebCore/rendering/style/StyleRareInheritedData.cpp b/WebCore/rendering/style/StyleRareInheritedData.cpp
+index ce21720..324929e 100644
+--- a/WebCore/rendering/style/StyleRareInheritedData.cpp
++++ b/WebCore/rendering/style/StyleRareInheritedData.cpp
+@@ -39,6 +39,7 @@ StyleRareInheritedData::StyleRareInheritedData()
+ , textSizeAdjust(RenderStyle::initialTextSizeAdjust())
+ , resize(RenderStyle::initialResize())
+ , userSelect(RenderStyle::initialUserSelect())
++ , boxOrient(RenderStyle::initialBoxOrient())
+ {
+ }
+
+@@ -58,6 +59,7 @@ StyleRareInheritedData::StyleRareInheritedData(const StyleRareInheritedData& o)
+ , textSizeAdjust(o.textSizeAdjust)
+ , resize(o.resize)
+ , userSelect(o.userSelect)
++ , boxOrient(o.boxOrient)
+ {
+ }
+
+@@ -81,7 +83,8 @@ bool StyleRareInheritedData::operator==(const StyleRareInheritedData& o) const
+ && khtmlLineBreak == o.khtmlLineBreak
+ && textSizeAdjust == o.textSizeAdjust
+ && resize == o.resize
+- && userSelect == o.userSelect;
++ && userSelect == o.userSelect
++ && boxOrient == o.boxOrient;
+ }
+
+ bool StyleRareInheritedData::shadowDataEquivalent(const StyleRareInheritedData& o) const
+diff --git a/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum b/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum
+new file mode 100644
+index 0000000..6db26bd
+--- /dev/null
++++ b/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum
+@@ -0,0 +1 @@
++61a373ee739673a9dcd7bac62b9f182e
+\ No newline at end of file
+'''
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/__init__.py
new file mode 100644
index 0000000..9a2810c
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/__init__.py
@@ -0,0 +1,7 @@
+# Required for Python to search this directory for module files
+
+# We only export public API here.
+from .detection import SCMDetector
+from .git import Git, AmbiguousCommitError
+from .scm import SCM
+from .svn import SVN
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/detection.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/detection.py
new file mode 100644
index 0000000..e635b40
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/detection.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2009, 2010, 2011 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.system.executive import Executive
+
+from .svn import SVN
+from .git import Git
+
+_log = logging.getLogger(__name__)
+
+
+class SCMDetector(object):
+ def __init__(self, filesystem, executive):
+ self._filesystem = filesystem
+ self._executive = executive
+
+ def default_scm(self, patch_directories=None):
+ """Return the default SCM object as determined by the CWD and running code.
+
+ Returns the default SCM object for the current working directory; if the
+ CWD is not in a checkout, then we attempt to figure out if the SCM module
+ itself is part of a checkout, and return that one. If neither is part of
+ a checkout, None is returned.
+ """
+ cwd = self._filesystem.getcwd()
+ scm_system = self.detect_scm_system(cwd, patch_directories)
+ if not scm_system:
+ script_directory = self._filesystem.dirname(self._filesystem.path_to_module(self.__module__))
+ scm_system = self.detect_scm_system(script_directory, patch_directories)
+ if scm_system:
+ _log.info("The current directory (%s) is not a WebKit checkout, using %s" % (cwd, scm_system.checkout_root))
+ else:
+ raise Exception("FATAL: Failed to determine the SCM system for either %s or %s" % (cwd, script_directory))
+ return scm_system
+
+ def detect_scm_system(self, path, patch_directories=None):
+ absolute_path = self._filesystem.abspath(path)
+
+ if patch_directories == []:
+ patch_directories = None
+
+ if SVN.in_working_directory(absolute_path, executive=self._executive):
+ return SVN(cwd=absolute_path, patch_directories=patch_directories, filesystem=self._filesystem, executive=self._executive)
+
+ if Git.in_working_directory(absolute_path, executive=self._executive):
+ return Git(cwd=absolute_path, filesystem=self._filesystem, executive=self._executive)
+
+ return None
+
+
+# FIXME: These free functions are all deprecated:
+
+def detect_scm_system(path, patch_directories=None):
+ return SCMDetector(FileSystem(), Executive()).detect_scm_system(path, patch_directories)
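+
+
+# A minimal usage sketch (illustrative): detect the SCM for the current
+# directory, falling back to the checkout containing this module.
+#
+#   detector = SCMDetector(FileSystem(), Executive())
+#   scm = detector.default_scm()
+#   print scm.display_name(), scm.checkout_root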
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/detection_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/detection_unittest.py
new file mode 100644
index 0000000..966fbac
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/detection_unittest.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2011 Daniel Bates (dbates@intudata.com). All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from .detection import SCMDetector
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.outputcapture import OutputCapture
+
+
+class SCMDetectorTest(unittest.TestCase):
+ def test_detect_scm_system(self):
+ filesystem = MockFileSystem()
+ executive = MockExecutive(should_log=True)
+ detector = SCMDetector(filesystem, executive)
+
+ expected_logs = """\
+MOCK run_command: ['svn', 'info'], cwd=/
+MOCK run_command: ['git', 'rev-parse', '--is-inside-work-tree'], cwd=/
+"""
+ scm = OutputCapture().assert_outputs(self, detector.detect_scm_system, ["/"], expected_logs=expected_logs)
+ self.assertIsNone(scm)
+ # FIXME: This should make a synthetic tree and test SVN and Git detection in that tree.
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/git.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/git.py
new file mode 100644
index 0000000..9a73ce9
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/git.py
@@ -0,0 +1,320 @@
+# Copyright (c) 2009, 2010, 2011 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import datetime
+import logging
+import os
+import re
+
+from webkitpy.common.checkout.scm.scm import SCM
+from webkitpy.common.memoized import memoized
+from webkitpy.common.system.executive import Executive, ScriptError
+
+_log = logging.getLogger(__name__)
+
+
+class AmbiguousCommitError(Exception):
+ def __init__(self, num_local_commits, has_working_directory_changes):
+ Exception.__init__(self, "Found %s local commits and the working directory is %s" % (
+ num_local_commits, ["clean", "not clean"][has_working_directory_changes]))
+ self.num_local_commits = num_local_commits
+ self.has_working_directory_changes = has_working_directory_changes
+
+
+class Git(SCM):
+
+ # Git doesn't appear to document error codes, but seems to return
+ # 1 or 128, mostly.
+ ERROR_FILE_IS_MISSING = 128
+
+ executable_name = 'git'
+
+ def __init__(self, cwd, **kwargs):
+ SCM.__init__(self, cwd, **kwargs)
+
+ def _run_git(self, command_args, **kwargs):
+ full_command_args = [self.executable_name] + command_args
+ full_kwargs = kwargs
+        if 'cwd' not in full_kwargs:
+ full_kwargs['cwd'] = self.checkout_root
+ return self._run(full_command_args, **full_kwargs)
+
+ @classmethod
+ def in_working_directory(cls, path, executive=None):
+ try:
+ executive = executive or Executive()
+ return executive.run_command([cls.executable_name, 'rev-parse', '--is-inside-work-tree'], cwd=path, error_handler=Executive.ignore_error).rstrip() == "true"
+        except OSError:
+            # The Windows bots seem to throw a WindowsError when git isn't installed.
+            return False
+
+ def find_checkout_root(self, path):
+ # "git rev-parse --show-cdup" would be another way to get to the root
+ checkout_root = self._run_git(['rev-parse', '--show-toplevel'], cwd=(path or "./")).strip()
+ if not self._filesystem.isabs(checkout_root): # Sometimes git returns relative paths
+ checkout_root = self._filesystem.join(path, checkout_root)
+ return checkout_root
+
+ @classmethod
+ def read_git_config(cls, key, cwd=None, executive=None):
+ # FIXME: This should probably use cwd=self.checkout_root.
+ # Pass --get-all for cases where the config has multiple values
+ # Pass the cwd if provided so that we can handle the case of running webkit-patch outside of the working directory.
+ # FIXME: This should use an Executive.
+ executive = executive or Executive()
+ return executive.run_command([cls.executable_name, "config", "--get-all", key], error_handler=Executive.ignore_error, cwd=cwd).rstrip('\n')
+
+ def _discard_local_commits(self):
+ self._run_git(['reset', '--hard', self._remote_branch_ref()])
+
+ def _local_commits(self, ref='HEAD'):
+ return self._run_git(['log', '--pretty=oneline', ref + '...' + self._remote_branch_ref()]).splitlines()
+
+ def _rebase_in_progress(self):
+ return self._filesystem.exists(self.absolute_path(self._filesystem.join('.git', 'rebase-apply')))
+
+ def has_working_directory_changes(self):
+ return self._run_git(['diff', 'HEAD', '--no-renames', '--name-only']) != ""
+
+ def _discard_working_directory_changes(self):
+ # Could run git clean here too, but that wouldn't match subversion
+ self._run_git(['reset', 'HEAD', '--hard'])
+ # Aborting rebase even though this does not match subversion
+ if self._rebase_in_progress():
+ self._run_git(['rebase', '--abort'])
+
+ def status_command(self):
+        # git status returns non-zero when there are changes, so we use "git diff --name-status HEAD" instead.
+        # No file contents are printed, so utf-8 auto-decoding in self.run is fine.
+ return [self.executable_name, "diff", "--name-status", "--no-renames", "HEAD"]
+
+ def _status_regexp(self, expected_types):
+ return '^(?P<status>[%s])\t(?P<filename>.+)$' % expected_types
+
+ def add_list(self, paths, return_exit_code=False, recurse=True):
+ return self._run_git(["add"] + paths, return_exit_code=return_exit_code)
+
+ def delete_list(self, paths):
+ return self._run_git(["rm", "-f"] + paths)
+
+ def move(self, origin, destination):
+ return self._run_git(["mv", "-f", origin, destination])
+
+ def exists(self, path):
+ return_code = self._run_git(["show", "HEAD:%s" % path], return_exit_code=True, decode_output=False)
+ return return_code != self.ERROR_FILE_IS_MISSING
+
+ def _branch_from_ref(self, ref):
+ return ref.replace('refs/heads/', '')
+
+ def current_branch(self):
+ return self._branch_from_ref(self._run_git(['symbolic-ref', '-q', 'HEAD']).strip())
+
+ def _upstream_branch(self):
+ current_branch = self.current_branch()
+ return self._branch_from_ref(self.read_git_config('branch.%s.merge' % current_branch, cwd=self.checkout_root, executive=self._executive).strip())
+
+ def _merge_base(self, git_commit=None):
+ if git_commit:
+ # Rewrite UPSTREAM to the upstream branch
+ if 'UPSTREAM' in git_commit:
+ upstream = self._upstream_branch()
+ if not upstream:
+ raise ScriptError(message='No upstream/tracking branch set.')
+ git_commit = git_commit.replace('UPSTREAM', upstream)
+
+ # Special-case <refname>.. to include working copy changes, e.g., 'HEAD....' shows only the diffs from HEAD.
+ if git_commit.endswith('....'):
+ return git_commit[:-4]
+
+ if '..' not in git_commit:
+ git_commit = git_commit + "^.." + git_commit
+ return git_commit
+
+ return self._remote_merge_base()
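+
+    # Illustrative git_commit forms accepted by _merge_base (assuming the
+    # upstream branch is named 'origin/master'):
+    #   'UPSTREAM..' -> 'origin/master..'
+    #   'HEAD....'   -> 'HEAD' (the resulting diff includes working copy changes)
+    #   'abc123'     -> 'abc123^..abc123'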
+
+ def changed_files(self, git_commit=None):
+ # FIXME: --diff-filter could be used to avoid the "extract_filenames" step.
+ status_command = [self.executable_name, 'diff', '-r', '--name-status', "--no-renames", "--no-ext-diff", "--full-index", self._merge_base(git_commit)]
+ # FIXME: I'm not sure we're returning the same set of files that SVN.changed_files is.
+ # Added (A), Copied (C), Deleted (D), Modified (M), Renamed (R)
+ return self._run_status_and_extract_filenames(status_command, self._status_regexp("ADM"))
+
+ def _added_files(self):
+ return self._run_status_and_extract_filenames(self.status_command(), self._status_regexp("A"))
+
+ def _deleted_files(self):
+ return self._run_status_and_extract_filenames(self.status_command(), self._status_regexp("D"))
+
+ @staticmethod
+ def supports_local_commits():
+ return True
+
+ def display_name(self):
+ return "git"
+
+ def most_recent_log_matching(self, grep_str, path):
+ # We use '--grep=' + foo rather than '--grep', foo because
+ # git 1.7.0.4 (and earlier) didn't support the separate arg.
+ return self._run_git(['log', '-1', '--grep=' + grep_str, '--date=iso', self.find_checkout_root(path)])
+
+ def svn_revision(self, path):
+ git_log = self.most_recent_log_matching('git-svn-id:', path)
+ match = re.search("^\s*git-svn-id:.*@(?P<svn_revision>\d+)\ ", git_log, re.MULTILINE)
+ if not match:
+ return ""
+ return str(match.group('svn_revision'))
+
+ def timestamp_of_revision(self, path, revision):
+ git_log = self.most_recent_log_matching('git-svn-id:.*@%s' % revision, path)
+ match = re.search("^Date:\s*(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2}) ([+-])(\d{2})(\d{2})$", git_log, re.MULTILINE)
+ if not match:
+ return ""
+
+ # Manually modify the timezone since Git doesn't have an option to show it in UTC.
+ # Git also truncates milliseconds but we're going to ignore that for now.
+ time_with_timezone = datetime.datetime(int(match.group(1)), int(match.group(2)), int(match.group(3)),
+ int(match.group(4)), int(match.group(5)), int(match.group(6)), 0)
+
+ sign = 1 if match.group(7) == '+' else -1
+ time_without_timezone = time_with_timezone - datetime.timedelta(hours=sign * int(match.group(8)), minutes=int(match.group(9)))
+ return time_without_timezone.strftime('%Y-%m-%dT%H:%M:%SZ')
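+
+    # For example (illustrative): a "Date: 2013-02-08 08:05:49 +0800" log line
+    # yields '2013-02-08T00:05:49Z' once the +08:00 offset is subtracted.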
+
+ def _prepend_svn_revision(self, diff):
+ revision = self._head_svn_revision()
+ if not revision:
+ return diff
+
+ return "Subversion Revision: " + revision + '\n' + diff
+
+ def create_patch(self, git_commit=None, changed_files=None):
+ """Returns a byte array (str()) representing the patch file.
+ Patch files are effectively binary since they may contain
+ files of multiple different encodings."""
+
+        # Put code changes at the top of the patch and layout tests
+        # at the bottom; this makes for easier reviewing.
+ config_path = self._filesystem.dirname(self._filesystem.path_to_module('webkitpy.common.config'))
+ order_file = self._filesystem.join(config_path, 'orderfile')
+ order = ""
+ if self._filesystem.exists(order_file):
+ order = "-O%s" % order_file
+
+ command = [self.executable_name, 'diff', '--binary', '--no-color', "--no-ext-diff", "--full-index", "--no-renames", order, self._merge_base(git_commit), "--"]
+ if changed_files:
+ command += changed_files
+ return self._prepend_svn_revision(self._run(command, decode_output=False, cwd=self.checkout_root))
+
+ @memoized
+ def svn_revision_from_git_commit(self, git_commit):
+ # git svn find-rev always exits 0, even when the revision or commit is not found.
+ try:
+ return int(self._run_git(['svn', 'find-rev', git_commit]).rstrip())
+        except ValueError:
+ return None
+
+ def checkout_branch(self, name):
+ self._run_git(['checkout', '-q', name])
+
+ def create_clean_branch(self, name):
+ self._run_git(['checkout', '-q', '-b', name, self._remote_branch_ref()])
+
+ def blame(self, path):
+ return self._run_git(['blame', path])
+
+ # Git-specific methods:
+ def _branch_ref_exists(self, branch_ref):
+ return self._run_git(['show-ref', '--quiet', '--verify', branch_ref], return_exit_code=True) == 0
+
+ def delete_branch(self, branch_name):
+ if self._branch_ref_exists('refs/heads/' + branch_name):
+ self._run_git(['branch', '-D', branch_name])
+
+ def _remote_merge_base(self):
+ return self._run_git(['merge-base', self._remote_branch_ref(), 'HEAD']).strip()
+
+ def _remote_branch_ref(self):
+ # Use references so that we can avoid collisions, e.g. we don't want to operate on refs/heads/trunk if it exists.
+ remote_branch_refs = self.read_git_config('svn-remote.svn.fetch', cwd=self.checkout_root, executive=self._executive)
+ if not remote_branch_refs:
+ remote_master_ref = 'refs/remotes/origin/master'
+ if not self._branch_ref_exists(remote_master_ref):
+ raise ScriptError(message="Can't find a branch to diff against. svn-remote.svn.fetch is not in the git config and %s does not exist" % remote_master_ref)
+ return remote_master_ref
+
+ # FIXME: What's the right behavior when there are multiple svn-remotes listed?
+ # For now, just use the first one.
+ first_remote_branch_ref = remote_branch_refs.split('\n')[0]
+ return first_remote_branch_ref.split(':')[1]
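+
+    # For example (illustrative), a 'svn-remote.svn.fetch' value of
+    # 'trunk:refs/remotes/origin/master' yields 'refs/remotes/origin/master'.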
+
+ def commit_locally_with_message(self, message, commit_all_working_directory_changes=True):
+ command = ['commit', '-F', '-']
+ if commit_all_working_directory_changes:
+ command.insert(1, '--all')
+ self._run_git(command, input=message)
+
+    # These methods are git-specific and support the Git-oriented workflow that Blink is
+    # moving towards; hence there are no equivalent methods in the SVN class.
+
+ def pull(self):
+ self._run_git(['pull'])
+
+ def latest_git_commit(self):
+ return self._run_git(['log', '-1', '--format=%H']).strip()
+
+ def git_commits_since(self, commit):
+ return self._run_git(['log', commit + '..master', '--format=%H', '--reverse']).split()
+
+ def git_commit_detail(self, commit, format=None):
+ args = ['log', '-1', commit]
+ if format:
+ args.append('--format=' + format)
+ return self._run_git(args)
+
+ def _branch_tracking_remote_master(self):
+ origin_info = self._run_git(['remote', 'show', 'origin', '-n'])
+ match = re.search("^\s*(?P<branch_name>\S+)\s+merges with remote master$", origin_info, re.MULTILINE)
+ if not match:
+ raise ScriptError(message="Unable to find local branch tracking origin/master.")
+ branch = str(match.group("branch_name"))
+ return self._branch_from_ref(self._run_git(['rev-parse', '--symbolic-full-name', branch]).strip())
+
+ def is_cleanly_tracking_remote_master(self):
+ if self.has_working_directory_changes():
+ return False
+ if self.current_branch() != self._branch_tracking_remote_master():
+ return False
+ if len(self._local_commits(self._branch_tracking_remote_master())) > 0:
+ return False
+ return True
+
+ def ensure_cleanly_tracking_remote_master(self):
+ self._discard_working_directory_changes()
+ self._run_git(['checkout', '-q', self._branch_tracking_remote_master()])
+ self._discard_local_commits()
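+
+
+# A minimal usage sketch (illustrative; assumes the cwd is inside a git checkout):
+#
+#   git = Git(cwd='.')  # SCM.__init__ fills in a default Executive and FileSystem
+#   if git.is_cleanly_tracking_remote_master():
+#       print git.latest_git_commit()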
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/scm.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/scm.py
new file mode 100644
index 0000000..1dbd33f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/scm.py
@@ -0,0 +1,144 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Python module for interacting with an SCM system (like SVN or Git)
+
+import logging
+import re
+import sys
+
+from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.common.system.filesystem import FileSystem
+
+_log = logging.getLogger(__name__)
+
+
+# SCM methods are expected to return paths relative to self.checkout_root.
+class SCM:
+ def __init__(self, cwd, executive=None, filesystem=None):
+ self.cwd = cwd
+ self._executive = executive or Executive()
+ self._filesystem = filesystem or FileSystem()
+ self.checkout_root = self.find_checkout_root(self.cwd)
+
+ # A wrapper used by subclasses to create processes.
+ def _run(self, args, cwd=None, input=None, error_handler=None, return_exit_code=False, return_stderr=True, decode_output=True):
+ # FIXME: We should set cwd appropriately.
+ return self._executive.run_command(args,
+ cwd=cwd,
+ input=input,
+ error_handler=error_handler,
+ return_exit_code=return_exit_code,
+ return_stderr=return_stderr,
+ decode_output=decode_output)
+
+    # SCM always returns repository-relative paths, but sometimes we need
+    # absolute paths to pass to rm, etc.
+ def absolute_path(self, repository_relative_path):
+ return self._filesystem.join(self.checkout_root, repository_relative_path)
+
+ def _run_status_and_extract_filenames(self, status_command, status_regexp):
+ filenames = []
+ # We run with cwd=self.checkout_root so that returned-paths are root-relative.
+ for line in self._run(status_command, cwd=self.checkout_root).splitlines():
+ match = re.search(status_regexp, line)
+ if not match:
+ continue
+ # status = match.group('status')
+ filename = match.group('filename')
+ filenames.append(filename)
+ return filenames
+
+ @staticmethod
+ def _subclass_must_implement():
+ raise NotImplementedError("subclasses must implement")
+
+ @classmethod
+ def in_working_directory(cls, path, executive=None):
+ SCM._subclass_must_implement()
+
+ def find_checkout_root(self, path):
+ SCM._subclass_must_implement()
+
+ def add(self, path, return_exit_code=False, recurse=True):
+ self.add_list([path], return_exit_code, recurse)
+
+ def add_list(self, paths, return_exit_code=False, recurse=True):
+ self._subclass_must_implement()
+
+ def delete(self, path):
+ self.delete_list([path])
+
+ def delete_list(self, paths):
+ self._subclass_must_implement()
+
+ def move(self, origin, destination):
+ self._subclass_must_implement()
+
+ def exists(self, path):
+ self._subclass_must_implement()
+
+ def changed_files(self, git_commit=None):
+ self._subclass_must_implement()
+
+ def _added_files(self):
+ self._subclass_must_implement()
+
+ def _deleted_files(self):
+ self._subclass_must_implement()
+
+ def display_name(self):
+ self._subclass_must_implement()
+
+ def _head_svn_revision(self):
+ return self.svn_revision(self.checkout_root)
+
+ def svn_revision(self, path):
+ """Returns the latest svn revision found in the checkout."""
+ self._subclass_must_implement()
+
+ def timestamp_of_revision(self, path, revision):
+ self._subclass_must_implement()
+
+ def blame(self, path):
+ self._subclass_must_implement()
+
+ def has_working_directory_changes(self):
+ self._subclass_must_implement()
+
+ #--------------------------------------------------------------------------
+    # Subclasses must indicate whether they support local commits,
+    # but the SCM base class will only call local-commit methods when this is true.
+ @staticmethod
+ def supports_local_commits():
+ SCM._subclass_must_implement()
+
+ def commit_locally_with_message(self, message, commit_all_working_directory_changes=True):
+ _log.error("Your source control manager does not support local commits.")
+ sys.exit(1)
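+
+
+# A minimal subclass sketch (illustrative; the real implementations in this
+# package are Git and SVN):
+#
+#   class NullSCM(SCM):
+#       @classmethod
+#       def in_working_directory(cls, path, executive=None):
+#           return False
+#
+#       def find_checkout_root(self, path):
+#           return path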
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py
new file mode 100644
index 0000000..7bd6add
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py
@@ -0,0 +1,112 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.executive_mock import MockExecutive
+
+
+class MockSCM(object):
+ executable_name = "MockSCM"
+
+ def __init__(self, filesystem=None, executive=None):
+ self.checkout_root = "/mock-checkout/third_party/WebKit"
+ self.added_paths = set()
+ self._filesystem = filesystem or MockFileSystem()
+ self._executive = executive or MockExecutive()
+
+ def add(self, destination_path, return_exit_code=False):
+ self.add_list([destination_path], return_exit_code)
+
+ def add_list(self, destination_paths, return_exit_code=False):
+ self.added_paths.update(set(destination_paths))
+ if return_exit_code:
+ return 0
+
+ def has_working_directory_changes(self):
+ return False
+
+ def ensure_cleanly_tracking_remote_master(self):
+ pass
+
+ def current_branch(self):
+ return "mock-branch-name"
+
+ def checkout_branch(self, name):
+ pass
+
+ def create_clean_branch(self, name):
+ pass
+
+ def delete_branch(self, name):
+ pass
+
+ def supports_local_commits(self):
+ return True
+
+ def exists(self, path):
+ # TestRealMain.test_real_main (and several other rebaseline tests) are sensitive to this return value.
+ # We should make those tests more robust, but for now we just always return True (since no test needs otherwise).
+ return True
+
+ def absolute_path(self, *comps):
+ return self._filesystem.join(self.checkout_root, *comps)
+
+ def svn_revision(self, path):
+ return '5678'
+
+ def svn_revision_from_git_commit(self, git_commit):
+ if git_commit == '6469e754a1':
+ return 1234
+ if git_commit == '624c3081c0':
+ return 5678
+ if git_commit == '624caaaaaa':
+ return 10000
+ return None
+
+ def timestamp_of_revision(self, path, revision):
+ return '2013-02-01 08:48:05 +0000'
+
+ def commit_locally_with_message(self, message, commit_all_working_directory_changes=True):
+ pass
+
+ def delete(self, path):
+ return self.delete_list([path])
+
+ def delete_list(self, paths):
+ if not self._filesystem:
+ return
+ for path in paths:
+ if self._filesystem.exists(path):
+ self._filesystem.remove(path)
+
+ def move(self, origin, destination):
+ if self._filesystem:
+ self._filesystem.move(self.absolute_path(origin), self.absolute_path(destination))
+
+ def changed_files(self):
+ return []
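+
+
+# A minimal usage sketch (illustrative only; not exercised by this file):
+#
+#   scm = MockSCM()
+#   scm.add("LayoutTests/fast/new-test.html")
+#   assert "LayoutTests/fast/new-test.html" in scm.added_paths
+#   assert scm.svn_revision("any-path") == '5678'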
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/scm_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/scm_unittest.py
new file mode 100644
index 0000000..1542127
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/scm_unittest.py
@@ -0,0 +1,705 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2011 Daniel Bates (dbates@intudata.com). All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import atexit
+import os
+import shutil
+import unittest
+
+from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.checkout.scm.detection import detect_scm_system
+from webkitpy.common.checkout.scm.git import Git, AmbiguousCommitError
+from webkitpy.common.checkout.scm.scm import SCM
+from webkitpy.common.checkout.scm.svn import SVN
+
+
+# We cache the mock SVN repo so that we don't create it again for each call to an SVNTest or GitTest test_ method.
+# We store it in a global variable so that we can delete this cached repo on exit(3).
+original_cwd = None
+cached_svn_repo_path = None
+
+@atexit.register
+def delete_cached_svn_repo_at_exit():
+ if cached_svn_repo_path:
+ os.chdir(original_cwd)
+ shutil.rmtree(cached_svn_repo_path)
+
+
+class SCMTestBase(unittest.TestCase):
+ def __init__(self, *args, **kwargs):
+ super(SCMTestBase, self).__init__(*args, **kwargs)
+ self.scm = None
+ self.executive = None
+ self.fs = None
+ self.original_cwd = None
+
+ def setUp(self):
+ self.executive = Executive()
+ self.fs = FileSystem()
+ self.original_cwd = self.fs.getcwd()
+
+ def tearDown(self):
+ self._chdir(self.original_cwd)
+
+ def _join(self, *comps):
+ return self.fs.join(*comps)
+
+ def _chdir(self, path):
+ self.fs.chdir(path)
+
+ def _mkdir(self, path):
+ assert not self.fs.exists(path)
+ self.fs.maybe_make_directory(path)
+
+ def _mkdtemp(self, **kwargs):
+ return str(self.fs.mkdtemp(**kwargs))
+
+ def _remove(self, path):
+ self.fs.remove(path)
+
+ def _rmtree(self, path):
+ self.fs.rmtree(path)
+
+ def _run(self, *args, **kwargs):
+ return self.executive.run_command(*args, **kwargs)
+
+ def _run_silent(self, args, **kwargs):
+ self.executive.run_command(args, **kwargs)
+
+ def _write_text_file(self, path, contents):
+ self.fs.write_text_file(path, contents)
+
+ def _write_binary_file(self, path, contents):
+ self.fs.write_binary_file(path, contents)
+
+ def _make_diff(self, command, *args):
+ # We use this wrapper to disable output decoding. diffs should be treated as
+ # binary files since they may include text files of multiple different encodings.
+ return self._run([command, "diff"] + list(args), decode_output=False)
+
+ def _svn_diff(self, *args):
+ return self._make_diff("svn", *args)
+
+ def _git_diff(self, *args):
+ return self._make_diff("git", *args)
+
+ def _svn_add(self, path):
+ self._run(["svn", "add", path])
+
+ def _svn_commit(self, message):
+ self._run(["svn", "commit", "--quiet", "--message", message])
+
+ # This is a hot function since it's invoked by unittest before calling each test_ method in SVNTest and
+ # GitTest. We create a mock SVN repo once and then perform an SVN checkout from a filesystem copy of
+ # it since it's expensive to create the mock repo.
+ def _set_up_svn_checkout(self):
+ global cached_svn_repo_path
+ global original_cwd
+ if not cached_svn_repo_path:
+ cached_svn_repo_path = self._set_up_svn_repo()
+ original_cwd = self.original_cwd
+
+ self.temp_directory = self._mkdtemp(suffix="svn_test")
+ self.svn_repo_path = self._join(self.temp_directory, "repo")
+ self.svn_repo_url = "file://%s" % self.svn_repo_path
+ self.svn_checkout_path = self._join(self.temp_directory, "checkout")
+ shutil.copytree(cached_svn_repo_path, self.svn_repo_path)
+ self._run(['svn', 'checkout', '--quiet', self.svn_repo_url + "/trunk", self.svn_checkout_path])
+
+ def _set_up_svn_repo(self):
+ svn_repo_path = self._mkdtemp(suffix="svn_test_repo")
+ svn_repo_url = "file://%s" % svn_repo_path # Not sure this will work on windows
+ # git svn complains if we don't pass --pre-1.5-compatible, not sure why:
+ # Expected FS format '2'; found format '3' at /usr/local/libexec/git-core//git-svn line 1477
+ self._run(['svnadmin', 'create', '--pre-1.5-compatible', svn_repo_path])
+
+ # Create a test svn checkout
+ svn_checkout_path = self._mkdtemp(suffix="svn_test_checkout")
+ self._run(['svn', 'checkout', '--quiet', svn_repo_url, svn_checkout_path])
+
+ # Create and check out a trunk dir, matching the standard svn configuration that git-svn expects.
+ self._chdir(svn_checkout_path)
+ self._mkdir('trunk')
+ self._svn_add('trunk')
+ # We can add tags and branches as well if we ever need to test those.
+ self._svn_commit('add trunk')
+
+ self._rmtree(svn_checkout_path)
+ self._chdir(self.original_cwd)
+
+ self._set_up_svn_test_commits(svn_repo_url + "/trunk")
+ return svn_repo_path
+
+ def _set_up_svn_test_commits(self, svn_repo_url):
+ svn_checkout_path = self._mkdtemp(suffix="svn_test_checkout")
+ self._run(['svn', 'checkout', '--quiet', svn_repo_url, svn_checkout_path])
+
+ # Add some test commits
+ self._chdir(svn_checkout_path)
+
+ self._write_text_file("test_file", "test1")
+ self._svn_add("test_file")
+ self._svn_commit("initial commit")
+
+ self._write_text_file("test_file", "test1test2")
+ # This used to be the last commit, but doing so broke
+ # GitTest.test_apply_git_patch, which uses the inverse diff of the last commit.
+ # svn-apply fails to remove directories in Git, see:
+ # https://bugs.webkit.org/show_bug.cgi?id=34871
+ self._mkdir("test_dir")
+ # Slash should always be the right path separator since we use Cygwin on Windows.
+ test_file3_path = "test_dir/test_file3"
+ self._write_text_file(test_file3_path, "third file")
+ self._svn_add("test_dir")
+ self._svn_commit("second commit")
+
+ self._write_text_file("test_file", "test1test2test3\n")
+ self._write_text_file("test_file2", "second file")
+ self._svn_add("test_file2")
+ self._svn_commit("third commit")
+
+ # This 4th commit is used to make sure that our patch file handling
+ # code correctly treats patches as binary and does not attempt to
+ # decode them assuming they're utf-8.
+ self._write_binary_file("test_file", u"latin1 test: \u00A0\n".encode("latin-1"))
+ self._write_binary_file("test_file2", u"utf-8 test: \u00A0\n".encode("utf-8"))
+ self._svn_commit("fourth commit")
+
+ # svn does not seem to update after commit as I would expect.
+ self._run(['svn', 'update'])
+ self._rmtree(svn_checkout_path)
+ self._chdir(self.original_cwd)
+
+ def _tear_down_svn_checkout(self):
+ self._rmtree(self.temp_directory)
+
+ def _shared_test_add_recursively(self):
+ self._mkdir("added_dir")
+ self._write_text_file("added_dir/added_file", "new stuff")
+ self.scm.add("added_dir/added_file")
+ self.assertIn("added_dir/added_file", self.scm._added_files())
+
+ def _shared_test_delete_recursively(self):
+ self._mkdir("added_dir")
+ self._write_text_file("added_dir/added_file", "new stuff")
+ self.scm.add("added_dir/added_file")
+ self.assertIn("added_dir/added_file", self.scm._added_files())
+ self.scm.delete("added_dir/added_file")
+ self.assertNotIn("added_dir", self.scm._added_files())
+
+ def _shared_test_delete_recursively_or_not(self):
+ self._mkdir("added_dir")
+ self._write_text_file("added_dir/added_file", "new stuff")
+ self._write_text_file("added_dir/another_added_file", "more new stuff")
+ self.scm.add("added_dir/added_file")
+ self.scm.add("added_dir/another_added_file")
+ self.assertIn("added_dir/added_file", self.scm._added_files())
+ self.assertIn("added_dir/another_added_file", self.scm._added_files())
+ self.scm.delete("added_dir/added_file")
+ self.assertIn("added_dir/another_added_file", self.scm._added_files())
+
+ def _shared_test_exists(self, scm, commit_function):
+ self._chdir(scm.checkout_root)
+ self.assertFalse(scm.exists('foo.txt'))
+ self._write_text_file('foo.txt', 'some stuff')
+ self.assertFalse(scm.exists('foo.txt'))
+ scm.add('foo.txt')
+ commit_function('adding foo')
+ self.assertTrue(scm.exists('foo.txt'))
+ scm.delete('foo.txt')
+ commit_function('deleting foo')
+ self.assertFalse(scm.exists('foo.txt'))
+
+ def _shared_test_move(self):
+ self._write_text_file('added_file', 'new stuff')
+ self.scm.add('added_file')
+ self.scm.move('added_file', 'moved_file')
+ self.assertIn('moved_file', self.scm._added_files())
+
+ def _shared_test_move_recursive(self):
+ self._mkdir("added_dir")
+ self._write_text_file('added_dir/added_file', 'new stuff')
+ self._write_text_file('added_dir/another_added_file', 'more new stuff')
+ self.scm.add('added_dir')
+ self.scm.move('added_dir', 'moved_dir')
+ self.assertIn('moved_dir/added_file', self.scm._added_files())
+ self.assertIn('moved_dir/another_added_file', self.scm._added_files())
+
+
+class SVNTest(SCMTestBase):
+ def setUp(self):
+ super(SVNTest, self).setUp()
+ self._set_up_svn_checkout()
+ self._chdir(self.svn_checkout_path)
+ self.scm = detect_scm_system(self.svn_checkout_path)
+ self.scm.svn_server_realm = None
+
+ def tearDown(self):
+ super(SVNTest, self).tearDown()
+ self._tear_down_svn_checkout()
+
+ def test_detect_scm_system_relative_url(self):
+ scm = detect_scm_system(".")
+ # I wanted to assert that we got the right path, but there was some
+ # crazy magic with temp folder names that I couldn't figure out.
+ self.assertTrue(scm.checkout_root)
+
+ def test_detection(self):
+ self.assertEqual(self.scm.display_name(), "svn")
+ self.assertEqual(self.scm.supports_local_commits(), False)
+
+ def test_add_recursively(self):
+ self._shared_test_add_recursively()
+
+ def test_delete(self):
+ self._chdir(self.svn_checkout_path)
+ self.scm.delete("test_file")
+ self.assertIn("test_file", self.scm._deleted_files())
+
+ def test_delete_list(self):
+ self._chdir(self.svn_checkout_path)
+ self.scm.delete_list(["test_file", "test_file2"])
+ self.assertIn("test_file", self.scm._deleted_files())
+ self.assertIn("test_file2", self.scm._deleted_files())
+
+ def test_delete_recursively(self):
+ self._shared_test_delete_recursively()
+
+ def test_delete_recursively_or_not(self):
+ self._shared_test_delete_recursively_or_not()
+
+ def test_move(self):
+ self._shared_test_move()
+
+ def test_move_recursive(self):
+ self._shared_test_move_recursive()
+
+
+class GitTest(SCMTestBase):
+ def setUp(self):
+ super(GitTest, self).setUp()
+ self._set_up_git_checkouts()
+
+ def tearDown(self):
+ super(GitTest, self).tearDown()
+ self._tear_down_git_checkouts()
+
+ def _set_up_git_checkouts(self):
+ """Sets up fresh git repository with one commit. Then sets up a second git repo that tracks the first one."""
+
+ self.untracking_checkout_path = self._mkdtemp(suffix="git_test_checkout2")
+ self._run(['git', 'init', self.untracking_checkout_path])
+
+ self._chdir(self.untracking_checkout_path)
+ self._write_text_file('foo_file', 'foo')
+ self._run(['git', 'add', 'foo_file'])
+ self._run(['git', 'commit', '-am', 'dummy commit'])
+ self.untracking_scm = detect_scm_system(self.untracking_checkout_path)
+
+ self.tracking_git_checkout_path = self._mkdtemp(suffix="git_test_checkout")
+ self._run(['git', 'clone', '--quiet', self.untracking_checkout_path, self.tracking_git_checkout_path])
+ self._chdir(self.tracking_git_checkout_path)
+ self.tracking_scm = detect_scm_system(self.tracking_git_checkout_path)
+
+ def _tear_down_git_checkouts(self):
+ self._run(['rm', '-rf', self.tracking_git_checkout_path])
+ self._run(['rm', '-rf', self.untracking_checkout_path])
+
+ def test_remote_branch_ref(self):
+ self.assertEqual(self.tracking_scm._remote_branch_ref(), 'refs/remotes/origin/master')
+ self._chdir(self.untracking_checkout_path)
+ self.assertRaises(ScriptError, self.untracking_scm._remote_branch_ref)
+
+ def test_multiple_remotes(self):
+ self._run(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote1'])
+ self._run(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote2'])
+ self.assertEqual(self.tracking_scm._remote_branch_ref(), 'remote1')
+
+ def test_create_patch(self):
+ self._write_text_file('test_file_commit1', 'contents')
+ self._run(['git', 'add', 'test_file_commit1'])
+ scm = self.tracking_scm
+ scm.commit_locally_with_message('message')
+
+ patch = scm.create_patch()
+ self.assertNotRegexpMatches(patch, r'Subversion Revision:')
+
+ def test_exists(self):
+ scm = self.untracking_scm
+ self._shared_test_exists(scm, scm.commit_locally_with_message)
+
+ def test_rename_files(self):
+ scm = self.tracking_scm
+ scm.move('foo_file', 'bar_file')
+ scm.commit_locally_with_message('message')
+
+
+class GitSVNTest(SCMTestBase):
+ def setUp(self):
+ super(GitSVNTest, self).setUp()
+ self._set_up_svn_checkout()
+ self._set_up_gitsvn_checkout()
+ self.scm = detect_scm_system(self.git_checkout_path)
+ self.scm.svn_server_realm = None
+
+ def tearDown(self):
+ super(GitSVNTest, self).tearDown()
+ self._tear_down_svn_checkout()
+ self._tear_down_gitsvn_checkout()
+
+ def _set_up_gitsvn_checkout(self):
+ self.git_checkout_path = self._mkdtemp(suffix="git_test_checkout")
+ # --quiet doesn't make git svn silent
+ self._run_silent(['git', 'svn', 'clone', '-T', 'trunk', self.svn_repo_url, self.git_checkout_path])
+ self._chdir(self.git_checkout_path)
+ self.git_v2 = self._run(['git', '--version']).startswith('git version 2')
+ if self.git_v2:
+ # The semantics of 'git svn clone -T' changed in v2 (apparently), so the branch names are different.
+ # This works around it, for compatibility with v1.
+ self._run_silent(['git', 'branch', 'trunk', 'origin/trunk'])
+
+ def _tear_down_gitsvn_checkout(self):
+ self._rmtree(self.git_checkout_path)
+
+ def test_detection(self):
+ self.assertEqual(self.scm.display_name(), "git")
+ self.assertEqual(self.scm.supports_local_commits(), True)
+
+ def test_read_git_config(self):
+ key = 'test.git-config'
+ value = 'git-config value'
+ self._run(['git', 'config', key, value])
+ self.assertEqual(self.scm.read_git_config(key), value)
+
+ def test_local_commits(self):
+ test_file = self._join(self.git_checkout_path, 'test_file')
+ self._write_text_file(test_file, 'foo')
+ self._run(['git', 'commit', '-a', '-m', 'local commit'])
+
+ self.assertEqual(len(self.scm._local_commits()), 1)
+
+ def test_discard_local_commits(self):
+ test_file = self._join(self.git_checkout_path, 'test_file')
+ self._write_text_file(test_file, 'foo')
+ self._run(['git', 'commit', '-a', '-m', 'local commit'])
+
+ self.assertEqual(len(self.scm._local_commits()), 1)
+ self.scm._discard_local_commits()
+ self.assertEqual(len(self.scm._local_commits()), 0)
+
+ def test_delete_branch(self):
+ new_branch = 'foo'
+
+ self._run(['git', 'checkout', '-b', new_branch])
+ self.assertEqual(self._run(['git', 'symbolic-ref', 'HEAD']).strip(), 'refs/heads/' + new_branch)
+
+ self._run(['git', 'checkout', '-b', 'bar'])
+ self.scm.delete_branch(new_branch)
+
+ self.assertNotRegexpMatches(self._run(['git', 'branch']), r'foo')
+
+ def test_rebase_in_progress(self):
+ svn_test_file = self._join(self.svn_checkout_path, 'test_file')
+ self._write_text_file(svn_test_file, "svn_checkout")
+ self._run(['svn', 'commit', '--message', 'commit to conflict with git commit'], cwd=self.svn_checkout_path)
+
+ git_test_file = self._join(self.git_checkout_path, 'test_file')
+ self._write_text_file(git_test_file, "git_checkout")
+ self._run(['git', 'commit', '-a', '-m', 'commit to be thrown away by rebase abort'])
+
+ # Should fail due to a conflict leaving us mid-rebase.
+ # We use self._run_silent because --quiet doesn't actually make git svn silent.
+ self.assertRaises(ScriptError, self._run_silent, ['git', 'svn', '--quiet', 'rebase'])
+
+ self.assertTrue(self.scm._rebase_in_progress())
+
+ # Make sure our cleanup works.
+ self.scm._discard_working_directory_changes()
+ self.assertFalse(self.scm._rebase_in_progress())
+
+ # Make sure cleanup doesn't throw when no rebase is in progress.
+ self.scm._discard_working_directory_changes()
+
+ def _local_commit(self, filename, contents, message):
+ self._write_text_file(filename, contents)
+ self._run(['git', 'add', filename])
+ self.scm.commit_locally_with_message(message)
+
+ def _one_local_commit(self):
+ self._local_commit('test_file_commit1', 'more test content', 'another test commit')
+
+ def _one_local_commit_plus_working_copy_changes(self):
+ self._one_local_commit()
+ self._write_text_file('test_file_commit2', 'still more test content')
+ self._run(['git', 'add', 'test_file_commit2'])
+
+ def _second_local_commit(self):
+ self._local_commit('test_file_commit2', 'still more test content', 'yet another test commit')
+
+ def _two_local_commits(self):
+ self._one_local_commit()
+ self._second_local_commit()
+
+ def _three_local_commits(self):
+ self._local_commit('test_file_commit0', 'more test content', 'another test commit')
+ self._two_local_commits()
+
+ def test_locally_commit_all_working_copy_changes(self):
+ self._local_commit('test_file', 'test content', 'test commit')
+ self._write_text_file('test_file', 'changed test content')
+ self.assertTrue(self.scm.has_working_directory_changes())
+ self.scm.commit_locally_with_message('all working copy changes')
+ self.assertFalse(self.scm.has_working_directory_changes())
+
+ def test_locally_commit_no_working_copy_changes(self):
+ self._local_commit('test_file', 'test content', 'test commit')
+ self._write_text_file('test_file', 'changed test content')
+ self.assertTrue(self.scm.has_working_directory_changes())
+ self.assertRaises(ScriptError, self.scm.commit_locally_with_message, 'no working copy changes', False)
+
+ def _test_upstream_branch(self):
+ self._run(['git', 'checkout', '-t', '-b', 'my-branch'])
+ self._run(['git', 'checkout', '-t', '-b', 'my-second-branch'])
+ self.assertEqual(self.scm._upstream_branch(), 'my-branch')
+
+ def test_remote_branch_ref(self):
+ remote_branch_ref = self.scm._remote_branch_ref()
+ if self.git_v2:
+ self.assertEqual(remote_branch_ref, 'refs/remotes/origin/trunk')
+ else:
+ self.assertEqual(remote_branch_ref, 'refs/remotes/trunk')
+
+ def test_create_patch_local_plus_working_copy(self):
+ self._one_local_commit_plus_working_copy_changes()
+ patch = self.scm.create_patch()
+ self.assertRegexpMatches(patch, r'test_file_commit1')
+ self.assertRegexpMatches(patch, r'test_file_commit2')
+
+ def test_create_patch(self):
+ self._one_local_commit_plus_working_copy_changes()
+ patch = self.scm.create_patch()
+ self.assertRegexpMatches(patch, r'test_file_commit2')
+ self.assertRegexpMatches(patch, r'test_file_commit1')
+ self.assertRegexpMatches(patch, r'Subversion Revision: 5')
+
+ def test_create_patch_after_merge(self):
+ self._run(['git', 'checkout', '-b', 'dummy-branch', 'trunk~3'])
+ self._one_local_commit()
+ self._run(['git', 'merge', 'trunk'])
+
+ patch = self.scm.create_patch()
+ self.assertRegexpMatches(patch, r'test_file_commit1')
+ self.assertRegexpMatches(patch, r'Subversion Revision: 5')
+
+ def test_create_patch_with_changed_files(self):
+ self._one_local_commit_plus_working_copy_changes()
+ patch = self.scm.create_patch(changed_files=['test_file_commit2'])
+ self.assertRegexpMatches(patch, r'test_file_commit2')
+
+ def test_create_patch_with_rm_and_changed_files(self):
+ self._one_local_commit_plus_working_copy_changes()
+ self._remove('test_file_commit1')
+ patch = self.scm.create_patch()
+ patch_with_changed_files = self.scm.create_patch(changed_files=['test_file_commit1', 'test_file_commit2'])
+ self.assertEqual(patch, patch_with_changed_files)
+
+ def test_create_patch_git_commit(self):
+ self._two_local_commits()
+ patch = self.scm.create_patch(git_commit="HEAD^")
+ self.assertRegexpMatches(patch, r'test_file_commit1')
+ self.assertNotRegexpMatches(patch, r'test_file_commit2')
+
+ def test_create_patch_git_commit_range(self):
+ self._three_local_commits()
+ patch = self.scm.create_patch(git_commit="HEAD~2..HEAD")
+ self.assertNotRegexpMatches(patch, r'test_file_commit0')
+ self.assertRegexpMatches(patch, r'test_file_commit2')
+ self.assertRegexpMatches(patch, r'test_file_commit1')
+
+ def test_create_patch_working_copy_only(self):
+ self._one_local_commit_plus_working_copy_changes()
+ patch = self.scm.create_patch(git_commit="HEAD....")
+ self.assertNotRegexpMatches(patch, r'test_file_commit1')
+ self.assertRegexpMatches(patch, r'test_file_commit2')
+
+ def test_create_patch_multiple_local_commits(self):
+ self._two_local_commits()
+ patch = self.scm.create_patch()
+ self.assertRegexpMatches(patch, r'test_file_commit2')
+ self.assertRegexpMatches(patch, r'test_file_commit1')
+
+ def test_create_patch_not_synced(self):
+ self._run(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
+ self._two_local_commits()
+ patch = self.scm.create_patch()
+ self.assertNotRegexpMatches(patch, r'test_file2')
+ self.assertRegexpMatches(patch, r'test_file_commit2')
+ self.assertRegexpMatches(patch, r'test_file_commit1')
+
+ def test_create_binary_patch(self):
+ # Create a git binary patch and check the contents.
+ test_file_name = 'binary_file'
+ test_file_path = self.fs.join(self.git_checkout_path, test_file_name)
+ file_contents = ''.join(map(chr, range(256)))
+ self._write_binary_file(test_file_path, file_contents)
+ self._run(['git', 'add', test_file_name])
+ patch = self.scm.create_patch()
+ self.assertRegexpMatches(patch, r'\nliteral 0\n')
+ self.assertRegexpMatches(patch, r'\nliteral 256\n')
+
+ # Check if we can create a patch from a local commit.
+ self._write_binary_file(test_file_path, file_contents)
+ self._run(['git', 'add', test_file_name])
+ self._run(['git', 'commit', '-m', 'binary diff'])
+
+ patch_from_local_commit = self.scm.create_patch('HEAD')
+ self.assertRegexpMatches(patch_from_local_commit, r'\nliteral 0\n')
+ self.assertRegexpMatches(patch_from_local_commit, r'\nliteral 256\n')
+
+ def test_changed_files_local_plus_working_copy(self):
+ self._one_local_commit_plus_working_copy_changes()
+ files = self.scm.changed_files()
+ self.assertIn('test_file_commit1', files)
+ self.assertIn('test_file_commit2', files)
+
+ # Working copy changes should *not* be in the list.
+ files = self.scm.changed_files('trunk..')
+ self.assertIn('test_file_commit1', files)
+ self.assertNotIn('test_file_commit2', files)
+
+ # Working copy changes *should* be in the list.
+ files = self.scm.changed_files('trunk....')
+ self.assertIn('test_file_commit1', files)
+ self.assertIn('test_file_commit2', files)
+
+ def test_changed_files_git_commit(self):
+ self._two_local_commits()
+ files = self.scm.changed_files(git_commit="HEAD^")
+ self.assertIn('test_file_commit1', files)
+ self.assertNotIn('test_file_commit2', files)
+
+ def test_changed_files_git_commit_range(self):
+ self._three_local_commits()
+ files = self.scm.changed_files(git_commit="HEAD~2..HEAD")
+ self.assertNotIn('test_file_commit0', files)
+ self.assertIn('test_file_commit1', files)
+ self.assertIn('test_file_commit2', files)
+
+ def test_changed_files_working_copy_only(self):
+ self._one_local_commit_plus_working_copy_changes()
+ files = self.scm.changed_files(git_commit="HEAD....")
+ self.assertNotIn('test_file_commit1', files)
+ self.assertIn('test_file_commit2', files)
+
+ def test_changed_files_multiple_local_commits(self):
+ self._two_local_commits()
+ files = self.scm.changed_files()
+ self.assertIn('test_file_commit2', files)
+ self.assertIn('test_file_commit1', files)
+
+ def test_changed_files_not_synced(self):
+ self._run(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
+ self._two_local_commits()
+ files = self.scm.changed_files()
+ self.assertNotIn('test_file2', files)
+ self.assertIn('test_file_commit2', files)
+ self.assertIn('test_file_commit1', files)
+
+ def test_changed_files_upstream(self):
+ self._run(['git', 'checkout', '-t', '-b', 'my-branch'])
+ self._one_local_commit()
+ self._run(['git', 'checkout', '-t', '-b', 'my-second-branch'])
+ self._second_local_commit()
+ self._write_text_file('test_file_commit0', 'more test content')
+ self._run(['git', 'add', 'test_file_commit0'])
+
+ # equivalent to 'git diff my-branch..HEAD'; should not include working changes
+ files = self.scm.changed_files(git_commit='UPSTREAM..')
+ self.assertNotIn('test_file_commit1', files)
+ self.assertIn('test_file_commit2', files)
+ self.assertNotIn('test_file_commit0', files)
+
+ # equivalent to 'git diff my-branch', *should* include working changes
+ files = self.scm.changed_files(git_commit='UPSTREAM....')
+ self.assertNotIn('test_file_commit1', files)
+ self.assertIn('test_file_commit2', files)
+ self.assertIn('test_file_commit0', files)
+
+ def test_add_recursively(self):
+ self._shared_test_add_recursively()
+
+ def test_delete(self):
+ self._two_local_commits()
+ self.scm.delete('test_file_commit1')
+ self.assertIn("test_file_commit1", self.scm._deleted_files())
+
+ def test_delete_list(self):
+ self._two_local_commits()
+ self.scm.delete_list(["test_file_commit1", "test_file_commit2"])
+ self.assertIn("test_file_commit1", self.scm._deleted_files())
+ self.assertIn("test_file_commit2", self.scm._deleted_files())
+
+ def test_delete_recursively(self):
+ self._shared_test_delete_recursively()
+
+ def test_delete_recursively_or_not(self):
+ self._shared_test_delete_recursively_or_not()
+
+ def test_move(self):
+ self._shared_test_move()
+
+ def test_move_recursive(self):
+ self._shared_test_move_recursive()
+
+ def test_exists(self):
+ self._shared_test_exists(self.scm, self.scm.commit_locally_with_message)
+
+
+class GitTestWithMock(SCMTestBase):
+ def make_scm(self):
+ scm = Git(cwd=".", executive=MockExecutive(), filesystem=MockFileSystem())
+ scm.read_git_config = lambda *args, **kw: "MOCKKEY:MOCKVALUE"
+ return scm
+
+ def test_timestamp_of_revision(self):
+ scm = self.make_scm()
+ scm.find_checkout_root = lambda path: ''
+ scm._run_git = lambda args: 'Date: 2013-02-08 08:05:49 +0000'
+ self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-08T08:05:49Z')
+
+ scm._run_git = lambda args: 'Date: 2013-02-08 01:02:03 +0130'
+ self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-07T23:32:03Z')
+
+ scm._run_git = lambda args: 'Date: 2013-02-08 01:55:21 -0800'
+ self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-08T09:55:21Z')
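+
+ # The expected values above are plain offset arithmetic: 01:02:03 at
+ # UTC+01:30 is 01:02:03 - 01:30 = 23:32:03 UTC on the previous day, and
+ # 01:55:21 at UTC-08:00 is 01:55:21 + 08:00 = 09:55:21 UTC.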
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/svn.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/svn.py
new file mode 100644
index 0000000..1f2a059
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/checkout/scm/svn.py
@@ -0,0 +1,206 @@
+# Copyright (c) 2009, 2010, 2011 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import os
+import random
+import re
+import shutil
+import string
+import sys
+import tempfile
+
+from webkitpy.common.memoized import memoized
+from webkitpy.common.system.executive import Executive, ScriptError
+
+from .scm import SCM
+
+_log = logging.getLogger(__name__)
+
+
+class SVN(SCM):
+
+ executable_name = "svn"
+
+ _svn_metadata_files = frozenset(['.svn', '_svn'])
+
+ def __init__(self, cwd, patch_directories, **kwargs):
+ SCM.__init__(self, cwd, **kwargs)
+ self._bogus_dir = None
+ if patch_directories == []:
+ raise Exception('Empty list of patch directories passed to SCM.__init__')
+ elif patch_directories is None:
+ self._patch_directories = [self._filesystem.relpath(cwd, self.checkout_root)]
+ else:
+ self._patch_directories = patch_directories
+
+ @classmethod
+ def in_working_directory(cls, path, executive=None):
+ if os.path.isdir(os.path.join(path, '.svn')):
+ # This is a fast shortcut for svn info that is usually correct for SVN < 1.7,
+ # but doesn't work for SVN >= 1.7.
+ return True
+
+ executive = executive or Executive()
+ svn_info_args = [cls.executable_name, 'info']
+ try:
+ exit_code = executive.run_command(svn_info_args, cwd=path, return_exit_code=True)
+ except OSError:
+ # svn is not installed
+ return False
+ return (exit_code == 0)
+
+ def _find_uuid(self, path):
+ if not self.in_working_directory(path):
+ return None
+ return self.value_from_svn_info(path, 'Repository UUID')
+
+ @classmethod
+ def value_from_svn_info(cls, path, field_name):
+ svn_info_args = [cls.executable_name, 'info']
+ # FIXME: This method should use a passed in executive or be made an instance method and use self._executive.
+ info_output = Executive().run_command(svn_info_args, cwd=path).rstrip()
+ match = re.search("^%s: (?P<value>.+)$" % field_name, info_output, re.MULTILINE)
+ if not match:
+ raise ScriptError(script_args=svn_info_args, message='svn info did not contain a %s.' % field_name)
+ return match.group('value').rstrip('\r')
+
+ def find_checkout_root(self, path):
+ uuid = self._find_uuid(path)
+ # If |path| is not in a working directory, we're supposed to return |path|.
+ if not uuid:
+ return path
+ # Search up the directory hierarchy until we find a different UUID.
+ last_path = None
+ while True:
+ if uuid != self._find_uuid(path):
+ return last_path
+ last_path = path
+ (path, last_component) = self._filesystem.split(path)
+ if last_path == path:
+ return None
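+
+ # Illustrative walk (hypothetical paths): starting at /checkout/trunk/Tools,
+ # where /checkout/trunk and /checkout report the same UUID but / does not,
+ # the loop climbs to /checkout/trunk, then /checkout, then /, and returns
+ # /checkout, the last path that still reported the checkout's UUID.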
+
+ def _run_svn(self, args, **kwargs):
+ return self._run([self.executable_name] + args, **kwargs)
+
+ @memoized
+ def _svn_version(self):
+ return self._run_svn(['--version', '--quiet'])
+
+ def has_working_directory_changes(self):
+ # FIXME: What about files which are not committed yet?
+ return self._run_svn(["diff"], cwd=self.checkout_root, decode_output=False) != ""
+
+ def status_command(self):
+ return [self.executable_name, 'status']
+
+ def _status_regexp(self, expected_types):
+ field_count = 6 if self._svn_version() > "1.6" else 5
+ return "^(?P<status>[%s]).{%s} (?P<filename>.+)$" % (expected_types, field_count)
+
+ def _add_parent_directories(self, path, recurse):
+ """Does 'svn add' to the path and its parents."""
+ if self.in_working_directory(path):
+ return
+ self.add(path, recurse=recurse)
+
+ def add_list(self, paths, return_exit_code=False, recurse=True):
+ for path in paths:
+ self._add_parent_directories(os.path.dirname(os.path.abspath(path)),
+ recurse=False)
+ if recurse:
+ cmd = ["add"] + paths
+ else:
+ cmd = ["add", "--depth", "empty"] + paths
+ return self._run_svn(cmd, return_exit_code=return_exit_code)
+
+ def _delete_parent_directories(self, path):
+ if not self.in_working_directory(path):
+ return
+ if set(os.listdir(path)) - self._svn_metadata_files:
+ return # Directory has non-trivial files in it.
+ self.delete(path)
+
+ def delete_list(self, paths):
+ for path in paths:
+ abs_path = os.path.abspath(path)
+ parent, base = os.path.split(abs_path)
+ result = self._run_svn(["delete", "--force", base], cwd=parent)
+ self._delete_parent_directories(os.path.dirname(abs_path))
+ return result
+
+ def move(self, origin, destination):
+ return self._run_svn(["mv", "--force", origin, destination], return_exit_code=True)
+
+ def exists(self, path):
+ return not self._run_svn(["info", path], return_exit_code=True, decode_output=False)
+
+ def changed_files(self, git_commit=None):
+ status_command = [self.executable_name, "status"]
+ status_command.extend(self._patch_directories)
+ # ACDMR: Added, Conflicted, Deleted, Modified or Replaced
+ return self._run_status_and_extract_filenames(status_command, self._status_regexp("ACDMR"))
+
+ def _added_files(self):
+ return self._run_status_and_extract_filenames(self.status_command(), self._status_regexp("A"))
+
+ def _deleted_files(self):
+ return self._run_status_and_extract_filenames(self.status_command(), self._status_regexp("D"))
+
+ @staticmethod
+ def supports_local_commits():
+ return False
+
+ def display_name(self):
+ return "svn"
+
+ def svn_revision(self, path):
+ return self.value_from_svn_info(path, 'Revision')
+
+ def timestamp_of_revision(self, path, revision):
+ # We use --xml to get timestamps like 2013-02-08T08:18:04.964409Z
+ repository_root = self.value_from_svn_info(self.checkout_root, 'Repository Root')
+ info_output = Executive().run_command([self.executable_name, 'log', '-r', revision, '--xml', repository_root], cwd=path).rstrip()
+ match = re.search(r"^<date>(?P<value>.+)</date>\r?$", info_output, re.MULTILINE)
+ return match.group('value')
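+
+ # For illustration (hypothetical output): `svn log -r <revision> --xml <root>`
+ # prints, among other elements, a line like
+ #   <date>2013-02-08T08:18:04.964409Z</date>
+ # and the regexp above extracts the value between the tags.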
+
+ def create_patch(self, git_commit=None, changed_files=None):
+ """Returns a byte array (str()) representing the patch file.
+ Patch files are effectively binary since they may contain
+ files of multiple different encodings."""
+ if changed_files == []:
+ return ""
+ elif changed_files == None:
+ changed_files = []
+ return self._run([self._filesystem.join(self.checkout_root, 'Tools', 'Scripts', 'svn-create-patch')] + changed_files,
+ cwd=self.checkout_root, return_stderr=False,
+ decode_output=False)
+
+ def blame(self, path):
+ return self._run_svn(['blame', path])
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/checksvnconfigfile.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/checksvnconfigfile.py
new file mode 100644
index 0000000..c261a49
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/checksvnconfigfile.py
@@ -0,0 +1,64 @@
+# Copyright (C) 2012 Balazs Ankes (bank@inf.u-szeged.hu) University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This file is used by:
+# webkitpy/style/checkers/png.py
+
+import os
+import re
+
+
+def check(host, fs):
+ """
+ check the svn config file
+ return with three logical value:
+ is svn config file missing, is auto-props missing, is the svn:mime-type for png missing
+ """
+
+ cfg_file_path = config_file_path(host, fs)
+
+ try:
+ config_file = fs.read_text_file(cfg_file_path)
+ except IOError:
+ return (True, True, True)
+
+ errorcode_autoprop = not re.search(r"^\s*enable-auto-props\s*=\s*yes", config_file, re.MULTILINE)
+ errorcode_png = not re.search(r"^\s*\*\.png\s*=\s*svn:mime-type=image/png", config_file, re.MULTILINE)
+
+ return (False, errorcode_autoprop, errorcode_png)
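+
+
+# A minimal usage sketch (hypothetical host and fs objects; the real caller
+# is webkitpy/style/checkers/png.py):
+#
+#   missing_config, missing_autoprop, missing_png_mime = check(host, fs)
+#   if missing_autoprop:
+#       print errorstr_autoprop(config_file_path(host, fs))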
+
+
+def config_file_path(host, fs):
+ if host.platform.is_win():
+ config_file_path = fs.join(os.environ['APPDATA'], "Subversion", "config")
+ else:
+ config_file_path = fs.join(fs.expanduser("~"), ".subversion", "config")
+ return config_file_path
+
+
+def errorstr_autoprop(config_file_path):
+ return 'Have to enable auto-props in the Subversion config file (%s "enable-auto-props = yes"). ' % config_file_path
+
+
+def errorstr_png(config_file_path):
+ return 'Have to set the svn:mime-type in the Subversion config file (%s "*.png = svn:mime-type=image/png").' % config_file_path
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/config/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/config/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/config/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/config/irc.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/config/irc.py
new file mode 100755
index 0000000..6dd299d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/config/irc.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+server = "irc.freenode.net"
+port = 6667
+channel = "#blink"
+nickname = "commit-bot"
+
+update_wait_seconds = 10
+retry_attempts = 8
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/config/orderfile b/src/third_party/blink/Tools/Scripts/webkitpy/common/config/orderfile
new file mode 100644
index 0000000..9fb4977
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/config/orderfile
@@ -0,0 +1,8 @@
+Source*ChangeLog
+Source*
+Tools*ChangeLog
+Tools*
+Websites*ChangeLog
+Websites*
+LayoutTests*ChangeLog
+LayoutTests*
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/config/ports_mock.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/config/ports_mock.py
new file mode 100644
index 0000000..26d3372
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/config/ports_mock.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class MockPort(object):
+ def name(self):
+ return "MockPort"
+
+ def check_webkit_style_command(self):
+ return ["mock-check-webkit-style"]
+
+ def run_python_unittests_command(self):
+ return ['mock-test-webkitpy']
+
+ def run_perl_unittests_command(self):
+ return ['mock-test-webkitperl']
+
+ def run_webkit_unit_tests_command(self):
+ return ['mock-run-webkit-unit-tests']
+
+ def run_webkit_tests_command(self):
+ return ['mock-run-webkit-tests']
+
+ def run_bindings_tests_command(self):
+ return ['mock-run-bindings-tests']
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/config/urls.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/config/urls.py
new file mode 100644
index 0000000..0d50438
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/config/urls.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2010, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+
+
+def view_source_url(local_path):
+ return "https://src.chromium.org/viewvc/blink/trunk/%s" % local_path
+
+
+def chromium_results_url_base():
+ return 'https://storage.googleapis.com/chromium-layout-test-archives'
+
+
+def chromium_results_url_base_for_builder(builder_name):
+ return '%s/%s' % (chromium_results_url_base(), re.sub('[ .()]', '_', builder_name))
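+
+
+# For example (hypothetical builder name): "WebKit Linux (dbg)" is sanitized
+# to "WebKit_Linux__dbg_", giving
+# https://storage.googleapis.com/chromium-layout-test-archives/WebKit_Linux__dbg_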
+
+
+def chromium_results_zip_url(builder_name):
+ return chromium_results_url_base_for_builder(builder_name) + '/results/layout-test-results.zip'
+
+
+def chromium_accumulated_results_url_base_for_builder(builder_name):
+ return chromium_results_url_base_for_builder(builder_name) + "/results/layout-test-results"
+
+
+chromium_lkgr_url = "http://chromium-status.appspot.com/lkgr"
+contribution_guidelines = "http://webkit.org/coding/contributing.html"
+
+chromium_buildbot_url = "http://build.chromium.org/p/chromium.webkit"
+
+chromium_webkit_sheriff_url = "http://build.chromium.org/p/chromium.webkit/sheriff_webkit.js"
+
+omahaproxy_url = "http://omahaproxy.appspot.com/"
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/find_files.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/find_files.py
new file mode 100644
index 0000000..7a10120
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/find_files.py
@@ -0,0 +1,84 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""This module is used to find files used by run-webkit-tests and
+perftestrunner. It exposes one public function - find() - which takes
+an optional list of paths, optional set of skipped directories and optional
+filter callback.
+
+If a list is passed in, the returned list of files is constrained to those
+found under the paths passed in. i.e. calling find(["LayoutTests/fast"])
+will only return files under that directory.
+
+If a set of skipped directories is passed in, the function will filter out
+the files lying in these directories, i.e. find(["LayoutTests"], set(["fast"]))
+will return everything except files in the fast subfolder.
+
+If a callback is passed in, it will be called for each file, and the file
+will be included in the result if the callback returns True.
+The callback has to take three arguments: filesystem, dirname and filename."""
+
+import itertools
+
+
+def find(filesystem, base_dir, paths=None, skipped_directories=None, file_filter=None, directory_sort_key=None):
+ """Finds the set of tests under a given list of sub-paths.
+
+ Args:
+ paths: a list of path expressions relative to base_dir
+ to search. Glob patterns are ok, as are path expressions with
+ forward slashes on Windows. If paths is empty, we look at
+ everything under the base_dir.
+ """
+
+ paths = paths or ['*']
+ skipped_directories = skipped_directories or set(['.svn', '_svn'])
+ return _normalized_find(filesystem, _normalize(filesystem, base_dir, paths), skipped_directories, file_filter, directory_sort_key)
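+
+
+# A minimal usage sketch (hypothetical paths; FileSystem is the real
+# implementation from webkitpy.common.system.filesystem):
+#
+#   fs = FileSystem()
+#   tests = find(fs, '/WebKit/LayoutTests', paths=['fast/canvas'],
+#                file_filter=lambda fs, dirname, name: name.endswith('.html'))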
+
+
+def _normalize(filesystem, base_dir, paths):
+ return [filesystem.normpath(filesystem.join(base_dir, path)) for path in paths]
+
+
+def _normalized_find(filesystem, paths, skipped_directories, file_filter, directory_sort_key):
+ """Finds the set of tests under the list of paths.
+
+ Args:
+ paths: a list of absolute path expressions to search.
+ Glob patterns are ok.
+ """
+
+ paths_to_walk = itertools.chain(*(filesystem.glob(path) for path in paths))
+
+ def sort_by_directory_key(files_list):
+ if directory_sort_key:
+ files_list.sort(key=directory_sort_key)
+ return files_list
+
+ all_files = itertools.chain(*(sort_by_directory_key(filesystem.files_under(path, skipped_directories, file_filter)) for path in paths_to_walk))
+ return all_files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/find_files_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/find_files_unittest.py
new file mode 100644
index 0000000..f0fe01c
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/find_files_unittest.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import unittest
+
+from webkitpy.common.system.filesystem import FileSystem
+import find_files
+
+
+class MockWinFileSystem(object):
+ def join(self, *paths):
+ return '\\'.join(paths)
+
+ def normpath(self, path):
+ return path.replace('/', '\\')
+
+
+class TestWinNormalize(unittest.TestCase):
+ def assert_filesystem_normalizes(self, filesystem):
+ self.assertEqual(find_files._normalize(filesystem, "c:\\foo",
+ ['fast/html', 'fast/canvas/*', 'compositing/foo.html']),
+ ['c:\\foo\\fast\\html', 'c:\\foo\\fast\\canvas\\*', 'c:\\foo\\compositing\\foo.html'])
+
+ def test_mocked_win(self):
+ # This tests find_files._normalize, using portable behavior emulating
+ # what we think Windows is supposed to do. This test will run on all
+ # platforms.
+ self.assert_filesystem_normalizes(MockWinFileSystem())
+
+ def test_win(self):
+ # This tests the actual Windows platform, to ensure we get the same
+ # results as we get in test_mocked_win().
+ if sys.platform != 'win32':
+ return
+ self.assert_filesystem_normalizes(FileSystem())
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/host.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/host.py
new file mode 100644
index 0000000..bcce9d5
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/host.py
@@ -0,0 +1,136 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import os
+import sys
+
+from webkitpy.common.checkout.scm.detection import SCMDetector
+from webkitpy.common.memoized import memoized
+from webkitpy.common.net import buildbot, web
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.layout_tests.port.factory import PortFactory
+
+
+_log = logging.getLogger(__name__)
+
+
+class Host(SystemHost):
+ def __init__(self):
+ SystemHost.__init__(self)
+ self.web = web.Web()
+
+ self._scm = None
+
+ # Everything below this line is WebKit-specific and belongs on a higher-level object.
+ self.buildbot = buildbot.BuildBot()
+
+ # FIXME: Unfortunately Port objects are currently the central-dispatch objects of the NRWT world.
+ # In order to instantiate a port correctly, we have to pass it at least an executive, user, scm, and filesystem
+ # so for now we just pass along the whole Host object.
+ # FIXME: PortFactory doesn't belong on this Host object if Port is going to have a Host (circular dependency).
+ self.port_factory = PortFactory(self)
+
+ self._engage_awesome_locale_hacks()
+
+ # We call this from the Host constructor, as it's one of the
+ # earliest calls made for all webkitpy-based programs.
+ def _engage_awesome_locale_hacks(self):
+ # To make life easier on our non-English users, we override
+ # the locale environment variables inside webkitpy.
+ # If we don't do this, programs like SVN will output localized
+ # messages and svn.py will fail to parse them.
+ # FIXME: We should do these overrides *only* for the subprocesses we know need them!
+ # This hack only works in unix environments.
+ os.environ['LANGUAGE'] = 'en'
+ os.environ['LANG'] = 'en_US.UTF-8'
+ os.environ['LC_MESSAGES'] = 'en_US.UTF-8'
+ os.environ['LC_ALL'] = ''
+
+ # FIXME: This is a horrible, horrible hack for WinPort and should be removed.
+ # Maybe this belongs in SVN in some more generic "find the svn binary" codepath?
+ # Or possibly Executive should have a way to emulate shell path-lookups?
+ # FIXME: Unclear how to test this, since it currently mutates global state on SVN.
+ def _engage_awesome_windows_hacks(self):
+ try:
+ self.executive.run_command(['svn', 'help'])
+ except OSError, e:
+ try:
+ self.executive.run_command(['svn.bat', 'help'])
+ # The Win port uses the depot_tools package, which contains a number
+ # of development tools, including Python and svn. Instead of using a
+ # real svn executable, depot_tools indirects via a batch file, called
+ # svn.bat. This batch file allows depot_tools to auto-update the real
+ # svn executable, which is contained in a subdirectory.
+ #
+ # That's all fine and good, except that subprocess.Popen can detect
+ # the difference between a real svn executable and a batch file when we
+ # don't use shell=True. Rather than use shell=True on Windows,
+ # we hack the svn.bat name into the SVN class.
+ _log.debug('Engaging svn.bat Windows hack.')
+ from webkitpy.common.checkout.scm.svn import SVN
+ SVN.executable_name = 'svn.bat'
+ except OSError, e:
+ _log.debug('Failed to engage svn.bat Windows hack.')
+ try:
+ self.executive.run_command(['git', 'help'])
+ except OSError, e:
+ try:
+ self.executive.run_command(['git.bat', 'help'])
+ # The Win port uses the depot_tools package, which contains a number
+ # of development tools, including Python and git. Instead of using a
+ # real git executable, depot_tools indirects via a batch file, called
+ # git.bat. This batch file allows depot_tools to auto-update the real
+ # git executable, which is contained in a subdirectory.
+ #
+ # That's all fine and good, except that subprocess.Popen can detect
+ # the difference between a real git executable and a batch file when we
+ # don't use shell=True. Rather than use shell=True on Windows,
+ # we hack the git.bat name into the Git class.
+ _log.debug('Engaging git.bat Windows hack.')
+ from webkitpy.common.checkout.scm.git import Git
+ Git.executable_name = 'git.bat'
+ except OSError, e:
+ _log.debug('Failed to engage git.bat Windows hack.')
+
+ def initialize_scm(self, patch_directories=None):
+ if sys.platform == "win32":
+ self._engage_awesome_windows_hacks()
+ detector = SCMDetector(self.filesystem, self.executive)
+ self._scm = detector.default_scm(patch_directories)
+
+ def scm(self):
+ return self._scm
+
+ def scm_for_path(self, path):
+ # FIXME: make scm() be a wrapper around this, and clean up the way
+ # callers call initialize_scm() (to remove patch_directories) and scm().
+ if sys.platform == "win32":
+ self._engage_awesome_windows_hacks()
+ return SCMDetector(self.filesystem, self.executive).detect_scm_system(path)
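+
+# A minimal usage sketch (illustrative; 'chromium-linux' is a made-up port
+# name, and PortFactory.get() is assumed here):
+#
+#   host = Host()
+#   host.initialize_scm()
+#   print host.scm().checkout_root
+#   port = host.port_factory.get('chromium-linux')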
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/host_mock.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/host_mock.py
new file mode 100644
index 0000000..b51eea6
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/host_mock.py
@@ -0,0 +1,72 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.checkout.scm.scm_mock import MockSCM
+from webkitpy.common.net.buildbot.buildbot_mock import MockBuildBot
+from webkitpy.common.net.web_mock import MockWeb
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+# New-style ports need to move down into webkitpy.common.
+from webkitpy.layout_tests.port.factory import PortFactory
+from webkitpy.layout_tests.port.test import add_unit_tests_to_mock_filesystem
+
+
+class MockHost(MockSystemHost):
+ def __init__(self, log_executive=False, executive_throws_when_run=None, initialize_scm_by_default=True, web=None, scm=None):
+ MockSystemHost.__init__(self, log_executive, executive_throws_when_run)
+ add_unit_tests_to_mock_filesystem(self.filesystem)
+ self.web = web or MockWeb()
+
+ self._scm = scm
+ # FIXME: we should never initialize the SCM by default, since the real
+ # object doesn't either. This has caused at least one bug (see bug 89498).
+ if initialize_scm_by_default:
+ self.initialize_scm()
+ self.buildbot = MockBuildBot()
+
+ # Note: We're using a real PortFactory here. Tests which don't wish to depend
+ # on the list of known ports should override this with a MockPortFactory.
+ self.port_factory = PortFactory(self)
+
+ def initialize_scm(self, patch_directories=None):
+ if not self._scm:
+ self._scm = MockSCM(filesystem=self.filesystem, executive=self.executive)
+ # Various pieces of code (wrongly) call filesystem.chdir(checkout_root).
+ # Making the checkout_root exist in the mock filesystem makes that chdir not raise.
+ self.filesystem.maybe_make_directory(self._scm.checkout_root)
+
+ def scm(self):
+ return self._scm
+
+ def scm_for_path(self, path):
+ # FIXME: consider supporting more than one SCM so that we can do more comprehensive testing.
+ self.initialize_scm()
+ return self._scm
+
+ def checkout(self):
+ # FIXME: self._checkout is never initialized in __init__, so this raises
+ # AttributeError unless a test assigns host._checkout first.
+ return self._checkout
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/memoized.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/memoized.py
new file mode 100644
index 0000000..dc844a5
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/memoized.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Python does not (yet) seem to provide automatic memoization. So we've
+# written a small decorator to do so.
+
+import functools
+
+
+class memoized(object):
+ def __init__(self, function):
+ self._function = function
+ self._results_cache = {}
+
+ def __call__(self, *args):
+ try:
+ return self._results_cache[args]
+ except KeyError:
+ # If we didn't find the args in our cache, call and save the results.
+ result = self._function(*args)
+ self._results_cache[args] = result
+ return result
+ # FIXME: We may need to handle TypeError here in the case
+ # that "args" is not a valid dictionary key.
+
+ # Use python "descriptor" protocol __get__ to appear
+ # invisible during property access.
+ def __get__(self, instance, owner):
+ # Return a function partial with obj already bound as self.
+ return functools.partial(self.__call__, instance)
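+
+# A minimal usage sketch (illustrative; Fib is a made-up class):
+#
+#   class Fib(object):
+#       @memoized
+#       def fib(self, n):
+#           if n < 2:
+#               return n
+#           return self.fib(n - 1) + self.fib(n - 2)
+#
+# Because __get__ binds the instance through functools.partial, fib() works
+# both as a normal method call and as a tear-off (f = Fib().fib; f(10));
+# repeated calls with the same arguments are served from the cache.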
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/memoized_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/memoized_unittest.py
new file mode 100644
index 0000000..dd7c793
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/memoized_unittest.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.memoized import memoized
+
+
+class _TestObject(object):
+ def __init__(self):
+ self.callCount = 0
+
+ @memoized
+ def memoized_add(self, argument):
+ """testing docstring"""
+ self.callCount += 1
+ if argument is None:
+ return None # Avoid the TypeError from None + 1
+ return argument + 1
+
+
+class MemoizedTest(unittest.TestCase):
+ def test_caching(self):
+ test = _TestObject()
+ test.callCount = 0
+ self.assertEqual(test.memoized_add(1), 2)
+ self.assertEqual(test.callCount, 1)
+ self.assertEqual(test.memoized_add(1), 2)
+ self.assertEqual(test.callCount, 1)
+
+ # Validate that callCount is working as expected.
+ self.assertEqual(test.memoized_add(2), 3)
+ self.assertEqual(test.callCount, 2)
+
+ def test_tearoff(self):
+ test = _TestObject()
+ # Make sure that get()/tear-offs work:
+ tearoff = test.memoized_add
+ self.assertEqual(tearoff(4), 5)
+ self.assertEqual(test.callCount, 1)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/message_pool.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/message_pool.py
new file mode 100644
index 0000000..2e8eb7d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/message_pool.py
@@ -0,0 +1,326 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Module for handling messages and concurrency for run-webkit-tests
+and test-webkitpy. This module follows the design for multiprocessing.Pool
+and concurrency.futures.ProcessPoolExecutor, with the following differences:
+
+* Tasks are executed in stateful subprocesses via objects that implement the
+ Worker interface - this allows the workers to share state across tasks.
+* The pool provides an asynchronous event-handling interface so the caller
+ may receive events as tasks are processed.
+
+If you don't need these features, use multiprocessing.Pool or concurrency.futures
+intead.
+
+"""
+
+import cPickle
+import logging
+import multiprocessing
+import Queue
+import sys
+import time
+import traceback
+
+
+from webkitpy.common.host import Host
+from webkitpy.common.system import stack_utils
+
+
+_log = logging.getLogger(__name__)
+
+
+def get(caller, worker_factory, num_workers, host=None):
+ """Returns an object that exposes a run() method that takes a list of test shards and runs them in parallel."""
+ return _MessagePool(caller, worker_factory, num_workers, host)
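+
+# A minimal sketch of the Worker/caller contract (illustrative; EchoWorker,
+# Caller, and the message names are made-up):
+#
+#   class EchoWorker(object):
+#       def __init__(self, connection):
+#           self._connection = connection
+#
+#       def handle(self, name, source, *args):
+#           # Runs on the worker side; post() routes a message back to the caller.
+#           self._connection.post('echoed', name, args)
+#
+#   class Caller(object):
+#       def handle(self, name, source, *args):
+#           print 'got %s from %s: %r' % (name, source, args)
+#
+#   # num_workers=1 runs the worker inline, with no subprocess.
+#   with get(Caller(), lambda connection: EchoWorker(connection), 1) as pool:
+#       pool.run([('echo', 'hello'), ('echo', 'world')])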
+
+
+class _MessagePool(object):
+ def __init__(self, caller, worker_factory, num_workers, host=None):
+ self._caller = caller
+ self._worker_factory = worker_factory
+ self._num_workers = num_workers
+ self._workers = []
+ self._workers_stopped = set()
+ self._host = host
+ self._name = 'manager'
+ self._running_inline = (self._num_workers == 1)
+ if self._running_inline:
+ self._messages_to_worker = Queue.Queue()
+ self._messages_to_manager = Queue.Queue()
+ else:
+ self._messages_to_worker = multiprocessing.Queue()
+ self._messages_to_manager = multiprocessing.Queue()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, exc_traceback):
+ self._close()
+ return False
+
+ def run(self, shards):
+ """Posts a list of messages to the pool and waits for them to complete."""
+ for message in shards:
+ self._messages_to_worker.put(_Message(self._name, message[0], message[1:], from_user=True, logs=()))
+
+ for _ in xrange(self._num_workers):
+ self._messages_to_worker.put(_Message(self._name, 'stop', message_args=(), from_user=False, logs=()))
+
+ self.wait()
+
+ def _start_workers(self):
+ assert not self._workers
+ self._workers_stopped = set()
+ host = None
+ if self._running_inline or self._can_pickle(self._host):
+ host = self._host
+
+ for worker_number in xrange(self._num_workers):
+ worker = _Worker(host, self._messages_to_manager, self._messages_to_worker, self._worker_factory, worker_number, self._running_inline, self if self._running_inline else None, self._worker_log_level())
+ self._workers.append(worker)
+ worker.start()
+
+ def _worker_log_level(self):
+ log_level = logging.NOTSET
+ for handler in logging.root.handlers:
+ if handler.level != logging.NOTSET:
+ if log_level == logging.NOTSET:
+ log_level = handler.level
+ else:
+ log_level = min(log_level, handler.level)
+ return log_level
+
+ def wait(self):
+ try:
+ self._start_workers()
+ if self._running_inline:
+ self._workers[0].run()
+ self._loop(block=False)
+ else:
+ self._loop(block=True)
+ finally:
+ self._close()
+
+ def _close(self):
+ for worker in self._workers:
+ if worker.is_alive():
+ worker.terminate()
+ worker.join()
+ self._workers = []
+ if not self._running_inline:
+ # FIXME: This is a hack to get multiprocessing to not log tracebacks during shutdown :(.
+ multiprocessing.util._exiting = True
+ if self._messages_to_worker:
+ self._messages_to_worker.close()
+ self._messages_to_worker = None
+ if self._messages_to_manager:
+ self._messages_to_manager.close()
+ self._messages_to_manager = None
+
+ def _log_messages(self, messages):
+ for message in messages:
+ logging.root.handle(message)
+
+ def _handle_done(self, source):
+ self._workers_stopped.add(source)
+
+ @staticmethod
+ def _handle_worker_exception(source, exception_type, exception_value, _):
+ if exception_type == KeyboardInterrupt:
+ raise exception_type(exception_value)
+ raise WorkerException(str(exception_value))
+
+ def _can_pickle(self, host):
+ try:
+ cPickle.dumps(host)
+ return True
+ except TypeError:
+ return False
+
+ def _loop(self, block):
+ try:
+ while True:
+ if len(self._workers_stopped) == len(self._workers):
+ block = False
+ message = self._messages_to_manager.get(block)
+ self._log_messages(message.logs)
+ if message.from_user:
+ self._caller.handle(message.name, message.src, *message.args)
+ continue
+ method = getattr(self, '_handle_' + message.name)
+ assert method, 'bad message %s' % repr(message)
+ method(message.src, *message.args)
+ except Queue.Empty:
+ pass
+
+
+class WorkerException(BaseException):
+ """Raised when we receive an unexpected/unknown exception from a worker."""
+ pass
+
+
+class _Message(object):
+ def __init__(self, src, message_name, message_args, from_user, logs):
+ self.src = src
+ self.name = message_name
+ self.args = message_args
+ self.from_user = from_user
+ self.logs = logs
+
+ def __repr__(self):
+ return '_Message(src=%s, name=%s, args=%s, from_user=%s, logs=%s)' % (self.src, self.name, self.args, self.from_user, self.logs)
+
+
+class _Worker(multiprocessing.Process):
+ def __init__(self, host, messages_to_manager, messages_to_worker, worker_factory, worker_number, running_inline, manager, log_level):
+ super(_Worker, self).__init__()
+ self.host = host
+ self.worker_number = worker_number
+ self.name = 'worker/%d' % worker_number
+ self.log_messages = []
+ self.log_level = log_level
+ self._running = False
+ self._running_inline = running_inline
+ self._manager = manager
+
+ self._messages_to_manager = messages_to_manager
+ self._messages_to_worker = messages_to_worker
+ self._worker = worker_factory(self)
+ self._logger = None
+ self._log_handler = None
+
+ def terminate(self):
+ if self._worker:
+ if hasattr(self._worker, 'stop'):
+ self._worker.stop()
+ self._worker = None
+ if self.is_alive():
+ super(_Worker, self).terminate()
+
+ def _close(self):
+ if self._log_handler and self._logger:
+ self._logger.removeHandler(self._log_handler)
+ self._log_handler = None
+ self._logger = None
+
+ def start(self):
+ if not self._running_inline:
+ super(_Worker, self).start()
+
+ def run(self):
+ if not self.host:
+ self.host = Host()
+ if not self._running_inline:
+ self._set_up_logging()
+
+ worker = self._worker
+ exception_msg = ""
+ _log.debug("%s starting" % self.name)
+ self._running = True
+
+ try:
+ if hasattr(worker, 'start'):
+ worker.start()
+ while self._running:
+ message = self._messages_to_worker.get()
+ if message.from_user:
+ worker.handle(message.name, message.src, *message.args)
+ self._yield_to_manager()
+ else:
+ assert message.name == 'stop', 'bad message %s' % repr(message)
+ break
+
+ _log.debug("%s exiting" % self.name)
+ except Queue.Empty:
+ assert False, '%s: ran out of messages in worker queue.' % self.name
+ except KeyboardInterrupt, e:
+ self._raise(sys.exc_info())
+ except Exception, e:
+ self._raise(sys.exc_info())
+ finally:
+ try:
+ if hasattr(worker, 'stop'):
+ worker.stop()
+ finally:
+ self._post(name='done', args=(), from_user=False)
+ self._close()
+
+ def stop_running(self):
+ self._running = False
+
+ def post(self, name, *args):
+ self._post(name, args, from_user=True)
+ self._yield_to_manager()
+
+ def _yield_to_manager(self):
+ if self._running_inline:
+ self._manager._loop(block=False)
+
+ def _post(self, name, args, from_user):
+ log_messages = self.log_messages
+ self.log_messages = []
+ self._messages_to_manager.put(_Message(self.name, name, args, from_user, log_messages))
+
+ def _raise(self, exc_info):
+ exception_type, exception_value, exception_traceback = exc_info
+ if self._running_inline:
+ raise exception_type, exception_value, exception_traceback
+
+ if exception_type == KeyboardInterrupt:
+ _log.debug("%s: interrupted, exiting" % self.name)
+ stack_utils.log_traceback(_log.debug, exception_traceback)
+ else:
+ _log.error("%s: %s('%s') raised:" % (self.name, exception_value.__class__.__name__, str(exception_value)))
+ stack_utils.log_traceback(_log.error, exception_traceback)
+ # Since tracebacks aren't picklable, send the extracted stack instead.
+ stack = traceback.extract_tb(exception_traceback)
+ self._post(name='worker_exception', args=(exception_type, exception_value, stack), from_user=False)
+
+ def _set_up_logging(self):
+ self._logger = logging.getLogger()
+
+ # The unix multiprocessing implementation clones any log handlers into the child process,
+ # so we remove them to avoid duplicate logging.
+ for h in self._logger.handlers:
+ self._logger.removeHandler(h)
+
+ self._log_handler = _WorkerLogHandler(self)
+ self._logger.addHandler(self._log_handler)
+ self._logger.setLevel(self.log_level)
+
+
+class _WorkerLogHandler(logging.Handler):
+ def __init__(self, worker):
+ logging.Handler.__init__(self)
+ self._worker = worker
+ self.setLevel(worker.log_level)
+
+ def emit(self, record):
+ self._worker.log_messages.append(record)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py
new file mode 100644
index 0000000..9f7af98
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""In order for the multiprocessing module to spawn children correctly on
+Windows, we need to be running a Python module that can be imported
+(which means a file in sys.path that ends in .py). In addition, we need to
+ensure that sys.path / PYTHONPATH is set and propagating correctly.
+
+This module enforces that."""
+
+import os
+import subprocess
+import sys
+
+from webkitpy.common import version_check # 'unused import' pylint: disable=W0611
+
+
+def run(*parts):
+ up = os.path.dirname
+ script_dir = up(up(up(os.path.abspath(__file__))))
+ env = os.environ
+ if 'PYTHONPATH' in env:
+ if script_dir not in env['PYTHONPATH']:
+ env['PYTHONPATH'] = env['PYTHONPATH'] + os.pathsep + script_dir
+ else:
+ env['PYTHONPATH'] = script_dir
+ module_path = os.path.join(script_dir, *parts)
+ cmd = [sys.executable, module_path] + sys.argv[1:]
+
+ proc = subprocess.Popen(cmd, env=env)
+ try:
+ proc.wait()
+ except KeyboardInterrupt:
+ # We need a second wait in order to make sure the subprocess exits fully.
+ # FIXME: It would be nice if we could put a timeout on this.
+ proc.wait()
+ sys.exit(proc.returncode)
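+
+# A minimal usage sketch (illustrative; the module path is a made-up value).
+# A thin wrapper script would delegate to the real module like this:
+#
+#   from webkitpy.common import multiprocessing_bootstrap
+#
+#   multiprocessing_bootstrap.run('webkitpy', 'layout_tests', 'run_webkit_tests.py')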
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/net/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/net/buildbot/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/buildbot/__init__.py
new file mode 100644
index 0000000..631ef6b
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/buildbot/__init__.py
@@ -0,0 +1,5 @@
+# Required for Python to search this directory for module files
+
+# We only export public API here.
+# It's unclear if Builder and Build need to be public.
+from .buildbot import BuildBot, Builder, Build
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py
new file mode 100644
index 0000000..b5dfb45
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py
@@ -0,0 +1,400 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import operator
+import re
+import urllib
+import urllib2
+
+import webkitpy.common.config.urls as config_urls
+from webkitpy.common.memoized import memoized
+from webkitpy.common.net.layouttestresults import LayoutTestResults
+from webkitpy.common.net.networktransaction import NetworkTransaction
+from webkitpy.common.system.logutils import get_logger
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
+
+
+_log = get_logger(__file__)
+
+
+class Builder(object):
+ def __init__(self, name, buildbot):
+ self._name = name
+ self._buildbot = buildbot
+ self._builds_cache = {}
+ self._revision_to_build_number = None
+
+ def name(self):
+ return self._name
+
+ def results_url(self):
+ return config_urls.chromium_results_url_base_for_builder(self._name)
+
+ def accumulated_results_url(self):
+ return config_urls.chromium_accumulated_results_url_base_for_builder(self._name)
+
+ def latest_layout_test_results_url(self):
+ return self.accumulated_results_url() or self.latest_cached_build().results_url()
+
+ @memoized
+ def latest_layout_test_results(self):
+ return self.fetch_layout_test_results(self.latest_layout_test_results_url())
+
+ def _fetch_file_from_results(self, results_url, file_name):
+ # It seems this can return None if the url redirects and then returns 404.
+ result = urllib2.urlopen("%s/%s" % (results_url, file_name))
+ if not result:
+ return None
+ # urlopen returns a file-like object which sometimes works fine with str()
+ # but sometimes is an addinfourl object. In either case calling read() is correct.
+ return result.read()
+
+ def fetch_layout_test_results(self, results_url):
+ # FIXME: This should cache that the result was a 404 and stop hitting the network.
+ results_file = NetworkTransaction(convert_404_to_None=True).run(lambda: self._fetch_file_from_results(results_url, "failing_results.json"))
+ return LayoutTestResults.results_from_string(results_file)
+
+ def url_encoded_name(self):
+ return urllib.quote(self._name)
+
+ def url(self):
+ return "%s/builders/%s" % (self._buildbot.buildbot_url, self.url_encoded_name())
+
+ # This provides a single place to mock
+ def _fetch_build(self, build_number):
+ build_dictionary = self._buildbot._fetch_build_dictionary(self, build_number)
+ if not build_dictionary:
+ return None
+ revision_string = build_dictionary['sourceStamp']['revision']
+ return Build(self,
+ build_number=int(build_dictionary['number']),
+ # 'revision' may be None if a trunk build was started by the force-build button on the web page.
+ revision=(int(revision_string) if revision_string else None),
+ # Buildbot uses any number other than 0 to mean fail. Since we fetch with
+ # filter=1, passing builds may contain no 'results' value.
+ is_green=(not build_dictionary.get('results')),
+ )
+
+ def build(self, build_number):
+ if not build_number:
+ return None
+ cached_build = self._builds_cache.get(build_number)
+ if cached_build:
+ return cached_build
+
+ build = self._fetch_build(build_number)
+ self._builds_cache[build_number] = build
+ return build
+
+ def latest_cached_build(self):
+ revision_build_pairs = self.revision_build_pairs_with_results()
+ revision_build_pairs.sort(key=lambda i: i[1])
+ latest_build_number = revision_build_pairs[-1][1]
+ return self.build(latest_build_number)
+
+ file_name_regexp = re.compile(r"r(?P<revision>\d+) \((?P<build_number>\d+)\)")
+ def _revision_and_build_for_filename(self, filename):
+ # Example: "r47483 (1)/" or "r47483 (1).zip"
+ match = self.file_name_regexp.match(filename)
+ if not match:
+ return None
+ return (int(match.group("revision")), int(match.group("build_number")))
+
+ def _fetch_revision_to_build_map(self):
+ # All _fetch requests go through _buildbot for easier mocking
+ # FIXME: This should use NetworkTransaction's 404 handling instead.
+ try:
+ # FIXME: This method is horribly slow due to the huge network load.
+ # FIXME: This is a poor way to do revision -> build mapping.
+ # Better would be to ask buildbot through some sort of API.
+ print "Loading revision/build list from %s." % self.results_url()
+ print "This may take a while..."
+ result_files = self._buildbot._fetch_twisted_directory_listing(self.results_url())
+ except urllib2.HTTPError, error:
+ if error.code != 404:
+ raise
+ _log.debug("Revision/build list failed to load.")
+ result_files = []
+ return dict(self._file_info_list_to_revision_to_build_list(result_files))
+
+ def _file_info_list_to_revision_to_build_list(self, file_info_list):
+ # This assumes there was only one build per revision, which is false but we don't care for now.
+ revisions_and_builds = []
+ for file_info in file_info_list:
+ revision_and_build = self._revision_and_build_for_filename(file_info["filename"])
+ if revision_and_build:
+ revisions_and_builds.append(revision_and_build)
+ return revisions_and_builds
+
+ def _revision_to_build_map(self):
+ if not self._revision_to_build_number:
+ self._revision_to_build_number = self._fetch_revision_to_build_map()
+ return self._revision_to_build_number
+
+ def revision_build_pairs_with_results(self):
+ return self._revision_to_build_map().items()
+
+ # This assumes there can be only one build per revision, which is false, but we don't care for now.
+ def build_for_revision(self, revision, allow_failed_lookups=False):
+ # NOTE: This lookup will fail if that exact revision was never built.
+ build_number = self._revision_to_build_map().get(int(revision))
+ if not build_number:
+ return None
+ build = self.build(build_number)
+ if not build and allow_failed_lookups:
+ # Builds for old revisions will fail to look up via buildbot's JSON API.
+ build = Build(self,
+ build_number=build_number,
+ revision=revision,
+ is_green=False,
+ )
+ return build
+
+
+class Build(object):
+ def __init__(self, builder, build_number, revision, is_green):
+ self._builder = builder
+ self._number = build_number
+ self._revision = revision
+ self._is_green = is_green
+
+ @staticmethod
+ def build_url(builder, build_number):
+ return "%s/builds/%s" % (builder.url(), build_number)
+
+ def url(self):
+ return self.build_url(self.builder(), self._number)
+
+ def results_url(self):
+ results_directory = "r%s (%s)" % (self.revision(), self._number)
+ return "%s/%s" % (self._builder.results_url(), urllib.quote(results_directory))
+
+ def results_zip_url(self):
+ return "%s.zip" % self.results_url()
+
+ def builder(self):
+ return self._builder
+
+ def revision(self):
+ return self._revision
+
+ def is_green(self):
+ return self._is_green
+
+ def previous_build(self):
+ # previous_build() allows callers to avoid assuming build numbers are sequential.
+ # They may not be sequential across all master changes, or when non-trunk builds are made.
+ return self._builder.build(self._number - 1)
+
+
+class BuildBot(object):
+ _builder_factory = Builder
+ _default_url = config_urls.chromium_buildbot_url
+
+ def __init__(self, url=None):
+ self.buildbot_url = url if url else self._default_url
+ self._builder_by_name = {}
+
+ def _parse_last_build_cell(self, builder, cell):
+ status_link = cell.find('a')
+ if status_link:
+ # Will be either a revision number or a build number
+ revision_string = status_link.string
+ # If revision_string has non-digits assume it's not a revision number.
+ builder['built_revision'] = int(revision_string) \
+ if not re.match('\D', revision_string) \
+ else None
+
+ # FIXME: We treat "slave lost" as green even though it is not, to
+ # work around the Qt bot being on a broken internet connection.
+ # The real fix is https://bugs.webkit.org/show_bug.cgi?id=37099
+ builder['is_green'] = not re.search('fail', cell.renderContents()) or \
+ bool(re.search('lost', cell.renderContents()))
+
+ status_link_regexp = r"builders/(?P<builder_name>.*)/builds/(?P<build_number>\d+)"
+ link_match = re.match(status_link_regexp, status_link['href'])
+ builder['build_number'] = int(link_match.group("build_number"))
+ else:
+ # We failed to find a link in the first cell, so just give up. This
+ # can happen if a builder was just added; the first cell will just
+ # be "no build".
+ # Other parts of the code depend on is_green being present.
+ builder['is_green'] = False
+ builder['built_revision'] = None
+ builder['build_number'] = None
+
+ def _parse_current_build_cell(self, builder, cell):
+ activity_lines = cell.renderContents().split("<br />")
+ builder["activity"] = activity_lines[0] # normally "building" or "idle"
+ # The middle lines document how long is left for any current builds.
+ match = re.match("(?P<pending_builds>\d) pending", activity_lines[-1])
+ builder["pending_builds"] = int(match.group("pending_builds")) if match else 0
+
+ def _parse_builder_status_from_row(self, status_row):
+ status_cells = status_row.findAll('td')
+ builder = {}
+
+ # First cell is the name
+ name_link = status_cells[0].find('a')
+ builder["name"] = unicode(name_link.string)
+
+ self._parse_last_build_cell(builder, status_cells[1])
+ self._parse_current_build_cell(builder, status_cells[2])
+ return builder
+
+ def _matches_regexps(self, builder_name, name_regexps):
+ for name_regexp in name_regexps:
+ if re.match(name_regexp, builder_name):
+ return True
+ return False
+
+ # FIXME: These _fetch methods should move to a networking class.
+ def _fetch_build_dictionary(self, builder, build_number):
+ # Note: filter=1 will remove None and {} and '', which cuts noise but can
+ # cause keys to be missing which you might otherwise expect.
+ # FIXME: The bot sends a *huge* amount of data for each request, we should
+ # find a way to reduce the response size further.
+ json_url = "%s/json/builders/%s/builds/%s?filter=1" % (self.buildbot_url, urllib.quote(builder.name()), build_number)
+ try:
+ return json.load(urllib2.urlopen(json_url))
+ except urllib2.URLError, err:
+ build_url = Build.build_url(builder, build_number)
+ _log.error("Error fetching data for %s build %s (%s, json: %s): %s" % (builder.name(), build_number, build_url, json_url, err))
+ return None
+ except ValueError, err:
+ build_url = Build.build_url(builder, build_number)
+ _log.error("Error decoding json data from %s: %s" % (build_url, err))
+ return None
+
+ def _fetch_one_box_per_builder(self):
+ build_status_url = "%s/one_box_per_builder" % self.buildbot_url
+ return urllib2.urlopen(build_status_url)
+
+ def _file_cell_text(self, file_cell):
+ """Traverses down through firstChild elements until one containing a string is found, then returns that string"""
+ element = file_cell
+ while element.string is None and element.contents:
+ element = element.contents[0]
+ return element.string
+
+ def _parse_twisted_file_row(self, file_row):
+ string_or_empty = lambda string: unicode(string) if string else u""
+ file_cells = file_row.findAll('td')
+ return {
+ "filename": string_or_empty(self._file_cell_text(file_cells[0])),
+ "size": string_or_empty(self._file_cell_text(file_cells[1])),
+ "type": string_or_empty(self._file_cell_text(file_cells[2])),
+ "encoding": string_or_empty(self._file_cell_text(file_cells[3])),
+ }
+
+ def _parse_twisted_directory_listing(self, page):
+ soup = BeautifulSoup(page)
+ # HACK: Match only table rows with a class to ignore twisted header/footer rows.
+ file_rows = soup.find('table').findAll('tr', {'class': re.compile(r'\b(?:directory|file)\b')})
+ return [self._parse_twisted_file_row(file_row) for file_row in file_rows]
+
+ # FIXME: There should be a better way to get this information directly from twisted.
+ def _fetch_twisted_directory_listing(self, url):
+ return self._parse_twisted_directory_listing(urllib2.urlopen(url))
+
+ def builders(self):
+ return [self.builder_with_name(status["name"]) for status in self.builder_statuses()]
+
+ # This method pulls from /one_box_per_builder as an efficient way to get the status of every builder in a single request.
+ def builder_statuses(self):
+ soup = BeautifulSoup(self._fetch_one_box_per_builder())
+ return [self._parse_builder_status_from_row(status_row) for status_row in soup.find('table').findAll('tr')]
+
+ def builder_with_name(self, name):
+ builder = self._builder_by_name.get(name)
+ if not builder:
+ builder = self._builder_factory(name, self)
+ self._builder_by_name[name] = builder
+ return builder
+
+ # This makes fewer requests than calling Builder.latest_build would. It grabs all builder
+ # statuses in one request using self.builder_statuses (fetching /one_box_per_builder instead of builder pages).
+ def _latest_builds_from_builders(self):
+ builder_statuses = self.builder_statuses()
+ return [self.builder_with_name(status["name"]).build(status["build_number"]) for status in builder_statuses]
+
+ def _build_at_or_before_revision(self, build, revision):
+ while build:
+ if build.revision() <= revision:
+ return build
+ build = build.previous_build()
+
+ def _fetch_builder_page(self, builder):
+ builder_page_url = "%s/builders/%s?numbuilds=100" % (self.buildbot_url, urllib2.quote(builder.name()))
+ return urllib2.urlopen(builder_page_url)
+
+ def _revisions_for_builder(self, builder):
+ soup = BeautifulSoup(self._fetch_builder_page(builder))
+ revisions = []
+ for status_row in soup.find('table').findAll('tr'):
+ revision_anchor = status_row.find('a')
+ table_cells = status_row.findAll('td')
+ if not table_cells or len(table_cells) < 3 or not table_cells[2].string:
+ continue
+ if revision_anchor and revision_anchor.string and re.match(r'^\d+$', revision_anchor.string):
+ revisions.append((int(revision_anchor.string), 'success' in table_cells[2].string))
+ return revisions
+
+ def _find_green_revision(self, builder_revisions):
+ revision_statuses = {}
+ for builder in builder_revisions:
+ for revision, succeeded in builder_revisions[builder]:
+ revision_statuses.setdefault(revision, set())
+ if succeeded and revision_statuses[revision] is not None:
+ revision_statuses[revision].add(builder)
+ else:
+ revision_statuses[revision] = None
+
+ # In descending order, look for a revision X with successful builds.
+ # Once we find X, check whether the remaining builders succeeded in the neighborhood of X.
+ revisions_in_order = sorted(revision_statuses.keys(), reverse=True)
+ for i, revision in enumerate(revisions_in_order):
+ if not revision_statuses[revision]:
+ continue
+
+ builders_succeeded_in_future = set()
+ for future_revision in sorted(revisions_in_order[:i + 1]):
+ if not revision_statuses[future_revision]:
+ break
+ builders_succeeded_in_future = builders_succeeded_in_future.union(revision_statuses[future_revision])
+
+ builders_succeeded_in_past = set()
+ for past_revision in revisions_in_order[i:]:
+ if not revision_statuses[past_revision]:
+ break
+ builders_succeeded_in_past = builders_succeeded_in_past.union(revision_statuses[past_revision])
+
+ if len(builders_succeeded_in_future) == len(builder_revisions) and len(builders_succeeded_in_past) == len(builder_revisions):
+ return revision
+ return None
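+
+# A minimal usage sketch (illustrative; 'WebKit Linux' is a made-up name):
+#
+#   buildbot = BuildBot()
+#   builder = buildbot.builder_with_name('WebKit Linux')
+#   results = builder.latest_layout_test_results()
+#
+# _find_green_revision takes a dict mapping each builder to its
+# (revision, succeeded) pairs. For example, given
+#   {'A': [(1, True), (2, True)], 'B': [(1, True), (2, False)]}
+# it returns 1, the newest revision around which every builder succeeded.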
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_mock.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_mock.py
new file mode 100644
index 0000000..c8f31b4
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_mock.py
@@ -0,0 +1,88 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+
+from webkitpy.common.net.layouttestresults import LayoutTestResults
+from webkitpy.common.net import layouttestresults_unittest
+
+_log = logging.getLogger(__name__)
+
+
+class MockBuild(object):
+ def __init__(self, build_number, revision, is_green):
+ self._number = build_number
+ self._revision = revision
+ self._is_green = is_green
+
+class MockBuilder(object):
+ def __init__(self, name):
+ self._name = name
+
+ def name(self):
+ return self._name
+
+ def build(self, build_number):
+ return MockBuild(build_number=build_number, revision=1234, is_green=False)
+
+ def results_url(self):
+ return "http://example.com/builders/%s/results" % self.name()
+
+ def accumulated_results_url(self):
+ return "http://example.com/f/builders/%s/results/layout-test-results" % self.name()
+
+ def latest_layout_test_results_url(self):
+ return self.accumulated_results_url()
+
+ def latest_layout_test_results(self):
+ return LayoutTestResults.results_from_string(layouttestresults_unittest.LayoutTestResultsTest.example_full_results_json)
+
+class MockBuildBot(object):
+ def __init__(self):
+ self._mock_builder1_status = {
+ "name": "Builder1",
+ "is_green": True,
+ "activity": "building",
+ }
+ self._mock_builder2_status = {
+ "name": "Builder2",
+ "is_green": True,
+ "activity": "idle",
+ }
+
+ def builder_with_name(self, name):
+ return MockBuilder(name)
+
+ def builder_statuses(self):
+ return [
+ self._mock_builder1_status,
+ self._mock_builder2_status,
+ ]
+
+ def light_tree_on_fire(self):
+ self._mock_builder2_status["is_green"] = False
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py
new file mode 100644
index 0000000..5b11640
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py
@@ -0,0 +1,422 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.net.layouttestresults import LayoutTestResults
+from webkitpy.common.net.buildbot import BuildBot, Builder, Build
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
+
+
+class BuilderTest(unittest.TestCase):
+ def _mock_test_result(self, testname):
+ return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
+
+ def _install_fetch_build(self, failure):
+ def _mock_fetch_build(build_number):
+ build = Build(
+ builder=self.builder,
+ build_number=build_number,
+ revision=build_number + 1000,
+ is_green=build_number < 4
+ )
+ return build
+ self.builder._fetch_build = _mock_fetch_build
+
+ def setUp(self):
+ self.buildbot = BuildBot()
+ self.builder = Builder(u"Test Builder \u2661", self.buildbot)
+ self._install_fetch_build(lambda build_number: ["test1", "test2"])
+
+ def test_latest_layout_test_results(self):
+ self.builder.fetch_layout_test_results = lambda results_url: LayoutTestResults(None)
+ self.builder.accumulated_results_url = lambda: "http://dummy_url.org"
+ self.assertTrue(self.builder.latest_layout_test_results())
+
+ def test_build_caching(self):
+ self.assertEqual(self.builder.build(10), self.builder.build(10))
+
+ def test_build_and_revision_for_filename(self):
+ expectations = {
+ "r47483 (1)/" : (47483, 1),
+ "r47483 (1).zip" : (47483, 1),
+ "random junk": None,
+ }
+ for filename, revision_and_build in expectations.items():
+ self.assertEqual(self.builder._revision_and_build_for_filename(filename), revision_and_build)
+
+ def test_file_info_list_to_revision_to_build_list(self):
+ file_info_list = [
+ {"filename": "r47483 (1)/"},
+ {"filename": "r47483 (1).zip"},
+ {"filename": "random junk"},
+ ]
+ builds_and_revisions_list = [(47483, 1), (47483, 1)]
+ self.assertEqual(self.builder._file_info_list_to_revision_to_build_list(file_info_list), builds_and_revisions_list)
+
+ def test_fetch_build(self):
+ buildbot = BuildBot()
+ builder = Builder(u"Test Builder \u2661", buildbot)
+
+ def mock_fetch_build_dictionary(self, build_number):
+ build_dictionary = {
+ "sourceStamp": {
+ "revision": None, # revision=None means a trunk build started from the force-build button on the builder page.
+ },
+ "number": int(build_number),
+ # Intentionally missing the 'results' key, meaning it's a "pass" build.
+ }
+ return build_dictionary
+ buildbot._fetch_build_dictionary = mock_fetch_build_dictionary
+ self.assertIsNotNone(builder._fetch_build(1))
+
+ def test_results_url(self):
+ builder = BuildBot().builder_with_name('WebKit Mac10.8 (dbg)')
+ self.assertEqual(builder.results_url(),
+ 'https://storage.googleapis.com/chromium-layout-test-archives/WebKit_Mac10_8__dbg_')
+
+ def test_accumulated_results_url(self):
+ builder = BuildBot().builder_with_name('WebKit Mac10.8 (dbg)')
+ self.assertEqual(builder.accumulated_results_url(),
+ 'https://storage.googleapis.com/chromium-layout-test-archives/WebKit_Mac10_8__dbg_/results/layout-test-results')
+
+
+class BuildBotTest(unittest.TestCase):
+
+ _example_one_box_status = '''
+ <table>
+ <tr>
+ <td class="box"><a href="builders/Windows%20Debug%20%28Tests%29">Windows Debug (Tests)</a></td>
+ <td align="center" class="LastBuild box success"><a href="builders/Windows%20Debug%20%28Tests%29/builds/3693">47380</a><br />build<br />successful</td>
+ <td align="center" class="Activity building">building<br />ETA in<br />~ 14 mins<br />at 13:40</td>
+ <tr>
+ <td class="box"><a href="builders/SnowLeopard%20Intel%20Release">SnowLeopard Intel Release</a></td>
+ <td class="LastBuild box" >no build</td>
+ <td align="center" class="Activity building">building<br />< 1 min</td>
+ <tr>
+ <td class="box"><a href="builders/Qt%20Linux%20Release">Qt Linux Release</a></td>
+ <td align="center" class="LastBuild box failure"><a href="builders/Qt%20Linux%20Release/builds/654">47383</a><br />failed<br />compile-webkit</td>
+ <td align="center" class="Activity idle">idle<br />3 pending</td>
+ <tr>
+ <td class="box"><a href="builders/Qt%20Windows%2032-bit%20Debug">Qt Windows 32-bit Debug</a></td>
+ <td align="center" class="LastBuild box failure"><a href="builders/Qt%20Windows%2032-bit%20Debug/builds/2090">60563</a><br />failed<br />failed<br />slave<br />lost</td>
+ <td align="center" class="Activity building">building<br />ETA in<br />~ 5 mins<br />at 08:25</td>
+ </table>
+'''
+ _expected_example_one_box_parsings = [
+ {
+ 'is_green': True,
+ 'build_number' : 3693,
+ 'name': u'Windows Debug (Tests)',
+ 'built_revision': 47380,
+ 'activity': 'building',
+ 'pending_builds': 0,
+ },
+ {
+ 'is_green': False,
+ 'build_number' : None,
+ 'name': u'SnowLeopard Intel Release',
+ 'built_revision': None,
+ 'activity': 'building',
+ 'pending_builds': 0,
+ },
+ {
+ 'is_green': False,
+ 'build_number' : 654,
+ 'name': u'Qt Linux Release',
+ 'built_revision': 47383,
+ 'activity': 'idle',
+ 'pending_builds': 3,
+ },
+ {
+ 'is_green': True,
+ 'build_number' : 2090,
+ 'name': u'Qt Windows 32-bit Debug',
+ 'built_revision': 60563,
+ 'activity': 'building',
+ 'pending_builds': 0,
+ },
+ ]
+
+ def test_status_parsing(self):
+ buildbot = BuildBot()
+
+ soup = BeautifulSoup(self._example_one_box_status)
+ status_table = soup.find("table")
+ input_rows = status_table.findAll('tr')
+
+ for x, status_row in enumerate(input_rows):
+ expected_parsing = self._expected_example_one_box_parsings[x]
+
+ builder = buildbot._parse_builder_status_from_row(status_row)
+
+ # Make sure we aren't parsing more or less than we expect
+ self.assertEqual(builder.keys(), expected_parsing.keys())
+
+ for key, expected_value in expected_parsing.items():
+ self.assertEqual(builder[key], expected_value, ("Builder %d parse failure for key: %s: Actual='%s' Expected='%s'" % (x, key, builder[key], expected_value)))
+
+ def test_builder_with_name(self):
+ buildbot = BuildBot()
+
+ builder = buildbot.builder_with_name("Test Builder")
+ self.assertEqual(builder.name(), "Test Builder")
+ self.assertEqual(builder.url(), "http://build.chromium.org/p/chromium.webkit/builders/Test%20Builder")
+ self.assertEqual(builder.url_encoded_name(), "Test%20Builder")
+ self.assertEqual(builder.results_url(), "https://storage.googleapis.com/chromium-layout-test-archives/Test_Builder")
+
+ # Override _fetch_build_dictionary function to not touch the network.
+ def mock_fetch_build_dictionary(self, build_number):
+ build_dictionary = {
+ "sourceStamp": {
+ "revision" : 2 * build_number,
+ },
+ "number" : int(build_number),
+ "results" : build_number % 2, # 0 means pass
+ }
+ return build_dictionary
+ buildbot._fetch_build_dictionary = mock_fetch_build_dictionary
+
+ build = builder.build(10)
+ self.assertEqual(build.builder(), builder)
+ self.assertEqual(build.url(), "http://build.chromium.org/p/chromium.webkit/builders/Test%20Builder/builds/10")
+ self.assertEqual(build.results_url(), "https://storage.googleapis.com/chromium-layout-test-archives/Test_Builder/r20%20%2810%29")
+ self.assertEqual(build.revision(), 20)
+ self.assertTrue(build.is_green())
+
+ build = build.previous_build()
+ self.assertEqual(build.builder(), builder)
+ self.assertEqual(build.url(), "http://build.chromium.org/p/chromium.webkit/builders/Test%20Builder/builds/9")
+ self.assertEqual(build.results_url(), "https://storage.googleapis.com/chromium-layout-test-archives/Test_Builder/r18%20%289%29")
+ self.assertEqual(build.revision(), 18)
+ self.assertFalse(build.is_green())
+
+ self.assertIsNone(builder.build(None))
+
+ _example_directory_listing = '''
+<h1>Directory listing for /results/SnowLeopard Intel Leaks/</h1>
+
+<table>
+ <tr class="alt">
+ <th>Filename</th>
+ <th>Size</th>
+ <th>Content type</th>
+ <th>Content encoding</th>
+ </tr>
+<tr class="directory ">
+ <td><a href="r47483%20%281%29/"><b>r47483 (1)/</b></a></td>
+ <td><b></b></td>
+ <td><b>[Directory]</b></td>
+ <td><b></b></td>
+</tr>
+<tr class="file alt">
+ <td><a href="r47484%20%282%29.zip">r47484 (2).zip</a></td>
+ <td>89K</td>
+ <td>[application/zip]</td>
+ <td></td>
+</tr>
+'''
+ _expected_files = [
+ {
+ "filename" : "r47483 (1)/",
+ "size" : "",
+ "type" : "[Directory]",
+ "encoding" : "",
+ },
+ {
+ "filename" : "r47484 (2).zip",
+ "size" : "89K",
+ "type" : "[application/zip]",
+ "encoding" : "",
+ },
+ ]
+
+ def test_parse_build_to_revision_map(self):
+ buildbot = BuildBot()
+ files = buildbot._parse_twisted_directory_listing(self._example_directory_listing)
+ self.assertEqual(self._expected_files, files)
+
+ _fake_builder_page = '''
+ <body>
+ <div class="content">
+ <h1>Some Builder</h1>
+ <p>(<a href="../waterfall?show=Some Builder">view in waterfall</a>)</p>
+ <div class="column">
+ <h2>Recent Builds:</h2>
+ <table class="info">
+ <tr>
+ <th>Time</th>
+ <th>Revision</th>
+ <th>Result</th> <th>Build #</th>
+ <th>Info</th>
+ </tr>
+ <tr class="alt">
+ <td>Jan 10 15:49</td>
+ <td><span class="revision" title="Revision 104643"><a href="http://trac.webkit.org/changeset/104643">104643</a></span></td>
+ <td class="success">failure</td> <td><a href=".../37604">#37604</a></td>
+ <td class="left">Build successful</td>
+ </tr>
+ <tr class="">
+ <td>Jan 10 15:32</td>
+ <td><span class="revision" title="Revision 104636"><a href="http://trac.webkit.org/changeset/104636">104636</a></span></td>
+ <td class="success">failure</td> <td><a href=".../37603">#37603</a></td>
+ <td class="left">Build successful</td>
+ </tr>
+ <tr class="alt">
+ <td>Jan 10 15:18</td>
+ <td><span class="revision" title="Revision 104635"><a href="http://trac.webkit.org/changeset/104635">104635</a></span></td>
+ <td class="success">success</td> <td><a href=".../37602">#37602</a></td>
+ <td class="left">Build successful</td>
+ </tr>
+ <tr class="">
+ <td>Jan 10 14:51</td>
+ <td><span class="revision" title="Revision 104633"><a href="http://trac.webkit.org/changeset/104633">104633</a></span></td>
+ <td class="failure">failure</td> <td><a href=".../37601">#37601</a></td>
+ <td class="left">Failed compile-webkit</td>
+ </tr>
+ </table>
+ </body>'''
+ _fake_builder_page_without_success = '''
+ <body>
+ <table>
+ <tr class="alt">
+ <td>Jan 10 15:49</td>
+ <td><span class="revision" title="Revision 104643"><a href="http://trac.webkit.org/changeset/104643">104643</a></span></td>
+ <td class="success">failure</td>
+ </tr>
+ <tr class="">
+ <td>Jan 10 15:32</td>
+ <td><span class="revision" title="Revision 104636"><a href="http://trac.webkit.org/changeset/104636">104636</a></span></td>
+ <td class="success">failure</td>
+ </tr>
+ <tr class="alt">
+ <td>Jan 10 15:18</td>
+ <td><span class="revision" title="Revision 104635"><a href="http://trac.webkit.org/changeset/104635">104635</a></span></td>
+ <td class="success">failure</td>
+ </tr>
+ <tr class="">
+ <td>Jan 10 11:58</td>
+ <td><span class="revision" title="Revision ??"><a href="http://trac.webkit.org/changeset/%3F%3F">??</a></span></td>
+ <td class="retry">retry</td>
+ </tr>
+ <tr class="">
+ <td>Jan 10 14:51</td>
+ <td><span class="revision" title="Revision 104633"><a href="http://trac.webkit.org/changeset/104633">104633</a></span></td>
+ <td class="failure">failure</td>
+ </tr>
+ </table>
+ </body>'''
+
+ def test_revisions_for_builder(self):
+ buildbot = BuildBot()
+ buildbot._fetch_builder_page = lambda builder: builder.page
+ builder_with_success = Builder('Some builder', None)
+ builder_with_success.page = self._fake_builder_page
+ self.assertEqual(buildbot._revisions_for_builder(builder_with_success), [(104643, False), (104636, False), (104635, True), (104633, False)])
+
+ builder_without_success = Builder('Some builder', None)
+ builder_without_success.page = self._fake_builder_page_without_success
+ self.assertEqual(buildbot._revisions_for_builder(builder_without_success), [(104643, False), (104636, False), (104635, False), (104633, False)])
+
+ def test_find_green_revision(self):
+ buildbot = BuildBot()
+ self.assertEqual(buildbot._find_green_revision({
+ 'Builder 1': [(1, True), (3, True)],
+ 'Builder 2': [(1, True), (3, False)],
+ 'Builder 3': [(1, True), (3, True)],
+ }), 1)
+ self.assertEqual(buildbot._find_green_revision({
+ 'Builder 1': [(1, False), (3, True)],
+ 'Builder 2': [(1, True), (3, True)],
+ 'Builder 3': [(1, True), (3, True)],
+ }), 3)
+ self.assertEqual(buildbot._find_green_revision({
+ 'Builder 1': [(1, True), (2, True)],
+ 'Builder 2': [(1, False), (2, True), (3, True)],
+ 'Builder 3': [(1, True), (3, True)],
+ }), None)
+ self.assertEqual(buildbot._find_green_revision({
+ 'Builder 1': [(1, True), (2, True)],
+ 'Builder 2': [(1, True), (2, True), (3, True)],
+ 'Builder 3': [(1, True), (3, True)],
+ }), 2)
+ self.assertEqual(buildbot._find_green_revision({
+ 'Builder 1': [(1, False), (2, True)],
+ 'Builder 2': [(1, True), (3, True)],
+ 'Builder 3': [(1, True), (3, True)],
+ }), None)
+ self.assertEqual(buildbot._find_green_revision({
+ 'Builder 1': [(1, True), (3, True)],
+ 'Builder 2': [(1, False), (2, True), (3, True), (4, True)],
+ 'Builder 3': [(2, True), (4, True)],
+ }), 3)
+ self.assertEqual(buildbot._find_green_revision({
+ 'Builder 1': [(1, True), (3, True)],
+ 'Builder 2': [(1, False), (2, True), (3, True), (4, False)],
+ 'Builder 3': [(2, True), (4, True)],
+ }), None)
+ self.assertEqual(buildbot._find_green_revision({
+ 'Builder 1': [(1, True), (3, True)],
+ 'Builder 2': [(1, False), (2, True), (3, True), (4, False)],
+ 'Builder 3': [(2, True), (3, True), (4, True)],
+ }), 3)
+ self.assertEqual(buildbot._find_green_revision({
+ 'Builder 1': [(1, True), (2, True)],
+ 'Builder 2': [],
+ 'Builder 3': [(1, True), (2, True)],
+ }), None)
+ self.assertEqual(buildbot._find_green_revision({
+ 'Builder 1': [(1, True), (3, False), (5, True), (10, True), (12, False)],
+ 'Builder 2': [(1, True), (3, False), (7, True), (9, True), (12, False)],
+ 'Builder 3': [(1, True), (3, True), (7, True), (11, False), (12, True)],
+ }), 7)
+
+ def _fetch_build(self, build_number):
+ if build_number == 5:
+ return "correct build"
+ return "wrong build"
+
+ def _fetch_revision_to_build_map(self):
+ return {'r5': 5, 'r2': 2, 'r3': 3}
+
+ def test_latest_cached_build(self):
+ b = Builder('builder', BuildBot())
+ b._fetch_build = self._fetch_build
+ b._fetch_revision_to_build_map = self._fetch_revision_to_build_map
+ self.assertEqual("correct build", b.latest_cached_build())
+
+ def results_url(self):
+ return "some-url"
+
+ def test_results_zip_url(self):
+ b = Build(None, 123, 123, False)
+ b.results_url = self.results_url
+ self.assertEqual("some-url.zip", b.results_zip_url())
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/net/file_uploader.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/file_uploader.py
new file mode 100644
index 0000000..871295b
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/file_uploader.py
@@ -0,0 +1,107 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import mimetypes
+import time
+import urllib2
+
+from webkitpy.common.net.networktransaction import NetworkTransaction, NetworkTimeout
+
+
+def get_mime_type(filename):
+ return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
+
+
+# FIXME: Rather than taking tuples, this function should take more structured data.
+def _encode_multipart_form_data(fields, files):
+ """Encode form fields for multipart/form-data.
+
+ Args:
+ fields: A sequence of (name, value) elements for regular form fields.
+ files: A sequence of (name, filename, value) elements for data to be
+ uploaded as files.
+ Returns:
+ (content_type, body) ready for httplib.HTTP instance.
+
+ Source:
+ http://code.google.com/p/rietveld/source/browse/trunk/upload.py
+ """
+ BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
+ CRLF = '\r\n'
+ lines = []
+
+ for key, value in fields:
+ lines.append('--' + BOUNDARY)
+ lines.append('Content-Disposition: form-data; name="%s"' % key)
+ lines.append('')
+ if isinstance(value, unicode):
+ value = value.encode('utf-8')
+ lines.append(value)
+
+ for key, filename, value in files:
+ lines.append('--' + BOUNDARY)
+ lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
+ lines.append('Content-Type: %s' % get_mime_type(filename))
+ lines.append('')
+ if isinstance(value, unicode):
+ value = value.encode('utf-8')
+ lines.append(value)
+
+ lines.append('--' + BOUNDARY + '--')
+ lines.append('')
+ body = CRLF.join(lines)
+ content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
+ return content_type, body
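+
+# Illustrative usage sketch (the field and file values below are made up,
+# not taken from callers in this patch): one regular field plus one file
+# yields a body whose sections are delimited by the fixed BOUNDARY above.
+#
+#   content_type, body = _encode_multipart_form_data(
+#       [('builder', 'WebKit Linux')],
+#       [('file', 'results.json', '{"tests": {}}')])
+#   # content_type == 'multipart/form-data; boundary=-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
+#   # body holds one Content-Disposition section per field and per file.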
+
+
+class FileUploader(object):
+ def __init__(self, url, timeout_seconds):
+ self._url = url
+ self._timeout_seconds = timeout_seconds
+
+ def upload_single_text_file(self, filesystem, content_type, filename):
+ return self._upload_data(content_type, filesystem.read_text_file(filename))
+
+ def upload_as_multipart_form_data(self, filesystem, files, attrs):
+ file_objs = []
+ for filename, path in files:
+ file_objs.append(('file', filename, filesystem.read_binary_file(path)))
+
+ # FIXME: We should use the same variable names for the formal and actual parameters.
+ content_type, data = _encode_multipart_form_data(attrs, file_objs)
+ return self._upload_data(content_type, data)
+
+ def _upload_data(self, content_type, data):
+ def callback():
+ # FIXME: Setting a timeout, either globally using socket.setdefaulttimeout()
+ # or in urlopen(), doesn't appear to work on Mac 10.5 with Python 2.7.
+ # For now we will ignore the timeout value and hope for the best.
+ request = urllib2.Request(self._url, data, {"Content-Type": content_type})
+ return urllib2.urlopen(request)
+
+ return NetworkTransaction(timeout_seconds=self._timeout_seconds).run(callback)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/net/layouttestresults.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/layouttestresults.py
new file mode 100644
index 0000000..2597e0a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/layouttestresults.py
@@ -0,0 +1,116 @@
+# Copyright (c) 2010, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import logging
+
+from webkitpy.common.memoized import memoized
+from webkitpy.layout_tests.layout_package import json_results_generator
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models.test_expectations import TestExpectations
+
+_log = logging.getLogger(__name__)
+
+
+# These are helper functions for navigating the results JSON structure.
+def for_each_test(tree, handler, prefix=''):
+ for key in tree:
+ new_prefix = (prefix + '/' + key) if prefix else key
+ if 'actual' not in tree[key]:
+ for_each_test(tree[key], handler, new_prefix)
+ else:
+ handler(new_prefix, tree[key])
+
+
+def result_for_test(tree, test):
+ parts = test.split('/')
+ for part in parts:
+ if part not in tree:
+ return None
+ tree = tree[part]
+ return tree
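+
+# Illustrative sketch (the tree below is made up): result_for_test walks the
+# nested "tests" dictionary one path component at a time, and for_each_test
+# invokes the handler for every leaf that carries an "actual" key.
+#
+#   tree = {'fast': {'dom': {'example.html': {'actual': 'PASS'}}}}
+#   result_for_test(tree, 'fast/dom/example.html')  # -> {'actual': 'PASS'}
+#   for_each_test(tree, handler)  # calls handler('fast/dom/example.html', {'actual': 'PASS'})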
+
+
+class JSONTestResult(object):
+ def __init__(self, test_name, result_dict):
+ self._test_name = test_name
+ self._result_dict = result_dict
+
+ def did_pass_or_run_as_expected(self):
+ return self.did_pass() or self.did_run_as_expected()
+
+ def did_pass(self):
+ return test_expectations.PASS in self._actual_as_tokens()
+
+ def did_run_as_expected(self):
+ return 'is_unexpected' not in self._result_dict
+
+ def _tokenize(self, results_string):
+ tokens = map(TestExpectations.expectation_from_string, results_string.split(' '))
+ if None in tokens:
+ _log.warning("Unrecognized result in %s" % results_string)
+ return set(tokens)
+
+ @memoized
+ def _actual_as_tokens(self):
+ actual_results = self._result_dict['actual']
+ return self._tokenize(actual_results)
+
+
+# FIXME: This should be unified with ResultsSummary or other NRWT layout tests code
+# in the layout_tests package.
+# This doesn't belong in common.net, but we don't have a better place for it yet.
+class LayoutTestResults(object):
+ @classmethod
+ def results_from_string(cls, string):
+ if not string:
+ return None
+
+ content_string = json_results_generator.strip_json_wrapper(string)
+ json_dict = json.loads(content_string)
+ if not json_dict:
+ return None
+ return cls(json_dict)
+
+ def __init__(self, parsed_json):
+ self._results = parsed_json
+
+ def run_was_interrupted(self):
+ return self._results["interrupted"]
+
+ def builder_name(self):
+ return self._results["builder_name"]
+
+ def blink_revision(self):
+ return int(self._results["blink_revision"])
+
+ def actual_results(self, test):
+ result = result_for_test(self._results["tests"], test)
+ if result:
+ return result["actual"]
+ return ""
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py
new file mode 100644
index 0000000..a7760d2
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2010, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.net.layouttestresults import LayoutTestResults
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
+
+
+class LayoutTestResultsTest(unittest.TestCase):
+ # The real files have no whitespace, but newlines make this much more readable.
+ example_full_results_json = """ADD_RESULTS({
+ "tests": {
+ "fast": {
+ "dom": {
+ "prototype-inheritance.html": {
+ "expected": "PASS",
+ "actual": "TEXT",
+ "is_unexpected": true
+ },
+ "prototype-banana.html": {
+ "expected": "FAIL",
+ "actual": "PASS",
+ "is_unexpected": true
+ },
+ "prototype-taco.html": {
+ "expected": "PASS",
+ "actual": "PASS TEXT",
+ "is_unexpected": true
+ },
+ "prototype-chocolate.html": {
+ "expected": "FAIL",
+ "actual": "IMAGE+TEXT"
+ },
+ "prototype-strawberry.html": {
+ "expected": "PASS",
+ "actual": "IMAGE PASS",
+ "is_unexpected": true
+ }
+ }
+ },
+ "svg": {
+ "dynamic-updates": {
+ "SVGFEDropShadowElement-dom-stdDeviation-attr.html": {
+ "expected": "PASS",
+ "actual": "IMAGE",
+ "has_stderr": true,
+ "is_unexpected": true
+ }
+ }
+ }
+ },
+ "skipped": 450,
+ "num_regressions": 15,
+ "layout_tests_dir": "\/b\/build\/slave\/Webkit_Mac10_5\/build\/src\/third_party\/WebKit\/LayoutTests",
+ "version": 3,
+ "num_passes": 77,
+ "has_pretty_patch": false,
+ "fixable": 1220,
+ "num_flaky": 0,
+ "blink_revision": "1234",
+ "has_wdiff": false
+});"""
+
+ def test_results_from_string(self):
+ self.assertIsNone(LayoutTestResults.results_from_string(None))
+ self.assertIsNone(LayoutTestResults.results_from_string(""))
+
+ def test_was_interrupted(self):
+ self.assertTrue(LayoutTestResults.results_from_string('ADD_RESULTS({"tests":{},"interrupted":true});').run_was_interrupted())
+ self.assertFalse(LayoutTestResults.results_from_string('ADD_RESULTS({"tests":{},"interrupted":false});').run_was_interrupted())
+
+ def test_blink_revision(self):
+ self.assertEqual(LayoutTestResults.results_from_string(self.example_full_results_json).blink_revision(), 1234)
+
+ def test_actual_results(self):
+ results = LayoutTestResults.results_from_string(self.example_full_results_json)
+ self.assertEqual(results.actual_results("fast/dom/prototype-banana.html"), "PASS")
+ self.assertEqual(results.actual_results("fast/dom/prototype-taco.html"), "PASS TEXT")
+ self.assertEqual(results.actual_results("nonexistent.html"), "")
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/net/networktransaction.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/networktransaction.py
new file mode 100644
index 0000000..60acaab
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/networktransaction.py
@@ -0,0 +1,68 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import time
+import urllib2
+
+_log = logging.getLogger(__name__)
+
+
+class NetworkTimeout(Exception):
+ def __str__(self):
+ return 'NetworkTimeout'
+
+
+class NetworkTransaction(object):
+ def __init__(self, initial_backoff_seconds=10, grown_factor=1.5, timeout_seconds=(10 * 60), convert_404_to_None=False):
+ self._initial_backoff_seconds = initial_backoff_seconds
+ self._grown_factor = grown_factor
+ self._timeout_seconds = timeout_seconds
+ self._convert_404_to_None = convert_404_to_None
+
+ def run(self, request):
+ self._total_sleep = 0
+ self._backoff_seconds = self._initial_backoff_seconds
+ while True:
+ try:
+ return request()
+ except urllib2.HTTPError, e:
+ if self._convert_404_to_None and e.code == 404:
+ return None
+ self._check_for_timeout()
+ _log.warn("Received HTTP status %s loading \"%s\". Retrying in %s seconds..." % (e.code, e.filename, self._backoff_seconds))
+ self._sleep()
+
+ def _check_for_timeout(self):
+ if self._total_sleep + self._backoff_seconds > self._timeout_seconds:
+ raise NetworkTimeout()
+
+ def _sleep(self):
+ time.sleep(self._backoff_seconds)
+ self._total_sleep += self._backoff_seconds
+ self._backoff_seconds *= self._grown_factor
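+
+# Usage sketch (the URL is illustrative): run() retries the callback whenever
+# it raises urllib2.HTTPError, sleeping _backoff_seconds and multiplying the
+# delay by grown_factor each attempt; once the accumulated sleep would exceed
+# timeout_seconds, NetworkTimeout is raised instead. Other exceptions
+# propagate immediately.
+#
+#   transaction = NetworkTransaction(initial_backoff_seconds=1, timeout_seconds=60)
+#   data = transaction.run(lambda: urllib2.urlopen('http://example.com/').read())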
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/net/networktransaction_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/networktransaction_unittest.py
new file mode 100644
index 0000000..e1451fb
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/networktransaction_unittest.py
@@ -0,0 +1,93 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from urllib2 import HTTPError
+from webkitpy.common.net.networktransaction import NetworkTransaction, NetworkTimeout
+from webkitpy.common.system.logtesting import LoggingTestCase
+
+
+class NetworkTransactionTest(LoggingTestCase):
+ exception = Exception("Test exception")
+
+ def test_success(self):
+ transaction = NetworkTransaction()
+ self.assertEqual(transaction.run(lambda: 42), 42)
+
+ def _raise_exception(self):
+ raise self.exception
+
+ def test_exception(self):
+ transaction = NetworkTransaction()
+ did_process_exception = False
+ did_throw_exception = True
+ try:
+ transaction.run(lambda: self._raise_exception())
+ did_throw_exception = False
+ except Exception, e:
+ did_process_exception = True
+ self.assertEqual(e, self.exception)
+ self.assertTrue(did_throw_exception)
+ self.assertTrue(did_process_exception)
+
+ def _raise_500_error(self):
+ self._run_count += 1
+ if self._run_count < 3:
+ raise HTTPError("http://example.com/", 500, "internal server error", None, None)
+ return 42
+
+ def _raise_404_error(self):
+ raise HTTPError("http://foo.com/", 404, "not found", None, None)
+
+ def test_retry(self):
+ self._run_count = 0
+ transaction = NetworkTransaction(initial_backoff_seconds=0)
+ self.assertEqual(transaction.run(lambda: self._raise_500_error()), 42)
+ self.assertEqual(self._run_count, 3)
+ self.assertLog(['WARNING: Received HTTP status 500 loading "http://example.com/". '
+ 'Retrying in 0 seconds...\n',
+ 'WARNING: Received HTTP status 500 loading "http://example.com/". '
+ 'Retrying in 0.0 seconds...\n'])
+
+ def test_convert_404_to_None(self):
+ transaction = NetworkTransaction(convert_404_to_None=True)
+ self.assertEqual(transaction.run(lambda: self._raise_404_error()), None)
+
+ def test_timeout(self):
+ self._run_count = 0
+ transaction = NetworkTransaction(initial_backoff_seconds=60*60, timeout_seconds=60)
+ did_process_exception = False
+ did_throw_exception = True
+ try:
+ transaction.run(lambda: self._raise_500_error())
+ did_throw_exception = False
+ except NetworkTimeout, e:
+ did_process_exception = True
+ self.assertTrue(did_throw_exception)
+ self.assertTrue(did_process_exception)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/net/sheriff_calendar.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/sheriff_calendar.py
new file mode 100644
index 0000000..bb60fca
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/sheriff_calendar.py
@@ -0,0 +1,59 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+import urllib2
+
+# This is based on code from:
+# https://chromium.googlesource.com/chromium/tools/build/+/master/scripts/tools/blink_roller/auto_roll.py
+# Ideally we should share code between these.
+
+# FIXME: This probably belongs in config.py?
+BLINK_SHERIFF_URL = (
+ 'http://build.chromium.org/p/chromium.webkit/sheriff_webkit.js')
+
+
+# Does not support unicode or special characters.
+VALID_EMAIL_REGEXP = re.compile(r'^[A-Za-z0-9\.&\'\+-/=_]+@[A-Za-z0-9\.-]+$')
+
+
+def _complete_email(name):
+ """If the name does not include '@', append '@chromium.org'."""
+ if '@' not in name:
+ return name + '@chromium.org'
+ return name
+
+
+def _names_from_sheriff_js(sheriff_js):
+ match = re.match(r'document.write\(\'(.*)\'\)', sheriff_js)
+ emails_string = match.group(1)
+ # Detect 'none (channel is sheriff)' text and ignore it.
+ if 'channel is sheriff' in emails_string.lower():
+ return []
+ return map(str.strip, emails_string.split(','))
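+
+# Illustrative example (input made up, matching the unit tests): the sheriff
+# page serves a JavaScript snippet, so names are pulled out of the
+# document.write() payload and comma-split.
+#
+#   _names_from_sheriff_js("document.write('foo, bar@google.com')")
+#   # -> ['foo', 'bar@google.com']; _complete_email later expands bare
+#   # names like 'foo' to 'foo@chromium.org'.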
+
+
+def _email_is_valid(email):
+ """Determines whether the given email address is valid."""
+ return VALID_EMAIL_REGEXP.match(email) is not None
+
+
+def _filter_emails(emails):
+ """Returns the given list with any invalid email addresses removed."""
+ rv = []
+ for email in emails:
+ if _email_is_valid(email):
+ rv.append(email)
+ else:
+ print 'WARNING: Not including %s (invalid email address)' % email
+ return rv
+
+
+def _emails_from_url(sheriff_url):
+ sheriff_js = urllib2.urlopen(sheriff_url).read()
+ return map(_complete_email, _names_from_sheriff_js(sheriff_js))
+
+
+def current_gardener_emails():
+ return _emails_from_url(BLINK_SHERIFF_URL)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/net/sheriff_calendar_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/sheriff_calendar_unittest.py
new file mode 100644
index 0000000..8c57123
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/sheriff_calendar_unittest.py
@@ -0,0 +1,56 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This is based on code from:
+# https://chromium.googlesource.com/chromium/tools/build/+/master/scripts/tools/blink_roller/auto_roll_test.py
+# Ideally we should share code between these.
+
+
+from webkitpy.common.system.outputcapture import OutputCaptureTestCaseBase
+import sheriff_calendar as calendar
+
+
+class SheriffCalendarTest(OutputCaptureTestCaseBase):
+ def test_complete_email(self):
+ expected_emails = ['foo@chromium.org', 'bar@google.com', 'baz@chromium.org']
+ names = ['foo', 'bar@google.com', 'baz']
+ self.assertEqual(map(calendar._complete_email, names), expected_emails)
+
+ def test_emails(self):
+ expected_emails = ['foo@bar.com', 'baz@baz.com']
+ calendar._emails_from_url = lambda urls: expected_emails
+ self.assertEqual(calendar.current_gardener_emails(), expected_emails)
+
+ def _assert_parse(self, js_string, expected_emails):
+ self.assertEqual(calendar._names_from_sheriff_js(js_string), expected_emails)
+
+ def test_names_from_sheriff_js(self):
+ self._assert_parse('document.write(\'none (channel is sheriff)\')', [])
+ self._assert_parse('document.write(\'foo, bar\')', ['foo', 'bar'])
+
+ def test_email_regexp(self):
+ self.assertTrue(calendar._email_is_valid('somebody@example.com'))
+ self.assertTrue(calendar._email_is_valid('somebody@example.domain.com'))
+ self.assertTrue(calendar._email_is_valid('somebody@example-domain.com'))
+ self.assertTrue(calendar._email_is_valid('some.body@example.com'))
+ self.assertTrue(calendar._email_is_valid('some_body@example.com'))
+ self.assertTrue(calendar._email_is_valid('some+body@example.com'))
+ self.assertTrue(calendar._email_is_valid('some+body@com'))
+ self.assertTrue(calendar._email_is_valid('some/body@example.com'))
+ # These are valid according to the standard, but not supported here.
+ self.assertFalse(calendar._email_is_valid('some~body@example.com'))
+ self.assertFalse(calendar._email_is_valid('some!body@example.com'))
+ self.assertFalse(calendar._email_is_valid('some?body@example.com'))
+ self.assertFalse(calendar._email_is_valid('some" "body@example.com'))
+ self.assertFalse(calendar._email_is_valid('"{somebody}"@example.com'))
+ # Bogus.
+ self.assertFalse(calendar._email_is_valid('rm -rf /#@example.com'))
+ self.assertFalse(calendar._email_is_valid('some body@example.com'))
+ self.assertFalse(calendar._email_is_valid('[some body]@example.com'))
+
+ def test_filter_emails(self):
+ input_emails = ['foo@bar.com', 'baz@baz.com', 'bogus email @ !!!']
+ expected_emails = ['foo@bar.com', 'baz@baz.com']
+ self.assertEqual(calendar._filter_emails(input_emails), expected_emails)
+ self.assertStdout('WARNING: Not including bogus email @ !!! (invalid email address)\n')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/net/web.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/web.py
new file mode 100644
index 0000000..b8a06e5
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/web.py
@@ -0,0 +1,36 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import urllib2
+
+from webkitpy.common.net.networktransaction import NetworkTransaction
+
+
+class Web(object):
+ def get_binary(self, url, convert_404_to_None=False):
+ return NetworkTransaction(convert_404_to_None=convert_404_to_None).run(lambda: urllib2.urlopen(url).read())
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/net/web_mock.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/web_mock.py
new file mode 100644
index 0000000..b53cb66
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/net/web_mock.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+
+
+class MockWeb(object):
+ def __init__(self, urls=None):
+ self.urls = urls or {}
+ self.urls_fetched = []
+
+ def get_binary(self, url, convert_404_to_None=False):
+ self.urls_fetched.append(url)
+ if url in self.urls:
+ return self.urls[url]
+ return "MOCK Web result, convert 404 to None=%s" % convert_404_to_None
+
+
+# FIXME: Classes which are using Browser probably want to use Web instead.
+class MockBrowser(object):
+ params = {}
+
+ def open(self, url):
+ pass
+
+ def select_form(self, name):
+ pass
+
+ def __setitem__(self, key, value):
+ self.params[key] = value
+
+ def submit(self):
+ return StringIO.StringIO()
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/prettypatch.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/prettypatch.py
new file mode 100644
index 0000000..46ab4f2
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/prettypatch.py
@@ -0,0 +1,64 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import tempfile
+
+
+class PrettyPatch(object):
+ def __init__(self, executive):
+ self._executive = executive
+
+ def pretty_diff_file(self, diff):
+ # Diffs can contain multiple text files of different encodings
+ # so we always deal with them as byte arrays, not unicode strings.
+ assert(isinstance(diff, str))
+ pretty_diff = self.pretty_diff(diff)
+ diff_file = tempfile.NamedTemporaryFile(suffix=".html")
+ diff_file.write(pretty_diff)
+ diff_file.flush()
+ return diff_file
+
+ def pretty_diff(self, diff):
+ # prettify.rb will hang forever if given no input.
+ # Avoid the hang by returning an empty string.
+ if not diff:
+ return ""
+
+ pretty_patch_path = os.path.join(os.path.dirname(__file__), '..', '..',
+ 'webkitruby', 'PrettyPatch')
+ prettify_path = os.path.join(pretty_patch_path, "prettify.rb")
+ args = [
+ "ruby",
+ "-I",
+ pretty_patch_path,
+ prettify_path,
+ ]
+ # PrettyPatch does not modify the encoding of the diff output
+ # so we can't expect it to be utf-8.
+ return self._executive.run_command(args, input=diff, decode_output=False)
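+
+# Usage sketch (assumes the webkitpy Executive and a ruby interpreter on
+# PATH; the variable names are illustrative):
+#
+#   from webkitpy.common.system.executive import Executive
+#   html = PrettyPatch(Executive()).pretty_diff(diff_bytes)
+#   # diff_bytes must be a plain byte string; the HTML comes back undecoded
+#   # because the input diff may mix encodings.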
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/read_checksum_from_png.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/read_checksum_from_png.py
new file mode 100644
index 0000000..7431f47
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/read_checksum_from_png.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+def read_checksum(filehandle):
+ # We expect the comment to be at the beginning of the file.
+ data = filehandle.read(2048)
+ comment_key = 'tEXtchecksum\x00'
+ comment_pos = data.find(comment_key)
+ if comment_pos == -1:
+ return
+
+ checksum_pos = comment_pos + len(comment_key)
+ return data[checksum_pos:checksum_pos + 32]
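+
+# Usage sketch (the filename is illustrative): the checksum is embedded as a
+# 32-character hex digest right after the 'tEXtchecksum\x00' keyword of a PNG
+# tEXt chunk, so scanning the first 2KB is sufficient.
+#
+#   with open('expected-image.png', 'rb') as filehandle:
+#       checksum = read_checksum(filehandle)  # 32-char digest, or None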
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/read_checksum_from_png_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/read_checksum_from_png_unittest.py
new file mode 100644
index 0000000..9966d64
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/read_checksum_from_png_unittest.py
@@ -0,0 +1,40 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import unittest
+
+from webkitpy.common import read_checksum_from_png
+
+
+class ReadChecksumFromPngTest(unittest.TestCase):
+ def test_read_checksum(self):
+ # Test a file with the comment.
+ filehandle = StringIO.StringIO('''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x03 \x00\x00\x02X\x08\x02\x00\x00\x00\x15\x14\x15'\x00\x00\x00)tEXtchecksum\x003c4134fe2739880353f91c5b84cadbaaC\xb8?\xec\x00\x00\x16\xfeIDATx\x9c\xed\xdd[\x8cU\xe5\xc1\xff\xf15T\x18\x0ea,)\xa6\x80XZ<\x10\n\xd6H\xc4V\x88}\xb5\xa9\xd6r\xd5\x0bki0\xa6\xb5ih\xd2\xde\x98PHz\xd1\x02=\\q#\x01\x8b\xa5rJ\x8b\x88i\xacM\xc5h\x8cbMk(\x1ez@!\x0c\xd5\xd2\xc2\xb44\x1c\x848\x1dF(\xeb\x7f\xb1\xff\xd9\xef~g\xd6\xde3\xe0o\x10\xec\xe7sa6{\xd6z\xd6\xb3\xd7\xf3\xa8_7\xdbM[Y\x96\x05\x00\x009\xc3\xde\xeb\t\x00\x00\xbc\xdf\x08,\x00\x800\x81\x05\x00\x10&\xb0\x00\x00\xc2\x04\x16\x00@\x98\xc0\x02\x00\x08\x13X\x00\x00a\x02\x0b\x00 Lx01\x00\x84\t,\x00\x800\x81\x05\x00\x10\xd64\xb0\xda\x9a\xdb\xb6m\xdb\xb4i\xd3\xfa\x9fr\xf3\xcd7\x0f\xe5T\x07\xe5\xd4\xa9''')
+ checksum = read_checksum_from_png.read_checksum(filehandle)
+ self.assertEqual('3c4134fe2739880353f91c5b84cadbaa', checksum)
+
+ # Test a file without the comment.
+ filehandle = StringIO.StringIO('''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x03 \x00\x00\x02X\x08\x02\x00\x00\x00\x15\x14\x15'\x00\x00\x16\xfeIDATx\x9c\xed\xdd[\x8cU\xe5\xc1\xff\xf15T\x18\x0ea,)\xa6\x80XZ<\x10\n\xd6H\xc4V\x88}\xb5\xa9\xd6r\xd5\x0bki0\xa6\xb5ih\xd2\xde\x98PHz\xd1\x02=\\q#\x01\x8b\xa5rJ\x8b\x88i\xacM\xc5h\x8cbMk(\x1ez@!\x0c\xd5\xd2\xc2\xb44\x1c\x848\x1dF(\xeb\x7f\xb1\xff\xd9\xef~g\xd6\xde3\xe0o\x10\xec\xe7sa6{\xd6z\xd6\xb3\xd7\xf3\xa8_7\xdbM[Y\x96\x05\x00\x009\xc3\xde\xeb\t\x00\x00\xbc\xdf\x08,\x00\x800\x81\x05\x00\x10&\xb0\x00\x00\xc2\x04\x16\x00@\x98\xc0\x02\x00\x08\x13X\x00\x00a\x02\x0b\x00 Lx01\x00\x84\t,\x00\x800\x81\x05\x00\x10\xd64\xb0\xda\x9a\xdb\xb6m\xdb\xb4i\xd3\xfa\x9fr\xf3\xcd7\x0f\xe5T\x07\xe5\xd4\xa9S\x8b\x17/\x1e?~\xfc\xf8\xf1\xe3\xef\xbf\xff\xfe\xf7z:M5\xbb\x87\x17\xcbUZ\x8f|V\xd7\xbd\x10\xb6\xcd{b\x88\xf6j\xb3\x9b?\x14\x9b\xa1>\xe6\xf9\xd9\xcf\x00\x17\x93''')
+ checksum = read_checksum_from_png.read_checksum(filehandle)
+ self.assertIsNone(checksum)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/crashlogs.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/crashlogs.py
new file mode 100644
index 0000000..270ca81
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/crashlogs.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2011, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+
+
+class CrashLogs(object):
+ def __init__(self, host):
+ self._host = host
+
+ def find_newest_log(self, process_name, pid=None, include_errors=False, newer_than=None):
+ if self._host.platform.is_mac():
+ return self._find_newest_log_darwin(process_name, pid, include_errors, newer_than)
+ return None
+
+ def _log_directory_darwin(self):
+ log_directory = self._host.filesystem.expanduser("~")
+ log_directory = self._host.filesystem.join(log_directory, "Library", "Logs")
+ if self._host.filesystem.exists(self._host.filesystem.join(log_directory, "DiagnosticReports")):
+ log_directory = self._host.filesystem.join(log_directory, "DiagnosticReports")
+ else:
+ log_directory = self._host.filesystem.join(log_directory, "CrashReporter")
+ return log_directory
+
+ def _find_newest_log_darwin(self, process_name, pid, include_errors, newer_than):
+ def is_crash_log(fs, dirpath, basename):
+ return basename.startswith(process_name + "_") and basename.endswith(".crash")
+
+ log_directory = self._log_directory_darwin()
+ logs = self._host.filesystem.files_under(log_directory, file_filter=is_crash_log)
+ first_line_regex = re.compile(r'^Process:\s+(?P<process_name>.*) \[(?P<pid>\d+)\]$')
+ errors = ''
+ for path in reversed(sorted(logs)):
+ try:
+ if not newer_than or self._host.filesystem.mtime(path) > newer_than:
+ f = self._host.filesystem.read_text_file(path)
+ match = first_line_regex.match(f[0:f.find('\n')])
+ if match and match.group('process_name') == process_name and (pid is None or int(match.group('pid')) == pid):
+ return errors + f
+ except IOError, e:
+ if include_errors:
+ errors += "ERROR: Failed to read '%s': %s\n" % (path, str(e))
+ except OSError, e:
+ if include_errors:
+ errors += "ERROR: Failed to read '%s': %s\n" % (path, str(e))
+
+ if include_errors and errors:
+ return errors
+ return None
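+
+# Note (illustrative): a darwin crash log begins with a line such as
+# "Process:         DumpRenderTree [28530]"; first_line_regex extracts the
+# process name and pid from it so candidate logs can be filtered.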
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py
new file mode 100644
index 0000000..82b5388
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py
@@ -0,0 +1,114 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.crashlogs import CrashLogs
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+
+def make_mock_crash_report_darwin(process_name, pid):
+ return """Process: {process_name} [{pid}]
+Path: /Volumes/Data/slave/snowleopard-intel-release-tests/build/WebKitBuild/Release/{process_name}
+Identifier: {process_name}
+Version: ??? (???)
+Code Type: X86-64 (Native)
+Parent Process: Python [2578]
+
+Date/Time: 2011-12-07 13:27:34.816 -0800
+OS Version: Mac OS X 10.6.8 (10K549)
+Report Version: 6
+
+Interval Since Last Report: 1660 sec
+Crashes Since Last Report: 1
+Per-App Crashes Since Last Report: 1
+Anonymous UUID: 507D4EEB-9D70-4E2E-B322-2D2F0ABFEDC0
+
+Exception Type: EXC_BREAKPOINT (SIGTRAP)
+Exception Codes: 0x0000000000000002, 0x0000000000000000
+Crashed Thread: 0
+
+Dyld Error Message:
+ Library not loaded: /Volumes/Data/WebKit-BuildSlave/snowleopard-intel-release/build/WebKitBuild/Release/WebCore.framework/Versions/A/WebCore
+ Referenced from: /Volumes/Data/slave/snowleopard-intel-release/build/WebKitBuild/Release/WebKit.framework/Versions/A/WebKit
+ Reason: image not found
+
+Binary Images:
+ 0x7fff5fc00000 - 0x7fff5fc3be0f dyld 132.1 (???) <29DECB19-0193-2575-D838-CF743F0400B2> /usr/lib/dyld
+
+System Profile:
+Model: Xserve3,1, BootROM XS31.0081.B04, 8 processors, Quad-Core Intel Xeon, 2.26 GHz, 6 GB, SMC 1.43f4
+Graphics: NVIDIA GeForce GT 120, NVIDIA GeForce GT 120, PCIe, 256 MB
+Memory Module: global_name
+Network Service: Ethernet 2, Ethernet, en1
+PCI Card: NVIDIA GeForce GT 120, sppci_displaycontroller, MXM-Slot
+Serial ATA Device: OPTIARC DVD RW AD-5670S
+""".format(process_name=process_name, pid=pid)
+
+class CrashLogsTest(unittest.TestCase):
+ def test_find_log_darwin(self):
+ if not SystemHost().platform.is_mac():
+ return
+
+ older_mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 28528)
+ mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 28530)
+ newer_mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 28529)
+ other_process_mock_crash_report = make_mock_crash_report_darwin('FooProcess', 28527)
+ misformatted_mock_crash_report = 'Junk that should not appear in a crash report' + make_mock_crash_report_darwin('DumpRenderTree', 28526)[200:]
+ files = {}
+ files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150718_quadzen.crash'] = older_mock_crash_report
+ files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash'] = mock_crash_report
+ files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150720_quadzen.crash'] = newer_mock_crash_report
+ files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150721_quadzen.crash'] = None
+ files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150722_quadzen.crash'] = other_process_mock_crash_report
+ files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150723_quadzen.crash'] = misformatted_mock_crash_report
+ filesystem = MockFileSystem(files)
+ crash_logs = CrashLogs(MockSystemHost(filesystem=filesystem))
+ log = crash_logs.find_newest_log("DumpRenderTree")
+ self.assertMultiLineEqual(log, newer_mock_crash_report)
+ log = crash_logs.find_newest_log("DumpRenderTree", 28529)
+ self.assertMultiLineEqual(log, newer_mock_crash_report)
+ log = crash_logs.find_newest_log("DumpRenderTree", 28530)
+ self.assertMultiLineEqual(log, mock_crash_report)
+ log = crash_logs.find_newest_log("DumpRenderTree", 28531)
+ self.assertIsNone(log)
+ log = crash_logs.find_newest_log("DumpRenderTree", newer_than=1.0)
+ self.assertIsNone(log)
+
+ def bad_read(path):
+ raise IOError('IOError: No such file or directory')
+
+ def bad_mtime(path):
+ raise OSError('OSError: No such file or directory')
+
+ filesystem.read_text_file = bad_read
+ log = crash_logs.find_newest_log("DumpRenderTree", 28531, include_errors=True)
+ self.assertIn('IOError: No such file or directory', log)
+
+ filesystem = MockFileSystem(files)
+ crash_logs = CrashLogs(MockSystemHost(filesystem=filesystem))
+ filesystem.mtime = bad_mtime
+ log = crash_logs.find_newest_log("DumpRenderTree", newer_than=1.0, include_errors=True)
+ self.assertIn('OSError: No such file or directory', log)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/environment.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/environment.py
new file mode 100644
index 0000000..c5a7239
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/environment.py
@@ -0,0 +1,35 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class Environment(object):
+ def __init__(self, env=None):
+ self.env = env or {}
+
+ def to_dictionary(self):
+ return self.env
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/executive.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/executive.py
new file mode 100644
index 0000000..5d53ab9
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/executive.py
@@ -0,0 +1,462 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import errno
+import logging
+import multiprocessing
+import os
+import signal
+import subprocess
+import sys
+import time
+
+from webkitpy.common.system.outputtee import Tee
+from webkitpy.common.system.filesystem import FileSystem
+
+
+_log = logging.getLogger(__name__)
+
+
+class ScriptError(Exception):
+
+ def __init__(self,
+ message=None,
+ script_args=None,
+ exit_code=None,
+ output=None,
+ cwd=None,
+ output_limit=500):
+ shortened_output = output
+ if output and output_limit and len(output) > output_limit:
+ shortened_output = "Last %s characters of output:\n%s" % (output_limit, output[-output_limit:])
+
+ if not message:
+ message = 'Failed to run "%s"' % repr(script_args)
+ if exit_code:
+ message += " exit_code: %d" % exit_code
+ if cwd:
+ message += " cwd: %s" % cwd
+
+ if shortened_output:
+ message += "\n\noutput: %s" % shortened_output
+
+ Exception.__init__(self, message)
+ self.script_args = script_args # 'args' is already used by Exception
+ self.exit_code = exit_code
+ self.output = output
+ self.cwd = cwd
+
+ def message_with_output(self):
+ return unicode(self)
+
+ def command_name(self):
+ command_path = self.script_args
+ if type(command_path) is list:
+ command_path = command_path[0]
+ return os.path.basename(command_path)
+
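+# A usage sketch (editorial; the command and output are hypothetical) of how
+# ScriptError composes its message from the pieces above:
+#
+#   e = ScriptError(script_args=['ls', '/nope'], exit_code=1, output='boom')
+#   str(e)  # 'Failed to run "[\'ls\', \'/nope\']" exit_code: 1\n\noutput: boom'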
+
+class Executive(object):
+ PIPE = subprocess.PIPE
+ STDOUT = subprocess.STDOUT
+
+ def _should_close_fds(self):
+ # We need to pass close_fds=True to work around Python bug #2320
+        # (otherwise we can hang when killing DumpRenderTree while running
+        # multiple threads). See http://bugs.python.org/issue2320 .
+ # Note that close_fds isn't supported on Windows, but this bug only
+ # shows up on Mac and Linux.
+ return sys.platform not in ('win32', 'cygwin')
+
+ def _run_command_with_teed_output(self, args, teed_output, **kwargs):
+ child_process = self.popen(args,
+ stdout=self.PIPE,
+ stderr=self.STDOUT,
+ close_fds=self._should_close_fds(),
+ **kwargs)
+
+ # Use our own custom wait loop because Popen ignores a tee'd
+ # stderr/stdout.
+ # FIXME: This could be improved not to flatten output to stdout.
+ while True:
+ output_line = child_process.stdout.readline()
+ if output_line == "" and child_process.poll() != None:
+ # poll() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
+ return child_process.poll()
+ # We assume that the child process wrote to us in utf-8,
+ # so no re-encoding is necessary before writing here.
+ teed_output.write(output_line)
+
+ def cpu_count(self):
+ return multiprocessing.cpu_count()
+
+ @staticmethod
+ def interpreter_for_script(script_path, fs=None):
+ fs = fs or FileSystem()
+ lines = fs.read_text_file(script_path).splitlines()
+ if not len(lines):
+ return None
+ first_line = lines[0]
+ if not first_line.startswith('#!'):
+ return None
+ if first_line.find('python') > -1:
+ return sys.executable
+ if first_line.find('perl') > -1:
+ return 'perl'
+ if first_line.find('ruby') > -1:
+ return 'ruby'
+ return None
+
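+    # A quick sketch (editorial; the path and contents are hypothetical) of the
+    # shebang detection above, using the mock filesystem so nothing touches disk:
+    #
+    #   from webkitpy.common.system.filesystem_mock import MockFileSystem
+    #   fs = MockFileSystem({'/mock/run-me': '#!/usr/bin/env perl\nprint 1;\n'})
+    #   Executive.interpreter_for_script('/mock/run-me', fs)  # -> 'perl'
+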
+ @staticmethod
+ def shell_command_for_script(script_path, fs=None):
+ fs = fs or FileSystem()
+        # Win32 does not support shebangs. We need to detect the interpreter ourselves.
+ if sys.platform == 'win32':
+ interpreter = Executive.interpreter_for_script(script_path, fs)
+ if interpreter:
+ return [interpreter, script_path]
+ return [script_path]
+
+ def kill_process(self, pid):
+ """Attempts to kill the given pid.
+        Fails silently if the pid does not exist or we have insufficient permissions."""
+ if sys.platform == "win32":
+ # We only use taskkill.exe on windows (not cygwin) because subprocess.pid
+ # is a CYGWIN pid and taskkill.exe expects a windows pid.
+ # Thankfully os.kill on CYGWIN handles either pid type.
+ command = ["taskkill.exe", "/f", "/t", "/pid", pid]
+        # taskkill will exit 128 if the process is not found. FIXME: We should log that case.
+ self.run_command(command, error_handler=self.ignore_error)
+ return
+
+        # According to http://docs.python.org/library/os.html, os.kill isn't
+        # available on Windows. Python 2.5.5's os.kill appears to work in
+        # cygwin; however, it occasionally raises EAGAIN.
+ retries_left = 10 if sys.platform == "cygwin" else 1
+ while retries_left > 0:
+ try:
+ retries_left -= 1
+ os.kill(pid, signal.SIGKILL)
+ _ = os.waitpid(pid, os.WNOHANG)
+ except OSError, e:
+ if e.errno == errno.EAGAIN:
+ if retries_left <= 0:
+ _log.warn("Failed to kill pid %s. Too many EAGAIN errors." % pid)
+ continue
+ if e.errno == errno.ESRCH: # The process does not exist.
+ return
+ if e.errno == errno.EPIPE: # The process has exited already on cygwin
+ return
+ if e.errno == errno.ECHILD:
+ # Can't wait on a non-child process, but the kill worked.
+ return
+ if e.errno == errno.EACCES and sys.platform == 'cygwin':
+ # Cygwin python sometimes can't kill native processes.
+ return
+ raise
+
+ def _win32_check_running_pid(self, pid):
+ # importing ctypes at the top-level seems to cause weird crashes at
+        # exit under cygwin on apple's win port. Only win32 needs ctypes, so
+ # we import it here instead. See https://bugs.webkit.org/show_bug.cgi?id=91682
+ import ctypes
+
+ class PROCESSENTRY32(ctypes.Structure):
+ _fields_ = [("dwSize", ctypes.c_ulong),
+ ("cntUsage", ctypes.c_ulong),
+ ("th32ProcessID", ctypes.c_ulong),
+ ("th32DefaultHeapID", ctypes.POINTER(ctypes.c_ulong)),
+ ("th32ModuleID", ctypes.c_ulong),
+ ("cntThreads", ctypes.c_ulong),
+ ("th32ParentProcessID", ctypes.c_ulong),
+ ("pcPriClassBase", ctypes.c_ulong),
+ ("dwFlags", ctypes.c_ulong),
+ ("szExeFile", ctypes.c_char * 260)]
+
+ CreateToolhelp32Snapshot = ctypes.windll.kernel32.CreateToolhelp32Snapshot
+ Process32First = ctypes.windll.kernel32.Process32First
+ Process32Next = ctypes.windll.kernel32.Process32Next
+ CloseHandle = ctypes.windll.kernel32.CloseHandle
+ TH32CS_SNAPPROCESS = 0x00000002 # win32 magic number
+ hProcessSnap = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
+ pe32 = PROCESSENTRY32()
+ pe32.dwSize = ctypes.sizeof(PROCESSENTRY32)
+ result = False
+ if not Process32First(hProcessSnap, ctypes.byref(pe32)):
+ _log.debug("Failed getting first process.")
+ CloseHandle(hProcessSnap)
+ return result
+ while True:
+ if pe32.th32ProcessID == pid:
+ result = True
+ break
+ if not Process32Next(hProcessSnap, ctypes.byref(pe32)):
+ break
+ CloseHandle(hProcessSnap)
+ return result
+
+ def check_running_pid(self, pid):
+ """Return True if pid is alive, otherwise return False."""
+ if sys.platform == 'win32':
+ return self._win32_check_running_pid(pid)
+
+ try:
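+            # Signal 0 performs the permission and existence checks without
+            # actually delivering a signal, making this a cheap liveness probe.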
+ os.kill(pid, 0)
+ return True
+ except OSError:
+ return False
+
+ def running_pids(self, process_name_filter=None):
+ if not process_name_filter:
+ process_name_filter = lambda process_name: True
+
+ running_pids = []
+
+ if sys.platform in ("win32", "cygwin"):
+ # FIXME: running_pids isn't implemented on Windows yet...
+ return []
+
+ ps_process = self.popen(['ps', '-eo', 'pid,comm'], stdout=self.PIPE, stderr=self.PIPE)
+ stdout, _ = ps_process.communicate()
+ for line in stdout.splitlines():
+ try:
+ # In some cases the line can contain one or more
+                # leading whitespace characters, so strip them before splitting.
+ pid, process_name = line.strip().split(' ', 1)
+ if process_name_filter(process_name):
+ running_pids.append(int(pid))
+            except ValueError:
+ pass
+
+ return sorted(running_pids)
+
+ def wait_newest(self, process_name_filter=None):
+ if not process_name_filter:
+ process_name_filter = lambda process_name: True
+
+ running_pids = self.running_pids(process_name_filter)
+ if not running_pids:
+ return
+ pid = running_pids[-1]
+
+ while self.check_running_pid(pid):
+ time.sleep(0.25)
+
+ def wait_limited(self, pid, limit_in_seconds=None, check_frequency_in_seconds=None):
+ seconds_left = limit_in_seconds or 10
+ sleep_length = check_frequency_in_seconds or 1
+ while seconds_left > 0 and self.check_running_pid(pid):
+ seconds_left -= sleep_length
+ time.sleep(sleep_length)
+
+ def _windows_image_name(self, process_name):
+ name, extension = os.path.splitext(process_name)
+ if not extension:
+ # taskkill expects processes to end in .exe
+ # If necessary we could add a flag to disable appending .exe.
+ process_name = "%s.exe" % name
+ return process_name
+
+ def interrupt(self, pid):
+ interrupt_signal = signal.SIGINT
+ # FIXME: The python docs seem to imply that platform == 'win32' may need to use signal.CTRL_C_EVENT
+ # http://docs.python.org/2/library/signal.html
+ try:
+ os.kill(pid, interrupt_signal)
+ except OSError:
+ # Silently ignore when the pid doesn't exist.
+ # It's impossible for callers to avoid race conditions with process shutdown.
+ pass
+
+ # Error handlers do not need to be static methods once all callers are
+ # updated to use an Executive object.
+
+ @staticmethod
+ def default_error_handler(error):
+ raise error
+
+ @staticmethod
+ def ignore_error(error):
+ pass
+
+ def _compute_stdin(self, input):
+ """Returns (stdin, string_to_communicate)"""
+ # FIXME: We should be returning /dev/null for stdin
+ # or closing stdin after process creation to prevent
+ # child processes from getting input from the user.
+ if not input:
+ return (None, None)
+ if hasattr(input, "read"): # Check if the input is a file.
+ return (input, None) # Assume the file is in the right encoding.
+
+ # Popen in Python 2.5 and before does not automatically encode unicode objects.
+ # http://bugs.python.org/issue5290
+ # See https://bugs.webkit.org/show_bug.cgi?id=37528
+    # for an example of a regression caused by passing a unicode string directly.
+ # FIXME: We may need to encode differently on different platforms.
+ if isinstance(input, unicode):
+ input = input.encode(self._child_process_encoding())
+ return (self.PIPE, input)
+
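+    # Sketch (editorial) of the three internal cases above: no input, a
+    # file-like object, and a unicode string that gets encoded for the pipe.
+    #
+    #   Executive()._compute_stdin(None)   # -> (None, None)
+    #   Executive()._compute_stdin(open('/dev/null'))  # -> (<file>, None)
+    #   Executive()._compute_stdin(u'hi')  # -> (subprocess.PIPE, 'hi')
+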
+ def command_for_printing(self, args):
+ """Returns a print-ready string representing command args.
+ The string should be copy/paste ready for execution in a shell."""
+ args = self._stringify_args(args)
+ escaped_args = []
+ for arg in args:
+ if isinstance(arg, unicode):
+ # Escape any non-ascii characters for easy copy/paste
+ arg = arg.encode("unicode_escape")
+ # FIXME: Do we need to fix quotes here?
+ escaped_args.append(arg)
+ return " ".join(escaped_args)
+
+ def run_command(self,
+ args,
+ cwd=None,
+ env=None,
+ input=None,
+ error_handler=None,
+ return_exit_code=False,
+ return_stderr=True,
+ decode_output=True, debug_logging=True):
+ """Popen wrapper for convenience and to work around python bugs."""
+        assert isinstance(args, (list, tuple))
+ start_time = time.time()
+
+ stdin, string_to_communicate = self._compute_stdin(input)
+ stderr = self.STDOUT if return_stderr else None
+
+ process = self.popen(args,
+ stdin=stdin,
+ stdout=self.PIPE,
+ stderr=stderr,
+ cwd=cwd,
+ env=env,
+ close_fds=self._should_close_fds())
+ output = process.communicate(string_to_communicate)[0]
+
+ # run_command automatically decodes to unicode() unless explicitly told not to.
+ if decode_output:
+ output = output.decode(self._child_process_encoding())
+
+ # wait() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
+ exit_code = process.wait()
+
+ if debug_logging:
+ _log.debug('"%s" took %.2fs' % (self.command_for_printing(args), time.time() - start_time))
+
+ if return_exit_code:
+ return exit_code
+
+ if exit_code:
+ script_error = ScriptError(script_args=args,
+ exit_code=exit_code,
+ output=output,
+ cwd=cwd)
+ (error_handler or self.default_error_handler)(script_error)
+ return output
+
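+    # A minimal usage sketch (editorial): run a command, merge stderr into the
+    # returned output, and swallow a non-zero exit instead of raising.
+    #
+    #   executive = Executive()
+    #   output = executive.run_command(['echo', 'hello'],
+    #                                  error_handler=Executive.ignore_error)
+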
+ def _child_process_encoding(self):
+ # Win32 Python 2.x uses CreateProcessA rather than CreateProcessW
+ # to launch subprocesses, so we have to encode arguments using the
+ # current code page.
+ if sys.platform == 'win32' and sys.version < '3':
+ return 'mbcs'
+ # All other platforms use UTF-8.
+ # FIXME: Using UTF-8 on Cygwin will confuse Windows-native commands
+ # which will expect arguments to be encoded using the current code
+ # page.
+ return 'utf-8'
+
+ def _should_encode_child_process_arguments(self):
+ # Cygwin's Python's os.execv doesn't support unicode command
+ # arguments, and neither does Cygwin's execv itself.
+ if sys.platform == 'cygwin':
+ return True
+
+ # Win32 Python 2.x uses CreateProcessA rather than CreateProcessW
+ # to launch subprocesses, so we have to encode arguments using the
+ # current code page.
+ if sys.platform == 'win32' and sys.version < '3':
+ return True
+
+ return False
+
+ def _encode_argument_if_needed(self, argument):
+ if not self._should_encode_child_process_arguments():
+ return argument
+ return argument.encode(self._child_process_encoding())
+
+ def _stringify_args(self, args):
+ # Popen will throw an exception if args are non-strings (like int())
+ string_args = map(unicode, args)
+ # The Windows implementation of Popen cannot handle unicode strings. :(
+ return map(self._encode_argument_if_needed, string_args)
+
+    # The only required argument to popen is named "args"; the rest are optional keyword arguments.
+ def popen(self, args, **kwargs):
+ # FIXME: We should always be stringifying the args, but callers who pass shell=True
+ # expect that the exact bytes passed will get passed to the shell (even if they're wrongly encoded).
+ # shell=True is wrong for many other reasons, and we should remove this
+ # hack as soon as we can fix all callers to not use shell=True.
+ if kwargs.get('shell') == True:
+ string_args = args
+ else:
+ string_args = self._stringify_args(args)
+ return subprocess.Popen(string_args, **kwargs)
+
+ def call(self, args, **kwargs):
+ return subprocess.call(self._stringify_args(args), **kwargs)
+
+ def run_in_parallel(self, command_lines_and_cwds, processes=None):
+ """Runs a list of (cmd_line list, cwd string) tuples in parallel and returns a list of (retcode, stdout, stderr) tuples."""
+ assert len(command_lines_and_cwds)
+ return self.map(_run_command_thunk, command_lines_and_cwds, processes)
+
+ def map(self, thunk, arglist, processes=None):
+ if sys.platform in ('cygwin', 'win32') or len(arglist) == 1:
+ return map(thunk, arglist)
+ pool = multiprocessing.Pool(processes=(processes or multiprocessing.cpu_count()))
+ try:
+ return pool.map(thunk, arglist)
+ finally:
+ pool.close()
+ pool.join()
+
+
+def _run_command_thunk(cmd_line_and_cwd):
+    # Note that this needs to be a module-level (and hence picklable) function to work with multiprocessing.Pool.
+ (cmd_line, cwd) = cmd_line_and_cwd
+ proc = subprocess.Popen(cmd_line, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = proc.communicate()
+ return (proc.returncode, stdout, stderr)
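+
+
+# A usage sketch (editorial; the directories are hypothetical) for
+# run_in_parallel above:
+#
+#   results = Executive().run_in_parallel([(['ls'], '/tmp'), (['ls'], '/var')])
+#   for returncode, stdout, stderr in results:
+#       pass  # each tuple comes from one (command line, cwd) pair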
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/executive_mock.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/executive_mock.py
new file mode 100644
index 0000000..1f137de
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/executive_mock.py
@@ -0,0 +1,198 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import logging
+import os
+
+from webkitpy.common.system.executive import ScriptError
+
+_log = logging.getLogger(__name__)
+
+
+class MockProcess(object):
+ def __init__(self, stdout='MOCK STDOUT\n', stderr=''):
+ self.pid = 42
+ self.stdout = StringIO.StringIO(stdout)
+ self.stderr = StringIO.StringIO(stderr)
+ self.stdin = StringIO.StringIO()
+ self.returncode = 0
+
+ def wait(self):
+ return
+
+ def poll(self):
+        # Consider the process completed once all of its stdout and stderr have been read.
+ if self.stdout.len != self.stdout.tell() or self.stderr.len != self.stderr.tell():
+ return None
+ return self.returncode
+
+# FIXME: This should be unified with MockExecutive2
+class MockExecutive(object):
+ PIPE = "MOCK PIPE"
+ STDOUT = "MOCK STDOUT"
+
+ @staticmethod
+ def ignore_error(error):
+ pass
+
+ def __init__(self, should_log=False, should_throw=False, should_throw_when_run=None):
+ self._should_log = should_log
+ self._should_throw = should_throw
+ self._should_throw_when_run = should_throw_when_run or set()
+ # FIXME: Once executive wraps os.getpid() we can just use a static pid for "this" process.
+ self._running_pids = {'test-webkitpy': os.getpid()}
+ self._proc = None
+ self.calls = []
+
+ def check_running_pid(self, pid):
+ return pid in self._running_pids.values()
+
+ def running_pids(self, process_name_filter):
+ running_pids = []
+ for process_name, process_pid in self._running_pids.iteritems():
+ if process_name_filter(process_name):
+ running_pids.append(process_pid)
+
+ _log.info("MOCK running_pids: %s" % running_pids)
+ return running_pids
+
+ def command_for_printing(self, args):
+ string_args = map(unicode, args)
+ return " ".join(string_args)
+
+ def run_command(self,
+ args,
+ cwd=None,
+ input=None,
+ error_handler=None,
+ return_exit_code=False,
+ return_stderr=True,
+ decode_output=False,
+ env=None,
+ debug_logging=False):
+
+ self.calls.append(args)
+
+        assert isinstance(args, (list, tuple))
+ if self._should_log:
+ env_string = ""
+ if env:
+ env_string = ", env=%s" % env
+ input_string = ""
+ if input:
+ input_string = ", input=%s" % input
+ _log.info("MOCK run_command: %s, cwd=%s%s%s" % (args, cwd, env_string, input_string))
+ output = "MOCK output of child process"
+
+ if self._should_throw_when_run.intersection(args):
+ raise ScriptError("Exception for %s" % args, output="MOCK command output")
+
+ if self._should_throw:
+ raise ScriptError("MOCK ScriptError", output=output)
+ return output
+
+ def cpu_count(self):
+ return 2
+
+ def kill_all(self, process_name):
+ pass
+
+ def kill_process(self, pid):
+ pass
+
+ def popen(self, args, cwd=None, env=None, **kwargs):
+ self.calls.append(args)
+ if self._should_log:
+ cwd_string = ""
+ if cwd:
+ cwd_string = ", cwd=%s" % cwd
+ env_string = ""
+ if env:
+ env_string = ", env=%s" % env
+ _log.info("MOCK popen: %s%s%s" % (args, cwd_string, env_string))
+ if not self._proc:
+ self._proc = MockProcess()
+ return self._proc
+
+ def call(self, args, **kwargs):
+ self.calls.append(args)
+ _log.info('Mock call: %s' % args)
+
+ def run_in_parallel(self, commands):
+ assert len(commands)
+
+ num_previous_calls = len(self.calls)
+ command_outputs = []
+ for cmd_line, cwd in commands:
+ command_outputs.append([0, self.run_command(cmd_line, cwd=cwd), ''])
+
+ new_calls = self.calls[num_previous_calls:]
+ self.calls = self.calls[:num_previous_calls]
+ self.calls.append(new_calls)
+ return command_outputs
+
+ def map(self, thunk, arglist, processes=None):
+ return map(thunk, arglist)
+
+
+class MockExecutive2(MockExecutive):
+ """MockExecutive2 is like MockExecutive except it doesn't log anything."""
+
+ def __init__(self, output='', exit_code=0, exception=None, run_command_fn=None, stderr=''):
+ self._output = output
+ self._stderr = stderr
+ self._exit_code = exit_code
+ self._exception = exception
+ self._run_command_fn = run_command_fn
+ self.calls = []
+
+ def run_command(self,
+ args,
+ cwd=None,
+ input=None,
+ error_handler=None,
+ return_exit_code=False,
+ return_stderr=True,
+ decode_output=False,
+ env=None,
+ debug_logging=False):
+ self.calls.append(args)
+        assert isinstance(args, (list, tuple))
+ if self._exception:
+ raise self._exception # pylint: disable=E0702
+ if self._run_command_fn:
+ return self._run_command_fn(args)
+ if return_exit_code:
+ return self._exit_code
+ if self._exit_code and error_handler:
+ script_error = ScriptError(script_args=args, exit_code=self._exit_code, output=self._output)
+ error_handler(script_error)
+ if return_stderr:
+ return self._output + self._stderr
+ return self._output
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/executive_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/executive_unittest.py
new file mode 100644
index 0000000..0f3d917
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/executive_unittest.py
@@ -0,0 +1,210 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2009 Daniel Bates (dbates@intudata.com). All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import errno
+import signal
+import subprocess
+import sys
+import time
+import unittest
+
+# Since we execute this script directly as part of the unit tests, we need to ensure
+# that Tools/Scripts is in sys.path for the next imports to work correctly.
+script_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
+if script_dir not in sys.path:
+ sys.path.append(script_dir)
+
+
+from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+
+
+class ScriptErrorTest(unittest.TestCase):
+ def test_message_with_output(self):
+ error = ScriptError('My custom message!', '', -1)
+ self.assertEqual(error.message_with_output(), 'My custom message!')
+ error = ScriptError('My custom message!', '', -1, 'My output.')
+ self.assertEqual(error.message_with_output(), 'My custom message!\n\noutput: My output.')
+ error = ScriptError('', 'my_command!', -1, 'My output.', '/Users/username/blah')
+ self.assertEqual(error.message_with_output(), 'Failed to run "\'my_command!\'" exit_code: -1 cwd: /Users/username/blah\n\noutput: My output.')
+ error = ScriptError('', 'my_command!', -1, 'ab' + '1' * 499)
+ self.assertEqual(error.message_with_output(), 'Failed to run "\'my_command!\'" exit_code: -1\n\noutput: Last 500 characters of output:\nb' + '1' * 499)
+
+ def test_message_with_tuple(self):
+ error = ScriptError('', ('my', 'command'), -1, 'My output.', '/Users/username/blah')
+ self.assertEqual(error.message_with_output(), 'Failed to run "(\'my\', \'command\')" exit_code: -1 cwd: /Users/username/blah\n\noutput: My output.')
+
+def never_ending_command():
+ """Arguments for a command that will never end (useful for testing process
+ killing). It should be a process that is unlikely to already be running
+ because all instances will be killed."""
+ if sys.platform == 'win32':
+ return ['wmic']
+ return ['yes']
+
+
+def command_line(cmd, *args):
+ return [sys.executable, __file__, '--' + cmd] + list(args)
+
+
+class ExecutiveTest(unittest.TestCase):
+    def assert_interpreter_for_content(self, interpreter, content):
+ fs = MockFileSystem()
+
+ tempfile, temp_name = fs.open_binary_tempfile('')
+ tempfile.write(content)
+ tempfile.close()
+ file_interpreter = Executive.interpreter_for_script(temp_name, fs)
+
+        self.assertEqual(file_interpreter, interpreter)
+
+ def test_interpreter_for_script(self):
+ self.assert_interpreter_for_content(None, '')
+ self.assert_interpreter_for_content(None, 'abcd\nefgh\nijklm')
+ self.assert_interpreter_for_content(None, '##/usr/bin/perl')
+ self.assert_interpreter_for_content('perl', '#!/usr/bin/env perl')
+ self.assert_interpreter_for_content('perl', '#!/usr/bin/env perl\nfirst\nsecond')
+ self.assert_interpreter_for_content('perl', '#!/usr/bin/perl')
+ self.assert_interpreter_for_content('perl', '#!/usr/bin/perl -w')
+ self.assert_interpreter_for_content(sys.executable, '#!/usr/bin/env python')
+ self.assert_interpreter_for_content(sys.executable, '#!/usr/bin/env python\nfirst\nsecond')
+ self.assert_interpreter_for_content(sys.executable, '#!/usr/bin/python')
+ self.assert_interpreter_for_content('ruby', '#!/usr/bin/env ruby')
+ self.assert_interpreter_for_content('ruby', '#!/usr/bin/env ruby\nfirst\nsecond')
+ self.assert_interpreter_for_content('ruby', '#!/usr/bin/ruby')
+
+ def test_run_command_with_bad_command(self):
+ def run_bad_command():
+ Executive().run_command(["foo_bar_command_blah"], error_handler=Executive.ignore_error, return_exit_code=True)
+ self.assertRaises(OSError, run_bad_command)
+
+ def test_run_command_args_type(self):
+ executive = Executive()
+ self.assertRaises(AssertionError, executive.run_command, "echo")
+ self.assertRaises(AssertionError, executive.run_command, u"echo")
+ executive.run_command(command_line('echo', 'foo'))
+ executive.run_command(tuple(command_line('echo', 'foo')))
+
+ def test_auto_stringify_args(self):
+ executive = Executive()
+ executive.run_command(command_line('echo', 1))
+ executive.popen(command_line('echo', 1), stdout=executive.PIPE).wait()
+ self.assertEqual('echo 1', executive.command_for_printing(['echo', 1]))
+
+ def test_popen_args(self):
+ executive = Executive()
+        # Explicitly naming the 'args' argument should not throw an exception.
+ executive.popen(args=command_line('echo', 1), stdout=executive.PIPE).wait()
+
+ def test_run_command_with_unicode(self):
+ """Validate that it is safe to pass unicode() objects
+ to Executive.run* methods, and they will return unicode()
+ objects by default unless decode_output=False"""
+ unicode_tor_input = u"WebKit \u2661 Tor Arne Vestb\u00F8!"
+ if sys.platform == 'win32':
+ encoding = 'mbcs'
+ else:
+ encoding = 'utf-8'
+ encoded_tor = unicode_tor_input.encode(encoding)
+ # On Windows, we expect the unicode->mbcs->unicode roundtrip to be
+ # lossy. On other platforms, we expect a lossless roundtrip.
+ if sys.platform == 'win32':
+ unicode_tor_output = encoded_tor.decode(encoding)
+ else:
+ unicode_tor_output = unicode_tor_input
+
+ executive = Executive()
+
+ output = executive.run_command(command_line('cat'), input=unicode_tor_input)
+ self.assertEqual(output, unicode_tor_output)
+
+ output = executive.run_command(command_line('echo', unicode_tor_input))
+ self.assertEqual(output, unicode_tor_output)
+
+ output = executive.run_command(command_line('echo', unicode_tor_input), decode_output=False)
+ self.assertEqual(output, encoded_tor)
+
+ # Make sure that str() input also works.
+ output = executive.run_command(command_line('cat'), input=encoded_tor, decode_output=False)
+ self.assertEqual(output, encoded_tor)
+
+ def test_kill_process(self):
+ executive = Executive()
+ process = subprocess.Popen(never_ending_command(), stdout=subprocess.PIPE)
+ self.assertEqual(process.poll(), None) # Process is running
+ executive.kill_process(process.pid)
+
+ # Killing again should fail silently.
+ executive.kill_process(process.pid)
+
+ def _assert_windows_image_name(self, name, expected_windows_name):
+ executive = Executive()
+ windows_name = executive._windows_image_name(name)
+ self.assertEqual(windows_name, expected_windows_name)
+
+ def test_windows_image_name(self):
+ self._assert_windows_image_name("foo", "foo.exe")
+ self._assert_windows_image_name("foo.exe", "foo.exe")
+ self._assert_windows_image_name("foo.com", "foo.com")
+        # If the name already ends in something that looks like an extension,
+        # even if it isn't meant as one, we have no choice but to return the
+        # original name unchanged.
+ self._assert_windows_image_name("foo.baz", "foo.baz")
+ self._assert_windows_image_name("foo.baz.exe", "foo.baz.exe")
+
+ def test_check_running_pid(self):
+ executive = Executive()
+ self.assertTrue(executive.check_running_pid(os.getpid()))
+ # Maximum pid number on Linux is 32768 by default
+ self.assertFalse(executive.check_running_pid(100000))
+
+ def test_running_pids(self):
+ if sys.platform in ("win32", "cygwin"):
+ return # This function isn't implemented on Windows yet.
+
+ executive = Executive()
+ pids = executive.running_pids()
+ self.assertIn(os.getpid(), pids)
+
+ def test_run_in_parallel_assert_nonempty(self):
+ self.assertRaises(AssertionError, Executive().run_in_parallel, [])
+
+
+def main(platform, stdin, stdout, cmd, args):
+ if platform == 'win32' and hasattr(stdout, 'fileno'):
+ import msvcrt
+ msvcrt.setmode(stdout.fileno(), os.O_BINARY)
+ if cmd == '--cat':
+ stdout.write(stdin.read())
+ elif cmd == '--echo':
+ stdout.write(' '.join(args))
+ return 0
+
+if __name__ == '__main__' and len(sys.argv) > 1 and sys.argv[1] in ('--cat', '--echo'):
+ sys.exit(main(sys.platform, sys.stdin, sys.stdout, sys.argv[1], sys.argv[2:]))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/filesystem.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/filesystem.py
new file mode 100644
index 0000000..fdf4347
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/filesystem.py
@@ -0,0 +1,272 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Wrapper object for the file system / source tree."""
+
+import codecs
+import errno
+import exceptions
+import glob
+import hashlib
+import os
+import shutil
+import sys
+import tempfile
+import time
+
+class FileSystem(object):
+ """FileSystem interface for webkitpy.
+
+ Unless otherwise noted, all paths are allowed to be either absolute
+ or relative."""
+ sep = os.sep
+ pardir = os.pardir
+
+ def abspath(self, path):
+ return os.path.abspath(path)
+
+ def realpath(self, path):
+ return os.path.realpath(path)
+
+ def path_to_module(self, module_name):
+ """A wrapper for all calls to __file__ to allow easy unit testing."""
+ # FIXME: This is the only use of sys in this file. It's possible this function should move elsewhere.
+ return sys.modules[module_name].__file__ # __file__ is always an absolute path.
+
+ def expanduser(self, path):
+ return os.path.expanduser(path)
+
+ def basename(self, path):
+ return os.path.basename(path)
+
+ def chdir(self, path):
+ return os.chdir(path)
+
+ def copyfile(self, source, destination):
+ shutil.copyfile(source, destination)
+
+ def dirname(self, path):
+ return os.path.dirname(path)
+
+ def exists(self, path):
+ return os.path.exists(path)
+
+ def files_under(self, path, dirs_to_skip=[], file_filter=None):
+ """Return the list of all files under the given path in topdown order.
+
+ Args:
+ dirs_to_skip: a list of directories to skip over during the
+ traversal (e.g., .svn, resources, etc.)
+ file_filter: if not None, the filter will be invoked
+ with the filesystem object and the dirname and basename of
+ each file found. The file is included in the result if the
+ callback returns True.
+ """
+ def filter_all(fs, dirpath, basename):
+ return True
+
+ file_filter = file_filter or filter_all
+ files = []
+ if self.isfile(path):
+ if file_filter(self, self.dirname(path), self.basename(path)):
+ files.append(path)
+ return files
+
+ if self.basename(path) in dirs_to_skip:
+ return []
+
+ for (dirpath, dirnames, filenames) in os.walk(path):
+ for d in dirs_to_skip:
+ if d in dirnames:
+ dirnames.remove(d)
+
+ for filename in filenames:
+ if file_filter(self, dirpath, filename):
+ files.append(self.join(dirpath, filename))
+ return files
+
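+    # Example sketch (editorial; the paths are hypothetical): the filter is
+    # invoked with the filesystem, the dirname, and the basename of each
+    # candidate file, as the crash-log lookup earlier in this import does.
+    #
+    #   fs = FileSystem()
+    #   crash_logs = fs.files_under('/tmp/logs',
+    #       file_filter=lambda _fs, dirpath, basename: basename.endswith('.crash'))
+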
+ def getcwd(self):
+ return os.getcwd()
+
+ def glob(self, path):
+ return glob.glob(path)
+
+ def isabs(self, path):
+ return os.path.isabs(path)
+
+ def isfile(self, path):
+ return os.path.isfile(path)
+
+ def isdir(self, path):
+ return os.path.isdir(path)
+
+ def join(self, *comps):
+ return os.path.join(*comps)
+
+ def listdir(self, path):
+ return os.listdir(path)
+
+ def walk(self, top):
+ return os.walk(top)
+
+ def mkdtemp(self, **kwargs):
+ """Create and return a uniquely named directory.
+
+ This is like tempfile.mkdtemp, but if used in a with statement
+ the directory will self-delete at the end of the block (if the
+ directory is empty; non-empty directories raise errors). The
+ directory can be safely deleted inside the block as well, if so
+ desired.
+
+ Note that the object returned is not a string and does not support all of the string
+ methods. If you need a string, coerce the object to a string and go from there.
+ """
+ class TemporaryDirectory(object):
+ def __init__(self, **kwargs):
+ self._kwargs = kwargs
+ self._directory_path = tempfile.mkdtemp(**self._kwargs)
+
+ def __str__(self):
+ return self._directory_path
+
+ def __enter__(self):
+ return self._directory_path
+
+ def __exit__(self, type, value, traceback):
+ # Only self-delete if necessary.
+
+ # FIXME: Should we delete non-empty directories?
+ if os.path.exists(self._directory_path):
+ os.rmdir(self._directory_path)
+
+ return TemporaryDirectory(**kwargs)
+
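+    # Usage sketch (editorial): in a with statement the temporary directory
+    # self-deletes on exit if it is still empty; str() yields the path.
+    #
+    #   with FileSystem().mkdtemp(prefix='webkitpy-') as temp_dir:
+    #       pass  # temp_dir is the directory path as a plain string
+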
+ def maybe_make_directory(self, *path):
+ """Create the specified directory if it doesn't already exist."""
+ try:
+ os.makedirs(self.join(*path))
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ def move(self, source, destination):
+ shutil.move(source, destination)
+
+ def mtime(self, path):
+ return os.stat(path).st_mtime
+
+ def normpath(self, path):
+ return os.path.normpath(path)
+
+ def open_binary_tempfile(self, suffix):
+ """Create, open, and return a binary temp file. Returns a tuple of the file and the name."""
+ temp_fd, temp_name = tempfile.mkstemp(suffix)
+ f = os.fdopen(temp_fd, 'wb')
+ return f, temp_name
+
+ def open_binary_file_for_reading(self, path):
+ return codecs.open(path, 'rb')
+
+ def read_binary_file(self, path):
+ """Return the contents of the file at the given path as a byte string."""
+        with open(path, 'rb') as f:
+ return f.read()
+
+ def write_binary_file(self, path, contents):
+        with open(path, 'wb') as f:
+ f.write(contents)
+
+ def open_text_file_for_reading(self, path):
+ # Note: There appears to be an issue with the returned file objects
+ # not being seekable. See http://stackoverflow.com/questions/1510188/can-seek-and-tell-work-with-utf-8-encoded-documents-in-python .
+ return codecs.open(path, 'r', 'utf8')
+
+ def open_text_file_for_writing(self, path):
+ return codecs.open(path, 'w', 'utf8')
+
+ def read_text_file(self, path):
+ """Return the contents of the file at the given path as a Unicode string.
+
+ The file is read assuming it is a UTF-8 encoded file with no BOM."""
+ with codecs.open(path, 'r', 'utf8') as f:
+ return f.read()
+
+ def write_text_file(self, path, contents):
+ """Write the contents to the file at the given location.
+
+ The file is written encoded as UTF-8 with no BOM."""
+ with codecs.open(path, 'w', 'utf8') as f:
+ f.write(contents)
+
+ def sha1(self, path):
+ contents = self.read_binary_file(path)
+ return hashlib.sha1(contents).hexdigest()
+
+ def relpath(self, path, start='.'):
+ return os.path.relpath(path, start)
+
+ class _WindowsError(exceptions.OSError):
+ """Fake exception for Linux and Mac."""
+ pass
+
+ def remove(self, path, osremove=os.remove):
+ """On Windows, if a process was recently killed and it held on to a
+ file, the OS will hold on to the file for a short while. This makes
+ attempts to delete the file fail. To work around that, this method
+ will retry for a few seconds until Windows is done with the file."""
+ try:
+ exceptions.WindowsError
+ except AttributeError:
+ exceptions.WindowsError = FileSystem._WindowsError
+
+ retry_timeout_sec = 3.0
+ sleep_interval = 0.1
+ while True:
+ try:
+ osremove(path)
+ return True
+ except exceptions.WindowsError, e:
+ time.sleep(sleep_interval)
+ retry_timeout_sec -= sleep_interval
+ if retry_timeout_sec < 0:
+ raise e
+
+ def rmtree(self, path):
+ """Delete the directory rooted at path, whether empty or not."""
+ shutil.rmtree(path, ignore_errors=True)
+
+ def copytree(self, source, destination):
+ shutil.copytree(source, destination)
+
+ def split(self, path):
+ """Return (dirname, basename + '.' + ext)"""
+ return os.path.split(path)
+
+ def splitext(self, path):
+ """Return (dirname + os.sep + basename, '.' + ext)"""
+ return os.path.splitext(path)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/filesystem_mock.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/filesystem_mock.py
new file mode 100644
index 0000000..0dbf74d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/filesystem_mock.py
@@ -0,0 +1,489 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import errno
+import hashlib
+import os
+import re
+
+from webkitpy.common.system import path
+
+
+class MockFileSystem(object):
+ sep = '/'
+ pardir = '..'
+
+ def __init__(self, files=None, dirs=None, cwd='/'):
+ """Initializes a "mock" filesystem that can be used to completely
+ stub out a filesystem.
+
+ Args:
+ files: a dict of filenames -> file contents. A file contents
+ value of None is used to indicate that the file should
+ not exist.
+ """
+ self.files = files or {}
+ self.written_files = {}
+ self.last_tmpdir = None
+ self.current_tmpno = 0
+ self.cwd = cwd
+ self.dirs = set(dirs or [])
+ self.dirs.add(cwd)
+ for f in self.files:
+ d = self.dirname(f)
+            while d not in self.dirs:
+ self.dirs.add(d)
+ d = self.dirname(d)
+
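+    # Construction sketch (editorial; the paths are hypothetical): a None
+    # value models a path that does not exist, mirroring the docstring above.
+    #
+    #   fs = MockFileSystem({'/mock/a.txt': 'contents', '/mock/gone.txt': None})
+    #   fs.isfile('/mock/a.txt')     # -> True
+    #   fs.isfile('/mock/gone.txt')  # -> False
+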
+ def clear_written_files(self):
+ # This function can be used to track what is written between steps in a test.
+ self.written_files = {}
+
+ def _raise_not_found(self, path):
+ raise IOError(errno.ENOENT, path, os.strerror(errno.ENOENT))
+
+ def _split(self, path):
+ # This is not quite a full implementation of os.path.split
+ # http://docs.python.org/library/os.path.html#os.path.split
+ if self.sep in path:
+ return path.rsplit(self.sep, 1)
+ return ('', path)
+
+ def abspath(self, path):
+ if os.path.isabs(path):
+ return self.normpath(path)
+ return self.abspath(self.join(self.cwd, path))
+
+ def realpath(self, path):
+ return self.abspath(path)
+
+ def basename(self, path):
+ return self._split(path)[1]
+
+ def expanduser(self, path):
+ if path[0] != "~":
+ return path
+ parts = path.split(self.sep, 1)
+ home_directory = self.sep + "Users" + self.sep + "mock"
+ if len(parts) == 1:
+ return home_directory
+ return home_directory + self.sep + parts[1]
+
+ def path_to_module(self, module_name):
+ return "/mock-checkout/third_party/WebKit/Tools/Scripts/" + module_name.replace('.', '/') + ".py"
+
+ def chdir(self, path):
+ path = self.normpath(path)
+ if not self.isdir(path):
+ raise OSError(errno.ENOENT, path, os.strerror(errno.ENOENT))
+ self.cwd = path
+
+ def copyfile(self, source, destination):
+ if not self.exists(source):
+ self._raise_not_found(source)
+ if self.isdir(source):
+ raise IOError(errno.EISDIR, source, os.strerror(errno.EISDIR))
+ if self.isdir(destination):
+ raise IOError(errno.EISDIR, destination, os.strerror(errno.EISDIR))
+ if not self.exists(self.dirname(destination)):
+ raise IOError(errno.ENOENT, destination, os.strerror(errno.ENOENT))
+
+ self.files[destination] = self.files[source]
+ self.written_files[destination] = self.files[source]
+
+ def dirname(self, path):
+ return self._split(path)[0]
+
+ def exists(self, path):
+ return self.isfile(path) or self.isdir(path)
+
+ def files_under(self, path, dirs_to_skip=[], file_filter=None):
+ def filter_all(fs, dirpath, basename):
+ return True
+
+ file_filter = file_filter or filter_all
+ files = []
+ if self.isfile(path):
+ if file_filter(self, self.dirname(path), self.basename(path)) and self.files[path] is not None:
+ files.append(path)
+ return files
+
+ if self.basename(path) in dirs_to_skip:
+ return []
+
+ if not path.endswith(self.sep):
+ path += self.sep
+
+ dir_substrings = [self.sep + d + self.sep for d in dirs_to_skip]
+ for filename in self.files:
+ if not filename.startswith(path):
+ continue
+
+ suffix = filename[len(path) - 1:]
+ if any(dir_substring in suffix for dir_substring in dir_substrings):
+ continue
+
+ dirpath, basename = self._split(filename)
+ if file_filter(self, dirpath, basename) and self.files[filename] is not None:
+ files.append(filename)
+
+ return files
+
+ def getcwd(self):
+ return self.cwd
+
+ def glob(self, glob_string):
+ # FIXME: This handles '*', but not '?', '[', or ']'.
+ glob_string = re.escape(glob_string)
+ glob_string = glob_string.replace('\\*', '[^\\/]*') + '$'
+ glob_string = glob_string.replace('\\/', '/')
+ path_filter = lambda path: re.match(glob_string, path)
+
+ # We could use fnmatch.fnmatch, but that might not do the right thing on windows.
+ existing_files = [path for path, contents in self.files.items() if contents is not None]
+ return filter(path_filter, existing_files) + filter(path_filter, self.dirs)
+
+ def isabs(self, path):
+ return path.startswith(self.sep)
+
+ def isfile(self, path):
+ return path in self.files and self.files[path] is not None
+
+ def isdir(self, path):
+ return self.normpath(path) in self.dirs
+
+ def _slow_but_correct_join(self, *comps):
+ return re.sub(re.escape(os.path.sep), self.sep, os.path.join(*comps))
+
+ def join(self, *comps):
+ # This function is called a lot, so we optimize it; there are
+ # unittests to check that we match _slow_but_correct_join(), above.
+ path = ''
+ sep = self.sep
+ for comp in comps:
+ if not comp:
+ continue
+ if comp[0] == sep:
+ path = comp
+ continue
+ if path:
+ path += sep
+ path += comp
+ if comps[-1] == '' and path:
+            path += sep
+ path = path.replace(sep + sep, sep)
+ return path
+
+ def listdir(self, path):
+ root, dirs, files = list(self.walk(path))[0]
+ return dirs + files
+
+ def walk(self, top):
+ sep = self.sep
+ if not self.isdir(top):
+ raise OSError("%s is not a directory" % top)
+
+ if not top.endswith(sep):
+ top += sep
+
+ dirs = []
+ files = []
+ for f in self.files:
+ if self.exists(f) and f.startswith(top):
+ remaining = f[len(top):]
+ if sep in remaining:
+ dir = remaining[:remaining.index(sep)]
+                    if dir not in dirs:
+ dirs.append(dir)
+ else:
+ files.append(remaining)
+ return [(top[:-1], dirs, files)]
+
+ def mtime(self, path):
+ if self.exists(path):
+ return 0
+ self._raise_not_found(path)
+
+ def _mktemp(self, suffix='', prefix='tmp', dir=None, **kwargs):
+ if dir is None:
+ dir = self.sep + '__im_tmp'
+ curno = self.current_tmpno
+ self.current_tmpno += 1
+ self.last_tmpdir = self.join(dir, '%s_%u_%s' % (prefix, curno, suffix))
+ return self.last_tmpdir
+
+ def mkdtemp(self, **kwargs):
+ class TemporaryDirectory(object):
+ def __init__(self, fs, **kwargs):
+ self._kwargs = kwargs
+ self._filesystem = fs
+ self._directory_path = fs._mktemp(**kwargs)
+ fs.maybe_make_directory(self._directory_path)
+
+ def __str__(self):
+ return self._directory_path
+
+ def __enter__(self):
+ return self._directory_path
+
+ def __exit__(self, type, value, traceback):
+ # Only self-delete if necessary.
+
+ # FIXME: Should we delete non-empty directories?
+ if self._filesystem.exists(self._directory_path):
+ self._filesystem.rmtree(self._directory_path)
+
+ return TemporaryDirectory(fs=self, **kwargs)
+
+ def maybe_make_directory(self, *path):
+ norm_path = self.normpath(self.join(*path))
+ while norm_path and not self.isdir(norm_path):
+ self.dirs.add(norm_path)
+ norm_path = self.dirname(norm_path)
+
+ def move(self, source, destination):
+ if not self.exists(source):
+ self._raise_not_found(source)
+ if self.isfile(source):
+ self.files[destination] = self.files[source]
+ self.written_files[destination] = self.files[destination]
+ self.files[source] = None
+ self.written_files[source] = None
+ return
+ self.copytree(source, destination)
+ self.rmtree(source)
+
+ def _slow_but_correct_normpath(self, path):
+ return re.sub(re.escape(os.path.sep), self.sep, os.path.normpath(path))
+
+ def normpath(self, path):
+ # This function is called a lot, so we try to optimize the common cases
+ # instead of always calling _slow_but_correct_normpath(), above.
+ if '..' in path or '/./' in path:
+ # This doesn't happen very often; don't bother trying to optimize it.
+ return self._slow_but_correct_normpath(path)
+ if not path:
+ return '.'
+ if path == '/':
+ return path
+ if path == '/.':
+ return '/'
+ if path.endswith('/.'):
+ return path[:-2]
+ if path.endswith('/'):
+ return path[:-1]
+ return path
+
+ def open_binary_tempfile(self, suffix=''):
+ path = self._mktemp(suffix)
+ return (WritableBinaryFileObject(self, path), path)
+
+ def open_binary_file_for_reading(self, path):
+ if self.files[path] is None:
+ self._raise_not_found(path)
+ return ReadableBinaryFileObject(self, path, self.files[path])
+
+ def read_binary_file(self, path):
+ # Intentionally raises KeyError if we don't recognize the path.
+ if self.files[path] is None:
+ self._raise_not_found(path)
+ return self.files[path]
+
+ def write_binary_file(self, path, contents):
+ # FIXME: should this assert if dirname(path) doesn't exist?
+ self.maybe_make_directory(self.dirname(path))
+ self.files[path] = contents
+ self.written_files[path] = contents
+
+ def open_text_file_for_reading(self, path):
+ if self.files[path] is None:
+ self._raise_not_found(path)
+ return ReadableTextFileObject(self, path, self.files[path])
+
+ def open_text_file_for_writing(self, path):
+ return WritableTextFileObject(self, path)
+
+ def read_text_file(self, path):
+ return self.read_binary_file(path).decode('utf-8')
+
+ def write_text_file(self, path, contents):
+ return self.write_binary_file(path, contents.encode('utf-8'))
+
+ def sha1(self, path):
+ contents = self.read_binary_file(path)
+ return hashlib.sha1(contents).hexdigest()
+
+ def relpath(self, path, start='.'):
+ # Since os.path.relpath() calls os.path.normpath()
+ # (see http://docs.python.org/library/os.path.html#os.path.abspath )
+ # it also removes trailing slashes and converts forward and backward
+ # slashes to the preferred slash os.sep.
+ start = self.abspath(start)
+ path = self.abspath(path)
+
+ common_root = start
+ dot_dot = ''
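+        # Walk up from |start| one level at a time until we reach an
+        # ancestor of |path|, accumulating one '..' per level climbed.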
+ while not common_root == '':
+ if path.startswith(common_root):
+ break
+ common_root = self.dirname(common_root)
+ dot_dot += '..' + self.sep
+
+ rel_path = path[len(common_root):]
+
+ if not rel_path:
+ return '.'
+
+ if rel_path[0] == self.sep:
+ # It is probably sufficient to remove just the first character
+ # since os.path.normpath() collapses separators, but we use
+ # lstrip() just to be sure.
+ rel_path = rel_path.lstrip(self.sep)
+ elif not common_root == '/':
+ # We are in the case typified by the following example:
+ # path = "/tmp/foobar", start = "/tmp/foo" -> rel_path = "bar"
+ common_root = self.dirname(common_root)
+ dot_dot += '..' + self.sep
+ rel_path = path[len(common_root) + 1:]
+
+ return dot_dot + rel_path
+
+ def remove(self, path):
+ if self.files[path] is None:
+ self._raise_not_found(path)
+ self.files[path] = None
+ self.written_files[path] = None
+
+ def rmtree(self, path):
+ path = self.normpath(path)
+
+ for f in self.files:
+ # We need to add a trailing separator to path to avoid matching
+ # cases like path='/foo/b' and f='/foo/bar/baz'.
+ if f == path or f.startswith(path + self.sep):
+ self.files[f] = None
+
+ self.dirs = set(filter(lambda d: not (d == path or d.startswith(path + self.sep)), self.dirs))
+
+ def copytree(self, source, destination):
+ source = self.normpath(source)
+ destination = self.normpath(destination)
+
+ for source_file in list(self.files):
+ if source_file.startswith(source):
+ destination_path = self.join(destination, self.relpath(source_file, source))
+ self.maybe_make_directory(self.dirname(destination_path))
+ self.files[destination_path] = self.files[source_file]
+
+ def split(self, path):
+ idx = path.rfind(self.sep)
+ if idx == -1:
+ return ('', path)
+ return (path[:idx], path[(idx + 1):])
+
+ def splitext(self, path):
+ idx = path.rfind('.')
+ if idx == -1:
+ idx = len(path)
+ return (path[0:idx], path[idx:])
+
+
+class WritableBinaryFileObject(object):
+ def __init__(self, fs, path):
+ self.fs = fs
+ self.path = path
+ self.closed = False
+ self.fs.files[path] = ""
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.close()
+
+ def close(self):
+ self.closed = True
+
+    def write(self, contents):
+        self.fs.files[self.path] += contents
+        self.fs.written_files[self.path] = self.fs.files[self.path]
+
+
+class WritableTextFileObject(WritableBinaryFileObject):
+    def write(self, contents):
+        WritableBinaryFileObject.write(self, contents.encode('utf-8'))
+
+
+class ReadableBinaryFileObject(object):
+ def __init__(self, fs, path, data):
+ self.fs = fs
+ self.path = path
+ self.closed = False
+ self.data = data
+ self.offset = 0
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.close()
+
+ def close(self):
+ self.closed = True
+
+    def read(self, bytes=None):
+        # Use an explicit None check so that read(0) returns an empty
+        # string, matching the behavior of real file objects.
+        if bytes is None:
+            return self.data[self.offset:]
+        start = self.offset
+        self.offset += bytes
+        return self.data[start:self.offset]
+
+
+class ReadableTextFileObject(ReadableBinaryFileObject):
+ def __init__(self, fs, path, data):
+ super(ReadableTextFileObject, self).__init__(fs, path, StringIO.StringIO(data.decode("utf-8")))
+
+ def close(self):
+ self.data.close()
+ super(ReadableTextFileObject, self).close()
+
+ def read(self, bytes=-1):
+ return self.data.read(bytes)
+
+ def readline(self, length=None):
+ return self.data.readline(length)
+
+ def __iter__(self):
+ return self.data.__iter__()
+
+ def next(self):
+ return self.data.next()
+
+ def seek(self, offset, whence=os.SEEK_SET):
+ self.data.seek(offset, whence)
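+
+
+# Illustrative usage (an expository sketch added here, not upstream code):
+# exercising the in-memory filesystem directly. MockFileSystem() with no
+# arguments is the same construction the unit tests use.
+if __name__ == '__main__':
+    fs = MockFileSystem()
+    fs.write_text_file('/tmp/demo/hello.txt', u'hello')
+    assert fs.read_text_file('/tmp/demo/hello.txt') == u'hello'
+    assert fs.listdir('/tmp/demo') == ['hello.txt']
+    print fs.walk('/tmp/demo')  # [('/tmp/demo', [], ['hello.txt'])]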
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/filesystem_mock_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/filesystem_mock_unittest.py
new file mode 100644
index 0000000..2dc02f5
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/filesystem_mock_unittest.py
@@ -0,0 +1,87 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import re
+import unittest
+
+
+from webkitpy.common.system import filesystem_mock
+from webkitpy.common.system import filesystem_unittest
+
+
+class MockFileSystemTest(unittest.TestCase, filesystem_unittest.GenericFileSystemTests):
+ def setUp(self):
+ self.fs = filesystem_mock.MockFileSystem()
+ self.setup_generic_test_dir()
+
+ def tearDown(self):
+ self.teardown_generic_test_dir()
+ self.fs = None
+
+ def quick_check(self, test_fn, good_fn, *tests):
+ for test in tests:
+ if hasattr(test, '__iter__'):
+ expected = good_fn(*test)
+ actual = test_fn(*test)
+ else:
+ expected = good_fn(test)
+ actual = test_fn(test)
+ self.assertEqual(expected, actual, 'given %s, expected %s, got %s' % (repr(test), repr(expected), repr(actual)))
+
+ def test_join(self):
+ self.quick_check(self.fs.join,
+ self.fs._slow_but_correct_join,
+ ('',),
+ ('', 'bar'),
+ ('foo',),
+ ('foo/',),
+ ('foo', ''),
+ ('foo/', ''),
+ ('foo', 'bar'),
+ ('foo', '/bar'),
+ )
+
+ def test_normpath(self):
+ self.quick_check(self.fs.normpath,
+ self.fs._slow_but_correct_normpath,
+ '',
+ '/',
+ '.',
+ '/.',
+ 'foo',
+ 'foo/',
+ 'foo/.',
+ 'foo/bar',
+ '/foo',
+ 'foo/../bar',
+ 'foo/../bar/baz',
+ '../foo')
+
+ def test_relpath_win32(self):
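+        # The mock filesystem only models '/'-separated paths, so the
+        # win32-specific relpath() expectations from the generic tests
+        # do not apply to it.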
+ pass
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py
new file mode 100644
index 0000000..3f7dedb
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py
@@ -0,0 +1,326 @@
+# vim: set fileencoding=utf-8 :
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# NOTE: The fileencoding comment on the first line of the file is
+# important; without it, Python will choke while trying to parse the file,
+# since it includes non-ASCII characters.
+
+import os
+import stat
+import sys
+import tempfile
+import unittest
+
+from webkitpy.common.system.filesystem import FileSystem
+
+
+class GenericFileSystemTests(object):
+ """Tests that should pass on either a real or mock filesystem."""
+ # pylint gets confused about this being a mixin: pylint: disable=E1101
+ def setup_generic_test_dir(self):
+ fs = self.fs
+ self.generic_test_dir = str(self.fs.mkdtemp())
+ self.orig_cwd = fs.getcwd()
+ fs.chdir(self.generic_test_dir)
+ fs.write_text_file('foo.txt', 'foo')
+ fs.write_text_file('foobar', 'foobar')
+ fs.maybe_make_directory('foodir')
+ fs.write_text_file(fs.join('foodir', 'baz'), 'baz')
+ fs.chdir(self.orig_cwd)
+
+ def teardown_generic_test_dir(self):
+ self.fs.rmtree(self.generic_test_dir)
+ self.fs.chdir(self.orig_cwd)
+ self.generic_test_dir = None
+
+ def test_glob__trailing_asterisk(self):
+ self.fs.chdir(self.generic_test_dir)
+ self.assertEqual(set(self.fs.glob('fo*')), set(['foo.txt', 'foobar', 'foodir']))
+
+ def test_glob__leading_asterisk(self):
+ self.fs.chdir(self.generic_test_dir)
+ self.assertEqual(set(self.fs.glob('*xt')), set(['foo.txt']))
+
+ def test_glob__middle_asterisk(self):
+ self.fs.chdir(self.generic_test_dir)
+ self.assertEqual(set(self.fs.glob('f*r')), set(['foobar', 'foodir']))
+
+ def test_glob__period_is_escaped(self):
+ self.fs.chdir(self.generic_test_dir)
+ self.assertEqual(set(self.fs.glob('foo.*')), set(['foo.txt']))
+
+ def test_relpath_unix(self):
+ if sys.platform == 'win32':
+ return
+ self.assertEqual(self.fs.relpath('aaa/bbb'), 'aaa/bbb')
+ self.assertEqual(self.fs.relpath('aaa/bbb/'), 'aaa/bbb')
+ self.assertEqual(self.fs.relpath('aaa/bbb/.'), 'aaa/bbb')
+ self.assertEqual(self.fs.relpath('aaa/./bbb'), 'aaa/bbb')
+ self.assertEqual(self.fs.relpath('aaa/../bbb/'), 'bbb')
+ self.assertEqual(self.fs.relpath('aaa/bbb', 'aaa/bbb'), '.')
+ self.assertEqual(self.fs.relpath('aaa/bbb/ccc', 'aaa/bbb'), 'ccc')
+ self.assertEqual(self.fs.relpath('aaa/./ccc', 'aaa/bbb'), '../ccc')
+ self.assertEqual(self.fs.relpath('aaa/../ccc', 'aaa/bbb'), '../../ccc')
+ self.assertEqual(self.fs.relpath('aaa/bbb', 'aaa/ccc'), '../bbb')
+ self.assertEqual(self.fs.relpath('aaa/bbb', 'ccc/ddd'), '../../aaa/bbb')
+ self.assertEqual(self.fs.relpath('aaa/bbb', 'aaa/b'), '../bbb')
+ self.assertEqual(self.fs.relpath('aaa/bbb', 'a/bbb'), '../../aaa/bbb')
+
+ def test_relpath_win32(self):
+ if sys.platform != 'win32':
+ return
+ self.assertEqual(self.fs.relpath('aaa\\bbb'), 'aaa\\bbb')
+ self.assertEqual(self.fs.relpath('aaa\\bbb\\'), 'aaa\\bbb')
+ self.assertEqual(self.fs.relpath('aaa\\bbb\\.'), 'aaa\\bbb')
+ self.assertEqual(self.fs.relpath('aaa\\.\\bbb'), 'aaa\\bbb')
+ self.assertEqual(self.fs.relpath('aaa\\..\\bbb\\'), 'bbb')
+ self.assertEqual(self.fs.relpath('aaa\\bbb', 'aaa\\bbb'), '.')
+ self.assertEqual(self.fs.relpath('aaa\\bbb\\ccc', 'aaa\\bbb'), 'ccc')
+ self.assertEqual(self.fs.relpath('aaa\\.\\ccc', 'aaa\\bbb'), '..\\ccc')
+ self.assertEqual(self.fs.relpath('aaa\\..\\ccc', 'aaa\\bbb'), '..\\..\\ccc')
+ self.assertEqual(self.fs.relpath('aaa\\bbb', 'aaa\\ccc'), '..\\bbb')
+ self.assertEqual(self.fs.relpath('aaa\\bbb', 'ccc\\ddd'), '..\\..\\aaa\\bbb')
+ self.assertEqual(self.fs.relpath('aaa\\bbb', 'aaa\\b'), '..\\bbb')
+ self.assertEqual(self.fs.relpath('aaa\\bbb', 'a\\bbb'), '..\\..\\aaa\\bbb')
+
+ def test_rmtree(self):
+ self.fs.chdir(self.generic_test_dir)
+ self.fs.rmtree('foo')
+ self.assertTrue(self.fs.exists('foodir'))
+ self.assertTrue(self.fs.exists(self.fs.join('foodir', 'baz')))
+ self.fs.rmtree('foodir')
+ self.assertFalse(self.fs.exists('foodir'))
+ self.assertFalse(self.fs.exists(self.fs.join('foodir', 'baz')))
+
+ def test_copytree(self):
+ self.fs.chdir(self.generic_test_dir)
+ self.fs.copytree('foodir/', 'bardir/')
+ self.assertTrue(self.fs.exists('bardir'))
+ self.assertTrue(self.fs.exists(self.fs.join('bardir', 'baz')))
+
+ def test_move(self):
+ self.fs.chdir(self.generic_test_dir)
+ self.fs.move('foo.txt', 'bar.txt')
+ self.assertFalse(self.fs.exists('foo.txt'))
+ self.assertTrue(self.fs.exists('bar.txt'))
+ self.fs.move('foodir', 'bardir')
+ self.assertFalse(self.fs.exists('foodir'))
+ self.assertFalse(self.fs.exists(self.fs.join('foodir', 'baz')))
+ self.assertTrue(self.fs.exists('bardir'))
+ self.assertTrue(self.fs.exists(self.fs.join('bardir', 'baz')))
+
+
+class RealFileSystemTest(unittest.TestCase, GenericFileSystemTests):
+ def setUp(self):
+ self.fs = FileSystem()
+ self.setup_generic_test_dir()
+
+ self._this_dir = os.path.dirname(os.path.abspath(__file__))
+ self._missing_file = os.path.join(self._this_dir, 'missing_file.py')
+ self._this_file = os.path.join(self._this_dir, 'filesystem_unittest.py')
+
+ def tearDown(self):
+ self.teardown_generic_test_dir()
+ self.fs = None
+
+ def test_chdir(self):
+ fs = FileSystem()
+ cwd = fs.getcwd()
+ newdir = '/'
+ if sys.platform == 'win32':
+ newdir = 'c:\\'
+ fs.chdir(newdir)
+ self.assertEqual(fs.getcwd(), newdir)
+ fs.chdir(cwd)
+
+ def test_chdir__notexists(self):
+ fs = FileSystem()
+ newdir = '/dirdoesnotexist'
+ if sys.platform == 'win32':
+ newdir = 'c:\\dirdoesnotexist'
+ self.assertRaises(OSError, fs.chdir, newdir)
+
+ def test_exists__true(self):
+ fs = FileSystem()
+ self.assertTrue(fs.exists(self._this_file))
+
+ def test_exists__false(self):
+ fs = FileSystem()
+ self.assertFalse(fs.exists(self._missing_file))
+
+ def test_getcwd(self):
+ fs = FileSystem()
+ self.assertTrue(fs.exists(fs.getcwd()))
+
+ def test_isdir__true(self):
+ fs = FileSystem()
+ self.assertTrue(fs.isdir(self._this_dir))
+
+ def test_isdir__false(self):
+ fs = FileSystem()
+ self.assertFalse(fs.isdir(self._this_file))
+
+ def test_join(self):
+ fs = FileSystem()
+ self.assertEqual(fs.join('foo', 'bar'),
+ os.path.join('foo', 'bar'))
+
+ def test_listdir(self):
+ fs = FileSystem()
+ with fs.mkdtemp(prefix='filesystem_unittest_') as d:
+ self.assertEqual(fs.listdir(d), [])
+ new_file = os.path.join(d, 'foo')
+ fs.write_text_file(new_file, u'foo')
+ self.assertEqual(fs.listdir(d), ['foo'])
+ os.remove(new_file)
+
+ def test_walk(self):
+ fs = FileSystem()
+ with fs.mkdtemp(prefix='filesystem_unittest_') as d:
+ self.assertEqual(list(fs.walk(d)), [(d, [], [])])
+ new_file = os.path.join(d, 'foo')
+ fs.write_text_file(new_file, u'foo')
+ self.assertEqual(list(fs.walk(d)), [(d, [], ['foo'])])
+ os.remove(new_file)
+
+ def test_maybe_make_directory__success(self):
+ fs = FileSystem()
+
+ with fs.mkdtemp(prefix='filesystem_unittest_') as base_path:
+ sub_path = os.path.join(base_path, "newdir")
+ self.assertFalse(os.path.exists(sub_path))
+ self.assertFalse(fs.isdir(sub_path))
+
+ fs.maybe_make_directory(sub_path)
+ self.assertTrue(os.path.exists(sub_path))
+ self.assertTrue(fs.isdir(sub_path))
+
+ # Make sure we can re-create it.
+ fs.maybe_make_directory(sub_path)
+ self.assertTrue(os.path.exists(sub_path))
+ self.assertTrue(fs.isdir(sub_path))
+
+ # Clean up.
+ os.rmdir(sub_path)
+
+ self.assertFalse(os.path.exists(base_path))
+ self.assertFalse(fs.isdir(base_path))
+
+ def test_maybe_make_directory__failure(self):
+ # FIXME: os.chmod() doesn't work on Windows to set directories
+ # as readonly, so we skip this test for now.
+ if sys.platform in ('win32', 'cygwin'):
+ return
+
+ fs = FileSystem()
+ with fs.mkdtemp(prefix='filesystem_unittest_') as d:
+ # Remove write permissions on the parent directory.
+ os.chmod(d, stat.S_IRUSR)
+
+ # Now try to create a sub directory - should fail.
+ sub_dir = fs.join(d, 'subdir')
+ self.assertRaises(OSError, fs.maybe_make_directory, sub_dir)
+
+ # Clean up in case the test failed and we did create the
+ # directory.
+ if os.path.exists(sub_dir):
+ os.rmdir(sub_dir)
+
+ def test_read_and_write_text_file(self):
+ fs = FileSystem()
+ text_path = None
+
+ unicode_text_string = u'\u016An\u012Dc\u014Dde\u033D'
+ hex_equivalent = '\xC5\xAA\x6E\xC4\xAD\x63\xC5\x8D\x64\x65\xCC\xBD'
+ try:
+ text_path = tempfile.mktemp(prefix='tree_unittest_')
+ file = fs.open_text_file_for_writing(text_path)
+ file.write(unicode_text_string)
+ file.close()
+
+ file = fs.open_text_file_for_reading(text_path)
+ read_text = file.read()
+ file.close()
+
+ self.assertEqual(read_text, unicode_text_string)
+ finally:
+ if text_path and fs.isfile(text_path):
+ os.remove(text_path)
+
+ def test_read_and_write_file(self):
+ fs = FileSystem()
+ text_path = None
+ binary_path = None
+
+ unicode_text_string = u'\u016An\u012Dc\u014Dde\u033D'
+ hex_equivalent = '\xC5\xAA\x6E\xC4\xAD\x63\xC5\x8D\x64\x65\xCC\xBD'
+ try:
+ text_path = tempfile.mktemp(prefix='tree_unittest_')
+ binary_path = tempfile.mktemp(prefix='tree_unittest_')
+ fs.write_text_file(text_path, unicode_text_string)
+ contents = fs.read_binary_file(text_path)
+ self.assertEqual(contents, hex_equivalent)
+
+ fs.write_binary_file(binary_path, hex_equivalent)
+ text_contents = fs.read_text_file(binary_path)
+ self.assertEqual(text_contents, unicode_text_string)
+ finally:
+ if text_path and fs.isfile(text_path):
+ os.remove(text_path)
+ if binary_path and fs.isfile(binary_path):
+ os.remove(binary_path)
+
+ def test_read_binary_file__missing(self):
+ fs = FileSystem()
+ self.assertRaises(IOError, fs.read_binary_file, self._missing_file)
+
+ def test_read_text_file__missing(self):
+ fs = FileSystem()
+ self.assertRaises(IOError, fs.read_text_file, self._missing_file)
+
+ def test_remove_file_with_retry(self):
+ RealFileSystemTest._remove_failures = 2
+
+ def remove_with_exception(filename):
+ RealFileSystemTest._remove_failures -= 1
+ if RealFileSystemTest._remove_failures >= 0:
+ try:
+ raise WindowsError
+ except NameError:
+ raise FileSystem._WindowsError
+
+ fs = FileSystem()
+ self.assertTrue(fs.remove('filename', remove_with_exception))
+ self.assertEqual(-1, RealFileSystemTest._remove_failures)
+
+ def test_sep(self):
+ fs = FileSystem()
+
+ self.assertEqual(fs.sep, os.sep)
+ self.assertEqual(fs.join("foo", "bar"),
+ os.path.join("foo", "bar"))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/logtesting.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/logtesting.py
new file mode 100644
index 0000000..0cfa6cb
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/logtesting.py
@@ -0,0 +1,258 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Supports the unit-testing of logging code.
+
+Provides support for unit-testing messages logged using the built-in
+logging module.
+
+Inherit from the LoggingTestCase class for basic testing needs. For
+more advanced needs (e.g. unit-testing methods that configure logging),
+see the TestLogStream class, and perhaps also the LogTesting class.
+
+"""
+
+import logging
+import unittest
+
+
+class TestLogStream(object):
+
+ """Represents a file-like object for unit-testing logging.
+
+ This is meant for passing to the logging.StreamHandler constructor.
+ Log messages captured by instances of this object can be tested
+ using self.assertMessages() below.
+
+ """
+
+ def __init__(self, test_case):
+ """Create an instance.
+
+ Args:
+ test_case: A unittest.TestCase instance.
+
+ """
+ self._test_case = test_case
+ self.messages = []
+ """A list of log messages written to the stream."""
+
+ # Python documentation says that any object passed to the StreamHandler
+ # constructor should support write() and flush():
+ #
+ # http://docs.python.org/library/logging.html#module-logging.handlers
+ def write(self, message):
+ self.messages.append(message)
+
+ def flush(self):
+ pass
+
+ def assertMessages(self, messages):
+ """Assert that the given messages match the logged messages.
+
+ messages: A list of log message strings.
+
+ """
+ self._test_case.assertEqual(messages, self.messages)
+
+
+class LogTesting(object):
+
+ """Supports end-to-end unit-testing of log messages.
+
+ Sample usage:
+
+ class SampleTest(unittest.TestCase):
+
+ def setUp(self):
+ self._log = LogTesting.setUp(self) # Turn logging on.
+
+ def tearDown(self):
+ self._log.tearDown() # Turn off and reset logging.
+
+ def test_logging_in_some_method(self):
+ call_some_method() # Contains calls to _log.info(), etc.
+
+ # Check the resulting log messages.
+ self._log.assertMessages(["INFO: expected message #1",
+ "WARNING: expected message #2"])
+
+ """
+
+ def __init__(self, test_stream, handler):
+ """Create an instance.
+
+ This method should never be called directly. Instances should
+ instead be created using the static setUp() method.
+
+ Args:
+ test_stream: A TestLogStream instance.
+ handler: The handler added to the logger.
+
+ """
+ self._test_stream = test_stream
+ self._handler = handler
+
+ @staticmethod
+ def _getLogger():
+ """Return the logger being tested."""
+ # It is possible we might want to return something other than
+ # the root logger in some special situation. For now, the
+ # root logger seems to suffice.
+ return logging.getLogger()
+
+ @staticmethod
+ def setUp(test_case, logging_level=logging.INFO):
+ """Configure logging for unit testing.
+
+ Configures the root logger to log to a testing log stream.
+ Only messages logged at or above the given level are logged
+ to the stream. Messages logged to the stream are formatted
+ in the following way, for example--
+
+ "INFO: This is a test log message."
+
+ This method should normally be called in the setUp() method
+ of a unittest.TestCase. See the docstring of this class
+ for more details.
+
+ Returns:
+ A LogTesting instance.
+
+ Args:
+ test_case: A unittest.TestCase instance.
+ logging_level: An integer logging level that is the minimum level
+ of log messages you would like to test.
+
+ """
+ stream = TestLogStream(test_case)
+ handler = logging.StreamHandler(stream)
+ handler.setLevel(logging_level)
+ formatter = logging.Formatter("%(levelname)s: %(message)s")
+ handler.setFormatter(formatter)
+
+ # Notice that we only change the root logger by adding a handler
+ # to it. In particular, we do not reset its level using
+ # logger.setLevel(). This ensures that we have not interfered
+ # with how the code being tested may have configured the root
+ # logger.
+ logger = LogTesting._getLogger()
+ logger.addHandler(handler)
+
+ return LogTesting(stream, handler)
+
+ def tearDown(self):
+ """Assert there are no remaining log messages, and reset logging.
+
+ This method asserts that there are no more messages in the array of
+ log messages, and then restores logging to its original state.
+ This method should normally be called in the tearDown() method of a
+ unittest.TestCase. See the docstring of this class for more details.
+
+ """
+ self.assertMessages([])
+ logger = LogTesting._getLogger()
+ logger.removeHandler(self._handler)
+
+ def messages(self):
+ """Return the current list of log messages."""
+ return self._test_stream.messages
+
+ # FIXME: Add a clearMessages() method for cases where the caller
+ # deliberately doesn't want to assert every message.
+
+ # We clear the log messages after asserting since they are no longer
+ # needed after asserting. This serves two purposes: (1) it simplifies
+ # the calling code when we want to check multiple logging calls in a
+ # single test method, and (2) it lets us check in the tearDown() method
+ # that there are no remaining log messages to be asserted.
+ #
+ # The latter ensures that no extra log messages are getting logged that
+ # the caller might not be aware of or may have forgotten to check for.
+ # This gets us a bit more mileage out of our tests without writing any
+ # additional code.
+ def assertMessages(self, messages):
+ """Assert the current array of log messages, and clear its contents.
+
+ Args:
+ messages: A list of log message strings.
+
+ """
+ try:
+ self._test_stream.assertMessages(messages)
+ finally:
+ # We want to clear the array of messages even in the case of
+ # an Exception (e.g. an AssertionError). Otherwise, another
+ # AssertionError can occur in the tearDown() because the
+ # array might not have gotten emptied.
+ self._test_stream.messages = []
+
+
+# This class needs to inherit from unittest.TestCase. Otherwise, the
+# setUp() and tearDown() methods will not get fired for test case classes
+# that inherit from this class -- even if the class inherits from *both*
+# unittest.TestCase and LoggingTestCase.
+#
+# FIXME: Rename this class to LoggingTestCaseBase to be sure that
+# the unittest module does not interpret this class as a unittest
+# test case itself.
+class LoggingTestCase(unittest.TestCase):
+
+ """Supports end-to-end unit-testing of log messages.
+
+ Sample usage:
+
+ class SampleTest(LoggingTestCase):
+
+ def test_logging_in_some_method(self):
+ call_some_method() # Contains calls to _log.info(), etc.
+
+ # Check the resulting log messages.
+ self.assertLog(["INFO: expected message #1",
+ "WARNING: expected message #2"])
+
+ """
+
+ def setUp(self):
+ self._log = LogTesting.setUp(self)
+
+ def tearDown(self):
+ self._log.tearDown()
+
+ def logMessages(self):
+ """Return the current list of log messages."""
+ return self._log.messages()
+
+ # FIXME: Add a clearMessages() method for cases where the caller
+ # deliberately doesn't want to assert every message.
+
+ # See the code comments preceding LogTesting.assertMessages() for
+ # an explanation of why we clear the array of messages after
+ # asserting its contents.
+ def assertLog(self, messages):
+ """Assert the current array of log messages, and clear its contents.
+
+ Args:
+ messages: A list of log message strings.
+
+ """
+ self._log.assertMessages(messages)
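+
+
+# Illustrative usage (a sketch for exposition, not part of the original
+# module): a LoggingTestCase subclass run directly. The root logger's level
+# is lowered to INFO inside the test because LogTesting deliberately leaves
+# the logger's own level untouched.
+if __name__ == '__main__':
+    class _DemoLoggingTest(LoggingTestCase):
+        def test_info_message(self):
+            logging.getLogger().setLevel(logging.INFO)
+            logging.getLogger().info("hello")
+            self.assertLog(["INFO: hello\n"])
+
+    unittest.main()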
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/logutils.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/logutils.py
new file mode 100644
index 0000000..e75ff2c
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/logutils.py
@@ -0,0 +1,207 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Supports webkitpy logging."""
+
+# FIXME: Move this file to webkitpy/python24 since logging needs to
+# be configured prior to running version-checking code.
+
+import logging
+import os
+import sys
+
+import webkitpy
+
+
+_log = logging.getLogger(__name__)
+
+# We set these directory paths lazily in get_logger() below.
+_scripts_dir = ""
+"""The normalized, absolute path to the ...Scripts directory."""
+
+_webkitpy_dir = ""
+"""The normalized, absolute path to the ...Scripts/webkitpy directory."""
+
+
+def _normalize_path(path):
+ """Return the given path normalized.
+
+ Converts a path to an absolute path, removes any trailing slashes,
+ removes any extension, and lower-cases it.
+
+ """
+ path = os.path.abspath(path)
+ path = os.path.normpath(path)
+ path = os.path.splitext(path)[0] # Remove the extension, if any.
+ path = path.lower()
+
+ return path
+
+
+# Observe that the implementation of this function does not require
+# the use of any hard-coded strings like "webkitpy", etc.
+#
+# The main benefit this function has over using--
+#
+# _log = logging.getLogger(__name__)
+#
+# is that get_logger() returns the same value even if __name__ is
+# "__main__" -- i.e. even if the module is the script being executed
+# from the command-line.
+def get_logger(path):
+ """Return a logging.logger for the given path.
+
+ Returns:
+ A logger whose name is the name of the module corresponding to
+ the given path. If the module is in webkitpy, the name is
+ the fully-qualified dotted module name beginning with webkitpy....
+ Otherwise, the name is the base name of the module (i.e. without
+ any dotted module name prefix).
+
+ Args:
+ path: The path of the module. Normally, this parameter should be
+ the __file__ variable of the module.
+
+ Sample usage:
+
+ from webkitpy.common.system import logutils
+
+ _log = logutils.get_logger(__file__)
+
+ """
+ # Since we assign to _scripts_dir and _webkitpy_dir in this function,
+ # we need to declare them global.
+ global _scripts_dir
+ global _webkitpy_dir
+
+ path = _normalize_path(path)
+
+ # Lazily evaluate _webkitpy_dir and _scripts_dir.
+ if not _scripts_dir:
+ # The normalized, absolute path to ...Scripts/webkitpy/__init__.
+ webkitpy_path = _normalize_path(webkitpy.__file__)
+
+ _webkitpy_dir = os.path.split(webkitpy_path)[0]
+ _scripts_dir = os.path.split(_webkitpy_dir)[0]
+
+ if path.startswith(_webkitpy_dir):
+ # Remove the initial Scripts directory portion, so the path
+ # starts with /webkitpy, for example "/webkitpy/init/logutils".
+ path = path[len(_scripts_dir):]
+
+ parts = []
+ while True:
+ (path, tail) = os.path.split(path)
+ if not tail:
+ break
+ parts.insert(0, tail)
+
+ logger_name = ".".join(parts) # For example, webkitpy.common.system.logutils.
+ else:
+ # The path is outside of webkitpy. Default to the basename
+ # without the extension.
+ basename = os.path.basename(path)
+ logger_name = os.path.splitext(basename)[0]
+
+ return logging.getLogger(logger_name)
+
+
+def _default_handlers(stream, logging_level):
+ """Return a list of the default logging handlers to use.
+
+ Args:
+ stream: See the configure_logging() docstring.
+
+ """
+ # Create the filter.
+ def should_log(record):
+ """Return whether a logging.LogRecord should be logged."""
+ if record.name.startswith("webkitpy.thirdparty"):
+ return False
+ return True
+
+ logging_filter = logging.Filter()
+ logging_filter.filter = should_log
+
+ # Create the handler.
+ handler = logging.StreamHandler(stream)
+ if logging_level == logging.DEBUG:
+ formatter = logging.Formatter("%(name)s: [%(levelname)s] %(message)s")
+ else:
+ formatter = logging.Formatter("%(message)s")
+
+ handler.setFormatter(formatter)
+ handler.addFilter(logging_filter)
+
+ return [handler]
+
+
+def configure_logging(logging_level=None, logger=None, stream=None,
+ handlers=None):
+ """Configure logging for standard purposes.
+
+ Returns:
+ A list of references to the logging handlers added to the root
+ logger. This allows the caller to later remove the handlers
+ using logger.removeHandler. This is useful primarily during unit
+ testing where the caller may want to configure logging temporarily
+ and then undo the configuring.
+
+ Args:
+ logging_level: The minimum logging level to log. Defaults to
+ logging.INFO.
+ logger: A logging.logger instance to configure. This parameter
+ should be used only in unit tests. Defaults to the
+ root logger.
+      stream: A file-like object to log to, used when creating the default
+              handlers. The stream must define an "encoding" data attribute,
+              or else logging raises an error. Defaults to sys.stderr.
+ handlers: A list of logging.Handler instances to add to the logger
+ being configured. If this parameter is provided, then the
+ stream parameter is not used.
+
+ """
+ # If the stream does not define an "encoding" data attribute, the
+ # logging module can throw an error like the following:
+ #
+ # Traceback (most recent call last):
+ # File "/System/Library/Frameworks/Python.framework/Versions/2.6/...
+ # lib/python2.6/logging/__init__.py", line 761, in emit
+ # self.stream.write(fs % msg.encode(self.stream.encoding))
+ # LookupError: unknown encoding: unknown
+ if logging_level is None:
+ logging_level = logging.INFO
+ if logger is None:
+ logger = logging.getLogger()
+ if stream is None:
+ stream = sys.stderr
+ if handlers is None:
+ handlers = _default_handlers(stream, logging_level)
+
+ logger.setLevel(logging_level)
+
+ for handler in handlers:
+ logger.addHandler(handler)
+
+ _log.debug("Debug logging enabled.")
+
+ return handlers
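+
+
+# Illustrative usage (an expository sketch, not upstream code): configure
+# temporary logging to an in-memory stream and then undo it, the way a
+# unit test would. The "encoding" attribute is set because the docstring
+# above warns that the handler may require it.
+if __name__ == '__main__':
+    import StringIO
+
+    log_stream = StringIO.StringIO()
+    log_stream.encoding = 'utf-8'
+    added_handlers = configure_logging(stream=log_stream)
+    logging.getLogger("demo").info("configured")
+    for added_handler in added_handlers:
+        logging.getLogger().removeHandler(added_handler)
+    print log_stream.getvalue()  # "configured\n"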
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/logutils_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/logutils_unittest.py
new file mode 100644
index 0000000..6d7cc4d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/logutils_unittest.py
@@ -0,0 +1,158 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for logutils.py."""
+
+import logging
+import os
+import unittest
+
+from webkitpy.common.system.logtesting import LogTesting
+from webkitpy.common.system.logtesting import TestLogStream
+from webkitpy.common.system import logutils
+
+
+class GetLoggerTest(unittest.TestCase):
+
+ """Tests get_logger()."""
+
+ def test_get_logger_in_webkitpy(self):
+ logger = logutils.get_logger(__file__)
+ self.assertEqual(logger.name, "webkitpy.common.system.logutils_unittest")
+
+ def test_get_logger_not_in_webkitpy(self):
+ # Temporarily change the working directory so that we
+ # can test get_logger() for a path outside of webkitpy.
+ working_directory = os.getcwd()
+ root_dir = "/"
+ os.chdir(root_dir)
+
+ logger = logutils.get_logger("/Tools/Scripts/test-webkitpy")
+ self.assertEqual(logger.name, "test-webkitpy")
+
+ logger = logutils.get_logger("/Tools/Scripts/test-webkitpy.py")
+ self.assertEqual(logger.name, "test-webkitpy")
+
+ os.chdir(working_directory)
+
+
+class ConfigureLoggingTestBase(unittest.TestCase):
+
+ """Base class for configure_logging() unit tests."""
+
+ def _logging_level(self):
+        raise NotImplementedError()
+
+ def setUp(self):
+ log_stream = TestLogStream(self)
+
+ # Use a logger other than the root logger or one prefixed with
+ # "webkitpy." so as not to conflict with test-webkitpy logging.
+ logger = logging.getLogger("unittest")
+
+ # Configure the test logger not to pass messages along to the
+ # root logger. This prevents test messages from being
+ # propagated to loggers used by test-webkitpy logging (e.g.
+ # the root logger).
+ logger.propagate = False
+
+ logging_level = self._logging_level()
+ self._handlers = logutils.configure_logging(logging_level=logging_level,
+ logger=logger,
+ stream=log_stream)
+ self._log = logger
+ self._log_stream = log_stream
+
+ def tearDown(self):
+ """Reset logging to its original state.
+
+ This method ensures that the logging configuration set up
+ for a unit test does not affect logging in other unit tests.
+
+ """
+ logger = self._log
+ for handler in self._handlers:
+ logger.removeHandler(handler)
+
+ def _assert_log_messages(self, messages):
+ """Assert that the logged messages equal the given messages."""
+ self._log_stream.assertMessages(messages)
+
+
+class ConfigureLoggingTest(ConfigureLoggingTestBase):
+
+ """Tests configure_logging() with the default logging level."""
+
+ def _logging_level(self):
+ return None
+
+ def test_info_message(self):
+ self._log.info("test message")
+ self._assert_log_messages(["test message\n"])
+
+ def test_debug_message(self):
+ self._log.debug("test message")
+ self._assert_log_messages([])
+
+ def test_below_threshold_message(self):
+ # We test the boundary case of a logging level equal to 19.
+ # In practice, we will probably only be calling log.debug(),
+ # which corresponds to a logging level of 10.
+ level = logging.INFO - 1 # Equals 19.
+ self._log.log(level, "test message")
+ self._assert_log_messages([])
+
+ def test_two_messages(self):
+ self._log.info("message1")
+ self._log.info("message2")
+ self._assert_log_messages(["message1\n",
+ "message2\n"])
+
+
+class ConfigureLoggingVerboseTest(ConfigureLoggingTestBase):
+ def _logging_level(self):
+ return logging.DEBUG
+
+ def test_info_message(self):
+ self._log.info("test message")
+ self._assert_log_messages(["unittest: [INFO] test message\n"])
+
+ def test_debug_message(self):
+ self._log.debug("test message")
+ self._assert_log_messages(["unittest: [DEBUG] test message\n"])
+
+
+class ConfigureLoggingCustomLevelTest(ConfigureLoggingTestBase):
+
+ """Tests configure_logging() with a custom logging level."""
+
+ _level = 36
+
+ def _logging_level(self):
+ return self._level
+
+ def test_logged_message(self):
+ self._log.log(self._level, "test message")
+ self._assert_log_messages(["test message\n"])
+
+ def test_below_threshold_message(self):
+ self._log.log(self._level - 1, "test message")
+ self._assert_log_messages([])
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/outputcapture.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/outputcapture.py
new file mode 100644
index 0000000..b9b0a08
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/outputcapture.py
@@ -0,0 +1,123 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Class for unittest support. Used for capturing stderr/stdout.
+
+import logging
+import sys
+import unittest
+
+from StringIO import StringIO
+
+
+class OutputCapture(object):
+
+ def __init__(self):
+ self.saved_outputs = dict()
+ self._log_level = logging.INFO
+
+ def set_log_level(self, log_level):
+ self._log_level = log_level
+ if hasattr(self, '_logs_handler'):
+ self._logs_handler.setLevel(self._log_level)
+
+ def _capture_output_with_name(self, output_name):
+ stream = getattr(sys, output_name)
+ captured_output = StringIO()
+ self.saved_outputs[output_name] = stream
+ setattr(sys, output_name, captured_output)
+ return captured_output
+
+ def _restore_output_with_name(self, output_name):
+ captured_output = getattr(sys, output_name).getvalue()
+ setattr(sys, output_name, self.saved_outputs[output_name])
+ del self.saved_outputs[output_name]
+ return captured_output
+
+ def capture_output(self):
+ self._logs = StringIO()
+ self._logs_handler = logging.StreamHandler(self._logs)
+ self._logs_handler.setLevel(self._log_level)
+ self._logger = logging.getLogger()
+ self._orig_log_level = self._logger.level
+ self._logger.addHandler(self._logs_handler)
+ self._logger.setLevel(min(self._log_level, self._orig_log_level))
+ return (self._capture_output_with_name("stdout"), self._capture_output_with_name("stderr"))
+
+ def restore_output(self):
+ self._logger.removeHandler(self._logs_handler)
+ self._logger.setLevel(self._orig_log_level)
+ self._logs_handler.flush()
+ self._logs.flush()
+ logs_string = self._logs.getvalue()
+ delattr(self, '_logs_handler')
+ delattr(self, '_logs')
+ return (self._restore_output_with_name("stdout"), self._restore_output_with_name("stderr"), logs_string)
+
+ def assert_outputs(self, testcase, function, args=[], kwargs={}, expected_stdout="", expected_stderr="", expected_exception=None, expected_logs=None):
+ self.capture_output()
+ try:
+ if expected_exception:
+ return_value = testcase.assertRaises(expected_exception, function, *args, **kwargs)
+ else:
+ return_value = function(*args, **kwargs)
+ finally:
+ (stdout_string, stderr_string, logs_string) = self.restore_output()
+
+ if hasattr(testcase, 'assertMultiLineEqual'):
+ testassert = testcase.assertMultiLineEqual
+ else:
+ testassert = testcase.assertEqual
+
+ testassert(stdout_string, expected_stdout)
+ testassert(stderr_string, expected_stderr)
+ if expected_logs is not None:
+ testassert(logs_string, expected_logs)
+ # This is a little strange, but I don't know where else to return this information.
+ return return_value
+
+
+class OutputCaptureTestCaseBase(unittest.TestCase):
+ maxDiff = None
+
+ def setUp(self):
+ unittest.TestCase.setUp(self)
+ self.output_capture = OutputCapture()
+ (self.__captured_stdout, self.__captured_stderr) = self.output_capture.capture_output()
+
+ def tearDown(self):
+ del self.__captured_stdout
+ del self.__captured_stderr
+ self.output_capture.restore_output()
+ unittest.TestCase.tearDown(self)
+
+ def assertStdout(self, expected_stdout):
+ self.assertEqual(expected_stdout, self.__captured_stdout.getvalue())
+
+ def assertStderr(self, expected_stderr):
+ self.assertEqual(expected_stderr, self.__captured_stderr.getvalue())
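+
+
+# Illustrative usage (an expository sketch, not part of the original file):
+# capturing stdout and log output around a noisy callable.
+if __name__ == '__main__':
+    def _noisy():
+        print "to stdout"
+        logging.getLogger(__name__).info("to the log")
+
+    capturer = OutputCapture()
+    capturer.capture_output()
+    _noisy()
+    stdout_string, stderr_string, logs_string = capturer.restore_output()
+    assert stdout_string == "to stdout\n"
+    assert stderr_string == ""
+    assert logs_string == "to the log\n"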
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/outputcapture_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/outputcapture_unittest.py
new file mode 100644
index 0000000..321bb10
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/outputcapture_unittest.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+
+
+_log = logging.getLogger(__name__)
+
+
+class OutputCaptureTest(unittest.TestCase):
+ def setUp(self):
+ self.output = OutputCapture()
+
+ def log_all_levels(self):
+ _log.info('INFO')
+ _log.warning('WARN')
+ _log.error('ERROR')
+ _log.critical('CRITICAL')
+
+ def assertLogged(self, expected_logs):
+ actual_stdout, actual_stderr, actual_logs = self.output.restore_output()
+ self.assertEqual('', actual_stdout)
+ self.assertEqual('', actual_stderr)
+ self.assertMultiLineEqual(expected_logs, actual_logs)
+
+ def test_initial_log_level(self):
+ self.output.capture_output()
+ self.log_all_levels()
+ self.assertLogged('INFO\nWARN\nERROR\nCRITICAL\n')
+
+ def test_set_log_level(self):
+ self.output.set_log_level(logging.ERROR)
+ self.output.capture_output()
+ self.log_all_levels()
+ self.output.set_log_level(logging.WARN)
+ self.log_all_levels()
+ self.assertLogged('ERROR\nCRITICAL\nWARN\nERROR\nCRITICAL\n')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/outputtee.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/outputtee.py
new file mode 100644
index 0000000..12366e8
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/outputtee.py
@@ -0,0 +1,79 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import codecs
+import os
+import sys
+
+
+# Simple class to split output between multiple destinations.
+class Tee:
+ def __init__(self, *files):
+ self.files = files
+
+ # Callers should pass an already encoded string for writing.
+ def write(self, bytes):
+ for file in self.files:
+ file.write(bytes)
+
+
+class OutputTee:
+ def __init__(self):
+ self._original_stdout = None
+ self._original_stderr = None
+ self._files_for_output = []
+
+ def add_log(self, path):
+ log_file = self._open_log_file(path)
+ self._files_for_output.append(log_file)
+ self._tee_outputs_to_files(self._files_for_output)
+ return log_file
+
+ def remove_log(self, log_file):
+ self._files_for_output.remove(log_file)
+ self._tee_outputs_to_files(self._files_for_output)
+ log_file.close()
+
+ @staticmethod
+ def _open_log_file(log_path):
+ (log_directory, log_name) = os.path.split(log_path)
+ if log_directory and not os.path.exists(log_directory):
+ os.makedirs(log_directory)
+ return codecs.open(log_path, "a+", "utf-8")
+
+ def _tee_outputs_to_files(self, files):
+ if not self._original_stdout:
+ self._original_stdout = sys.stdout
+ self._original_stderr = sys.stderr
+        if files:
+ sys.stdout = Tee(self._original_stdout, *files)
+ sys.stderr = Tee(self._original_stderr, *files)
+ else:
+ sys.stdout = self._original_stdout
+ sys.stderr = self._original_stderr
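+
+
+# Illustrative usage (a sketch added for exposition): mirroring writes to
+# two in-memory streams with Tee, as the unit tests for this module also do.
+if __name__ == '__main__':
+    from StringIO import StringIO
+
+    first, second = StringIO(), StringIO()
+    tee = Tee(first, second)
+    tee.write("mirrored\n")
+    assert first.getvalue() == second.getvalue() == "mirrored\n"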
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/outputtee_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/outputtee_unittest.py
new file mode 100644
index 0000000..6a509f0
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/outputtee_unittest.py
@@ -0,0 +1,43 @@
+# Copyright (C) 2012 Zan Dobersek <zandobersek@gmail.com>
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import unittest
+
+from webkitpy.common.system.outputtee import Tee, OutputTee
+
+
+class SimpleTeeTest(unittest.TestCase):
+ def test_simple_tee(self):
+ file1, file2 = StringIO.StringIO(), StringIO.StringIO()
+ tee = Tee(file1, file2)
+ tee.write("foo bar\n")
+ tee.write("baz\n")
+
+ self.assertEqual(file1.getvalue(), "foo bar\nbaz\n")
+ self.assertEqual(file2.getvalue(), file1.getvalue())
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/path.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/path.py
new file mode 100644
index 0000000..e5a66bf
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/path.py
@@ -0,0 +1,135 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""generic routines to convert platform-specific paths to URIs."""
+
+import atexit
+import subprocess
+import sys
+import threading
+import urllib
+
+
+def abspath_to_uri(platform, path):
+ """Converts a platform-specific absolute path to a file: URL."""
+ return "file:" + _escape(_convert_path(platform, path))
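+
+# Illustrative examples, mirroring the expectations in path_unittest.py:
+#
+#   abspath_to_uri(platform_info, "/foo/bar.html")
+#       -> "file:///foo/bar.html"            (unix-style platforms)
+#   abspath_to_uri(platform_info, "/cygdrive/c/foo/bar.html")
+#       -> "file:///C:/foo/bar.html"         (cygwin)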
+
+
+def cygpath(path):
+ """Converts an absolute cygwin path to an absolute Windows path."""
+ return _CygPath.convert_using_singleton(path)
+
+
+# Note that this object is not threadsafe; calls that may come from
+# multiple threads must be protected by a lock (as is done in cygpath()).
+class _CygPath(object):
+ """Manages a long-running 'cygpath' process for file conversion."""
+ _lock = None
+ _singleton = None
+
+ @staticmethod
+ def stop_cygpath_subprocess():
+ if not _CygPath._lock:
+ return
+
+ with _CygPath._lock:
+ if _CygPath._singleton:
+ _CygPath._singleton.stop()
+
+ @staticmethod
+ def convert_using_singleton(path):
+ if not _CygPath._lock:
+ _CygPath._lock = threading.Lock()
+
+ with _CygPath._lock:
+ if not _CygPath._singleton:
+ _CygPath._singleton = _CygPath()
+ # Make sure the cygpath subprocess always gets shutdown cleanly.
+ atexit.register(_CygPath.stop_cygpath_subprocess)
+
+ return _CygPath._singleton.convert(path)
+
+ def __init__(self):
+ self._child_process = None
+
+ def start(self):
+ assert(self._child_process is None)
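+        # '-f -' tells cygpath to read input paths from stdin (one per line),
+        # while '-w' requests Windows form and '-a' an absolute path, so a
+        # single long-running subprocess can service many conversions.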
+ args = ['cygpath', '-f', '-', '-wa']
+ self._child_process = subprocess.Popen(args,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+
+ def is_running(self):
+ if not self._child_process:
+ return False
+ return self._child_process.returncode is None
+
+ def stop(self):
+ if self._child_process:
+ self._child_process.stdin.close()
+ self._child_process.wait()
+ self._child_process = None
+
+ def convert(self, path):
+ if not self.is_running():
+ self.start()
+ self._child_process.stdin.write("%s\r\n" % path)
+ self._child_process.stdin.flush()
+ windows_path = self._child_process.stdout.readline().rstrip()
+ # Some versions of cygpath use lowercase drive letters while others
+ # use uppercase. We always convert to uppercase for consistency.
+ windows_path = '%s%s' % (windows_path[0].upper(), windows_path[1:])
+ return windows_path
+
+
+def _escape(path):
+ """Handle any characters in the path that should be escaped."""
+ # FIXME: web browsers don't appear to blindly quote every character
+    # when converting filenames to file: URLs. Instead of using urllib's default
+ # rules, we allow a small list of other characters through un-escaped.
+ # It's unclear if this is the best possible solution.
+ return urllib.quote(path, safe='/+:')
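+
+# For example (exercised by test_abspath_to_uri_escaping_unixy):
+#
+#   _escape('/foo/bar + baz%?.html') -> '/foo/bar%20+%20baz%25%3F.html'
+#
+# '/', '+' and ':' pass through unescaped; spaces, '%' and '?' are quoted.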
+
+
+def _convert_path(platform, path):
+ """Handles any os-specific path separators, mappings, etc."""
+ if platform.is_cygwin():
+ return _winpath_to_uri(cygpath(path))
+ if platform.is_win():
+ return _winpath_to_uri(path)
+ return _unixypath_to_uri(path)
+
+
+def _winpath_to_uri(path):
+    """Converts a Windows absolute path to a file: URL."""
+ return "///" + path.replace("\\", "/")
+
+
+def _unixypath_to_uri(path):
+ """Converts a unix-style path to a file: URL."""
+ return "//" + path
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/path_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/path_unittest.py
new file mode 100644
index 0000000..c0b8287
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/path_unittest.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import unittest
+
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.system.platforminfo import PlatformInfo
+from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
+from webkitpy.common.system import path
+
+class AbspathTest(unittest.TestCase):
+ def platforminfo(self):
+ return SystemHost().platform
+
+ def test_abspath_to_uri_cygwin(self):
+ if sys.platform != 'cygwin':
+ return
+ self.assertEqual(path.abspath_to_uri(self.platforminfo(), '/cygdrive/c/foo/bar.html'),
+ 'file:///C:/foo/bar.html')
+
+ def test_abspath_to_uri_unixy(self):
+ self.assertEqual(path.abspath_to_uri(MockPlatformInfo(), "/foo/bar.html"),
+ 'file:///foo/bar.html')
+
+ def test_abspath_to_uri_win(self):
+ if sys.platform != 'win32':
+ return
+ self.assertEqual(path.abspath_to_uri(self.platforminfo(), 'c:\\foo\\bar.html'),
+ 'file:///c:/foo/bar.html')
+
+ def test_abspath_to_uri_escaping_unixy(self):
+ self.assertEqual(path.abspath_to_uri(MockPlatformInfo(), '/foo/bar + baz%?.html'),
+ 'file:///foo/bar%20+%20baz%25%3F.html')
+
+ # Note that you can't have '?' in a filename on windows.
+ def test_abspath_to_uri_escaping_cygwin(self):
+ if sys.platform != 'cygwin':
+ return
+ self.assertEqual(path.abspath_to_uri(self.platforminfo(), '/cygdrive/c/foo/bar + baz%.html'),
+ 'file:///C:/foo/bar%20+%20baz%25.html')
+
+ def test_stop_cygpath_subprocess(self):
+ if sys.platform != 'cygwin':
+ return
+
+ # Call cygpath to ensure the subprocess is running.
+ path.cygpath("/cygdrive/c/foo.txt")
+ self.assertTrue(path._CygPath._singleton.is_running())
+
+ # Stop it.
+ path._CygPath.stop_cygpath_subprocess()
+
+ # Ensure that it is stopped.
+ self.assertFalse(path._CygPath._singleton.is_running())
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/platforminfo.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/platforminfo.py
new file mode 100644
index 0000000..a0c8dc3
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/platforminfo.py
@@ -0,0 +1,169 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+import sys
+
+
+class PlatformInfo(object):
+ """This class provides a consistent (and mockable) interpretation of
+ system-specific values (like sys.platform and platform.mac_ver())
+ to be used by the rest of the webkitpy code base.
+
+ Public (static) properties:
+ -- os_name
+ -- os_version
+
+    Note that 'future' is returned for os_version if the operating system is
+    newer than any version known to the code.
+ """
+
+ def __init__(self, sys_module, platform_module, executive):
+ self._executive = executive
+ self._platform_module = platform_module
+ self.os_name = self._determine_os_name(sys_module.platform)
+ if self.os_name == 'linux':
+ self.os_version = self._determine_linux_version()
+ if self.os_name == 'freebsd':
+ self.os_version = platform_module.release()
+ if self.os_name.startswith('mac'):
+ self.os_version = self._determine_mac_version(platform_module.mac_ver()[0])
+ if self.os_name.startswith('win'):
+ self.os_version = self._determine_win_version(self._win_version_tuple(sys_module))
+ self._is_cygwin = sys_module.platform == 'cygwin'
+
+ def is_mac(self):
+ return self.os_name == 'mac'
+
+ def is_win(self):
+ return self.os_name == 'win'
+
+ def is_cygwin(self):
+ return self._is_cygwin
+
+ def is_linux(self):
+ return self.os_name == 'linux'
+
+ def is_freebsd(self):
+ return self.os_name == 'freebsd'
+
+ def is_highdpi(self):
+ if self.is_mac():
+ output = self._executive.run_command(['system_profiler', 'SPDisplaysDataType'], error_handler=self._executive.ignore_error)
+ if output and 'Retina: Yes' in output:
+ return True
+ return False
+
+ def display_name(self):
+ # platform.platform() returns Darwin information for Mac, which is just confusing.
+ if self.is_mac():
+ return "Mac OS X %s" % self._platform_module.mac_ver()[0]
+
+ # Returns strings like:
+ # Linux-2.6.18-194.3.1.el5-i686-with-redhat-5.5-Final
+ # Windows-2008ServerR2-6.1.7600
+ return self._platform_module.platform()
+
+ def total_bytes_memory(self):
+ if self.is_mac():
+ return long(self._executive.run_command(["sysctl", "-n", "hw.memsize"]))
+ return None
+
+ def terminal_width(self):
+ """Returns sys.maxint if the width cannot be determined."""
+ try:
+ if self.is_win():
+ # From http://code.activestate.com/recipes/440694-determine-size-of-console-window-on-windows/
+ from ctypes import windll, create_string_buffer
+ handle = windll.kernel32.GetStdHandle(-12) # -12 == stderr
+ console_screen_buffer_info = create_string_buffer(22) # 22 == sizeof(console_screen_buffer_info)
+ if windll.kernel32.GetConsoleScreenBufferInfo(handle, console_screen_buffer_info):
+ import struct
+ _, _, _, _, _, left, _, right, _, _, _ = struct.unpack("hhhhHhhhhhh", console_screen_buffer_info.raw)
+ # Note that we return 1 less than the width since writing into the rightmost column
+ # automatically performs a line feed.
+ return right - left
+ return sys.maxint
+ else:
+ import fcntl
+ import struct
+ import termios
+ packed = fcntl.ioctl(sys.stderr.fileno(), termios.TIOCGWINSZ, '\0' * 8)
+ _, columns, _, _ = struct.unpack('HHHH', packed)
+ return columns
+ except:
+ return sys.maxint
+
+ def _determine_os_name(self, sys_platform):
+ if sys_platform == 'darwin':
+ return 'mac'
+ if sys_platform.startswith('linux'):
+ return 'linux'
+ if sys_platform in ('win32', 'cygwin'):
+ return 'win'
+ if sys_platform.startswith('freebsd'):
+ return 'freebsd'
+ raise AssertionError('unrecognized platform string "%s"' % sys_platform)
+
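+    # For example, a mac_ver() string of '10.6.3' maps to 'snowleopard',
+    # while '10.10.0' (newer than anything in the table) maps to 'future'.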
+ def _determine_mac_version(self, mac_version_string):
+ release_version = int(mac_version_string.split('.')[1])
+ version_strings = {
+ 5: 'leopard',
+ 6: 'snowleopard',
+ 7: 'lion',
+ 8: 'mountainlion',
+ 9: 'mavericks',
+ }
+ assert release_version >= min(version_strings.keys())
+ return version_strings.get(release_version, 'future')
+
+ def _determine_linux_version(self):
+ # FIXME: we ignore whatever the real version is and pretend it's lucid for now.
+ return 'lucid'
+
+ def _determine_win_version(self, win_version_tuple):
+ if win_version_tuple[:3] == (6, 1, 7600):
+ return '7sp0'
+ if win_version_tuple[:2] == (6, 0):
+ return 'vista'
+ if win_version_tuple[:2] == (5, 1):
+ return 'xp'
+ assert win_version_tuple[0] > 6 or win_version_tuple[1] >= 1, 'Unrecognized Windows version tuple: "%s"' % (win_version_tuple,)
+ return 'future'
+
+ def _win_version_tuple(self, sys_module):
+ if hasattr(sys_module, 'getwindowsversion'):
+ return sys_module.getwindowsversion()
+ return self._win_version_tuple_from_cmd()
+
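+    # For example, 'ver' output such as 'Microsoft Windows [Version 6.1.7600]'
+    # parses to (6, 1, 7600), matching the first three fields of
+    # sys.getwindowsversion().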
+ def _win_version_tuple_from_cmd(self):
+ # Note that this should only ever be called on windows, so this should always work.
+ ver_output = self._executive.run_command(['cmd', '/c', 'ver'], decode_output=False)
+ match_object = re.search(r'(?P<major>\d)\.(?P<minor>\d)\.(?P<build>\d+)', ver_output)
+ assert match_object, 'cmd returned an unexpected version string: ' + ver_output
+ return tuple(map(int, match_object.groups()))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/platforminfo_mock.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/platforminfo_mock.py
new file mode 100644
index 0000000..1ba0019
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/platforminfo_mock.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class MockPlatformInfo(object):
+ def __init__(self, os_name='mac', os_version='snowleopard', is_highdpi=False):
+ self.os_name = os_name
+ self.os_version = os_version
+ self._is_highdpi = is_highdpi
+
+ def is_mac(self):
+ return self.os_name == 'mac'
+
+ def is_linux(self):
+ return self.os_name == 'linux'
+
+ def is_win(self):
+ return self.os_name == 'win'
+
+ def is_highdpi(self):
+ return self._is_highdpi
+
+ def is_cygwin(self):
+ return self.os_name == 'cygwin'
+
+ def is_freebsd(self):
+ return self.os_name == 'freebsd'
+
+ def display_name(self):
+ return "MockPlatform 1.0"
+
+ def total_bytes_memory(self):
+ return 3 * 1024 * 1024 * 1024 # 3GB is a reasonable amount of ram to mock.
+
+ def terminal_width(self):
+ return 80
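+
+
+# Typical use, as in path_unittest.py -- tests pass a MockPlatformInfo
+# wherever production code expects a real PlatformInfo:
+#
+#   path.abspath_to_uri(MockPlatformInfo(), "/foo/bar.html")
+#       -> "file:///foo/bar.html"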
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/platforminfo_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/platforminfo_unittest.py
new file mode 100644
index 0000000..c16a16c
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/platforminfo_unittest.py
@@ -0,0 +1,180 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import platform
+import sys
+import unittest
+
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
+from webkitpy.common.system.platforminfo import PlatformInfo
+
+
+def fake_sys(platform_str='darwin', windows_version_tuple=None):
+
+ class FakeSysModule(object):
+ platform = platform_str
+ if windows_version_tuple:
+ getwindowsversion = lambda x: windows_version_tuple
+
+ return FakeSysModule()
+
+
+def fake_platform(mac_version_string='10.6.3', release_string='bar'):
+
+ class FakePlatformModule(object):
+ def mac_ver(self):
+ return tuple([mac_version_string, tuple(['', '', '']), 'i386'])
+
+ def platform(self):
+ return 'foo'
+
+ def release(self):
+ return release_string
+
+ return FakePlatformModule()
+
+
+def fake_executive(output=None):
+ if output:
+ return MockExecutive2(output=output)
+ return MockExecutive2(exception=SystemError)
+
+
+class TestPlatformInfo(unittest.TestCase):
+ def make_info(self, sys_module=None, platform_module=None, executive=None):
+ return PlatformInfo(sys_module or fake_sys(), platform_module or fake_platform(), executive or fake_executive())
+
+ def test_real_code(self):
+ # This test makes sure the real (unmocked) code actually works.
+ info = PlatformInfo(sys, platform, Executive())
+ self.assertNotEquals(info.os_name, '')
+ self.assertNotEquals(info.os_version, '')
+ self.assertNotEquals(info.display_name(), '')
+ self.assertTrue(info.is_mac() or info.is_win() or info.is_linux() or info.is_freebsd())
+ self.assertIsNotNone(info.terminal_width())
+
+ if info.is_mac():
+ self.assertTrue(info.total_bytes_memory() > 0)
+ else:
+ self.assertIsNone(info.total_bytes_memory())
+
+ def test_os_name_and_wrappers(self):
+ info = self.make_info(fake_sys('linux2'))
+ self.assertTrue(info.is_linux())
+ self.assertFalse(info.is_mac())
+ self.assertFalse(info.is_win())
+ self.assertFalse(info.is_freebsd())
+
+ info = self.make_info(fake_sys('linux3'))
+ self.assertTrue(info.is_linux())
+ self.assertFalse(info.is_mac())
+ self.assertFalse(info.is_win())
+ self.assertFalse(info.is_freebsd())
+
+ info = self.make_info(fake_sys('darwin'), fake_platform('10.6.3'))
+ self.assertEqual(info.os_name, 'mac')
+ self.assertFalse(info.is_linux())
+ self.assertTrue(info.is_mac())
+ self.assertFalse(info.is_win())
+ self.assertFalse(info.is_freebsd())
+
+ info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
+ self.assertEqual(info.os_name, 'win')
+ self.assertFalse(info.is_linux())
+ self.assertFalse(info.is_mac())
+ self.assertTrue(info.is_win())
+ self.assertFalse(info.is_freebsd())
+
+ info = self.make_info(fake_sys('cygwin'), executive=fake_executive('6.1.7600'))
+ self.assertEqual(info.os_name, 'win')
+ self.assertFalse(info.is_linux())
+ self.assertFalse(info.is_mac())
+ self.assertTrue(info.is_win())
+ self.assertFalse(info.is_freebsd())
+
+ info = self.make_info(fake_sys('freebsd8'))
+ self.assertEqual(info.os_name, 'freebsd')
+ self.assertFalse(info.is_linux())
+ self.assertFalse(info.is_mac())
+ self.assertFalse(info.is_win())
+ self.assertTrue(info.is_freebsd())
+
+ self.assertRaises(AssertionError, self.make_info, fake_sys('vms'))
+
+ def test_os_version(self):
+ self.assertRaises(AssertionError, self.make_info, fake_sys('darwin'), fake_platform('10.4.3'))
+ self.assertEqual(self.make_info(fake_sys('darwin'), fake_platform('10.5.1')).os_version, 'leopard')
+ self.assertEqual(self.make_info(fake_sys('darwin'), fake_platform('10.6.1')).os_version, 'snowleopard')
+ self.assertEqual(self.make_info(fake_sys('darwin'), fake_platform('10.7.1')).os_version, 'lion')
+ self.assertEqual(self.make_info(fake_sys('darwin'), fake_platform('10.8.1')).os_version, 'mountainlion')
+ self.assertEqual(self.make_info(fake_sys('darwin'), fake_platform('10.9.0')).os_version, 'mavericks')
+ self.assertEqual(self.make_info(fake_sys('darwin'), fake_platform('10.10.0')).os_version, 'future')
+
+ self.assertEqual(self.make_info(fake_sys('linux2')).os_version, 'lucid')
+
+ self.assertEqual(self.make_info(fake_sys('freebsd8'), fake_platform('', '8.3-PRERELEASE')).os_version, '8.3-PRERELEASE')
+ self.assertEqual(self.make_info(fake_sys('freebsd9'), fake_platform('', '9.0-RELEASE')).os_version, '9.0-RELEASE')
+
+ self.assertRaises(AssertionError, self.make_info, fake_sys('win32', tuple([5, 0, 1234])))
+ self.assertEqual(self.make_info(fake_sys('win32', tuple([6, 2, 1234]))).os_version, 'future')
+ self.assertEqual(self.make_info(fake_sys('win32', tuple([6, 1, 7600]))).os_version, '7sp0')
+ self.assertEqual(self.make_info(fake_sys('win32', tuple([6, 0, 1234]))).os_version, 'vista')
+ self.assertEqual(self.make_info(fake_sys('win32', tuple([5, 1, 1234]))).os_version, 'xp')
+
+ self.assertRaises(AssertionError, self.make_info, fake_sys('win32'), executive=fake_executive('5.0.1234'))
+ self.assertEqual(self.make_info(fake_sys('cygwin'), executive=fake_executive('6.2.1234')).os_version, 'future')
+ self.assertEqual(self.make_info(fake_sys('cygwin'), executive=fake_executive('6.1.7600')).os_version, '7sp0')
+ self.assertEqual(self.make_info(fake_sys('cygwin'), executive=fake_executive('6.0.1234')).os_version, 'vista')
+ self.assertEqual(self.make_info(fake_sys('cygwin'), executive=fake_executive('5.1.1234')).os_version, 'xp')
+
+ def test_display_name(self):
+ info = self.make_info(fake_sys('darwin'))
+ self.assertNotEquals(info.display_name(), '')
+
+ info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
+ self.assertNotEquals(info.display_name(), '')
+
+ info = self.make_info(fake_sys('linux2'))
+ self.assertNotEquals(info.display_name(), '')
+
+ info = self.make_info(fake_sys('freebsd9'))
+ self.assertNotEquals(info.display_name(), '')
+
+ def test_total_bytes_memory(self):
+ info = self.make_info(fake_sys('darwin'), fake_platform('10.6.3'), fake_executive('1234'))
+ self.assertEqual(info.total_bytes_memory(), 1234)
+
+ info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
+ self.assertIsNone(info.total_bytes_memory())
+
+ info = self.make_info(fake_sys('linux2'))
+ self.assertIsNone(info.total_bytes_memory())
+
+ info = self.make_info(fake_sys('freebsd9'))
+ self.assertIsNone(info.total_bytes_memory())
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/profiler.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/profiler.py
new file mode 100644
index 0000000..0208cf8
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/profiler.py
@@ -0,0 +1,210 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import re
+import itertools
+
+_log = logging.getLogger(__name__)
+
+
+class ProfilerFactory(object):
+ @classmethod
+ def create_profiler(cls, host, executable_path, output_dir, profiler_name=None, identifier=None):
+ profilers = cls.profilers_for_platform(host.platform)
+ if not profilers:
+ return None
+ profiler_name = profiler_name or cls.default_profiler_name(host.platform)
+ profiler_class = next(itertools.ifilter(lambda profiler: profiler.name == profiler_name, profilers), None)
+ if not profiler_class:
+ return None
+        return profiler_class(host, executable_path, output_dir, identifier)
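+
+    # For example (from profiler_unittest.py), with the default (Mac) mocks:
+    #
+    #   profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
+    #   profiler._output_path -> '/tmp/output/test.dtps'   (an IProfiler)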
+
+ @classmethod
+ def default_profiler_name(cls, platform):
+ profilers = cls.profilers_for_platform(platform)
+ return profilers[0].name if profilers else None
+
+ @classmethod
+ def profilers_for_platform(cls, platform):
+ # GooglePProf requires TCMalloc/google-perftools, but is available everywhere.
+ profilers_by_os_name = {
+ 'mac': [IProfiler, Sample, GooglePProf],
+ 'linux': [Perf, GooglePProf],
+ # Note: freebsd, win32 have no profilers defined yet, thus --profile will be ignored
+ # by default, but a profiler can be selected with --profiler=PROFILER explicitly.
+ }
+ return profilers_by_os_name.get(platform.os_name, [])
+
+
+class Profiler(object):
+ # Used by ProfilerFactory to lookup a profiler from the --profiler=NAME option.
+ name = None
+
+ def __init__(self, host, executable_path, output_dir, identifier=None):
+ self._host = host
+ self._executable_path = executable_path
+ self._output_dir = output_dir
+ self._identifier = "test"
+ self._host.filesystem.maybe_make_directory(self._output_dir)
+
+ def adjusted_environment(self, env):
+ return env
+
+ def attach_to_pid(self, pid):
+ pass
+
+ def profile_after_exit(self):
+ pass
+
+
+class SingleFileOutputProfiler(Profiler):
+ def __init__(self, host, executable_path, output_dir, output_suffix, identifier=None):
+ super(SingleFileOutputProfiler, self).__init__(host, executable_path, output_dir, identifier)
+        # FIXME: Currently all reports are kept as test.*; until we fix that, search up to 1000 names before giving up.
+ self._output_path = self._host.workspace.find_unused_filename(self._output_dir, self._identifier, output_suffix, search_limit=1000)
+ assert(self._output_path)
+
+
+class GooglePProf(SingleFileOutputProfiler):
+ name = 'pprof'
+
+ def __init__(self, host, executable_path, output_dir, identifier=None):
+ super(GooglePProf, self).__init__(host, executable_path, output_dir, "pprof", identifier)
+
+ def adjusted_environment(self, env):
+ env['CPUPROFILE'] = self._output_path
+ return env
+
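+    # The regexp below skips everything up to and including pprof's
+    # 'Total: ...' header line and captures at most the next ten lines;
+    # GooglePProfTest.test_pprof_output_regexp shows a worked example.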
+ def _first_ten_lines_of_profile(self, pprof_output):
+ match = re.search("^Total:[^\n]*\n((?:[^\n]*\n){0,10})", pprof_output, re.MULTILINE)
+ return match.group(1) if match else None
+
+ def _pprof_path(self):
+        # FIXME: We should have code to find the right google-pprof executable;
+        # some Googlers have google-pprof installed as "pprof" on their machines.
+ return '/usr/bin/google-pprof'
+
+ def profile_after_exit(self):
+ # google-pprof doesn't check its arguments, so we have to.
+ if not (self._host.filesystem.exists(self._output_path)):
+ print "Failed to gather profile, %s does not exist." % self._output_path
+ return
+
+ pprof_args = [self._pprof_path(), '--text', self._executable_path, self._output_path]
+ profile_text = self._host.executive.run_command(pprof_args)
+ print "First 10 lines of pprof --text:"
+ print self._first_ten_lines_of_profile(profile_text)
+ print "http://google-perftools.googlecode.com/svn/trunk/doc/cpuprofile.html documents output."
+ print
+        print "To interact with the full profile, including producing graphs:"
+ print ' '.join([self._pprof_path(), self._executable_path, self._output_path])
+
+
+class Perf(SingleFileOutputProfiler):
+ name = 'perf'
+
+ def __init__(self, host, executable_path, output_dir, identifier=None):
+ super(Perf, self).__init__(host, executable_path, output_dir, "data", identifier)
+ self._perf_process = None
+ self._pid_being_profiled = None
+
+ def _perf_path(self):
+ # FIXME: We may need to support finding the perf binary in other locations.
+ return 'perf'
+
+ def attach_to_pid(self, pid):
+ assert(not self._perf_process and not self._pid_being_profiled)
+ self._pid_being_profiled = pid
+ cmd = [self._perf_path(), "record", "--call-graph", "--pid", pid, "--output", self._output_path]
+ self._perf_process = self._host.executive.popen(cmd)
+
+ def _first_ten_lines_of_profile(self, perf_output):
+ match = re.search("^#[^\n]*\n((?: [^\n]*\n){1,10})", perf_output, re.MULTILINE)
+ return match.group(1) if match else None
+
+ def profile_after_exit(self):
+        # Perf doesn't automatically watch the attached pid for death notifications,
+        # so we have to do that for it and then tell it it's time to stop sampling. :(
+ self._host.executive.wait_limited(self._pid_being_profiled, limit_in_seconds=10)
+ perf_exitcode = self._perf_process.poll()
+        if perf_exitcode is None: # This should always be the case, unless perf errored out early.
+ self._host.executive.interrupt(self._perf_process.pid)
+
+ perf_exitcode = self._perf_process.wait()
+ if perf_exitcode not in (0, -2): # The exit code should always be -2, as we're always interrupting perf.
+ print "'perf record' failed (exit code: %i), can't process results:" % perf_exitcode
+ return
+
+ perf_args = [self._perf_path(), 'report', '--call-graph', 'none', '--input', self._output_path]
+ print "First 10 lines of 'perf report --call-graph=none':"
+
+ print " ".join(perf_args)
+ perf_output = self._host.executive.run_command(perf_args)
+ print self._first_ten_lines_of_profile(perf_output)
+
+ print "To view the full profile, run:"
+ print ' '.join([self._perf_path(), 'report', '-i', self._output_path])
+ print # An extra line between tests looks nicer.
+
+
+class Sample(SingleFileOutputProfiler):
+ name = 'sample'
+
+ def __init__(self, host, executable_path, output_dir, identifier=None):
+ super(Sample, self).__init__(host, executable_path, output_dir, "txt", identifier)
+ self._profiler_process = None
+
+ def attach_to_pid(self, pid):
+ cmd = ["sample", pid, "-mayDie", "-file", self._output_path]
+ self._profiler_process = self._host.executive.popen(cmd)
+
+ def profile_after_exit(self):
+ self._profiler_process.wait()
+
+
+class IProfiler(SingleFileOutputProfiler):
+ name = 'iprofiler'
+
+ def __init__(self, host, executable_path, output_dir, identifier=None):
+ super(IProfiler, self).__init__(host, executable_path, output_dir, "dtps", identifier)
+ self._profiler_process = None
+
+ def attach_to_pid(self, pid):
+ # FIXME: iprofiler requires us to pass the directory separately
+ # from the basename of the file, with no control over the extension.
+ fs = self._host.filesystem
+ cmd = ["iprofiler", "-timeprofiler", "-a", pid,
+ "-d", fs.dirname(self._output_path), "-o", fs.splitext(fs.basename(self._output_path))[0]]
+ # FIXME: Consider capturing instead of letting instruments spam to stderr directly.
+ self._profiler_process = self._host.executive.popen(cmd)
+
+ def profile_after_exit(self):
+        # It seems like a nicer user experience to wait on the profiler to exit to prevent
+        # it from spewing to stderr at odd times.
+ self._profiler_process.wait()
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/profiler_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/profiler_unittest.py
new file mode 100644
index 0000000..2489d1d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/profiler_unittest.py
@@ -0,0 +1,103 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+from .profiler import ProfilerFactory, GooglePProf
+
+
+class ProfilerFactoryTest(unittest.TestCase):
+ def _assert_default_profiler_name(self, os_name, expected_profiler_name):
+ profiler_name = ProfilerFactory.default_profiler_name(MockPlatformInfo(os_name))
+ self.assertEqual(profiler_name, expected_profiler_name)
+
+ def test_default_profilers(self):
+ self._assert_default_profiler_name('mac', 'iprofiler')
+ self._assert_default_profiler_name('linux', 'perf')
+ self._assert_default_profiler_name('win32', None)
+ self._assert_default_profiler_name('freebsd', None)
+
+ def test_default_profiler_output(self):
+ host = MockSystemHost()
+ self.assertFalse(host.filesystem.exists("/tmp/output"))
+
+ # Default mocks are Mac, so iprofile should be default.
+ profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
+ self.assertTrue(host.filesystem.exists("/tmp/output"))
+ self.assertEqual(profiler._output_path, "/tmp/output/test.dtps")
+
+ # Linux defaults to perf.
+ host.platform.os_name = 'linux'
+ profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
+ self.assertEqual(profiler._output_path, "/tmp/output/test.data")
+
+
+class GooglePProfTest(unittest.TestCase):
+ def test_pprof_output_regexp(self):
+ pprof_output = """
+sometimes
+there
+is
+junk before the total line
+
+
+Total: 3770 samples
+ 76 2.0% 2.0% 104 2.8% lookup (inline)
+ 60 1.6% 3.6% 60 1.6% FL_SetPrevious (inline)
+ 56 1.5% 5.1% 56 1.5% MaskPtr (inline)
+ 51 1.4% 6.4% 222 5.9% WebCore::HTMLTokenizer::nextToken
+ 42 1.1% 7.6% 47 1.2% WTF::Vector::shrinkCapacity
+ 35 0.9% 8.5% 35 0.9% WTF::RefPtr::get (inline)
+ 33 0.9% 9.4% 43 1.1% append (inline)
+ 29 0.8% 10.1% 67 1.8% WTF::StringImpl::deref (inline)
+ 29 0.8% 10.9% 100 2.7% add (inline)
+ 28 0.7% 11.6% 28 0.7% WebCore::QualifiedName::localName (inline)
+ 25 0.7% 12.3% 27 0.7% WebCore::Private::addChildNodesToDeletionQueue
+ 24 0.6% 12.9% 24 0.6% __memcpy_ssse3_back
+ 23 0.6% 13.6% 23 0.6% intHash (inline)
+ 23 0.6% 14.2% 76 2.0% tcmalloc::FL_Next
+ 23 0.6% 14.8% 95 2.5% tcmalloc::FL_Push
+ 22 0.6% 15.4% 22 0.6% WebCore::MarkupTokenizerBase::InputStreamPreprocessor::peek (inline)
+"""
+ expected_first_ten_lines = """ 76 2.0% 2.0% 104 2.8% lookup (inline)
+ 60 1.6% 3.6% 60 1.6% FL_SetPrevious (inline)
+ 56 1.5% 5.1% 56 1.5% MaskPtr (inline)
+ 51 1.4% 6.4% 222 5.9% WebCore::HTMLTokenizer::nextToken
+ 42 1.1% 7.6% 47 1.2% WTF::Vector::shrinkCapacity
+ 35 0.9% 8.5% 35 0.9% WTF::RefPtr::get (inline)
+ 33 0.9% 9.4% 43 1.1% append (inline)
+ 29 0.8% 10.1% 67 1.8% WTF::StringImpl::deref (inline)
+ 29 0.8% 10.9% 100 2.7% add (inline)
+ 28 0.7% 11.6% 28 0.7% WebCore::QualifiedName::localName (inline)
+"""
+ host = MockSystemHost()
+ profiler = GooglePProf(host, '/bin/executable', '/tmp/output')
+ self.assertEqual(profiler._first_ten_lines_of_profile(pprof_output), expected_first_ten_lines)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/stack_utils.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/stack_utils.py
new file mode 100644
index 0000000..a343807
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/stack_utils.py
@@ -0,0 +1,67 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Simple routines for logging, obtaining thread stack information."""
+
+import sys
+import traceback
+
+
+def log_thread_state(logger, name, thread_id, msg=''):
+ """Log information about the given thread state."""
+ stack = _find_thread_stack(thread_id)
+ assert(stack is not None)
+ logger("")
+ logger("%s (tid %d) %s" % (name, thread_id, msg))
+ _log_stack(logger, stack)
+ logger("")
+
+
+def _find_thread_stack(thread_id):
+ """Returns a stack object that can be used to dump a stack trace for
+ the given thread id (or None if the id is not found)."""
+ for tid, stack in sys._current_frames().items():
+ if tid == thread_id:
+ return stack
+ return None
+
+
+def _log_stack(logger, stack):
+ """Log a stack trace to the logger callback."""
+ for filename, lineno, name, line in traceback.extract_stack(stack):
+ logger('File: "%s", line %d, in %s' % (filename, lineno, name))
+ if line:
+ logger(' %s' % line.strip())
+
+
+def log_traceback(logger, tb):
+ stack = traceback.extract_tb(tb)
+ for frame_str in traceback.format_list(stack):
+ for line in frame_str.split('\n'):
+ if line:
+ logger(" %s" % line)
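+
+
+# Sketch of typical use, from an except block (run_tests is a stand-in name):
+#
+#   try:
+#       run_tests()
+#   except Exception:
+#       stack_utils.log_traceback(_log.error, sys.exc_info()[2])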
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/stack_utils_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/stack_utils_unittest.py
new file mode 100644
index 0000000..76dd6da
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/stack_utils_unittest.py
@@ -0,0 +1,72 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import unittest
+
+from webkitpy.common.system import outputcapture
+from webkitpy.common.system import stack_utils
+
+
+def current_thread_id():
+ thread_id, _ = sys._current_frames().items()[0]
+ return thread_id
+
+
+class StackUtilsTest(unittest.TestCase):
+ def test_find_thread_stack_found(self):
+ thread_id = current_thread_id()
+ found_stack = stack_utils._find_thread_stack(thread_id)
+ self.assertIsNotNone(found_stack)
+
+ def test_find_thread_stack_not_found(self):
+ found_stack = stack_utils._find_thread_stack(0)
+ self.assertIsNone(found_stack)
+
+ def test_log_thread_state(self):
+ msgs = []
+
+ def logger(msg):
+ msgs.append(msg)
+
+ thread_id = current_thread_id()
+ stack_utils.log_thread_state(logger, "test-thread", thread_id,
+ "is tested")
+ self.assertTrue(msgs)
+
+ def test_log_traceback(self):
+ msgs = []
+
+ def logger(msg):
+ msgs.append(msg)
+
+ try:
+ raise ValueError
+ except:
+ stack_utils.log_traceback(logger, sys.exc_info()[2])
+ self.assertTrue(msgs)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/systemhost.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/systemhost.py
new file mode 100644
index 0000000..a7b7267
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/systemhost.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import platform
+import sys
+
+from webkitpy.common.system import environment, executive, filesystem, platforminfo, user, workspace
+
+
+class SystemHost(object):
+ def __init__(self):
+ self.executable = sys.executable
+ self.executive = executive.Executive()
+ self.filesystem = filesystem.FileSystem()
+ self.user = user.User()
+ self.platform = platforminfo.PlatformInfo(sys, platform, self.executive)
+ self.workspace = workspace.Workspace(self.filesystem, self.executive)
+ self.stdin = sys.stdin
+ self.stdout = sys.stdout
+ self.stderr = sys.stderr
+
+ def copy_current_environment(self):
+ return environment.Environment(os.environ.copy())
+
+ def print_(self, *args, **kwargs):
+ sep = kwargs.get('sep', ' ')
+ end = kwargs.get('end', '\n')
+ stream = kwargs.get('stream', self.stdout)
+ stream.write(sep.join([str(arg) for arg in args]) + end)
+
+ def exit(self, returncode):
+ sys.exit(returncode)
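+
+
+if __name__ == '__main__':
+    # Minimal smoke-test sketch (assumes webkitpy is importable, e.g. when
+    # run from Tools/Scripts); prints a couple of the host's platform facts.
+    host = SystemHost()
+    host.print_('os_name:', host.platform.os_name,
+                'os_version:', host.platform.os_version)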
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/systemhost_mock.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/systemhost_mock.py
new file mode 100644
index 0000000..ef24804
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/systemhost_mock.py
@@ -0,0 +1,65 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from StringIO import StringIO
+
+from webkitpy.common.system.environment import Environment
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
+from webkitpy.common.system.user_mock import MockUser
+from webkitpy.common.system.workspace_mock import MockWorkspace
+
+
+class MockSystemHost(object):
+ def __init__(self, log_executive=False, executive_throws_when_run=None, os_name=None, os_version=None, executive=None, filesystem=None):
+ self.executable = 'python'
+ self.executive = executive or MockExecutive(should_log=log_executive, should_throw_when_run=executive_throws_when_run)
+ self.filesystem = filesystem or MockFileSystem()
+ self.user = MockUser()
+ self.platform = MockPlatformInfo()
+ if os_name:
+ self.platform.os_name = os_name
+ if os_version:
+ self.platform.os_version = os_version
+
+ # FIXME: Should this take pointers to the filesystem and the executive?
+ self.workspace = MockWorkspace()
+
+ self.stdin = StringIO()
+ self.stdout = StringIO()
+ self.stderr = StringIO()
+
+ def copy_current_environment(self):
+ return Environment({"MOCK_ENVIRON_COPY": '1'})
+
+ def print_(self, *args, **kwargs):
+ sep = kwargs.get('sep', ' ')
+ end = kwargs.get('end', '\n')
+ stream = kwargs.get('stream', self.stdout)
+ stream.write(sep.join([str(arg) for arg in args]) + end)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/user.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/user.py
new file mode 100644
index 0000000..494bf21
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/user.py
@@ -0,0 +1,151 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import getpass
+import logging
+import os
+import platform
+import re
+import shlex
+import subprocess
+import sys
+import webbrowser
+
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.system.platforminfo import PlatformInfo
+
+
+_log = logging.getLogger(__name__)
+
+
+class User(object):
+ DEFAULT_NO = 'n'
+ DEFAULT_YES = 'y'
+
+ def __init__(self, platforminfo=None):
+ # We cannot get the PlatformInfo object from a SystemHost because
+ # User is part of SystemHost itself.
+ self._platforminfo = platforminfo or PlatformInfo(sys, platform, Executive())
+
+ # FIXME: These are @classmethods because bugzilla.py doesn't have a Tool object (thus no User instance).
+ @classmethod
+ def prompt(cls, message, repeat=1, raw_input=raw_input):
+ response = None
+ while (repeat and not response):
+ repeat -= 1
+ response = raw_input(message)
+ return response
+
+ @classmethod
+ def prompt_password(cls, message, repeat=1):
+ return cls.prompt(message, repeat=repeat, raw_input=getpass.getpass)
+
+ @classmethod
+ def prompt_with_multiple_lists(cls, list_title, subtitles, lists, can_choose_multiple=False, raw_input=raw_input):
+ item_index = 0
+ cumulated_list = []
+ print list_title
+ for i in range(len(subtitles)):
+ print "\n" + subtitles[i]
+ for item in lists[i]:
+ item_index += 1
+ print "%2d. %s" % (item_index, item)
+ cumulated_list += lists[i]
+ return cls._wait_on_list_response(cumulated_list, can_choose_multiple, raw_input)
+
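+    # Selection grammar sketch: with can_choose_multiple, a response such as
+    # "1, 3-5" picks items 1, 3, 4 and 5 (1-based), and "" or "all" picks
+    # everything; otherwise a single 1-based number is expected.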
+ @classmethod
+ def _wait_on_list_response(cls, list_items, can_choose_multiple, raw_input):
+ while True:
+ if can_choose_multiple:
+ response = cls.prompt("Enter one or more numbers (comma-separated) or ranges (e.g. 3-7), or \"all\": ", raw_input=raw_input)
+ if not response.strip() or response == "all":
+ return list_items
+
+ try:
+ indices = []
+ for value in re.split("\s*,\s*", response):
+ parts = value.split('-')
+ if len(parts) == 2:
+ indices += range(int(parts[0]) - 1, int(parts[1]))
+ else:
+ indices.append(int(value) - 1)
+ except ValueError, err:
+ continue
+
+ return [list_items[i] for i in indices]
+ else:
+ try:
+ result = int(cls.prompt("Enter a number: ", raw_input=raw_input)) - 1
+ except ValueError, err:
+ continue
+ return list_items[result]
+
+ @classmethod
+ def prompt_with_list(cls, list_title, list_items, can_choose_multiple=False, raw_input=raw_input):
+ print list_title
+ i = 0
+ for item in list_items:
+ i += 1
+ print "%2d. %s" % (i, item)
+ return cls._wait_on_list_response(list_items, can_choose_multiple, raw_input)
+
+ def edit(self, files):
+ editor = os.environ.get("EDITOR") or "vi"
+ args = shlex.split(editor)
+ # Note: Not thread safe: http://bugs.python.org/issue2320
+ subprocess.call(args + files)
+
+ def page(self, message):
+ pager = os.environ.get("PAGER") or "less"
+ try:
+ # Note: Not thread safe: http://bugs.python.org/issue2320
+ child_process = subprocess.Popen([pager], stdin=subprocess.PIPE)
+ child_process.communicate(input=message)
+ except IOError, e:
+ pass
+
+ def confirm(self, message=None, default=DEFAULT_YES, raw_input=raw_input):
+ if not message:
+ message = "Continue?"
+ choice = {'y': 'Y/n', 'n': 'y/N'}[default]
+ response = raw_input("%s [%s]: " % (message, choice))
+ if not response:
+ response = default
+ return response.lower() == 'y'
+
+ def can_open_url(self):
+ try:
+ webbrowser.get()
+ return True
+ except webbrowser.Error, e:
+ return False
+
+ def open_url(self, url):
+ if not self.can_open_url():
+ _log.warn("Failed to open %s" % url)
+ webbrowser.open(url)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/user_mock.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/user_mock.py
new file mode 100644
index 0000000..190dd60
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/user_mock.py
@@ -0,0 +1,65 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+
+_log = logging.getLogger(__name__)
+
+
+class MockUser(object):
+
+ @classmethod
+ def prompt(cls, message, repeat=1, raw_input=raw_input):
+ return "Mock user response"
+
+ @classmethod
+ def prompt_with_list(cls, list_title, list_items, can_choose_multiple=False, raw_input=raw_input):
+ pass
+
+ def __init__(self):
+ self.opened_urls = []
+
+ def edit(self, files):
+ pass
+
+ def page(self, message):
+ pass
+
+ def confirm(self, message=None, default='y'):
+ _log.info(message)
+ return default == 'y'
+
+ def can_open_url(self):
+ return True
+
+ def open_url(self, url):
+ self.opened_urls.append(url)
+ if url.startswith("file://"):
+ _log.info("MOCK: user.open_url: file://...")
+ return
+ _log.info("MOCK: user.open_url: %s" % url)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/user_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/user_unittest.py
new file mode 100644
index 0000000..087f6d8
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/user_unittest.py
@@ -0,0 +1,128 @@
+# Copyright (C) 2010 Research in Motion Ltd. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Research in Motion Ltd. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.user import User
+
+class UserTest(unittest.TestCase):
+
+ example_user_response = "example user response"
+
+ def test_prompt_repeat(self):
+ self.repeatsRemaining = 2
+ def mock_raw_input(message):
+ self.repeatsRemaining -= 1
+ if not self.repeatsRemaining:
+ return UserTest.example_user_response
+ return None
+ self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input), UserTest.example_user_response)
+
+ def test_prompt_when_exceeded_repeats(self):
+ self.repeatsRemaining = 2
+ def mock_raw_input(message):
+ self.repeatsRemaining -= 1
+ return None
+ self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input), None)
+
+ def test_prompt_with_multiple_lists(self):
+ def run_prompt_test(inputs, expected_result, can_choose_multiple=False):
+ def mock_raw_input(message):
+ return inputs.pop(0)
+ output_capture = OutputCapture()
+ actual_result = output_capture.assert_outputs(
+ self,
+ User.prompt_with_multiple_lists,
+ args=["title", ["subtitle1", "subtitle2"], [["foo", "bar"], ["foobar", "barbaz", "foobaz"]]],
+ kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input},
+ expected_stdout="title\n\nsubtitle1\n 1. foo\n 2. bar\n\nsubtitle2\n 3. foobar\n 4. barbaz\n 5. foobaz\n")
+ self.assertEqual(actual_result, expected_result)
+ self.assertEqual(len(inputs), 0)
+
+ run_prompt_test(["1"], "foo")
+ run_prompt_test(["badinput", "2"], "bar")
+ run_prompt_test(["3"], "foobar")
+ run_prompt_test(["4"], "barbaz")
+ run_prompt_test(["5"], "foobaz")
+
+ run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True)
+ run_prompt_test(["1-3"], ["foo", "bar", "foobar"], can_choose_multiple=True)
+ run_prompt_test(["1-2,3"], ["foo", "bar", "foobar"], can_choose_multiple=True)
+ run_prompt_test(["2-1,3"], ["foobar"], can_choose_multiple=True)
+ run_prompt_test([" 1, 2 "], ["foo", "bar"], can_choose_multiple=True)
+ run_prompt_test(["all"], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
+ run_prompt_test([""], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
+ run_prompt_test([" "], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
+ run_prompt_test(["badinput", "all"], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True)
+
+ def test_prompt_with_list(self):
+ def run_prompt_test(inputs, expected_result, can_choose_multiple=False):
+ def mock_raw_input(message):
+ return inputs.pop(0)
+ output_capture = OutputCapture()
+ actual_result = output_capture.assert_outputs(
+ self,
+ User.prompt_with_list,
+ args=["title", ["foo", "bar"]],
+ kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input},
+ expected_stdout="title\n 1. foo\n 2. bar\n")
+ self.assertEqual(actual_result, expected_result)
+ self.assertEqual(len(inputs), 0)
+
+ run_prompt_test(["1"], "foo")
+ run_prompt_test(["badinput", "2"], "bar")
+
+ run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True)
+ run_prompt_test([" 1, 2 "], ["foo", "bar"], can_choose_multiple=True)
+ run_prompt_test(["all"], ["foo", "bar"], can_choose_multiple=True)
+ run_prompt_test([""], ["foo", "bar"], can_choose_multiple=True)
+ run_prompt_test([" "], ["foo", "bar"], can_choose_multiple=True)
+ run_prompt_test(["badinput", "all"], ["foo", "bar"], can_choose_multiple=True)
+
+ def test_confirm(self):
+ test_cases = (
+ (("Continue? [Y/n]: ", True), (User.DEFAULT_YES, 'y')),
+ (("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'n')),
+ (("Continue? [Y/n]: ", True), (User.DEFAULT_YES, '')),
+ (("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'q')),
+ (("Continue? [y/N]: ", True), (User.DEFAULT_NO, 'y')),
+ (("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'n')),
+ (("Continue? [y/N]: ", False), (User.DEFAULT_NO, '')),
+ (("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'q')),
+ )
+ for test_case in test_cases:
+ expected, inputs = test_case
+
+ def mock_raw_input(message):
+ self.assertEqual(expected[0], message)
+ return inputs[1]
+
+ result = User().confirm(default=inputs[0],
+ raw_input=mock_raw_input)
+ self.assertEqual(expected[1], result)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/workspace.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/workspace.py
new file mode 100644
index 0000000..1d92aca
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/workspace.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# A home for file logic which should sit above FileSystem, but
+# below more complicated objects.
+
+import logging
+import zipfile
+
+from webkitpy.common.system.executive import ScriptError
+
+
+_log = logging.getLogger(__name__)
+
+
+class Workspace(object):
+ def __init__(self, filesystem, executive):
+ self._filesystem = filesystem
+ self._executive = executive # FIXME: Remove if create_zip is moved to python.
+
+ def find_unused_filename(self, directory, name, extension, search_limit=100):
+ for count in range(search_limit):
+ if count:
+ target_name = "%s-%s.%s" % (name, count, extension)
+ else:
+ target_name = "%s.%s" % (name, extension)
+ target_path = self._filesystem.join(directory, target_name)
+ if not self._filesystem.exists(target_path):
+ return target_path
+ # If we can't find an unused name in search_limit tries, just give up.
+ return None
+
+ def create_zip(self, zip_path, source_path, zip_class=zipfile.ZipFile):
+ # It's possible to create zips with Python:
+ # zip_file = ZipFile(zip_path, 'w')
+ # for root, dirs, files in os.walk(source_path):
+ # for path in files:
+ # absolute_path = os.path.join(root, path)
+ # zip_file.write(absolute_path, os.path.relpath(absolute_path, source_path))
+ # However, getting the paths, encoding and compression correct could be non-trivial,
+ # so for now we depend on the environment having "zip" installed (which likely fails on Win32).
+ try:
+ self._executive.run_command(['zip', '-9', '-r', zip_path, '.'], cwd=source_path)
+ except ScriptError, e:
+ _log.error("Workspace.create_zip failed in %s:\n%s" % (source_path, e.message_with_output()))
+ return None
+
+ return zip_class(zip_path)
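+
+# A minimal pure-Python sketch of the zip step above (illustrative only: it is
+# not called by Workspace, and the helper name is hypothetical). It stores
+# entries relative to source_path with deflate compression, but skips the
+# encoding corner cases that the comment in create_zip alludes to.
+def _create_zip_with_python_sketch(zip_path, source_path):
+    import os  # os is not otherwise needed by this module
+    zip_file = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
+    for root, _dirs, files in os.walk(source_path):
+        for name in files:
+            absolute_path = os.path.join(root, name)
+            # Store each entry relative to source_path so the archive root is clean.
+            zip_file.write(absolute_path, os.path.relpath(absolute_path, source_path))
+    zip_file.close()
+    return zipfile.ZipFile(zip_path)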
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/workspace_mock.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/workspace_mock.py
new file mode 100644
index 0000000..02a5f4c
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/workspace_mock.py
@@ -0,0 +1,37 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class MockWorkspace(object):
+ def find_unused_filename(self, directory, name, extension, search_limit=10):
+ return "%s/%s.%s" % (directory, name, extension)
+
+ def create_zip(self, zip_path, source_path):
+ self.zip_path = zip_path
+ self.source_path = source_path
+ return object() # Something that is not None
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/system/workspace_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/workspace_unittest.py
new file mode 100644
index 0000000..5d965f0
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/system/workspace_unittest.py
@@ -0,0 +1,72 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.workspace import Workspace
+from webkitpy.common.system.executive_mock import MockExecutive
+
+
+class WorkspaceTest(unittest.TestCase):
+
+ def test_find_unused_filename(self):
+ filesystem = MockFileSystem({
+ "dir/foo.jpg": "",
+ "dir/foo-1.jpg": "",
+ "dir/foo-2.jpg": "",
+ })
+ workspace = Workspace(filesystem, None)
+ self.assertEqual(workspace.find_unused_filename("bar", "bar", "bar"), "bar/bar.bar")
+ self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg", search_limit=1), None)
+ self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg", search_limit=2), None)
+ self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg"), "dir/foo-3.jpg")
+
+ def test_create_zip(self):
+ workspace = Workspace(None, MockExecutive(should_log=True))
+ expected_logs = "MOCK run_command: ['zip', '-9', '-r', '/zip/path', '.'], cwd=/source/path\n"
+ class MockZipFile(object):
+ def __init__(self, path):
+ self.filename = path
+ archive = OutputCapture().assert_outputs(self, workspace.create_zip, ["/zip/path", "/source/path", MockZipFile], expected_logs=expected_logs)
+ self.assertEqual(archive.filename, "/zip/path")
+
+ def test_create_zip_exception(self):
+ workspace = Workspace(None, MockExecutive(should_log=True, should_throw=True))
+ expected_logs = """MOCK run_command: ['zip', '-9', '-r', '/zip/path', '.'], cwd=/source/path
+Workspace.create_zip failed in /source/path:
+MOCK ScriptError
+
+output: MOCK output of child process
+"""
+ class MockZipFile(object):
+ def __init__(self, path):
+ self.filename = path
+ archive = OutputCapture().assert_outputs(self, workspace.create_zip, ["/zip/path", "/source/path", MockZipFile], expected_logs=expected_logs)
+ self.assertIsNone(archive)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/version_check.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/version_check.py
new file mode 100644
index 0000000..cf5392a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/version_check.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+
+if sys.version < '2.7' or sys.version >= '2.8':
+ sys.stderr.write("Unsupported Python version: webkitpy requires 2.7.x, and you're running %s.\n" % sys.version.split()[0])
+ sys.exit(1)
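+
+# An equivalent check via sys.version_info avoids comparing version strings
+# lexically; shown only as an illustrative alternative:
+#
+#     if sys.version_info[:2] != (2, 7):
+#         sys.stderr.write("Unsupported Python version: webkitpy requires 2.7.x, "
+#                          "and you're running %s.\n" % sys.version.split()[0])
+#         sys.exit(1)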
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/common/webkit_finder.py b/src/third_party/blink/Tools/Scripts/webkitpy/common/webkit_finder.py
new file mode 100644
index 0000000..f267f0d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/common/webkit_finder.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import sys
+
+
+class WebKitFinder(object):
+ def __init__(self, filesystem):
+ self._filesystem = filesystem
+ self._dirsep = filesystem.sep
+ self._sys_path = sys.path
+ self._env_path = os.environ['PATH'].split(os.pathsep)
+ self._webkit_base = None
+ self._chromium_base = None
+ self._depot_tools = None
+
+ def webkit_base(self):
+ """Returns the absolute path to the top of the WebKit tree.
+
+ Raises an AssertionError if the top dir can't be determined."""
+ # Note: This code somewhat duplicates the code in
+ # scm.find_checkout_root(). However, that code only works if the top
+ # of the SCM repository also matches the top of the WebKit tree. Some SVN users
+ # (the Chromium test bots, for example) might only check out subdirectories like
+ # Tools/Scripts. This code will also work if there is no SCM system at all.
+ if not self._webkit_base:
+ module_path = self._filesystem.abspath(self._filesystem.path_to_module(self.__module__))
+ tools_index = module_path.rfind('Tools')
+ assert tools_index != -1, "could not find location of this checkout from %s" % module_path
+ self._webkit_base = self._filesystem.normpath(module_path[0:tools_index - 1])
+ return self._webkit_base
+
+ def chromium_base(self):
+ if not self._chromium_base:
+ self._chromium_base = self._filesystem.dirname(self._filesystem.dirname(self.webkit_base()))
+ return self._chromium_base
+
+ def path_from_webkit_base(self, *comps):
+ return self._filesystem.join(self.webkit_base(), *comps)
+
+ def path_from_chromium_base(self, *comps):
+ return self._filesystem.join(self.chromium_base(), *comps)
+
+ def path_to_script(self, script_name):
+ """Returns the relative path to the script from the top of the WebKit tree."""
+ # This is intentionally relative in order to force callers to consider what
+ # their current working directory is (and change to the top of the tree if necessary).
+ return self._filesystem.join("Tools", "Scripts", script_name)
+
+ def layout_tests_dir(self):
+ return self.path_from_webkit_base('LayoutTests')
+
+ def perf_tests_dir(self):
+ return self.path_from_webkit_base('PerformanceTests')
+
+ def depot_tools_base(self):
+ if not self._depot_tools:
+ # This basically duplicates src/tools/find_depot_tools.py without the side effects
+ # (adding the directory to sys.path and importing breakpad).
+ self._depot_tools = (self._check_paths_for_depot_tools(self._sys_path) or
+ self._check_paths_for_depot_tools(self._env_path) or
+ self._check_upward_for_depot_tools())
+ return self._depot_tools
+
+ def _check_paths_for_depot_tools(self, paths):
+ for path in paths:
+ if path.rstrip(self._dirsep).endswith('depot_tools'):
+ return path
+ return None
+
+ def _check_upward_for_depot_tools(self):
+ fs = self._filesystem
+ prev_dir = ''
+ current_dir = fs.dirname(self.webkit_base())
+ while current_dir != prev_dir:
+ if fs.exists(fs.join(current_dir, 'depot_tools', 'pylint.py')):
+ return fs.join(current_dir, 'depot_tools')
+ prev_dir = current_dir
+ current_dir = fs.dirname(current_dir)
+
+ def path_from_depot_tools_base(self, *comps):
+ return self._filesystem.join(self.depot_tools_base(), *comps)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/formatter/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/formatter/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/formatter/__init__.py
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/formatter/__main__.py b/src/third_party/blink/Tools/Scripts/webkitpy/formatter/__main__.py
new file mode 100644
index 0000000..b767df6
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/formatter/__main__.py
@@ -0,0 +1,11 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+from webkitpy.formatter.main import main
+
+
+if __name__ == '__main__':
+ sys.exit(main())
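+
+# Typical invocations, assuming the working directory puts webkitpy on the
+# module path (e.g. Tools/Scripts):
+#   python -m webkitpy.formatter foo.py                  # rewrites foo.py, keeps foo.py.bak
+#   python -m webkitpy.formatter --no-backups foo.py     # rewrites foo.py in place
+#   cat foo.py | python -m webkitpy.formatter -          # reads stdin, writes stdout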
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/formatter/fix_double_quote_strings.py b/src/third_party/blink/Tools/Scripts/webkitpy/formatter/fix_double_quote_strings.py
new file mode 100644
index 0000000..45f988d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/formatter/fix_double_quote_strings.py
@@ -0,0 +1,29 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A 2to3 fixer that converts all string literals to use double quotes.
+
+Strings that contain double quotes will not be modified. Prefixed string
+literals will also not be modified. This affects both single-quoted strings
+and triple-single-quoted strings.
+
+"""
+
+from lib2to3.fixer_base import BaseFix
+from lib2to3.pgen2 import token
+
+
+class FixDoubleQuoteStrings(BaseFix):
+
+ explicit = True
+ _accept_type = token.STRING
+
+ def match(self, node):
+ res = node.value.startswith("'") and '"' not in node.value[1:-1]
+ return res
+
+ def transform(self, node, results):
+ node.value = node.value.replace("'", '"')
+ node.changed()
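+
+# Examples of what this fixer does (illustrative):
+#   'foo'      -> "foo"       plain single-quoted string
+#   '''foo'''  -> """foo"""   triple-single-quoted string
+#   'say "hi"' -> unchanged   inner double quote would be corrupted
+#   r'foo'     -> unchanged   prefixed literal; match() requires a leading quote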
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/formatter/fix_single_quote_strings.py b/src/third_party/blink/Tools/Scripts/webkitpy/formatter/fix_single_quote_strings.py
new file mode 100644
index 0000000..996c0cd
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/formatter/fix_single_quote_strings.py
@@ -0,0 +1,29 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A 2to3 fixer that converts all string literals to use single quotes.
+
+Strings that contain single quotes will not be modified. Prefixed string
+literals will also not be modified. This affects double-quoted strings but
+not triple-double-quoted strings.
+
+"""
+
+from lib2to3.fixer_base import BaseFix
+from lib2to3.pgen2 import token
+
+
+class FixSingleQuoteStrings(BaseFix):
+
+ explicit = True
+ _accept_type = token.STRING
+
+ def match(self, node):
+ res = node.value.startswith('"') and not node.value.startswith('"""') and "'" not in node.value[1:-1]
+ return res
+
+ def transform(self, node, results):
+ node.value = node.value.replace('"', "'")
+ node.changed()
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/formatter/main.py b/src/third_party/blink/Tools/Scripts/webkitpy/formatter/main.py
new file mode 100644
index 0000000..92dbfa0
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/formatter/main.py
@@ -0,0 +1,103 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import lib2to3.refactor
+
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.thirdparty import autopep8
+
+
+def parse_args(args=None):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--chromium', action='store_const', dest='style', const='chromium', default='blink',
+ help="Format according to Chromium's Python coding styles instead of Blink's.")
+ parser.add_argument('--no-backups', action='store_false', default=True, dest='backup',
+ help='Do not back up files before overwriting them.')
+ parser.add_argument('-j', '--jobs', metavar='n', type=int, default=0,
+ help='Number of parallel jobs; match CPU count if less than 1.')
+ parser.add_argument('files', nargs='*', default=['-'],
+ help="files to format or '-' for standard in")
+ parser.add_argument('--double-quote-strings', action='store_const', dest='quoting', const='double', default='single',
+ help='Rewrite string literals to use double quotes instead of single quotes.')
+ parser.add_argument('--no-autopep8', action='store_true',
+ help='Skip the autopep8 code-formatting step.')
+ parser.add_argument('--leave-strings-alone', action='store_true',
+ help='Do not reformat string literals to use a consistent quote style.')
+ return parser.parse_args(args=args)
+
+
+def main(host=None, args=None):
+ options = parse_args(args)
+ if options.no_autopep8:
+ options.style = None
+
+ if options.leave_strings_alone:
+ options.quoting = None
+
+ autopep8_options = _autopep8_options_for_style(options.style)
+ fixers = _fixers_for_quoting(options.quoting)
+
+ if options.files == ['-']:
+ host = host or SystemHost()
+ host.print_(reformat_source(host.stdin.read(), autopep8_options, fixers, '<stdin>'), end='')
+ return
+
+ # We create the arglist before checking if we need to create a Host, because a
+ # real host is non-picklable and can't be passed to host.executive.map().
+
+ arglist = [(host, name, autopep8_options, fixers, options.backup) for name in options.files]
+ host = host or SystemHost()
+
+ host.executive.map(_reformat_thunk, arglist, processes=options.jobs)
+
+
+def _autopep8_options_for_style(style):
+ return {
+ None: [],
+ 'blink': autopep8.parse_args(['--aggressive',
+ '--max-line-length', '132',
+ '--indent-size', '4',
+ '']),
+ 'chromium': autopep8.parse_args(['--aggressive',
+ '--max-line-length', '80',
+ '--indent-size', '2',
+ '']),
+ }.get(style)
+
+
+def _fixers_for_quoting(quoting):
+ return {
+ None: [],
+ 'double': ['webkitpy.formatter.fix_double_quote_strings'],
+ 'single': ['webkitpy.formatter.fix_single_quote_strings'],
+ }.get(quoting)
+
+
+def _reformat_thunk(args):
+ reformat_file(*args)
+
+
+def reformat_file(host, name, autopep8_options, fixers, should_backup_file):
+ host = host or SystemHost()
+ source = host.filesystem.read_text_file(name)
+ dest = reformat_source(source, autopep8_options, fixers, name)
+ if dest != source:
+ if should_backup_file:
+ host.filesystem.write_text_file(name + '.bak', source)
+ host.filesystem.write_text_file(name, dest)
+
+
+def reformat_source(source, autopep8_options, fixers, name):
+ tmp_str = source
+
+ if autopep8_options:
+ tmp_str = autopep8.fix_code(tmp_str, autopep8_options)
+
+ if fixers:
+ tool = lib2to3.refactor.RefactoringTool(fixer_names=fixers,
+ explicit=fixers)
+ tmp_str = unicode(tool.refactor_string(tmp_str, name=name))
+
+ return tmp_str
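+
+# Example (illustrative) of driving reformat_source directly, applying only the
+# single-quote fixer and skipping autopep8:
+#
+#     fixers = _fixers_for_quoting('single')
+#     reformat_source('x = "a"\n', None, fixers, '<example>')  # -> u"x = 'a'\n"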
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/formatter/main_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/formatter/main_unittest.py
new file mode 100644
index 0000000..2beb7cd
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/formatter/main_unittest.py
@@ -0,0 +1,109 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import StringIO
+import unittest
+
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.formatter.main import main
+
+
+ACTUAL_INPUT = '''
+def foo():
+ """triple-quoted docstring"""
+ try:
+ bar = "bar"
+ long_list = ['this is a list of strings that should be wrapped', "and consistently quoted"]
+ longer_list = ['this is a list of strings that should be wrapped', "and consistently quoted", "because it's important to test quoting"]
+ except Exception, e:
+ pass
+'''
+
+
+EXPECTED_BLINK_OUTPUT = '''
+def foo():
+ """triple-quoted docstring"""
+ try:
+ bar = 'bar'
+ long_list = ['this is a list of strings that should be wrapped', 'and consistently quoted']
+ longer_list = [
+ 'this is a list of strings that should be wrapped',
+ 'and consistently quoted',
+ "because it's important to test quoting"]
+ except Exception as e:
+ pass
+'''
+
+
+EXPECTED_CHROMIUM_OUTPUT = '''
+def foo():
+ """triple-quoted docstring"""
+ try:
+ bar = 'bar'
+ long_list = [
+ 'this is a list of strings that should be wrapped',
+ 'and consistently quoted']
+ longer_list = [
+ 'this is a list of strings that should be wrapped',
+ 'and consistently quoted',
+ "because it's important to test quoting"]
+ except Exception as e:
+ pass
+'''
+
+EXPECTED_ONLY_DOUBLE_QUOTED_OUTPUT = '''
+def foo():
+ """triple-quoted docstring"""
+ try:
+ bar = "bar"
+ long_list = ["this is a list of strings that should be wrapped", "and consistently quoted"]
+ longer_list = ["this is a list of strings that should be wrapped", "and consistently quoted", "because it's important to test quoting"]
+ except Exception, e:
+ pass
+'''
+
+
+class TestMain(unittest.TestCase):
+ maxDiff = 4096
+
+ def test_files_blink(self):
+ host = MockSystemHost()
+ host.filesystem.files = {
+ 'test.py': ACTUAL_INPUT}
+ main(host, ['test.py'])
+ self.assertEqual(host.filesystem.files, {
+ 'test.py': EXPECTED_BLINK_OUTPUT,
+ 'test.py.bak': ACTUAL_INPUT})
+
+ def test_files_blink_no_backup(self):
+ host = MockSystemHost()
+ host.filesystem.files = {
+ 'test.py': ACTUAL_INPUT}
+ main(host, ['--no-backups', 'test.py'])
+ self.assertEqual(host.filesystem.files, {
+ 'test.py': EXPECTED_BLINK_OUTPUT})
+
+ def test_stdin_blink(self):
+ host = MockSystemHost()
+ host.stdin = StringIO.StringIO(ACTUAL_INPUT)
+ main(host, ['-'])
+ self.assertMultiLineEqual(host.stdout.getvalue(), EXPECTED_BLINK_OUTPUT)
+
+ def test_stdin_chromium(self):
+ host = MockSystemHost()
+ host.stdin = StringIO.StringIO(ACTUAL_INPUT)
+ main(host, ['--chromium', '-'])
+ self.assertMultiLineEqual(host.stdout.getvalue(), EXPECTED_CHROMIUM_OUTPUT)
+
+ def test_stdin_no_changes(self):
+ host = MockSystemHost()
+ host.stdin = StringIO.StringIO(ACTUAL_INPUT)
+ main(host, ['--no-autopep8', '--leave-strings-alone', '-'])
+ self.assertMultiLineEqual(host.stdout.getvalue(), ACTUAL_INPUT)
+
+ def test_stdin_only_double_quoting(self):
+ host = MockSystemHost()
+ host.stdin = StringIO.StringIO(ACTUAL_INPUT)
+ main(host, ['--no-autopep8', '--double-quote-strings', '-'])
+ self.assertMultiLineEqual(host.stdout.getvalue(), EXPECTED_ONLY_DOUBLE_QUOTED_OUTPUT)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/bisect_test_ordering.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/bisect_test_ordering.py
new file mode 100644
index 0000000..a10ed15
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/bisect_test_ordering.py
@@ -0,0 +1,170 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import math
+import optparse
+import os
+import subprocess
+import sys
+
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.webkit_finder import WebKitFinder
+
+_log = logging.getLogger(__name__)
+
+
+class Bucket(object):
+ def __init__(self, tests):
+ self.tests = tests
+
+ def size(self):
+ return len(self.tests)
+
+
+class Bisector(object):
+
+ def __init__(self, tests, is_debug):
+ self.executive = Executive()
+ self.tests = tests
+ self.expected_failure = tests[-1]
+ self.is_debug = is_debug
+ self.webkit_finder = WebKitFinder(FileSystem())
+
+ def bisect(self):
+ if self.test_fails_in_isolation():
+ self.buckets = [Bucket([self.expected_failure])]
+ print '%s fails when run in isolation.' % self.expected_failure
+ self.print_result()
+ return 0
+ if not self.test_fails(self.tests):
+ _log.error('%s does not fail' % self.expected_failure)
+ return 1
+ # Split the list of tests into buckets. Each bucket has at least one test required to cause
+ # the expected failure at the end. Split buckets in half until there are only buckets left
+ # with one item in them.
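+ # For example, with tests [t1, t2, t3, t4, f] the initial buckets are
+ # [t1, t2, t3, t4] and [f]; each split keeps whichever halves are still
+ # needed to make f fail, until every remaining bucket holds one test.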
+ self.buckets = [Bucket(self.tests[:-1]), Bucket([self.expected_failure])]
+ while not self.is_done():
+ self.print_progress()
+ self.split_largest_bucket()
+ self.print_result()
+ self.verify_non_flaky()
+ return 0
+
+ def test_fails_in_isolation(self):
+ return self.test_bucket_list_fails([Bucket([self.expected_failure])])
+
+ def verify_non_flaky(self):
+ print 'Verifying the failure is not flaky by running 10 times.'
+ count_failures = 0
+ for i in range(0, 10):
+ if self.test_bucket_list_fails(self.buckets):
+ count_failures += 1
+ print 'Failed %d/10 times' % count_failures
+
+ def print_progress(self):
+ count = 0
+ for bucket in self.buckets:
+ count += len(bucket.tests)
+ print '%d tests left, %d buckets' % (count, len(self.buckets))
+
+ def print_result(self):
+ tests = []
+ for bucket in self.buckets:
+ tests += bucket.tests
+ extra_args = ' --debug' if self.is_debug else ''
+ print 'run-webkit-tests%s --child-processes=1 --order=none %s' % (extra_args, " ".join(tests))
+
+ def is_done(self):
+ for bucket in self.buckets:
+ if bucket.size() > 1:
+ return False
+ return True
+
+ def split_largest_bucket(self):
+ index = 0
+ largest_index = 0
+ largest_size = 0
+ for bucket in self.buckets:
+ if bucket.size() > largest_size:
+ largest_index = index
+ largest_size = bucket.size()
+ index += 1
+
+ bucket_to_split = self.buckets[largest_index]
+ halfway_point = int(largest_size / 2)
+ first_half = Bucket(bucket_to_split.tests[:halfway_point])
+ second_half = Bucket(bucket_to_split.tests[halfway_point:])
+
+ buckets_before = self.buckets[:largest_index]
+ buckets_after = self.buckets[largest_index + 1:]
+
+ # Try the second half first; it tends to be faster because the slow http tests are front-loaded in the list.
+ new_buckets = buckets_before + [second_half] + buckets_after
+ if self.test_bucket_list_fails(new_buckets):
+ self.buckets = new_buckets
+ return
+
+ new_buckets = buckets_before + [first_half] + buckets_after
+ if self.test_bucket_list_fails(new_buckets):
+ self.buckets = new_buckets
+ return
+
+ self.buckets = buckets_before + [first_half, second_half] + buckets_after
+
+ def test_bucket_list_fails(self, buckets):
+ tests = []
+ for bucket in buckets:
+ tests += bucket.tests
+ return self.test_fails(tests)
+
+ def test_fails(self, tests):
+ extra_args = ['--debug'] if self.is_debug else []
+ path_to_run_webkit_tests = self.webkit_finder.path_from_webkit_base('Tools', 'Scripts', 'run-webkit-tests')
+ output = self.executive.popen([path_to_run_webkit_tests, '--child-processes', '1', '--order', 'none', '--no-retry', '--no-show-results', '--verbose'] + extra_args + tests, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ failure_string = self.expected_failure + ' failed'
+ if failure_string in output.stderr.read():
+ return True
+ return False
+
+
+def main(argv):
+ logging.basicConfig()
+
+ option_parser = optparse.OptionParser()
+ option_parser.add_option('--test-list', action='store', help='file that lists tests to bisect. The last test in the list is the expected failure.', metavar='FILE')
+ option_parser.add_option('--debug', action='store_true', default=False, help='whether to use a debug build')
+ options, args = option_parser.parse_args(argv)
+
+ tests = open(options.test_list).read().strip().split('\n')
+ bisector = Bisector(tests, is_debug=options.debug)
+ return bisector.bisect()
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
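+
+# Example usage (illustrative; the file name is hypothetical):
+#   python bisect_test_ordering.py --test-list=ordering.txt
+# where ordering.txt names one test per line and the last line is the test
+# expected to fail. On success the script prints a minimal run-webkit-tests
+# command line that still reproduces the failure.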
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader.py
new file mode 100644
index 0000000..0728d8a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader.py
@@ -0,0 +1,97 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+
+
+_log = logging.getLogger(__name__)
+
+
+class DumpReader(object):
+ """Base class for breakpad dump readers."""
+
+ def __init__(self, host, build_dir):
+ self._host = host
+ self._build_dir = build_dir
+
+ def check_is_functional(self):
+ """This routine must be implemented by subclasses.
+
+ Returns True if this reader is functional."""
+ raise NotImplementedError()
+
+ def crash_dumps_directory(self):
+ return self._host.filesystem.join(self._build_dir, 'crash-dumps')
+
+ def clobber_old_results(self):
+ if self._host.filesystem.isdir(self.crash_dumps_directory()):
+ self._host.filesystem.rmtree(self.crash_dumps_directory())
+
+ def look_for_new_crash_logs(self, crashed_processes, start_time):
+ if not crashed_processes:
+ return None
+
+ if not self.check_is_functional():
+ return None
+
+ pid_to_minidump = dict()
+ for root, dirs, files in self._host.filesystem.walk(self.crash_dumps_directory()):
+ for dmp in [f for f in files if f.endswith(self._file_extension())]:
+ dmp_file = self._host.filesystem.join(root, dmp)
+ if self._host.filesystem.mtime(dmp_file) < start_time:
+ continue
+ pid = self._get_pid_from_dump(dmp_file)
+ if pid:
+ pid_to_minidump[pid] = dmp_file
+
+ result = dict()
+ for test, process_name, pid in crashed_processes:
+ if str(pid) in pid_to_minidump:
+ stack = self._get_stack_from_dump(pid_to_minidump[str(pid)])
+ if stack:
+ result[test] = stack
+
+ return result
+
+ def _get_pid_from_dump(self, dump_file):
+ """This routine must be implemented by subclasses.
+
+ This routine returns the PID of the crashed process that produced the given dump_file."""
+ raise NotImplementedError()
+
+ def _get_stack_from_dump(self, dump_file):
+ """This routine must be implemented by subclasses.
+
+ Returns the stack stored in the given breakpad dump_file."""
+ raise NotImplementedError()
+
+ def _file_extension(self):
+ """This routine must be implemented by subclasses.
+
+ Returns the file extension of crash dumps written by breakpad."""
+ raise NotImplementedError()
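+
+# Minimal sketch of a concrete reader (illustrative only; the real subclasses
+# live in dump_reader_multipart.py and dump_reader_win.py):
+#
+#     class TrivialDumpReader(DumpReader):
+#         def check_is_functional(self):
+#             return True
+#
+#         def _file_extension(self):
+#             return 'dmp'
+#
+#         def _get_pid_from_dump(self, dump_file):
+#             return None  # a real reader parses the dump for the crashing PID
+#
+#         def _get_stack_from_dump(self, dump_file):
+#             return None  # a real reader runs a stack walker over the dump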
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader_multipart.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader_multipart.py
new file mode 100644
index 0000000..ab19a8a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader_multipart.py
@@ -0,0 +1,189 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import cgi
+import logging
+import threading
+import Queue
+
+from webkitpy.common.webkit_finder import WebKitFinder
+from webkitpy.layout_tests.breakpad.dump_reader import DumpReader
+
+
+_log = logging.getLogger(__name__)
+
+
+class DumpReaderMultipart(DumpReader):
+ """Base class for Linux and Android breakpad dump reader."""
+
+ def __init__(self, host, build_dir):
+ super(DumpReaderMultipart, self).__init__(host, build_dir)
+ self._webkit_finder = WebKitFinder(host.filesystem)
+ self._breakpad_tools_available = None
+ self._generated_symbols = False
+
+ def check_is_functional(self):
+ return self._check_breakpad_tools_available()
+
+ def _get_pid_from_dump(self, dump_file):
+ dump = self._read_dump(dump_file)
+ if not dump:
+ return None
+ if 'pid' in dump:
+ return dump['pid'][0]
+ return None
+
+ def _get_stack_from_dump(self, dump_file):
+ dump = self._read_dump(dump_file)
+ if not dump:
+ return None
+ if 'upload_file_minidump' not in dump:
+ return None
+
+ self._generate_breakpad_symbols_if_necessary()
+ f, temp_name = self._host.filesystem.open_binary_tempfile('dmp')
+ f.write("\r\n".join(dump['upload_file_minidump']))
+ f.close()
+
+ cmd = [self._path_to_minidump_stackwalk(), temp_name, self._symbols_dir()]
+ try:
+ stack = self._host.executive.run_command(cmd, return_stderr=False)
+ except Exception:
+ _log.warning('Failed to execute "%s"' % ' '.join(cmd))
+ stack = None
+ finally:
+ self._host.filesystem.remove(temp_name)
+ return stack
+
+ def _read_dump(self, dump_file):
+ with self._host.filesystem.open_binary_file_for_reading(dump_file) as f:
+ boundary = f.readline().strip()[2:]
+ f.seek(0)
+ try:
+ data = cgi.parse_multipart(f, {'boundary': boundary})
+ return data
+ except Exception:
+ pass
+ return None
+
+ def _check_breakpad_tools_available(self):
+ if self._breakpad_tools_available is not None:
+ return self._breakpad_tools_available
+
+ REQUIRED_BREAKPAD_TOOLS = [
+ 'dump_syms',
+ 'minidump_stackwalk',
+ ]
+ result = True
+ for binary in REQUIRED_BREAKPAD_TOOLS:
+ full_path = self._host.filesystem.join(self._build_dir, binary)
+ if not self._host.filesystem.exists(full_path):
+ result = False
+ _log.error('Unable to find %s' % binary)
+ _log.error(' at %s' % full_path)
+
+ if not result:
+ _log.error(" Could not find breakpad tools, unexpected crashes won't be symbolized")
+ _log.error(' Did you build the target blink_tests?')
+ _log.error('')
+
+ self._breakpad_tools_available = result
+ return self._breakpad_tools_available
+
+ def _path_to_minidump_stackwalk(self):
+ return self._host.filesystem.join(self._build_dir, "minidump_stackwalk")
+
+ def _path_to_generate_breakpad_symbols(self):
+ return self._webkit_finder.path_from_chromium_base("components", "crash", "tools", "generate_breakpad_symbols.py")
+
+ def _symbols_dir(self):
+ return self._host.filesystem.join(self._build_dir, 'content_shell.syms')
+
+ def _generate_breakpad_symbols_if_necessary(self):
+ if self._generated_symbols:
+ return
+ self._generated_symbols = True
+
+ _log.debug("Generating breakpad symbols")
+ queue = Queue.Queue()
+ thread = threading.Thread(target=_symbolize_keepalive, args=(queue,))
+ thread.start()
+ try:
+ for binary in self._binaries_to_symbolize():
+ _log.debug(' Symbolizing %s' % binary)
+ full_path = self._host.filesystem.join(self._build_dir, binary)
+ cmd = [
+ self._path_to_generate_breakpad_symbols(),
+ '--binary=%s' % full_path,
+ '--symbols-dir=%s' % self._symbols_dir(),
+ '--build-dir=%s' % self._build_dir,
+ ]
+ try:
+ self._host.executive.run_command(cmd)
+ except Exception:
+ _log.error('Failed to execute "%s"' % ' '.join(cmd))
+ finally:
+ queue.put(None)
+ thread.join()
+ _log.debug("Done generating breakpad symbols")
+
+ def _binaries_to_symbolize(self):
+ """This routine must be implemented by subclasses.
+
+ Returns an array of binaries that need to be symbolized."""
+ raise NotImplementedError()
+
+
+def _symbolize_keepalive(queue):
+ while True:
+ _log.debug("waiting for symbolize to complete")
+ try:
+ queue.get(block=True, timeout=60)
+ return
+ except Queue.Empty:
+ pass
+
+
+class DumpReaderLinux(DumpReaderMultipart):
+ """Linux breakpad dump reader."""
+
+ def _binaries_to_symbolize(self):
+ return ['content_shell', 'libtest_netscape_plugin.so', 'libffmpegsumo.so', 'libosmesa.so']
+
+ def _file_extension(self):
+ return 'dmp'
+
+
+class DumpReaderAndroid(DumpReaderMultipart):
+ """Android breakpad dump reader."""
+
+ def _binaries_to_symbolize(self):
+ return ['lib/libcontent_shell_content_view.so']
+
+ def _file_extension(self):
+ return 'dmp'
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader_multipart_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader_multipart_unittest.py
new file mode 100644
index 0000000..c849c91
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader_multipart_unittest.py
@@ -0,0 +1,115 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+import cgi
+
+from webkitpy.common.host import Host
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.layout_tests.breakpad.dump_reader_multipart import DumpReaderMultipart
+
+
+class TestDumpReaderMultipart(unittest.TestCase):
+ _MULTIPART_DUMP = [
+ '--boundary',
+ 'Content-Disposition: form-data; name="prod"',
+ '',
+ 'content_shell',
+ '--boundary',
+ 'Content-Disposition: form-data; name="pid"',
+ '',
+ '4711',
+ '--boundary',
+ 'Content-Disposition: form-data; name="upload_file_minidump"; filename="dump"',
+ 'Content-Type: application/octet-stream',
+ '',
+ 'MDMP',
+ '--boundary--',
+ ]
+
+ def test_check_generate_breakpad_symbols_actually_exists(self):
+ host = Host()
+ dump_reader = DumpReaderMultipart(host, build_dir=None)
+ self.assertTrue(host.filesystem.exists(dump_reader._path_to_generate_breakpad_symbols()))
+
+ def test_check_is_functional_breakpad_tools_not_found(self):
+ host = MockHost()
+
+ build_dir = "/mock-checkout/out/Debug"
+ host.filesystem.maybe_make_directory(build_dir)
+ dump_reader = DumpReaderMultipart(host, build_dir)
+ dump_reader._file_extension = lambda: 'dmp'
+ dump_reader._binaries_to_symbolize = lambda: ['content_shell']
+
+ self.assertFalse(dump_reader.check_is_functional())
+
+ def test_get_pid_from_dump(self):
+ host = MockHost()
+
+ dump_file = '/crash-dumps/dump.dmp'
+ expected_pid = '4711'
+ host.filesystem.write_text_file(dump_file, "\r\n".join(TestDumpReaderMultipart._MULTIPART_DUMP))
+ build_dir = "/mock-checkout/out/Debug"
+ host.filesystem.maybe_make_directory(build_dir)
+ host.filesystem.exists = lambda x: True
+
+ # The mock file object returned by open_binary_file_for_reading doesn't
+ # have readline(); the real file object, however, does.
+ host.filesystem.open_binary_file_for_reading = host.filesystem.open_text_file_for_reading
+ dump_reader = DumpReaderMultipart(host, build_dir)
+ dump_reader._file_extension = lambda: 'dmp'
+ dump_reader._binaries_to_symbolize = lambda: ['content_shell']
+
+ self.assertTrue(dump_reader.check_is_functional())
+ self.assertEqual(expected_pid, dump_reader._get_pid_from_dump(dump_file))
+
+ def test_get_stack_from_dump(self):
+ host = MockHost()
+
+ dump_file = '/crash-dumps/dump.dmp'
+ host.filesystem.write_text_file(dump_file, "\r\n".join(TestDumpReaderMultipart._MULTIPART_DUMP))
+ build_dir = "/mock-checkout/out/Debug"
+ host.filesystem.maybe_make_directory(build_dir)
+ host.filesystem.exists = lambda x: True
+
+ # The mock file object returned by open_binary_file_for_reading doesn't
+ # have readline(); the real file object, however, does.
+ host.filesystem.open_binary_file_for_reading = host.filesystem.open_text_file_for_reading
+ dump_reader = DumpReaderMultipart(host, build_dir)
+ dump_reader._file_extension = lambda: 'dmp'
+ dump_reader._binaries_to_symbolize = lambda: ['content_shell']
+
+ self.assertTrue(dump_reader.check_is_functional())
+ self.assertEqual("MOCK output of child process", dump_reader._get_stack_from_dump(dump_file))
+ self.assertEqual(2, len(host.executive.calls))
+ cmd_line = " ".join(host.executive.calls[0])
+ self.assertIn('generate_breakpad_symbols.py', cmd_line)
+ cmd_line = " ".join(host.executive.calls[1])
+ self.assertIn('minidump_stackwalk', cmd_line)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader_win.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader_win.py
new file mode 100644
index 0000000..1314090
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader_win.py
@@ -0,0 +1,136 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import os
+import shlex
+
+from webkitpy.layout_tests.breakpad.dump_reader import DumpReader
+
+
+_log = logging.getLogger(__name__)
+
+
+class DumpReaderWin(DumpReader):
+ """DumpReader for windows breakpad."""
+
+ def __init__(self, host, build_dir):
+ super(DumpReaderWin, self).__init__(host, build_dir)
+ self._cdb_available = None
+
+ def check_is_functional(self):
+ return self._check_cdb_available()
+
+ def _file_extension(self):
+ return 'txt'
+
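+ # The dump's companion .txt file carries crash keys as 'key:value'
+ # lines; e.g. a file containing 'pid:4711' makes this return '4711'.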
+ def _get_pid_from_dump(self, dump_file):
+ with self._host.filesystem.open_text_file_for_reading(dump_file) as f:
+ crash_keys = dict([l.split(':', 1) for l in f.read().splitlines()])
+ if 'pid' in crash_keys:
+ return crash_keys['pid']
+ return None
+
+ def _get_stack_from_dump(self, dump_file):
+ minidump = dump_file[:-3] + 'dmp'
+ cmd = [self._cdb_path, '-y', self._build_dir, '-c', '.lines;.ecxr;k30;q', '-z', minidump]
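+ # cdb flags, roughly: -y sets the symbol search path, -z names the
+ # minidump, and -c runs '.lines' (source line info), '.ecxr' (switch
+ # to the exception context), 'k30' (30-frame stack trace), 'q' (quit).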
+ try:
+ stack = self._host.executive.run_command(cmd)
+ except Exception:
+ _log.warning('Failed to execute "%s"' % ' '.join(cmd))
+ else:
+ return stack
+ return None
+
+ def _find_depot_tools_path(self):
+ """Attempt to find the depot_tools location in PATH."""
+ for path in os.environ['PATH'].split(os.pathsep):
+ if os.path.isfile(os.path.join(path, 'gclient')):
+ return path
+
+ def _check_cdb_available(self):
+ """Checks whether we can use cdb to symbolize minidumps."""
+ if self._cdb_available is not None:
+ return self._cdb_available
+
+ CDB_LOCATION_TEMPLATES = [
+ '%s\\Debugging Tools For Windows',
+ '%s\\Debugging Tools For Windows (x86)',
+ '%s\\Debugging Tools For Windows (x64)',
+ '%s\\Windows Kits\\8.0\\Debuggers\\x86',
+ '%s\\Windows Kits\\8.0\\Debuggers\\x64',
+ '%s\\Windows Kits\\8.1\\Debuggers\\x86',
+ '%s\\Windows Kits\\8.1\\Debuggers\\x64',
+ ]
+
+ program_files_directories = ['C:\\Program Files']
+ program_files = os.environ.get('ProgramFiles')
+ if program_files:
+ program_files_directories.append(program_files)
+ program_files = os.environ.get('ProgramFiles(x86)')
+ if program_files:
+ program_files_directories.append(program_files)
+
+ possible_cdb_locations = []
+ for template in CDB_LOCATION_TEMPLATES:
+ for program_files in program_files_directories:
+ possible_cdb_locations.append(template % program_files)
+
+ # GYP_DEFINES is a space-separated list of key=value pairs; parse it
+ # into a dict so 'windows_sdk_path' can be looked up by key.
+ gyp_defines = shlex.split(os.environ.get('GYP_DEFINES', ''))
+ gyp_defines = dict(d.split('=', 1) for d in gyp_defines if '=' in d)
+ if 'windows_sdk_path' in gyp_defines:
+ possible_cdb_locations.extend([
+ '%s\\Debuggers\\x86' % gyp_defines['windows_sdk_path'],
+ '%s\\Debuggers\\x64' % gyp_defines['windows_sdk_path'],
+ ])
+
+ # Look in depot_tools win_toolchain too.
+ depot_tools = self._find_depot_tools_path()
+ if depot_tools:
+ win8sdk = os.path.join(depot_tools, 'win_toolchain', 'vs2013_files', 'win8sdk')
+ possible_cdb_locations.extend([
+ '%s\\Debuggers\\x86' % win8sdk,
+ '%s\\Debuggers\\x64' % win8sdk,
+ ])
+
+ for cdb_path in possible_cdb_locations:
+ cdb = self._host.filesystem.join(cdb_path, 'cdb.exe')
+ try:
+ _ = self._host.executive.run_command([cdb, '-version'])
+ except Exception:
+ pass
+ else:
+ self._cdb_path = cdb
+ self._cdb_available = True
+ return self._cdb_available
+
+ _log.warning("CDB is not installed; can't symbolize minidumps.")
+ _log.warning('')
+ self._cdb_available = False
+ return self._cdb_available
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader_win_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader_win_unittest.py
new file mode 100644
index 0000000..a40893d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader_win_unittest.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.layout_tests.breakpad.dump_reader_win import DumpReaderWin
+
+
+class TestDumpReaderWin(unittest.TestCase):
+ def test_check_is_functional_cdb_not_found(self):
+ host = MockHost()
+ host.executive = MockExecutive(should_throw=True)
+
+ build_dir = "/mock-checkout/out/Debug"
+ host.filesystem.maybe_make_directory(build_dir)
+ dump_reader = DumpReaderWin(host, build_dir)
+
+ self.assertFalse(dump_reader.check_is_functional())
+
+ def test_get_pid_from_dump(self):
+ host = MockHost()
+
+ dump_file = '/crash-dumps/dump.txt'
+ expected_pid = '4711'
+ host.filesystem.write_text_file(dump_file, 'channel:\npid:%s\nplat:Win32\nprod:content_shell\n' % expected_pid)
+ build_dir = "/mock-checkout/out/Debug"
+ host.filesystem.maybe_make_directory(build_dir)
+ dump_reader = DumpReaderWin(host, build_dir)
+
+ self.assertTrue(dump_reader.check_is_functional())
+ self.assertEqual(expected_pid, dump_reader._get_pid_from_dump(dump_file))
+
+ def test_get_stack_from_dump(self):
+ host = MockHost()
+
+ dump_file = '/crash-dumps/dump.txt'
+ real_dump_file = '/crash-dumps/dump.dmp'
+ host.filesystem.write_text_file(dump_file, 'product:content_shell\n')
+ host.filesystem.write_binary_file(real_dump_file, 'MDMP')
+ build_dir = "/mock-checkout/out/Debug"
+ host.filesystem.maybe_make_directory(build_dir)
+ dump_reader = DumpReaderWin(host, build_dir)
+
+ self.assertTrue(dump_reader.check_is_functional())
+ host.executive.calls = []
+ self.assertEqual("MOCK output of child process", dump_reader._get_stack_from_dump(dump_file))
+ self.assertEqual(1, len(host.executive.calls))
+ cmd_line = " ".join(host.executive.calls[0])
+ self.assertIn('cdb.exe', cmd_line)
+ self.assertIn(real_dump_file, cmd_line)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py
new file mode 100644
index 0000000..391b6fa
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py
@@ -0,0 +1,171 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import errno
+import logging
+import re
+
+from webkitpy.layout_tests.models import test_expectations
+
+
+_log = logging.getLogger(__name__)
+
+
+class LayoutTestFinder(object):
+ def __init__(self, port, options):
+ self._port = port
+ self._options = options
+ self._filesystem = self._port.host.filesystem
+ self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
+
+ def find_tests(self, options, args):
+ paths = self._strip_test_dir_prefixes(args)
+ if options.test_list:
+ paths += self._strip_test_dir_prefixes(self._read_test_names_from_file(options.test_list, self._port.TEST_PATH_SEPARATOR))
+ test_files = self._port.tests(paths)
+ return (paths, test_files)
+
+ def _strip_test_dir_prefixes(self, paths):
+ return [self._strip_test_dir_prefix(path) for path in paths if path]
+
+ def _strip_test_dir_prefix(self, path):
+ # Handle both "LayoutTests/foo/bar.html" and "LayoutTests\foo\bar.html" if
+ # the filesystem uses '\\' as a directory separator.
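+ # e.g. 'LayoutTests/fast/foo.html' becomes 'fast/foo.html'.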
+ if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):
+ return path[len(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):]
+ if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):
+ return path[len(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):]
+ return path
+
+ def _read_test_names_from_file(self, filenames, test_path_separator):
+ fs = self._filesystem
+ tests = []
+ for filename in filenames:
+ try:
+ if test_path_separator != fs.sep:
+ filename = filename.replace(test_path_separator, fs.sep)
+ file_contents = fs.read_text_file(filename).split('\n')
+ for line in file_contents:
+ line = self._strip_comments(line)
+ if line:
+ tests.append(line)
+ except IOError, e:
+ if e.errno == errno.ENOENT:
+ _log.critical('')
+ _log.critical('--test-list file "%s" not found' % filename)
+ raise
+ return tests
+
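+ # For example, _strip_comments('fast/css/foo.html // flaky') returns
+ # 'fast/css/foo.html'; a blank or comment-only line returns None.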
+ @staticmethod
+ def _strip_comments(line):
+ commentIndex = line.find('//')
+ if commentIndex == -1:
+ commentIndex = len(line)
+
+ line = re.sub(r'\s+', ' ', line[:commentIndex].strip())
+ if line == '':
+ return None
+ else:
+ return line
+
+ def skip_tests(self, paths, all_tests_list, expectations, http_tests):
+ all_tests = set(all_tests_list)
+
+ tests_to_skip = expectations.get_tests_with_result_type(test_expectations.SKIP)
+ if self._options.skip_failing_tests:
+ tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FAIL))
+ tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FLAKY))
+
+ if self._options.skipped == 'only':
+ tests_to_skip = all_tests - tests_to_skip
+ elif self._options.skipped == 'ignore':
+ tests_to_skip = set()
+ elif self._options.skipped != 'always':
+ # make sure we're explicitly running any tests passed on the command line; equivalent to 'default'.
+ tests_to_skip -= set(paths)
+
+ return tests_to_skip
+
+ def split_into_chunks(self, test_names):
+ """split into a list to run and a set to skip, based on --run-chunk and --run-part."""
+ if not self._options.run_chunk and not self._options.run_part:
+ return test_names, set()
+
+ # If the user specifies they just want to run a subset of the tests,
+ # just grab a subset of the non-skipped tests.
+ chunk_value = self._options.run_chunk or self._options.run_part
+ try:
+ (chunk_num, chunk_len) = chunk_value.split(":")
+ chunk_num = int(chunk_num)
+ assert(chunk_num >= 0)
+ test_size = int(chunk_len)
+ assert(test_size > 0)
+ except AssertionError:
+ _log.critical("invalid chunk '%s'" % chunk_value)
+ return (None, None)
+
+ # Get the number of tests
+ num_tests = len(test_names)
+
+ # Get the start offset of the slice.
+ if self._options.run_chunk:
+ chunk_len = test_size
+ # In this case chunk_num can be much larger than the number of
+ # chunks; wrap the start offset around num_tests.
+ slice_start = (chunk_num * chunk_len) % num_tests
+ else:
+ # Validate the data.
+ assert(test_size <= num_tests)
+ assert(chunk_num <= test_size)
+
+ # To count the chunk_len, and make sure we don't skip
+ # some tests, we round to the next value that fits exactly
+ # all the parts.
+ rounded_tests = num_tests
+ if rounded_tests % test_size != 0:
+ rounded_tests = (num_tests + test_size - (num_tests % test_size))
+
+ chunk_len = rounded_tests / test_size
+ slice_start = chunk_len * (chunk_num - 1)
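+ # Worked example: with 10 tests and --run-part=2:3, rounded_tests is
+ # 12, chunk_len is 4, and this part runs tests[4:8].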
+ # It's fine if this runs past the end of the list; slice_end is clamped below.
+
+ # Get the end offset of the slice.
+ slice_end = min(num_tests, slice_start + chunk_len)
+
+ tests_to_run = test_names[slice_start:slice_end]
+
+ _log.debug('chunk slice [%d:%d] of %d is %d tests' % (slice_start, slice_end, num_tests, (slice_end - slice_start)))
+
+ # If we reached the end and we don't have enough tests, we run some
+ # from the beginning.
+ if slice_end - slice_start < chunk_len:
+ extra = chunk_len - (slice_end - slice_start)
+ _log.debug(' last chunk is partial, appending [0:%d]' % extra)
+ tests_to_run.extend(test_names[0:extra])
+
+ return (tests_to_run, set(test_names) - set(tests_to_run))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
new file mode 100644
index 0000000..86e1a09
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
@@ -0,0 +1,506 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import math
+import threading
+import time
+
+from webkitpy.common import message_pool
+from webkitpy.layout_tests.controllers import single_test_runner
+from webkitpy.layout_tests.models.test_run_results import TestRunResults
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models import test_results
+from webkitpy.tool import grammar
+
+
+_log = logging.getLogger(__name__)
+
+
+TestExpectations = test_expectations.TestExpectations
+
+# Export this so callers don't need to know about message pools.
+WorkerException = message_pool.WorkerException
+
+
+class TestRunInterruptedException(Exception):
+ """Raised when a test run should be stopped immediately."""
+ def __init__(self, reason):
+ Exception.__init__(self)
+ self.reason = reason
+ self.msg = reason
+
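+ # Defining __reduce__ lets the exception be pickled and re-raised
+ # across the worker message pool's process boundary.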
+ def __reduce__(self):
+ return self.__class__, (self.reason,)
+
+
+class LayoutTestRunner(object):
+ def __init__(self, options, port, printer, results_directory, test_is_slow_fn):
+ self._options = options
+ self._port = port
+ self._printer = printer
+ self._results_directory = results_directory
+ self._test_is_slow = test_is_slow_fn
+ self._sharder = Sharder(self._port.split_test, self._options.max_locked_shards)
+ self._filesystem = self._port.host.filesystem
+
+ self._expectations = None
+ self._test_inputs = []
+ self._retrying = False
+
+ self._current_run_results = None
+
+ def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers, retrying):
+ self._expectations = expectations
+ self._test_inputs = test_inputs
+ self._retrying = retrying
+ self._shards_to_redo = []
+
+ # FIXME: rename all variables to test_run_results or some such ...
+ run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
+ self._current_run_results = run_results
+ self._printer.num_tests = len(test_inputs)
+ self._printer.num_completed = 0
+
+ if not retrying:
+ self._printer.print_expected(run_results, self._expectations.get_tests_with_result_type)
+
+ for test_name in set(tests_to_skip):
+ result = test_results.TestResult(test_name)
+ result.type = test_expectations.SKIP
+ run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))
+
+ self._printer.write_update('Sharding tests ...')
+ locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs,
+ int(self._options.child_processes), self._options.fully_parallel,
+ self._options.run_singly or (self._options.batch_size == 1))
+
+ # We don't have a good way to coordinate the workers so that they don't
+ # try to run the shards that need a lock. The easiest solution is to
+ # run all of the locked shards first.
+ all_shards = locked_shards + unlocked_shards
+ num_workers = min(num_workers, len(all_shards))
+ self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))
+
+ if self._options.dry_run:
+ return run_results
+
+ self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))
+
+ start_time = time.time()
+ try:
+ with message_pool.get(self, self._worker_factory, num_workers, self._port.host) as pool:
+ pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)
+
+ if self._shards_to_redo:
+ num_workers -= len(self._shards_to_redo)
+ if num_workers > 0:
+ with message_pool.get(self, self._worker_factory, num_workers, self._port.host) as pool:
+ pool.run(('test_list', shard.name, shard.test_inputs) for shard in self._shards_to_redo)
+ except TestRunInterruptedException, e:
+ _log.warning(e.reason)
+ run_results.interrupted = True
+ except KeyboardInterrupt:
+ self._printer.flush()
+ self._printer.writeln('Interrupted, exiting ...')
+ run_results.keyboard_interrupted = True
+ except Exception, e:
+ _log.debug('%s("%s") raised, exiting' % (e.__class__.__name__, str(e)))
+ raise
+ finally:
+ run_results.run_time = time.time() - start_time
+
+ return run_results
+
+ def _worker_factory(self, worker_connection):
+ results_directory = self._results_directory
+ if self._retrying:
+ self._filesystem.maybe_make_directory(self._filesystem.join(self._results_directory, 'retries'))
+ results_directory = self._filesystem.join(self._results_directory, 'retries')
+ return Worker(worker_connection, results_directory, self._options)
+
+ def _mark_interrupted_tests_as_skipped(self, run_results):
+ for test_input in self._test_inputs:
+ if test_input.test_name not in run_results.results_by_name:
+ result = test_results.TestResult(test_input.test_name, [test_failures.FailureEarlyExit()])
+ # FIXME: We probably need to loop here if there are multiple iterations.
+ # FIXME: Also, these results are really neither expected nor unexpected. We probably
+ # need a third type of result.
+ run_results.add(result, expected=False, test_is_slow=self._test_is_slow(test_input.test_name))
+
+ def _interrupt_if_at_failure_limits(self, run_results):
+ # Note: The messages in this method are constructed to match old-run-webkit-tests
+ # so that existing buildbot grep rules work.
+ def interrupt_if_at_failure_limit(limit, failure_count, run_results, message):
+ if limit and failure_count >= limit:
+ message += " %d tests run." % (run_results.expected + run_results.unexpected)
+ self._mark_interrupted_tests_as_skipped(run_results)
+ raise TestRunInterruptedException(message)
+
+ interrupt_if_at_failure_limit(
+ self._options.exit_after_n_failures,
+ run_results.unexpected_failures,
+ run_results,
+ "Exiting early after %d failures." % run_results.unexpected_failures)
+ interrupt_if_at_failure_limit(
+ self._options.exit_after_n_crashes_or_timeouts,
+ run_results.unexpected_crashes + run_results.unexpected_timeouts,
+ run_results,
+ # This differs from ORWT because it does not include WebProcess crashes.
+ "Exiting early after %d crashes and %d timeouts." % (run_results.unexpected_crashes, run_results.unexpected_timeouts))
+
+ def _update_summary_with_result(self, run_results, result):
+ expected = self._expectations.matches_an_expected_result(result.test_name, result.type, self._options.pixel_tests or result.reftest_type, self._options.enable_sanitizer)
+ exp_str = self._expectations.get_expectations_string(result.test_name)
+ got_str = self._expectations.expectation_to_string(result.type)
+
+ if result.device_failed:
+ self._printer.print_finished_test(result, False, exp_str, "Aborted")
+ return
+
+ run_results.add(result, expected, self._test_is_slow(result.test_name))
+ self._printer.print_finished_test(result, expected, exp_str, got_str)
+ self._interrupt_if_at_failure_limits(run_results)
+
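+ # Messages posted by workers ('started_test', 'finished_test',
+ # 'finished_test_list', 'device_failed') are dispatched to the
+ # matching _handle_* method below.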
+ def handle(self, name, source, *args):
+ method = getattr(self, '_handle_' + name, None)
+ if method:
+ return method(source, *args)
+ raise AssertionError('unknown message %s received from %s, args=%s' % (name, source, repr(args)))
+
+ def _handle_started_test(self, worker_name, test_input, test_timeout_sec):
+ self._printer.print_started_test(test_input.test_name)
+
+ def _handle_finished_test_list(self, worker_name, list_name):
+ pass
+
+ def _handle_finished_test(self, worker_name, result, log_messages=None):
+ self._update_summary_with_result(self._current_run_results, result)
+
+ def _handle_device_failed(self, worker_name, list_name, remaining_tests):
+ _log.warning("%s has failed" % worker_name)
+ if remaining_tests:
+ self._shards_to_redo.append(TestShard(list_name, remaining_tests))
+
+
+class Worker(object):
+ def __init__(self, caller, results_directory, options):
+ self._caller = caller
+ self._worker_number = caller.worker_number
+ self._name = caller.name
+ self._results_directory = results_directory
+ self._options = options
+
+ # The remaining fields are initialized in start()
+ self._host = None
+ self._port = None
+ self._batch_size = None
+ self._batch_count = None
+ self._filesystem = None
+ self._driver = None
+ self._num_tests = 0
+
+ def __del__(self):
+ self.stop()
+
+ def start(self):
+ """This method is called when the object is starting to be used and it is safe
+ for the object to create state that does not need to be pickled (usually this means
+ it is called in a child process)."""
+ self._host = self._caller.host
+ self._filesystem = self._host.filesystem
+ self._port = self._host.port_factory.get(self._options.platform, self._options)
+
+ self._batch_count = 0
+ self._batch_size = self._options.batch_size or 0
+
+ def handle(self, name, source, test_list_name, test_inputs):
+ assert name == 'test_list'
+ for i, test_input in enumerate(test_inputs):
+ device_failed = self._run_test(test_input, test_list_name)
+ if device_failed:
+ self._caller.post('device_failed', test_list_name, test_inputs[i:])
+ self._caller.stop_running()
+ return
+
+ self._caller.post('finished_test_list', test_list_name)
+
+ def _update_test_input(self, test_input):
+ if test_input.reference_files is None:
+ # Lazy initialization.
+ test_input.reference_files = self._port.reference_files(test_input.test_name)
+ if test_input.reference_files:
+ test_input.should_run_pixel_test = True
+ else:
+ test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input)
+
+ def _run_test(self, test_input, shard_name):
+ self._batch_count += 1
+
+ stop_when_done = False
+ if self._batch_size > 0 and self._batch_count >= self._batch_size:
+ self._batch_count = 0
+ stop_when_done = True
+
+ self._update_test_input(test_input)
+ test_timeout_sec = self._timeout(test_input)
+ start = time.time()
+ device_failed = False
+
+ if self._driver and self._driver.has_crashed():
+ self._kill_driver()
+ if not self._driver:
+ self._driver = self._port.create_driver(self._worker_number)
+
+ if not self._driver:
+ # FIXME: Is this the best way to handle a device crashing in the middle of the test, or should we create
+ # a new failure type?
+ device_failed = True
+ return device_failed
+
+ self._caller.post('started_test', test_input, test_timeout_sec)
+ result = single_test_runner.run_single_test(self._port, self._options, self._results_directory,
+ self._name, self._driver, test_input, stop_when_done)
+
+ result.shard_name = shard_name
+ result.worker_name = self._name
+ result.total_run_time = time.time() - start
+ result.test_number = self._num_tests
+ self._num_tests += 1
+ self._caller.post('finished_test', result)
+ self._clean_up_after_test(test_input, result)
+ return result.device_failed
+
+ def stop(self):
+ _log.debug("%s cleaning up" % self._name)
+ self._kill_driver()
+
+ def _timeout(self, test_input):
+ """Compute the appropriate timeout value for a test."""
+ # The driver watchdog uses 2.5x the timeout; we want to be
+ # larger than that. We also add a little more padding if we're
+ # running tests in a separate thread.
+ #
+ # Note that we need to convert the test timeout from a
+ # string value in milliseconds to a float for Python.
+
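+ # For illustration: a 6000 ms test timeout yields 3.0 * 6.0 = 18.0
+ # seconds for the driver, comfortably above the 2.5x watchdog.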
+ # FIXME: Can we just return the test_input.timeout now?
+ driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0
+ return driver_timeout_sec
+
+ def _kill_driver(self):
+ # Be careful about how and when we kill the driver; if driver.stop()
+ # raises an exception, this routine may get re-entered via __del__.
+ driver = self._driver
+ self._driver = None
+ if driver:
+ _log.debug("%s killing driver" % self._name)
+ driver.stop()
+
+ def _clean_up_after_test(self, test_input, result):
+ test_name = test_input.test_name
+
+ if result.failures:
+ # Check and kill the driver if we need to.
+ if any([f.driver_needs_restart() for f in result.failures]):
+ self._kill_driver()
+ # Reset the batch count since the shell just bounced.
+ self._batch_count = 0
+
+ # Print the error message(s).
+ _log.debug("%s %s failed:" % (self._name, test_name))
+ for f in result.failures:
+ _log.debug("%s %s" % (self._name, f.message()))
+ elif result.type == test_expectations.SKIP:
+ _log.debug("%s %s skipped" % (self._name, test_name))
+ else:
+ _log.debug("%s %s passed" % (self._name, test_name))
+
+
+class TestShard(object):
+ """A test shard is a named list of TestInputs."""
+
+ def __init__(self, name, test_inputs):
+ self.name = name
+ self.test_inputs = test_inputs
+ self.requires_lock = test_inputs[0].requires_lock
+
+ def __repr__(self):
+ return "TestShard(name='%s', test_inputs=%s, requires_lock=%s'" % (self.name, self.test_inputs, self.requires_lock)
+
+ def __eq__(self, other):
+ return self.name == other.name and self.test_inputs == other.test_inputs
+
+
+class Sharder(object):
+ def __init__(self, test_split_fn, max_locked_shards):
+ self._split = test_split_fn
+ self._max_locked_shards = max_locked_shards
+
+ def shard_tests(self, test_inputs, num_workers, fully_parallel, run_singly):
+ """Groups tests into batches.
+ This helps ensure that tests that depend on each other (aka bad tests!)
+ continue to run together as most cross-tests dependencies tend to
+ occur within the same directory.
+ Return:
+ Two list of TestShards. The first contains tests that must only be
+ run under the server lock, the second can be run whenever.
+ """
+
+ # FIXME: Move all of the sharding logic out of manager into its
+ # own class or module. Consider grouping it with the chunking logic
+ # in prepare_lists as well.
+ if num_workers == 1:
+ return self._shard_in_two(test_inputs)
+ elif fully_parallel:
+ return self._shard_every_file(test_inputs, run_singly)
+ return self._shard_by_directory(test_inputs)
+
+ def _shard_in_two(self, test_inputs):
+ """Returns two lists of shards, one with all the tests requiring a lock and one with the rest.
+
+ This is used when there's only one worker, to minimize the per-shard overhead."""
+ locked_inputs = []
+ unlocked_inputs = []
+ for test_input in test_inputs:
+ if test_input.requires_lock:
+ locked_inputs.append(test_input)
+ else:
+ unlocked_inputs.append(test_input)
+
+ locked_shards = []
+ unlocked_shards = []
+ if locked_inputs:
+ locked_shards = [TestShard('locked_tests', locked_inputs)]
+ if unlocked_inputs:
+ unlocked_shards = [TestShard('unlocked_tests', unlocked_inputs)]
+
+ return locked_shards, unlocked_shards
+
+ def _shard_every_file(self, test_inputs, run_singly):
+ """Returns two lists of shards, each shard containing a single test file.
+
+ This mode gets maximal parallelism at the cost of much higher flakiness."""
+ locked_shards = []
+ unlocked_shards = []
+ virtual_inputs = []
+
+ for test_input in test_inputs:
+ # Note that we use a '.' for the shard name; the name doesn't really
+ # matter, and the only other meaningful value would be the filename,
+ # which would be really redundant.
+ if test_input.requires_lock:
+ locked_shards.append(TestShard('.', [test_input]))
+ elif test_input.test_name.startswith('virtual') and not run_singly:
+ # This violates the spirit of sharding every file, but in practice, since the
+ # virtual test suites require a different commandline flag and thus a restart
+ # of content_shell, it's too slow to shard them fully.
+ virtual_inputs.append(test_input)
+ else:
+ unlocked_shards.append(TestShard('.', [test_input]))
+
+ locked_virtual_shards, unlocked_virtual_shards = self._shard_by_directory(virtual_inputs)
+
+ # The locked shards still need to be limited to self._max_locked_shards in order to not
+ # overload the http server for the http tests.
+ return (self._resize_shards(locked_virtual_shards + locked_shards, self._max_locked_shards, 'locked_shard'),
+ unlocked_virtual_shards + unlocked_shards)
+
+ def _shard_by_directory(self, test_inputs):
+ """Returns two lists of shards, each shard containing all the files in a directory.
+
+ This is the default mode, and gets as much parallelism as we can while
+ minimizing flakiness caused by inter-test dependencies."""
+ locked_shards = []
+ unlocked_shards = []
+ unlocked_slow_shards = []
+ tests_by_dir = {}
+ # FIXME: Given that the tests are already sorted by directory,
+ # we can probably rewrite this to be clearer and faster.
+ for test_input in test_inputs:
+ directory = self._split(test_input.test_name)[0]
+ tests_by_dir.setdefault(directory, [])
+ tests_by_dir[directory].append(test_input)
+
+ for directory, test_inputs in tests_by_dir.iteritems():
+ shard = TestShard(directory, test_inputs)
+ if test_inputs[0].requires_lock:
+ locked_shards.append(shard)
+ # In practice, virtual test suites are slow to run. It's a bit hacky, but
+ # put them first since they're the long-tail of test runtime.
+ elif directory.startswith('virtual'):
+ unlocked_slow_shards.append(shard)
+ else:
+ unlocked_shards.append(shard)
+
+ # Sort the shards by directory name.
+ locked_shards.sort(key=lambda shard: shard.name)
+ unlocked_slow_shards.sort(key=lambda shard: shard.name)
+ unlocked_shards.sort(key=lambda shard: shard.name)
+
+ # Put a ceiling on the number of locked shards, so that we
+ # don't hammer the servers too badly.
+
+ # FIXME: For now, limit to one shard or set it
+ # with the --max-locked-shards. After testing to make sure we
+ # can handle multiple shards, we should probably do something like
+ # limit this to no more than a quarter of all workers, e.g.:
+ # return max(math.ceil(num_workers / 4.0), 1)
+ return (self._resize_shards(locked_shards, self._max_locked_shards, 'locked_shard'),
+ unlocked_slow_shards + unlocked_shards)
+
+ def _resize_shards(self, old_shards, max_new_shards, shard_name_prefix):
+ """Takes a list of shards and redistributes the tests into no more
+ than |max_new_shards| new shards."""
+
+ # This implementation assumes that each input shard only contains tests from a
+ # single directory, and that tests in each shard must remain together; as a
+ # result, a given input shard is never split between output shards.
+ #
+ # Each output shard contains the tests from one or more input shards and
+ # hence may contain tests from multiple directories.
+
+ def divide_and_round_up(numerator, divisor):
+ return int(math.ceil(float(numerator) / divisor))
+
+ def extract_and_flatten(shards):
+ test_inputs = []
+ for shard in shards:
+ test_inputs.extend(shard.test_inputs)
+ return test_inputs
+
+ def split_at(seq, index):
+ return (seq[:index], seq[index:])
+
+ num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards)
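+ # e.g. five single-directory shards resized with max_new_shards=2 give
+ # num_old_per_new=3, i.e. new shards of 3 and 2 old shards each.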
+ new_shards = []
+ remaining_shards = old_shards
+ while remaining_shards:
+ some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new)
+ new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1), extract_and_flatten(some_shards)))
+ return new_shards
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py
new file mode 100644
index 0000000..23151b3
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py
@@ -0,0 +1,299 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.layout_tests import run_webkit_tests
+from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner, Sharder, TestRunInterruptedException
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models.test_run_results import TestRunResults
+from webkitpy.layout_tests.models.test_input import TestInput
+from webkitpy.layout_tests.models.test_results import TestResult
+from webkitpy.layout_tests.port.test import TestPort
+
+
+TestExpectations = test_expectations.TestExpectations
+
+
+class FakePrinter(object):
+ num_completed = 0
+ num_tests = 0
+
+ def print_expected(self, run_results, get_tests_with_result_type):
+ pass
+
+ def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
+ pass
+
+ def print_started_test(self, test_name):
+ pass
+
+ def print_finished_test(self, result, expected, exp_str, got_str):
+ pass
+
+ def write(self, msg):
+ pass
+
+ def write_update(self, msg):
+ pass
+
+ def flush(self):
+ pass
+
+
+class LockCheckingRunner(LayoutTestRunner):
+ def __init__(self, port, options, printer, tester, http_lock):
+ super(LockCheckingRunner, self).__init__(options, port, printer, port.results_directory(), lambda test_name: False)
+ self._finished_list_called = False
+ self._tester = tester
+ self._should_have_http_lock = http_lock
+
+ def handle_finished_list(self, source, list_name, num_tests, elapsed_time):
+ if not self._finished_list_called:
+ self._tester.assertEqual(list_name, 'locked_tests')
+ self._tester.assertTrue(self._remaining_locked_shards)
+ self._tester.assertTrue(self._has_http_lock is self._should_have_http_lock)
+
+ super(LockCheckingRunner, self).handle_finished_list(source, list_name, num_tests, elapsed_time)
+
+ if not self._finished_list_called:
+ self._tester.assertEqual(self._remaining_locked_shards, [])
+ self._tester.assertFalse(self._has_http_lock)
+ self._finished_list_called = True
+
+
+class LayoutTestRunnerTests(unittest.TestCase):
+ def _runner(self, port=None):
+ # FIXME: we shouldn't have to use run_webkit_tests.py to get the options we need.
+ options = run_webkit_tests.parse_args(['--platform', 'test-mac-snowleopard'])[0]
+ options.child_processes = '1'
+
+ host = MockHost()
+ port = port or host.port_factory.get(options.platform, options=options)
+ return LockCheckingRunner(port, options, FakePrinter(), self, True)
+
+ def _run_tests(self, runner, tests):
+ test_inputs = [TestInput(test, 6000) for test in tests]
+ expectations = TestExpectations(runner._port, tests)
+ runner.run_tests(expectations, test_inputs, set(), num_workers=1, retrying=False)
+
+ def test_interrupt_if_at_failure_limits(self):
+ runner = self._runner()
+ runner._options.exit_after_n_failures = None
+ runner._options.exit_after_n_crashes_or_timeouts = None
+ test_names = ['passes/text.html', 'passes/image.html']
+ runner._test_inputs = [TestInput(test_name, 6000) for test_name in test_names]
+
+ run_results = TestRunResults(TestExpectations(runner._port, test_names), len(test_names))
+ run_results.unexpected_failures = 100
+ run_results.unexpected_crashes = 50
+ run_results.unexpected_timeouts = 50
+ # No exception when the exit_after* options are None.
+ runner._interrupt_if_at_failure_limits(run_results)
+
+ # No exception when we haven't hit the limit yet.
+ runner._options.exit_after_n_failures = 101
+ runner._options.exit_after_n_crashes_or_timeouts = 101
+ runner._interrupt_if_at_failure_limits(run_results)
+
+ # Interrupt if we've exceeded either limit:
+ runner._options.exit_after_n_crashes_or_timeouts = 10
+ self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, run_results)
+ self.assertEqual(run_results.results_by_name['passes/text.html'].type, test_expectations.SKIP)
+ self.assertEqual(run_results.results_by_name['passes/image.html'].type, test_expectations.SKIP)
+
+ runner._options.exit_after_n_crashes_or_timeouts = None
+ runner._options.exit_after_n_failures = 10
+ self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, run_results)
+
+ def test_update_summary_with_result(self):
+ # Reftests expected to be image mismatch should be respected when pixel_tests=False.
+ runner = self._runner()
+ runner._options.pixel_tests = False
+ test = 'failures/expected/reftest.html'
+ expectations = TestExpectations(runner._port, tests=[test])
+ runner._expectations = expectations
+
+ run_results = TestRunResults(expectations, 1)
+ result = TestResult(test_name=test, failures=[test_failures.FailureReftestMismatchDidNotOccur()], reftest_type=['!='])
+ runner._update_summary_with_result(run_results, result)
+ self.assertEqual(1, run_results.expected)
+ self.assertEqual(0, run_results.unexpected)
+
+ run_results = TestRunResults(expectations, 1)
+ result = TestResult(test_name=test, failures=[], reftest_type=['=='])
+ runner._update_summary_with_result(run_results, result)
+ self.assertEqual(0, run_results.expected)
+ self.assertEqual(1, run_results.unexpected)
+
+
+class SharderTests(unittest.TestCase):
+
+ test_list = [
+ "http/tests/websocket/tests/unicode.htm",
+ "animations/keyframes.html",
+ "http/tests/security/view-source-no-refresh.html",
+ "http/tests/websocket/tests/websocket-protocol-ignored.html",
+ "fast/css/display-none-inline-style-change-crash.html",
+ "http/tests/xmlhttprequest/supported-xml-content-types.html",
+ "dom/html/level2/html/HTMLAnchorElement03.html",
+ "ietestcenter/Javascript/11.1.5_4-4-c-1.html",
+ "dom/html/level2/html/HTMLAnchorElement06.html",
+ "perf/object-keys.html",
+ "virtual/threaded/dir/test.html",
+ "virtual/threaded/fast/foo/test.html",
+ ]
+
+ def get_test_input(self, test_file):
+ return TestInput(test_file, requires_lock=(test_file.startswith('http') or test_file.startswith('perf')))
+
+ def get_shards(self, num_workers, fully_parallel, run_singly, test_list=None, max_locked_shards=1):
+ port = TestPort(MockSystemHost())
+ self.sharder = Sharder(port.split_test, max_locked_shards)
+ test_list = test_list or self.test_list
+ return self.sharder.shard_tests([self.get_test_input(test) for test in test_list],
+ num_workers, fully_parallel, run_singly)
+
+ def assert_shards(self, actual_shards, expected_shard_names):
+ self.assertEqual(len(actual_shards), len(expected_shard_names))
+ for i, shard in enumerate(actual_shards):
+ expected_shard_name, expected_test_names = expected_shard_names[i]
+ self.assertEqual(shard.name, expected_shard_name)
+ self.assertEqual([test_input.test_name for test_input in shard.test_inputs],
+ expected_test_names)
+
+ def test_shard_by_dir(self):
+ locked, unlocked = self.get_shards(num_workers=2, fully_parallel=False, run_singly=False)
+
+ # Note that although there are tests in multiple dirs that need locks,
+ # they are crammed into a single shard in order to reduce the # of
+ # workers hitting the server at once.
+ self.assert_shards(locked,
+ [('locked_shard_1',
+ ['http/tests/security/view-source-no-refresh.html',
+ 'http/tests/websocket/tests/unicode.htm',
+ 'http/tests/websocket/tests/websocket-protocol-ignored.html',
+ 'http/tests/xmlhttprequest/supported-xml-content-types.html',
+ 'perf/object-keys.html'])])
+ self.assert_shards(unlocked,
+ [('virtual/threaded/dir', ['virtual/threaded/dir/test.html']),
+ ('virtual/threaded/fast/foo', ['virtual/threaded/fast/foo/test.html']),
+ ('animations', ['animations/keyframes.html']),
+ ('dom/html/level2/html', ['dom/html/level2/html/HTMLAnchorElement03.html',
+ 'dom/html/level2/html/HTMLAnchorElement06.html']),
+ ('fast/css', ['fast/css/display-none-inline-style-change-crash.html']),
+ ('ietestcenter/Javascript', ['ietestcenter/Javascript/11.1.5_4-4-c-1.html'])])
+
+ def test_shard_every_file(self):
+ locked, unlocked = self.get_shards(num_workers=2, fully_parallel=True, max_locked_shards=2, run_singly=False)
+ self.assert_shards(locked,
+ [('locked_shard_1',
+ ['http/tests/websocket/tests/unicode.htm',
+ 'http/tests/security/view-source-no-refresh.html',
+ 'http/tests/websocket/tests/websocket-protocol-ignored.html']),
+ ('locked_shard_2',
+ ['http/tests/xmlhttprequest/supported-xml-content-types.html',
+ 'perf/object-keys.html'])])
+ self.assert_shards(unlocked,
+ [('virtual/threaded/dir', ['virtual/threaded/dir/test.html']),
+ ('virtual/threaded/fast/foo', ['virtual/threaded/fast/foo/test.html']),
+ ('.', ['animations/keyframes.html']),
+ ('.', ['fast/css/display-none-inline-style-change-crash.html']),
+ ('.', ['dom/html/level2/html/HTMLAnchorElement03.html']),
+ ('.', ['ietestcenter/Javascript/11.1.5_4-4-c-1.html']),
+ ('.', ['dom/html/level2/html/HTMLAnchorElement06.html'])])
+
+ def test_shard_in_two(self):
+ locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False, run_singly=False)
+ self.assert_shards(locked,
+ [('locked_tests',
+ ['http/tests/websocket/tests/unicode.htm',
+ 'http/tests/security/view-source-no-refresh.html',
+ 'http/tests/websocket/tests/websocket-protocol-ignored.html',
+ 'http/tests/xmlhttprequest/supported-xml-content-types.html',
+ 'perf/object-keys.html'])])
+ self.assert_shards(unlocked,
+ [('unlocked_tests',
+ ['animations/keyframes.html',
+ 'fast/css/display-none-inline-style-change-crash.html',
+ 'dom/html/level2/html/HTMLAnchorElement03.html',
+ 'ietestcenter/Javascript/11.1.5_4-4-c-1.html',
+ 'dom/html/level2/html/HTMLAnchorElement06.html',
+ 'virtual/threaded/dir/test.html',
+ 'virtual/threaded/fast/foo/test.html'])])
+
+ def test_shard_in_two_has_no_locked_shards(self):
+ locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False, run_singly=False,
+ test_list=['animations/keyframe.html'])
+ self.assertEqual(len(locked), 0)
+ self.assertEqual(len(unlocked), 1)
+
+ def test_shard_in_two_has_no_unlocked_shards(self):
+ locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False, run_singly=False,
+ test_list=['http/tests/websocket/tests/unicode.htm'])
+ self.assertEqual(len(locked), 1)
+ self.assertEqual(len(unlocked), 0)
+
+ def test_multiple_locked_shards(self):
+ locked, unlocked = self.get_shards(num_workers=4, fully_parallel=False, max_locked_shards=2, run_singly=False)
+ self.assert_shards(locked,
+ [('locked_shard_1',
+ ['http/tests/security/view-source-no-refresh.html',
+ 'http/tests/websocket/tests/unicode.htm',
+ 'http/tests/websocket/tests/websocket-protocol-ignored.html']),
+ ('locked_shard_2',
+ ['http/tests/xmlhttprequest/supported-xml-content-types.html',
+ 'perf/object-keys.html'])])
+
+ locked, unlocked = self.get_shards(num_workers=4, fully_parallel=False, run_singly=False)
+ self.assert_shards(locked,
+ [('locked_shard_1',
+ ['http/tests/security/view-source-no-refresh.html',
+ 'http/tests/websocket/tests/unicode.htm',
+ 'http/tests/websocket/tests/websocket-protocol-ignored.html',
+ 'http/tests/xmlhttprequest/supported-xml-content-types.html',
+ 'perf/object-keys.html'])])
+
+ def test_virtual_shards(self):
+ # With run_singly=False, we try to keep all of the tests in a virtual suite together even
+ # when fully_parallel=True, so that we don't restart every time the command line args change.
+ locked, unlocked = self.get_shards(num_workers=2, fully_parallel=True, max_locked_shards=2, run_singly=False,
+ test_list=['virtual/foo/bar1.html', 'virtual/foo/bar2.html'])
+ self.assert_shards(unlocked,
+ [('virtual/foo', ['virtual/foo/bar1.html', 'virtual/foo/bar2.html'])])
+
+ # But, with run_singly=True, we have to restart every time anyway, so we want full parallelism.
+ locked, unlocked = self.get_shards(num_workers=2, fully_parallel=True, max_locked_shards=2, run_singly=True,
+ test_list=['virtual/foo/bar1.html', 'virtual/foo/bar2.html'])
+ self.assert_shards(unlocked,
+ [('.', ['virtual/foo/bar1.html']),
+ ('.', ['virtual/foo/bar2.html'])])
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
new file mode 100644
index 0000000..c758475
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -0,0 +1,501 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+The Manager runs a series of tests (TestType interface) against a set
+of test files. If a test file fails a TestType, it returns a list of TestFailure
+objects to the Manager. The Manager then aggregates the TestFailures to
+create a final report.
+"""
+
+import datetime
+import json
+import logging
+import random
+import sys
+import time
+
+from webkitpy.common.net.file_uploader import FileUploader
+from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
+from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
+from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
+from webkitpy.layout_tests.layout_package import json_results_generator
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models import test_run_results
+from webkitpy.layout_tests.models.test_input import TestInput
+
+_log = logging.getLogger(__name__)
+
+# Builder base URL where we have the archived test results.
+BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
+
+TestExpectations = test_expectations.TestExpectations
+
+
+class Manager(object):
+ """A class for managing running a series of tests on a series of layout
+ test files."""
+
+ def __init__(self, port, options, printer):
+ """Initialize test runner data structures.
+
+ Args:
+ port: an object implementing port-specific functionality
+ options: a dictionary of command line options
+ printer: a Printer object to record updates to.
+ """
+ self._port = port
+ self._filesystem = port.host.filesystem
+ self._options = options
+ self._printer = printer
+ self._expectations = None
+
+ self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
+ self.PERF_SUBDIR = 'perf'
+ self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
+ self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
+ self.ARCHIVED_RESULTS_LIMIT = 25
+ self._http_server_started = False
+ self._websockets_server_started = False
+
+ self._results_directory = self._port.results_directory()
+ self._finder = LayoutTestFinder(self._port, self._options)
+ self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
+
+ def _collect_tests(self, args):
+ return self._finder.find_tests(self._options, args)
+
+ def _is_http_test(self, test):
+ return self.HTTP_SUBDIR in test or self._is_websocket_test(test)
+
+ def _is_websocket_test(self, test):
+ return self.WEBSOCKET_SUBDIR in test
+
+ def _http_tests(self, test_names):
+ return set(test for test in test_names if self._is_http_test(test))
+
+ def _is_perf_test(self, test):
+ return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test
+
+ def _prepare_lists(self, paths, test_names):
+ tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
+ tests_to_run = [test for test in test_names if test not in tests_to_skip]
+
+ if not tests_to_run:
+ return tests_to_run, tests_to_skip
+
+ # Create a sorted list of test files so the subset chunk,
+ # if used, contains alphabetically consecutive tests.
+ if self._options.order == 'natural':
+ tests_to_run.sort(key=self._port.test_key)
+ elif self._options.order == 'random':
+ random.shuffle(tests_to_run)
+ elif self._options.order == 'random-seeded':
+ rnd = random.Random()
+ rnd.seed(4) # http://xkcd.com/221/
+ rnd.shuffle(tests_to_run)
+
+ tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
+ self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
+ tests_to_skip.update(tests_in_other_chunks)
+
+ return tests_to_run, tests_to_skip
+
+ def _test_input_for_file(self, test_file):
+ return TestInput(test_file,
+ self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
+ self._test_requires_lock(test_file),
+ should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file)))
+
+ def _test_requires_lock(self, test_file):
+ """Return True if the test needs to be locked when
+ running multiple copies of NRWTs. Perf tests are locked
+ because heavy load caused by running other tests in parallel
+ might cause some of them to timeout."""
+ return self._is_http_test(test_file) or self._is_perf_test(test_file)
+
+ def _test_is_expected_missing(self, test_file):
+ expectations = self._expectations.model().get_expectations(test_file)
+ return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations
+
+ def _test_is_slow(self, test_file):
+ return test_expectations.SLOW in self._expectations.model().get_expectations(test_file)
+
+ def needs_servers(self, test_names):
+ return any(self._test_requires_lock(test_name) for test_name in test_names)
+
+ def _rename_results_folder(self):
+ try:
+ timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html"))))
+ except (IOError, OSError), e:
+ # results.html may not have been generated in the previous run if
+ # the run was interrupted before testing even started. In that case,
+ # don't archive the folder; simply overwrite its contents with the
+ # new results.
+ import errno
+ if e.errno == errno.EEXIST or e.errno == errno.ENOENT:
+ _log.warning("No results.html file found in previous run, skipping it.")
+ return None
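+ # The archived folder name appends the mtime of results.html, e.g.
+ # (illustrative timestamp) "layout-test-results_2016-07-27-12-00-00";
+ # see test_rename_results_folder in manager_unittest.py.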
+ archived_name = ''.join((self._filesystem.basename(self._results_directory), "_", timestamp))
+ archived_path = self._filesystem.join(self._filesystem.dirname(self._results_directory), archived_name)
+ self._filesystem.move(self._results_directory, archived_path)
+
+ def _delete_dirs(self, dir_list):
+ for dir in dir_list:
+ self._filesystem.rmtree(dir)
+
+ def _limit_archived_results_count(self):
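+ # Keeps only the newest ARCHIVED_RESULTS_LIMIT (25) archived result
+ # directories. For example, with 30 archives the 5 oldest are deleted
+ # (this is what test_limit_archived_results_count below asserts).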
+ results_directory_path = self._filesystem.dirname(self._results_directory)
+ file_list = self._filesystem.listdir(results_directory_path)
+ results_directories = []
+ for dir in file_list:
+ file_path = self._filesystem.join(results_directory_path, dir)
+ if self._filesystem.isdir(file_path) and self._results_directory in file_path:
+ results_directories.append(file_path)
+ results_directories.sort(key=lambda x: self._filesystem.mtime(x))
+ self._printer.write_update("Clobbering excess archived results in %s" % results_directory_path)
+ self._delete_dirs(results_directories[:-self.ARCHIVED_RESULTS_LIMIT])
+
+ def _set_up_run(self, test_names):
+ self._printer.write_update("Checking build ...")
+ if self._options.build:
+ exit_code = self._port.check_build(self.needs_servers(test_names), self._printer)
+ if exit_code:
+ _log.error("Build check failed")
+ return exit_code
+
+ # The helper must be started before we check the system dependencies,
+ # since it may adjust system configuration that those checks depend on.
+ if self._options.pixel_tests:
+ self._printer.write_update("Starting pixel test helper ...")
+ self._port.start_helper()
+
+ # Check that the system dependencies (themes, fonts, ...) are correct.
+ if not self._options.nocheck_sys_deps:
+ self._printer.write_update("Checking system dependencies ...")
+ exit_code = self._port.check_sys_deps(self.needs_servers(test_names))
+ if exit_code:
+ self._port.stop_helper()
+ return exit_code
+
+ if self._options.clobber_old_results:
+ self._clobber_old_results()
+ elif self._filesystem.exists(self._results_directory):
+ self._limit_archived_results_count()
+ # Rename the existing results folder for archiving.
+ self._rename_results_folder()
+
+ # Create the output directory if it doesn't already exist.
+ self._port.host.filesystem.maybe_make_directory(self._results_directory)
+
+ self._port.setup_test_run()
+ return test_run_results.OK_EXIT_STATUS
+
+ def run(self, args):
+ """Run the tests and return a RunDetails object with the results."""
+ start_time = time.time()
+ self._printer.write_update("Collecting tests ...")
+ try:
+ paths, test_names = self._collect_tests(args)
+ except IOError:
+ # This is raised if --test-list doesn't exist
+ return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
+
+ self._printer.write_update("Parsing expectations ...")
+ self._expectations = test_expectations.TestExpectations(self._port, test_names)
+
+ tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
+ self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
+
+ # Check to make sure we're not skipping every test.
+ if not tests_to_run:
+ _log.critical('No tests to run.')
+ return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
+
+ exit_code = self._set_up_run(tests_to_run)
+ if exit_code:
+ return test_run_results.RunDetails(exit_code=exit_code)
+
+ # Don't retry failures if an explicit list of tests was passed in
+ # (i.e., each path matched exactly one test).
+ if self._options.retry_failures is None:
+ should_retry_failures = len(paths) < len(test_names)
+ else:
+ should_retry_failures = self._options.retry_failures
+
+ enabled_pixel_tests_in_retry = False
+ try:
+ self._start_servers(tests_to_run)
+
+ initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
+ self._port.num_workers(int(self._options.child_processes)), retrying=False)
+
+ # Don't retry failures if the run was interrupted by the user or by
+ # hitting the failure limit.
+ should_retry_failures = should_retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
+
+ tests_to_retry = self._tests_to_retry(initial_results)
+ if should_retry_failures and tests_to_retry:
+ enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
+
+ _log.info('')
+ _log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry))
+ _log.info('')
+ retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1,
+ num_workers=1, retrying=True)
+
+ if enabled_pixel_tests_in_retry:
+ self._options.pixel_tests = False
+ else:
+ retry_results = None
+ finally:
+ self._stop_servers()
+ self._clean_up_run()
+
+ # Some crash logs can take a long time to be written out, so look
+ # for new logs after the test run finishes.
+ self._printer.write_update("Looking for new crash logs ...")
+ self._look_for_new_crash_logs(initial_results, start_time)
+ if retry_results:
+ self._look_for_new_crash_logs(retry_results, start_time)
+
+ _log.debug("summarizing results")
+ summarized_full_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
+ summarized_failing_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=True)
+
+ exit_code = summarized_failing_results['num_regressions']
+ if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
+ _log.warning('num regressions (%d) exceeds max exit status (%d)' %
+ (exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS))
+ exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS
+
+ if not self._options.dry_run:
+ self._write_json_files(summarized_full_results, summarized_failing_results, initial_results)
+
+ if self._options.write_full_results_to:
+ self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"),
+ self._options.write_full_results_to)
+
+ self._upload_json_files()
+
+ results_path = self._filesystem.join(self._results_directory, "results.html")
+ self._copy_results_html_file(results_path)
+ if initial_results.keyboard_interrupted:
+ exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
+ else:
+ if initial_results.interrupted:
+ exit_code = test_run_results.EARLY_EXIT_STATUS
+ if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)):
+ self._port.show_results_html_file(results_path)
+ self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)
+
+ self._check_for_stale_w3c_dir()
+
+ return test_run_results.RunDetails(exit_code, summarized_full_results, summarized_failing_results, initial_results, retry_results, enabled_pixel_tests_in_retry)
+
+ def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
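+ # Expansion order: each iteration runs the whole test list, and each
+ # test is repeated repeat_each times back to back. E.g. iterations=2,
+ # repeat_each=2 and tests [A, B] yields A A B B A A B B.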
+ test_inputs = []
+ for _ in xrange(iterations):
+ for test in tests_to_run:
+ for _ in xrange(repeat_each):
+ test_inputs.append(self._test_input_for_file(test))
+ return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, retrying)
+
+ def _start_servers(self, tests_to_run):
+ if self._port.requires_http_server() or any(self._is_http_test(test) for test in tests_to_run):
+ self._printer.write_update('Starting HTTP server ...')
+ self._port.start_http_server(additional_dirs={}, number_of_drivers=self._options.max_locked_shards)
+ self._http_server_started = True
+
+ if any(self._is_websocket_test(test) for test in tests_to_run):
+ self._printer.write_update('Starting WebSocket server ...')
+ self._port.start_websocket_server()
+ self._websockets_server_started = True
+
+ def _stop_servers(self):
+ if self._http_server_started:
+ self._printer.write_update('Stopping HTTP server ...')
+ self._http_server_started = False
+ self._port.stop_http_server()
+ if self._websockets_server_started:
+ self._printer.write_update('Stopping WebSocket server ...')
+ self._websockets_server_started = False
+ self._port.stop_websocket_server()
+
+ def _clean_up_run(self):
+ _log.debug("Flushing stdout")
+ sys.stdout.flush()
+ _log.debug("Flushing stderr")
+ sys.stderr.flush()
+ _log.debug("Stopping helper")
+ self._port.stop_helper()
+ _log.debug("Cleaning up port")
+ self._port.clean_up_test_run()
+
+ def _check_for_stale_w3c_dir(self):
+ # TODO(dpranke): Remove this check after 1/1/2015 and let people deal with the warnings.
+ # Remove the check in port/base.py as well.
+ fs = self._port.host.filesystem
+ layout_tests_dir = self._port.layout_tests_dir()
+ if fs.isdir(fs.join(layout_tests_dir, 'w3c')):
+ _log.warning('WARNING: You still have the old LayoutTests/w3c directory in your checkout. You should delete it!')
+
+ def _force_pixel_tests_if_needed(self):
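+ """Turns pixel tests on for the retry pass if they were off.
+
+ Returns True if pixel tests had to be enabled, so the caller can
+ restore the original setting once the retry finishes."""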
+ if self._options.pixel_tests:
+ return False
+
+ _log.debug("Restarting helper")
+ self._port.stop_helper()
+ self._options.pixel_tests = True
+ self._port.start_helper()
+
+ return True
+
+ def _look_for_new_crash_logs(self, run_results, start_time):
+ """Since crash logs can take a long time to be written out if the system is
+ under stress do a second pass at the end of the test run.
+
+ run_results: the results of the test run
+ start_time: time the tests started at. We're looking for crash
+ logs after that time.
+ """
+ crashed_processes = []
+ for test, result in run_results.unexpected_results_by_name.iteritems():
+ if (result.type != test_expectations.CRASH):
+ continue
+ for failure in result.failures:
+ if not isinstance(failure, test_failures.FailureCrash):
+ continue
+ if failure.has_log:
+ continue
+ crashed_processes.append([test, failure.process_name, failure.pid])
+
+ sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
+ if sample_files:
+ for test, sample_file in sample_files.iteritems():
+ writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
+ writer.copy_sample_file(sample_file)
+
+ crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
+ if crash_logs:
+ for test, crash_log in crash_logs.iteritems():
+ writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
+ writer.write_crash_log(crash_log)
+
+ def _clobber_old_results(self):
+ dir_above_results_path = self._filesystem.dirname(self._results_directory)
+ self._printer.write_update("Clobbering old results in %s" % dir_above_results_path)
+ if not self._filesystem.exists(dir_above_results_path):
+ return
+ file_list = self._filesystem.listdir(dir_above_results_path)
+ results_directories = []
+ for dir in file_list:
+ file_path = self._filesystem.join(dir_above_results_path, dir)
+ if self._filesystem.isdir(file_path) and self._results_directory in file_path:
+ results_directories.append(file_path)
+ self._delete_dirs(results_directories)
+
+ # Port specific clean-up.
+ self._port.clobber_old_port_specific_results()
+
+ def _tests_to_retry(self, run_results):
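+ # Retries every test with an unexpected result other than PASS;
+ # unexpected passes are not retried.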
+ return [result.test_name for result in run_results.unexpected_results_by_name.values() if result.type != test_expectations.PASS]
+
+ def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results):
+ _log.debug("Writing JSON files in %s." % self._results_directory)
+
+ # FIXME: Upload stats.json to the server and delete times_ms.
+ times_trie = json_results_generator.test_timings_trie(initial_results.results_by_name.values())
+ times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
+ json_results_generator.write_json(self._filesystem, times_trie, times_json_path)
+
+ stats_trie = self._stats_trie(initial_results)
+ stats_path = self._filesystem.join(self._results_directory, "stats.json")
+ self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
+
+ full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
+ json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path)
+
+ full_results_path = self._filesystem.join(self._results_directory, "failing_results.json")
+ # We write failing_results.json out as JSONP because results.html
+ # needs to load it from a file: URL, and Chromium doesn't allow
+ # reading plain JSON over file: URLs.
+ json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback="ADD_RESULTS")
+
+ _log.debug("Finished writing JSON files.")
+
+ def _upload_json_files(self):
+ if not self._options.test_results_server:
+ return
+
+ if not self._options.master_name:
+ _log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
+ return
+
+ _log.debug("Uploading JSON files for builder: %s", self._options.builder_name)
+ attrs = [("builder", self._options.builder_name),
+ ("testtype", "layout-tests"),
+ ("master", self._options.master_name)]
+
+ files = [(file, self._filesystem.join(self._results_directory, file)) for file in ["failing_results.json", "full_results.json", "times_ms.json"]]
+
+ url = "http://%s/testfile/upload" % self._options.test_results_server
+ # Set uploading timeout in case appengine server is having problems.
+ # 120 seconds is more than enough to upload test results.
+ uploader = FileUploader(url, 120)
+ try:
+ response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
+ if response:
+ if response.code == 200:
+ _log.debug("JSON uploaded.")
+ else:
+ _log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read()))
+ else:
+ _log.error("JSON upload failed; no response returned")
+ except Exception, err:
+ _log.error("Upload failed: %s" % err)
+
+ def _copy_results_html_file(self, destination_path):
+ base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
+ results_file = self._filesystem.join(base_dir, 'results.html')
+ # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
+ # so make sure it exists before we try to copy it.
+ if self._filesystem.exists(results_file):
+ self._filesystem.copyfile(results_file, destination_path)
+
+ def _stats_trie(self, initial_results):
+ def _worker_number(worker_name):
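+ # Worker names are assumed to look like "worker/<n>" (so e.g.
+ # "worker/3" -> 3); -1 marks results that ran with no worker.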
+ return int(worker_name.split('/')[1]) if worker_name else -1
+
+ stats = {}
+ for result in initial_results.results_by_name.values():
+ if result.type != test_expectations.SKIP:
+ stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
+ stats_trie = {}
+ for name, value in stats.iteritems():
+ json_results_generator.add_path_to_trie(name, value, stats_trie)
+ return stats_trie
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
new file mode 100644
index 0000000..d74cb57
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
@@ -0,0 +1,176 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for manager.py."""
+
+import sys
+import time
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.controllers.manager import Manager
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models.test_run_results import TestRunResults
+from webkitpy.tool.mocktool import MockOptions
+
+
+class FakePrinter(object):
+ def write_update(self, s):
+ pass
+
+
+class ManagerTest(unittest.TestCase):
+ def test_needs_servers(self):
+ def get_manager():
+ host = MockHost()
+ port = host.port_factory.get('test-mac-leopard')
+ manager = Manager(port, options=MockOptions(http=True, max_locked_shards=1), printer=FakePrinter())
+ return manager
+
+ manager = get_manager()
+ self.assertFalse(manager.needs_servers(['fast/html']))
+
+ manager = get_manager()
+ self.assertTrue(manager.needs_servers(['http/tests/misc']))
+
+ def test_servers_started(self):
+ def get_manager(port):
+ manager = Manager(port, options=MockOptions(http=True, max_locked_shards=1), printer=FakePrinter())
+ return manager
+
+ def start_http_server(additional_dirs, number_of_drivers):
+ self.http_started = True
+
+ def start_websocket_server():
+ self.websocket_started = True
+
+ def stop_http_server():
+ self.http_stopped = True
+
+ def stop_websocket_server():
+ self.websocket_stopped = True
+
+ host = MockHost()
+ port = host.port_factory.get('test-mac-leopard')
+ port.start_http_server = start_http_server
+ port.start_websocket_server = start_websocket_server
+ port.stop_http_server = stop_http_server
+ port.stop_websocket_server = stop_websocket_server
+
+ self.http_started = self.http_stopped = self.websocket_started = self.websocket_stopped = False
+ manager = get_manager(port)
+ manager._start_servers(['http/tests/foo.html'])
+ self.assertEqual(self.http_started, True)
+ self.assertEqual(self.websocket_started, False)
+ manager._stop_servers()
+ self.assertEqual(self.http_stopped, True)
+ self.assertEqual(self.websocket_stopped, False)
+
+ self.http_started = self.http_stopped = self.websocket_started = self.websocket_stopped = False
+ manager._start_servers(['http/tests/websocket/foo.html'])
+ self.assertEqual(self.http_started, True)
+ self.assertEqual(self.websocket_started, True)
+ manager._stop_servers()
+ self.assertEqual(self.http_stopped, True)
+ self.assertEqual(self.websocket_stopped, True)
+
+ self.http_started = self.http_stopped = self.websocket_started = self.websocket_stopped = False
+ manager._start_servers(['fast/html/foo.html'])
+ self.assertEqual(self.http_started, False)
+ self.assertEqual(self.websocket_started, False)
+ manager._stop_servers()
+ self.assertEqual(self.http_stopped, False)
+ self.assertEqual(self.websocket_stopped, False)
+
+ def test_look_for_new_crash_logs(self):
+ def get_manager():
+ host = MockHost()
+ port = host.port_factory.get('test-mac-leopard')
+ manager = Manager(port, options=MockOptions(test_list=None, http=True, max_locked_shards=1), printer=FakePrinter())
+ return manager
+ host = MockHost()
+ port = host.port_factory.get('test-mac-leopard')
+ tests = ['failures/expected/crash.html']
+ expectations = test_expectations.TestExpectations(port, tests)
+ run_results = TestRunResults(expectations, len(tests))
+ manager = get_manager()
+ manager._look_for_new_crash_logs(run_results, time.time())
+
+ def _make_fake_test_result(self, host, results_directory):
+ host.filesystem.maybe_make_directory(results_directory)
+ host.filesystem.write_binary_file(results_directory + '/results.html', 'This is a test results file')
+
+ def test_rename_results_folder(self):
+ host = MockHost()
+ port = host.port_factory.get('test-mac-leopard')
+
+ def get_manager():
+ manager = Manager(port, options=MockOptions(max_locked_shards=1), printer=FakePrinter())
+ return manager
+ self._make_fake_test_result(port.host, '/tmp/layout-test-results')
+ self.assertTrue(port.host.filesystem.exists('/tmp/layout-test-results'))
+ timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(port.host.filesystem.mtime('/tmp/layout-test-results/results.html')))
+ archived_file_name = '/tmp/layout-test-results' + '_' + timestamp
+ manager = get_manager()
+ manager._rename_results_folder()
+ self.assertFalse(port.host.filesystem.exists('/tmp/layout-test-results'))
+ self.assertTrue(port.host.filesystem.exists(archived_file_name))
+
+ def test_clobber_old_results(self):
+ host = MockHost()
+ port = host.port_factory.get('test-mac-leopard')
+
+ def get_manager():
+ manager = Manager(port, options=MockOptions(max_locked_shards=1), printer=FakePrinter())
+ return manager
+ self._make_fake_test_result(port.host, '/tmp/layout-test-results')
+ self.assertTrue(port.host.filesystem.exists('/tmp/layout-test-results'))
+ manager = get_manager()
+ manager._clobber_old_results()
+ self.assertFalse(port.host.filesystem.exists('/tmp/layout-test-results'))
+
+ def test_limit_archived_results_count(self):
+ host = MockHost()
+ port = host.port_factory.get('test-mac-leopard')
+
+ def get_manager():
+ manager = Manager(port, options=MockOptions(max_locked_shards=1), printer=FakePrinter())
+ return manager
+ for x in range(1, 31):
+ dir_name = '/tmp/layout-test-results' + '_' + str(x)
+ self._make_fake_test_result(port.host, dir_name)
+ manager = get_manager()
+ manager._limit_archived_results_count()
+ deleted_dir_count = 0
+ for x in range(1, 31):
+ dir_name = '/tmp/layout-test-results' + '_' + str(x)
+ if not port.host.filesystem.exists(dir_name):
+ deleted_dir_count = deleted_dir_count + 1
+ self.assertEqual(deleted_dir_count, 5)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/repaint_overlay.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/repaint_overlay.py
new file mode 100644
index 0000000..22516bf
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/repaint_overlay.py
@@ -0,0 +1,209 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+
+
+def result_contains_repaint_rects(text):
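+ """Returns True if the result text contains repaint rects: either a
+ layer-tree dump with a '"repaintRects": [' line or the text-based
+ 'Minimum repaint:' marker."""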
+ return isinstance(text, str) and (
+ re.search('"repaintRects": \[$', text, re.MULTILINE) is not None or
+ text.find('Minimum repaint:') != -1)
+
+
+def extract_layer_tree(input_str):
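+ """Returns the first layer-tree block in input_str, from a line
+ containing only '{' through the matching line containing only '}',
+ or '{}' if no such block is found."""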
+ if not isinstance(input_str, str):
+ return '{}'
+
+ if input_str[0:2] == '{\n':
+ start = 0
+ else:
+ start = input_str.find('\n{\n')
+ if start == -1:
+ return '{}'
+
+ end = input_str.find('\n}\n', start)
+ if end == -1:
+ return '{}'
+
+ # FIXME: There may be multiple layer trees in the result.
+ return input_str[start:end + 3]
+
+
+def generate_repaint_overlay_html(test_name, actual_text, expected_text):
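+ """Returns an HTML page that overlays the expected and actual repaint
+ rects (plus the minimum repaint, when present) for test_name, or ''
+ if neither result contains repaint rects."""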
+ if not result_contains_repaint_rects(actual_text) and not result_contains_repaint_rects(expected_text):
+ return ''
+
+ expected_layer_tree = extract_layer_tree(expected_text)
+ actual_layer_tree = extract_layer_tree(actual_text)
+
+ minimum_repaint = '[]'
+ minimum_repaint_match = re.search('Minimum repaint:\n(\[.*\n\])', actual_text, re.DOTALL)
+ if minimum_repaint_match:
+ minimum_repaint = minimum_repaint_match.group(1)
+
+ return """<!DOCTYPE HTML>
+<html>
+<head>
+<title>%(title)s</title>
+<style>
+ body {
+ margin: 0;
+ padding: 0;
+ }
+ iframe {
+ position: absolute;
+ top: 80px;
+ left: 0;
+ border: 0;
+ z-index: -1;
+ }
+ canvas {
+ position: absolute;
+ top: 80px;
+ left: 0;
+ z-index: 1;
+ }
+ #actual, #minimum-repaint {
+ display: none;
+ }
+</style>
+</head>
+<body>
+<a href="http://crbug.com/381221">Known issues</a><br>
+<label><input id="show-test" type="checkbox" checked onchange="toggle_test(this.checked)">Show test</label>
+<label title="See fast/repaint/resources/text-based-repaint.js for how this works">
+ <input id="show-minimum-repaint" type="checkbox" onchange="toggle_minimum_repaint(this.checked)">Minimum repaint
+</label>
+<label><input id="use-solid-colors" type="checkbox" onchange="toggle_solid_color(this.checked)">Use solid colors</label>
+<br>
+<button title="See fast/repaint/resources/text-based-repaint.js for how this works" onclick="highlight_under_repaint()">
+ Highlight under-repaint
+</button>
+<br>
+<span id='type'>Expected Invalidations</span>
+<div id=overlay>
+ <canvas id='minimum-repaint' width='2000' height='2000'></canvas>
+ <canvas id='expected' width='2000' height='2000'></canvas>
+ <canvas id='actual' width='2000' height='2000'></canvas>
+</div>
+<script>
+var overlay_opacity = 0.25;
+
+function toggle_test(show_test) {
+ iframe.style.display = show_test ? 'block' : 'none';
+}
+
+function toggle_minimum_repaint(show_minimum_repaint) {
+ document.getElementById('minimum-repaint').style.display = show_minimum_repaint ? 'block' : 'none';
+}
+
+function toggle_solid_color(use_solid_color) {
+ overlay_opacity = use_solid_color ? 1 : 0.25;
+ draw_repaint_rects();
+ draw_minimum_repaint();
+}
+
+function highlight_under_repaint() {
+ document.getElementById('show-test').checked = false;
+ toggle_test(false);
+ document.getElementById('show-minimum-repaint').checked = true;
+ toggle_minimum_repaint(true);
+ document.getElementById('use-solid-colors').checked = true;
+ toggle_solid_color(true);
+}
+
+var expected = %(expected)s;
+var actual = %(actual)s;
+var minimum_repaint = %(minimum_repaint)s;
+
+function rectsEqual(rect1, rect2) {
+ return rect1[0] == rect2[0] && rect1[1] == rect2[1] && rect1[2] == rect2[2] && rect1[3] == rect2[3];
+}
+
+function draw_rects(context, rects) {
+ for (var i = 0; i < rects.length; ++i) {
+ var rect = rects[i];
+ context.fillRect(rect[0], rect[1], rect[2], rect[3]);
+ }
+}
+
+function draw_layer_rects(context, result) {
+ context.save();
+ if (result.position)
+ context.translate(result.position[0], result.position[1]);
+ var t = result.transform;
+ if (t) {
+ var origin = result.transformOrigin || [result.bounds[0] / 2, result.bounds[1] / 2];
+ context.translate(origin[0], origin[1]);
+ context.transform(t[0][0], t[0][1], t[1][0], t[1][1], t[3][0], t[3][1]);
+ context.translate(-origin[0], -origin[1]);
+ }
+ if (result.repaintRects)
+ draw_rects(context, result.repaintRects);
+ if (result.children) {
+ for (var i = 0; i < result.children.length; ++i)
+ draw_layer_rects(context, result.children[i]);
+ }
+ context.restore();
+}
+
+var expected_canvas = document.getElementById('expected');
+var actual_canvas = document.getElementById('actual');
+var minimum_repaint_canvas = document.getElementById('minimum-repaint');
+
+function draw_repaint_rects() {
+ var expected_ctx = expected_canvas.getContext("2d");
+ expected_ctx.clearRect(0, 0, 2000, 2000);
+ expected_ctx.fillStyle = 'rgba(255, 0, 0, ' + overlay_opacity + ')';
+ draw_layer_rects(expected_ctx, expected);
+
+ var actual_ctx = actual_canvas.getContext("2d");
+ actual_ctx.clearRect(0, 0, 2000, 2000);
+ actual_ctx.fillStyle = 'rgba(0, 255, 0, ' + overlay_opacity + ')';
+ draw_layer_rects(actual_ctx, actual);
+}
+
+function draw_minimum_repaint() {
+ var context = minimum_repaint_canvas.getContext("2d");
+ context.fillStyle = 'rgba(0, 0, 0, 1)';
+ draw_rects(context, minimum_repaint);
+}
+
+draw_repaint_rects();
+draw_minimum_repaint();
+
+var path = decodeURIComponent(location.search).substr(1);
+var iframe = document.createElement('iframe');
+iframe.id = 'test-frame';
+iframe.width = 800;
+iframe.height = 600;
+iframe.src = path;
+
+var overlay = document.getElementById('overlay');
+overlay.appendChild(iframe);
+
+var type = document.getElementById('type');
+var expected_showing = true;
+function flip() {
+ if (expected_showing) {
+ type.textContent = 'Actual Invalidations';
+ expected_canvas.style.display = 'none';
+ actual_canvas.style.display = 'block';
+ } else {
+ type.textContent = 'Expected Invalidations';
+ actual_canvas.style.display = 'none';
+ expected_canvas.style.display = 'block';
+ }
+ expected_showing = !expected_showing
+}
+setInterval(flip, 3000);
+</script>
+</body>
+</html>
+""" % {
+ 'title': test_name,
+ 'expected': expected_layer_tree,
+ 'actual': actual_layer_tree,
+ 'minimum_repaint': minimum_repaint,
+ }
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/repaint_overlay_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/repaint_overlay_unittest.py
new file mode 100644
index 0000000..f9f5870
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/repaint_overlay_unittest.py
@@ -0,0 +1,33 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from webkitpy.layout_tests.controllers import repaint_overlay
+
+
+LAYER_TREE = """{
+ "bounds":[800.00,600.00],
+ "children":[
+ {
+ "position": [8.00, 80.00],
+ "bounds": [800.00, 600.00],
+ "contentsOpaque": true,
+ "drawsContent": true,
+ "repaintRects": [
+ [8, 108, 100, 100],
+ [0, 216, 800, 100]
+ ]
+ }
+ ]
+}
+"""
+
+
+class TestRepaintOverlay(unittest.TestCase):
+ def test_result_contains_repaint_rects(self):
+ self.assertTrue(repaint_overlay.result_contains_repaint_rects(LAYER_TREE))
+ self.assertFalse(repaint_overlay.result_contains_repaint_rects('ABCD'))
+
+ def test_extract_layer_tree(self):
+ self.assertEqual(LAYER_TREE, repaint_overlay.extract_layer_tree(LAYER_TREE))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
new file mode 100644
index 0000000..0d72215
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
@@ -0,0 +1,425 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import logging
+import re
+import time
+
+from webkitpy.layout_tests.controllers import repaint_overlay
+from webkitpy.layout_tests.controllers import test_result_writer
+from webkitpy.layout_tests.port.driver import DeviceFailure, DriverInput, DriverOutput
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models.test_results import TestResult
+from webkitpy.layout_tests.models import testharness_results
+
+
+_log = logging.getLogger(__name__)
+
+
+def run_single_test(port, options, results_directory, worker_name, driver, test_input, stop_when_done):
+ runner = SingleTestRunner(port, options, results_directory, worker_name, driver, test_input, stop_when_done)
+ try:
+ return runner.run()
+ except DeviceFailure as e:
+ _log.error("device failed: %s", str(e))
+ return TestResult(test_input.test_name, device_failed=True)
+
+
+class SingleTestRunner(object):
+ (ALONGSIDE_TEST, PLATFORM_DIR, VERSION_DIR, UPDATE) = ('alongside', 'platform', 'version', 'update')
+
+ def __init__(self, port, options, results_directory, worker_name, driver, test_input, stop_when_done):
+ self._port = port
+ self._filesystem = port.host.filesystem
+ self._options = options
+ self._results_directory = results_directory
+ self._driver = driver
+ self._timeout = test_input.timeout
+ self._worker_name = worker_name
+ self._test_name = test_input.test_name
+ self._should_run_pixel_test = test_input.should_run_pixel_test
+ self._reference_files = test_input.reference_files
+ self._should_add_missing_baselines = test_input.should_add_missing_baselines
+ self._stop_when_done = stop_when_done
+
+ if self._reference_files:
+ # Detect and report a test which has a wrong combination of expectation files.
+ # For example, if 'foo.html' has two expectation files, 'foo-expected.html' and
+ # 'foo-expected.txt', we should warn users. One test file must be used exclusively
+ # in either layout tests or reftests, but not in both.
+ for suffix in ('.txt', '.png', '.wav'):
+ expected_filename = self._port.expected_filename(self._test_name, suffix)
+ if self._filesystem.exists(expected_filename):
+ _log.error('%s is a reftest, but has an unused expectation file. Please remove %s.',
+ self._test_name, expected_filename)
+
+ def _expected_driver_output(self):
+ return DriverOutput(self._port.expected_text(self._test_name),
+ self._port.expected_image(self._test_name),
+ self._port.expected_checksum(self._test_name),
+ self._port.expected_audio(self._test_name))
+
+ def _should_fetch_expected_checksum(self):
+ return self._should_run_pixel_test and not (self._options.new_baseline or self._options.reset_results)
+
+ def _driver_input(self):
+ # The image hash is used to avoid doing an image dump if the
+ # checksums match, so it should be set to a blank value if we
+ # are generating a new baseline. (Otherwise, an image from a
+ # previous run would be copied into the baseline.)
+ image_hash = None
+ if self._should_fetch_expected_checksum():
+ image_hash = self._port.expected_checksum(self._test_name)
+
+ test_base = self._port.lookup_virtual_test_base(self._test_name)
+ if test_base:
+ # If the file actually exists under the virtual dir, we want to use it (largely for virtual references),
+ # but we want to use the extra command line args either way.
+ if self._filesystem.exists(self._port.abspath_for_test(self._test_name)):
+ test_name = self._test_name
+ else:
+ test_name = test_base
+ args = self._port.lookup_virtual_test_args(self._test_name)
+ else:
+ test_name = self._test_name
+ args = self._port.lookup_physical_test_args(self._test_name)
+ return DriverInput(test_name, self._timeout, image_hash, self._should_run_pixel_test, args)
+
+ def run(self):
+ if self._options.enable_sanitizer:
+ return self._run_sanitized_test()
+ if self._reference_files:
+ if self._options.reset_results:
+ reftest_type = set([reference_file[0] for reference_file in self._reference_files])
+ result = TestResult(self._test_name, reftest_type=reftest_type)
+ result.type = test_expectations.SKIP
+ return result
+ return self._run_reftest()
+ if self._options.reset_results:
+ return self._run_rebaseline()
+ return self._run_compare_test()
+
+ def _run_sanitized_test(self):
+ # Running a sanitized test means that we ignore the actual test output
+ # and just look for timeouts and crashes (real or forced by the
+ # driver). Most crashes should indicate problems found by a sanitizer
+ # (ASan, LSan, etc.), but we report other crashes and timeouts as well
+ # in order to detect at least *some* basic failures.
+ driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
+ expected_driver_output = self._expected_driver_output()
+ failures = self._handle_error(driver_output)
+ test_result = TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
+ pid=driver_output.pid)
+ test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, expected_driver_output, test_result.failures)
+ return test_result
+
+ def _run_compare_test(self):
+ driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
+ expected_driver_output = self._expected_driver_output()
+
+ test_result = self._compare_output(expected_driver_output, driver_output)
+ if self._should_add_missing_baselines:
+ self._add_missing_baselines(test_result, driver_output)
+ test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, expected_driver_output, test_result.failures)
+ return test_result
+
+ def _run_rebaseline(self):
+ driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
+ failures = self._handle_error(driver_output)
+ test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, None, failures)
+ # FIXME: If the test crashed or timed out, it might be better to
+ # avoid writing new baselines.
+ self._overwrite_baselines(driver_output)
+ return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
+ pid=driver_output.pid)
+
+ _render_tree_dump_pattern = re.compile(r"^layer at \(\d+,\d+\) size \d+x\d+\n")
+
+ def _add_missing_baselines(self, test_result, driver_output):
+ missingImage = test_result.has_failure_matching_types(test_failures.FailureMissingImage, test_failures.FailureMissingImageHash)
+ if test_result.has_failure_matching_types(test_failures.FailureMissingResult):
+ self._save_baseline_data(driver_output.text, '.txt', self._location_for_new_baseline(driver_output.text, '.txt'))
+ if test_result.has_failure_matching_types(test_failures.FailureMissingAudio):
+ self._save_baseline_data(driver_output.audio, '.wav', self._location_for_new_baseline(driver_output.audio, '.wav'))
+ if missingImage:
+ self._save_baseline_data(driver_output.image, '.png', self._location_for_new_baseline(driver_output.image, '.png'))
+
+ def _location_for_new_baseline(self, data, extension):
+ if self._options.add_platform_exceptions:
+ return self.VERSION_DIR
+ if extension == '.png':
+ return self.PLATFORM_DIR
+ if extension == '.wav':
+ return self.ALONGSIDE_TEST
+ if extension == '.txt' and self._render_tree_dump_pattern.match(data):
+ return self.PLATFORM_DIR
+ return self.ALONGSIDE_TEST
+
+ def _overwrite_baselines(self, driver_output):
+ location = self.VERSION_DIR if self._options.add_platform_exceptions else self.UPDATE
+ self._save_baseline_data(driver_output.text, '.txt', location)
+ self._save_baseline_data(driver_output.audio, '.wav', location)
+ if self._should_run_pixel_test:
+ self._save_baseline_data(driver_output.image, '.png', location)
+
+ def _save_baseline_data(self, data, extension, location):
+ if data is None:
+ return
+ port = self._port
+ fs = self._filesystem
+ if location == self.ALONGSIDE_TEST:
+ output_dir = fs.dirname(port.abspath_for_test(self._test_name))
+ elif location == self.VERSION_DIR:
+ output_dir = fs.join(port.baseline_version_dir(), fs.dirname(self._test_name))
+ elif location == self.PLATFORM_DIR:
+ output_dir = fs.join(port.baseline_platform_dir(), fs.dirname(self._test_name))
+ elif location == self.UPDATE:
+ output_dir = fs.dirname(port.expected_filename(self._test_name, extension))
+ else:
+ raise AssertionError('unrecognized baseline location: %s' % location)
+
+ fs.maybe_make_directory(output_dir)
+ output_basename = fs.basename(fs.splitext(self._test_name)[0] + "-expected" + extension)
+ output_path = fs.join(output_dir, output_basename)
+ _log.info('Writing new expected result "%s"' % port.relative_test_filename(output_path))
+ port.update_baseline(output_path, data)
+
+ def _handle_error(self, driver_output, reference_filename=None):
+ """Returns test failures if some unusual errors happen in driver's run.
+
+ Args:
+ driver_output: The output from the driver.
+ reference_filename: The full path to the reference file which produced the driver_output.
+ This arg is optional and should be used only in reftests until we have a better way to know
+ which html file is used for producing the driver_output.
+ """
+ failures = []
+ fs = self._filesystem
+ if driver_output.timeout:
+ failures.append(test_failures.FailureTimeout(bool(reference_filename)))
+
+ if reference_filename:
+ testname = self._port.relative_test_filename(reference_filename)
+ else:
+ testname = self._test_name
+
+ if driver_output.crash:
+ failures.append(test_failures.FailureCrash(bool(reference_filename),
+ driver_output.crashed_process_name,
+ driver_output.crashed_pid,
+ bool('No crash log found' not in driver_output.crash_log)))
+ if driver_output.error:
+ _log.debug("%s %s crashed, (stderr lines):" % (self._worker_name, testname))
+ else:
+ _log.debug("%s %s crashed, (no stderr)" % (self._worker_name, testname))
+ elif driver_output.leak:
+ failures.append(test_failures.FailureLeak(bool(reference_filename),
+ driver_output.leak_log))
+ _log.debug("%s %s leaked" % (self._worker_name, testname))
+ elif driver_output.error:
+ _log.debug("%s %s output stderr lines:" % (self._worker_name, testname))
+ for line in driver_output.error.splitlines():
+ _log.debug(" %s" % line)
+ return failures
+
+ def _compare_output(self, expected_driver_output, driver_output):
+ failures = []
+ failures.extend(self._handle_error(driver_output))
+
+ if driver_output.crash:
+ # Don't continue any more if we already have a crash.
+ # In case of timeouts, we continue since we still want to see the text and image output.
+ return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
+ pid=driver_output.pid)
+
+ is_testharness_test, testharness_failures = self._compare_testharness_test(driver_output, expected_driver_output)
+ if is_testharness_test:
+ failures.extend(testharness_failures)
+ else:
+ failures.extend(self._compare_text(expected_driver_output.text, driver_output.text))
+ failures.extend(self._compare_audio(expected_driver_output.audio, driver_output.audio))
+ if self._should_run_pixel_test:
+ failures.extend(self._compare_image(expected_driver_output, driver_output))
+ has_repaint_overlay = (repaint_overlay.result_contains_repaint_rects(expected_driver_output.text) or
+ repaint_overlay.result_contains_repaint_rects(driver_output.text))
+ return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(),
+ pid=driver_output.pid, has_repaint_overlay=has_repaint_overlay)
+
+ def _compare_testharness_test(self, driver_output, expected_driver_output):
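+ # Treats the test as a testharness.js test only when it has no
+ # baselines of any kind and its output is plain testharness text
+ # (not an image, audio, or render-tree dump). Returns a pair of
+ # (is_testharness_test, failures).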
+ if expected_driver_output.image or expected_driver_output.audio or expected_driver_output.text:
+ return False, []
+
+ if driver_output.image or driver_output.audio or self._is_render_tree(driver_output.text):
+ return False, []
+
+ text = driver_output.text or ''
+
+ if not testharness_results.is_testharness_output(text):
+ return False, []
+ if not testharness_results.is_testharness_output_passing(text):
+ return True, [test_failures.FailureTestHarnessAssertion()]
+ return True, []
+
+ def _is_render_tree(self, text):
+ return text and "layer at (0,0) size 800x600" in text
+
+ def _compare_text(self, expected_text, actual_text):
+ failures = []
+ if (expected_text and actual_text and
+ # Assuming expected_text is already normalized.
+ self._port.do_text_results_differ(expected_text, self._get_normalized_output_text(actual_text))):
+ failures.append(test_failures.FailureTextMismatch())
+ elif actual_text and not expected_text:
+ failures.append(test_failures.FailureMissingResult())
+ return failures
+
+ def _compare_audio(self, expected_audio, actual_audio):
+ failures = []
+ if (expected_audio and actual_audio and
+ self._port.do_audio_results_differ(expected_audio, actual_audio)):
+ failures.append(test_failures.FailureAudioMismatch())
+ elif actual_audio and not expected_audio:
+ failures.append(test_failures.FailureMissingAudio())
+ return failures
+
+ def _get_normalized_output_text(self, output):
+ """Returns the normalized text output, i.e. the output in which
+ the end-of-line characters are normalized to "\n"."""
+ # Running tests on Windows produces "\r\n". The "\n" part is helpfully
+ # changed to "\r\n" by our system (Python/Cygwin), resulting in
+ # "\r\r\n", when, in fact, we wanted to compare the text output with
+ # the normalized text expectation files.
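+ # E.g. "PASS\r\r\n" -> "PASS\r\n" -> "PASS\n".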
+ return output.replace("\r\r\n", "\r\n").replace("\r\n", "\n")
+
+ # FIXME: This function also creates the image diff. Maybe that work should
+ # be handled elsewhere?
+ def _compare_image(self, expected_driver_output, driver_output):
+ failures = []
+ # If we didn't produce a hash file, this test must be text-only.
+ if driver_output.image_hash is None:
+ return failures
+ if not expected_driver_output.image:
+ failures.append(test_failures.FailureMissingImage())
+ elif not expected_driver_output.image_hash:
+ failures.append(test_failures.FailureMissingImageHash())
+ elif driver_output.image_hash != expected_driver_output.image_hash:
+ diff, err_str = self._port.diff_image(expected_driver_output.image, driver_output.image)
+ if err_str:
+ _log.warning(' %s : %s' % (self._test_name, err_str))
+ failures.append(test_failures.FailureImageHashMismatch())
+ driver_output.error = (driver_output.error or '') + err_str
+ else:
+ driver_output.image_diff = diff
+ if driver_output.image_diff:
+ failures.append(test_failures.FailureImageHashMismatch())
+ else:
+ # See https://bugs.webkit.org/show_bug.cgi?id=69444 for why this isn't a full failure.
+ _log.warning(' %s -> pixel hash failed (but diff passed)' % self._test_name)
+ return failures
+
+ def _run_reftest(self):
+ test_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
+ total_test_time = 0
+ reference_output = None
+ test_result = None
+
+ # If the test crashed, or timed out, there's no point in running the reference at all.
+ # This can save a lot of execution time if we have a lot of crashes or timeouts.
+ if test_output.crash or test_output.timeout:
+ expected_driver_output = DriverOutput(text=None, image=None, image_hash=None, audio=None)
+ return self._compare_output(expected_driver_output, test_output)
+
+ # A reftest can have multiple match references and multiple mismatch references;
+ # the test fails if any mismatch reference matches the output, or if
+ # none of the match references does.
+ # To minimize the number of references we have to check, we run all of the mismatches first,
+ # then the matches, and short-circuit out as soon as we can.
+ # Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.
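+ # ('!' is 0x21 and '=' is 0x3D in ASCII, so ('!=', reference) pairs
+ # sort ahead of ('==', reference) pairs.)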
+
+ putAllMismatchBeforeMatch = sorted
+ reference_test_names = []
+ for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
+ if self._port.lookup_virtual_test_base(self._test_name):
+ args = self._port.lookup_virtual_test_args(self._test_name)
+ else:
+ args = self._port.lookup_physical_test_args(self._test_name)
+ reference_test_name = self._port.relative_test_filename(reference_filename)
+ reference_test_names.append(reference_test_name)
+ driver_input = DriverInput(reference_test_name, self._timeout, image_hash=None, should_run_pixel_test=True, args=args)
+ reference_output = self._driver.run_test(driver_input, self._stop_when_done)
+ test_result = self._compare_output_with_reference(reference_output, test_output, reference_filename, expectation == '!=')
+
+ if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
+ break
+ total_test_time += test_result.test_run_time
+
+ assert(reference_output)
+ test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, test_output, reference_output, test_result.failures)
+
+ # FIXME: We don't really deal with a mix of reftest types properly. We pass in a set() to reftest_type
+ # and only really handle the first of the references in the result.
+ reftest_type = list(set([reference_file[0] for reference_file in self._reference_files]))
+ return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time,
+ test_result.has_stderr, reftest_type=reftest_type, pid=test_result.pid,
+ references=reference_test_names)
+
+ def _compare_output_with_reference(self, reference_driver_output, actual_driver_output, reference_filename, mismatch):
+ total_test_time = reference_driver_output.test_time + actual_driver_output.test_time
+ has_stderr = reference_driver_output.has_stderr() or actual_driver_output.has_stderr()
+ failures = []
+ failures.extend(self._handle_error(actual_driver_output))
+ if failures:
+ # Don't continue any more if we already have crash or timeout.
+ return TestResult(self._test_name, failures, total_test_time, has_stderr)
+ failures.extend(self._handle_error(reference_driver_output, reference_filename=reference_filename))
+ if failures:
+ return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)
+
+ if not reference_driver_output.image_hash and not actual_driver_output.image_hash:
+ failures.append(test_failures.FailureReftestNoImagesGenerated(reference_filename))
+ elif mismatch:
+ if reference_driver_output.image_hash == actual_driver_output.image_hash:
+ diff, err_str = self._port.diff_image(reference_driver_output.image, actual_driver_output.image)
+ if not diff:
+ failures.append(test_failures.FailureReftestMismatchDidNotOccur(reference_filename))
+ elif err_str:
+ _log.error(err_str)
+ else:
+ _log.warning(" %s -> ref test hashes matched but diff failed" % self._test_name)
+
+ elif reference_driver_output.image_hash != actual_driver_output.image_hash:
+ diff, err_str = self._port.diff_image(reference_driver_output.image, actual_driver_output.image)
+ if diff:
+ failures.append(test_failures.FailureReftestMismatch(reference_filename))
+ elif err_str:
+ _log.error(err_str)
+ else:
+ _log.warning(" %s -> ref test hashes didn't match but diff passed" % self._test_name)
+
+ return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py
new file mode 100644
index 0000000..127e1e6
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py
@@ -0,0 +1,294 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import logging
+
+from webkitpy.layout_tests.controllers import repaint_overlay
+from webkitpy.layout_tests.models import test_failures
+
+
+_log = logging.getLogger(__name__)
+
+
+def write_test_result(filesystem, port, results_directory, test_name, driver_output,
+ expected_driver_output, failures):
+ """Write the test result to the result output directory."""
+ root_output_dir = results_directory
+ writer = TestResultWriter(filesystem, port, root_output_dir, test_name)
+
+ if driver_output.error:
+ writer.write_stderr(driver_output.error)
+
+ for failure in failures:
+ # FIXME: Instead of this long 'if' block, each failure class might
+ # have a responsibility for writing a test result.
+ if isinstance(failure, (test_failures.FailureMissingResult,
+ test_failures.FailureTextMismatch,
+ test_failures.FailureTestHarnessAssertion)):
+ writer.write_text_files(driver_output.text, expected_driver_output.text)
+ writer.create_text_diff_and_write_result(driver_output.text, expected_driver_output.text)
+ elif isinstance(failure, test_failures.FailureMissingImage):
+ writer.write_image_files(driver_output.image, expected_image=None)
+ elif isinstance(failure, test_failures.FailureMissingImageHash):
+ writer.write_image_files(driver_output.image, expected_driver_output.image)
+ elif isinstance(failure, test_failures.FailureImageHashMismatch):
+ writer.write_image_files(driver_output.image, expected_driver_output.image)
+ writer.write_image_diff_files(driver_output.image_diff)
+ elif isinstance(failure, (test_failures.FailureAudioMismatch,
+ test_failures.FailureMissingAudio)):
+ writer.write_audio_files(driver_output.audio, expected_driver_output.audio)
+ elif isinstance(failure, test_failures.FailureCrash):
+ crashed_driver_output = expected_driver_output if failure.is_reftest else driver_output
+ writer.write_crash_log(crashed_driver_output.crash_log)
+ elif isinstance(failure, test_failures.FailureLeak):
+ writer.write_leak_log(driver_output.leak_log)
+ elif isinstance(failure, test_failures.FailureReftestMismatch):
+ writer.write_image_files(driver_output.image, expected_driver_output.image)
+ # FIXME: This work should be done earlier in the pipeline (e.g., when we compare images for non-ref tests).
+ # FIXME: We should always have 2 images here.
+ if driver_output.image and expected_driver_output.image:
+ diff_image, err_str = port.diff_image(expected_driver_output.image, driver_output.image)
+ if diff_image:
+ writer.write_image_diff_files(diff_image)
+ else:
+ _log.warn('ref test mismatch did not produce an image diff.')
+ writer.write_image_files(driver_output.image, expected_image=None)
+ if filesystem.exists(failure.reference_filename):
+ writer.write_reftest(failure.reference_filename)
+ else:
+ _log.warn("reference %s was not found" % failure.reference_filename)
+ elif isinstance(failure, test_failures.FailureReftestMismatchDidNotOccur):
+ writer.write_image_files(driver_output.image, expected_image=None)
+ if filesystem.exists(failure.reference_filename):
+ writer.write_reftest(failure.reference_filename)
+ else:
+ _log.warn("reference %s was not found" % failure.reference_filename)
+ else:
+ assert isinstance(failure, (test_failures.FailureTimeout, test_failures.FailureReftestNoImagesGenerated))
+
+ if expected_driver_output is not None:
+ writer.create_repaint_overlay_result(driver_output.text, expected_driver_output.text)
+
+
+class TestResultWriter(object):
+ """A class which handles all writing operations to the result directory."""
+
+ # Filename pieces when writing failures to the test results directory.
+ FILENAME_SUFFIX_ACTUAL = "-actual"
+ FILENAME_SUFFIX_EXPECTED = "-expected"
+ FILENAME_SUFFIX_DIFF = "-diff"
+ FILENAME_SUFFIX_STDERR = "-stderr"
+ FILENAME_SUFFIX_CRASH_LOG = "-crash-log"
+ FILENAME_SUFFIX_SAMPLE = "-sample"
+ FILENAME_SUFFIX_LEAK_LOG = "-leak-log"
+ FILENAME_SUFFIX_WDIFF = "-wdiff.html"
+ FILENAME_SUFFIX_PRETTY_PATCH = "-pretty-diff.html"
+ FILENAME_SUFFIX_IMAGE_DIFF = "-diff.png"
+ FILENAME_SUFFIX_IMAGE_DIFFS_HTML = "-diffs.html"
+ FILENAME_SUFFIX_OVERLAY = "-overlay.html"
+
+ def __init__(self, filesystem, port, root_output_dir, test_name):
+ self._filesystem = filesystem
+ self._port = port
+ self._root_output_dir = root_output_dir
+ self._test_name = test_name
+
+ def _make_output_directory(self):
+ """Creates the output directory (if needed) for a given test filename."""
+ fs = self._filesystem
+ output_filename = fs.join(self._root_output_dir, self._test_name)
+ fs.maybe_make_directory(fs.dirname(output_filename))
+
+ def output_filename(self, modifier):
+ """Returns a filename inside the output dir that contains modifier.
+
+ For example, if test name is "fast/dom/foo.html" and modifier is "-expected.txt",
+ the return value is "/<path-to-root-output-dir>/fast/dom/foo-expected.txt".
+
+ Args:
+ modifier: a string to replace the extension of filename with
+
+ Returns:
+ The absolute path to the output filename
+ """
+ fs = self._filesystem
+ output_filename = fs.join(self._root_output_dir, self._test_name)
+ return fs.splitext(output_filename)[0] + modifier
+
+ def _write_file(self, path, contents):
+ if contents is not None:
+ self._make_output_directory()
+ self._filesystem.write_binary_file(path, contents)
+
+ def _output_testname(self, modifier):
+ fs = self._filesystem
+ return fs.splitext(fs.basename(self._test_name))[0] + modifier
+
+ def write_output_files(self, file_type, output, expected):
+ """Writes the test output, the expected output in the results directory.
+
+ The full output filename of the actual, for example, will be
+ <filename>-actual<file_type>
+ For instance,
+ my_test-actual.txt
+
+ Args:
+ file_type: A string describing the test output file type, e.g. ".txt"
+ output: A string containing the test output
+ expected: A string containing the expected test output
+ """
+ actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
+ expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
+
+ self._write_file(actual_filename, output)
+ self._write_file(expected_filename, expected)
+
+ def write_stderr(self, error):
+ filename = self.output_filename(self.FILENAME_SUFFIX_STDERR + ".txt")
+ self._write_file(filename, error)
+
+ def write_crash_log(self, crash_log):
+ filename = self.output_filename(self.FILENAME_SUFFIX_CRASH_LOG + ".txt")
+ self._write_file(filename, crash_log.encode('utf8', 'replace'))
+
+ def write_leak_log(self, leak_log):
+ filename = self.output_filename(self.FILENAME_SUFFIX_LEAK_LOG + ".txt")
+ self._write_file(filename, leak_log)
+
+ def copy_sample_file(self, sample_file):
+ filename = self.output_filename(self.FILENAME_SUFFIX_SAMPLE + ".txt")
+ self._filesystem.copyfile(sample_file, filename)
+
+ def write_text_files(self, actual_text, expected_text):
+ self.write_output_files(".txt", actual_text, expected_text)
+
+ def create_text_diff_and_write_result(self, actual_text, expected_text):
+ # FIXME: This function is actually doing the diffs as well as writing results.
+ # It might be better to extract code which does 'diff' and make it a separate function.
+ if not actual_text or not expected_text:
+ return
+
+ file_type = '.txt'
+ actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
+ expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
+ # We treat diff output as binary. Diff output may contain multiple files
+ # in conflicting encodings.
+ diff = self._port.diff_text(expected_text, actual_text, expected_filename, actual_filename)
+ diff_filename = self.output_filename(self.FILENAME_SUFFIX_DIFF + file_type)
+ self._write_file(diff_filename, diff)
+
+ # Shell out to wdiff to get colored inline diffs.
+ if self._port.wdiff_available():
+ wdiff = self._port.wdiff_text(expected_filename, actual_filename)
+ wdiff_filename = self.output_filename(self.FILENAME_SUFFIX_WDIFF)
+ self._write_file(wdiff_filename, wdiff)
+
+ # Use WebKit's PrettyPatch.rb to get an HTML diff.
+ if self._port.pretty_patch_available():
+ pretty_patch = self._port.pretty_patch_text(diff_filename)
+ pretty_patch_filename = self.output_filename(self.FILENAME_SUFFIX_PRETTY_PATCH)
+ self._write_file(pretty_patch_filename, pretty_patch)
+
+ def create_repaint_overlay_result(self, actual_text, expected_text):
+ html = repaint_overlay.generate_repaint_overlay_html(self._test_name, actual_text, expected_text)
+ if html:
+ overlay_filename = self.output_filename(self.FILENAME_SUFFIX_OVERLAY)
+ self._write_file(overlay_filename, html)
+
+ def write_audio_files(self, actual_audio, expected_audio):
+ self.write_output_files('.wav', actual_audio, expected_audio)
+
+ def write_image_files(self, actual_image, expected_image):
+ self.write_output_files('.png', actual_image, expected_image)
+
+ def write_image_diff_files(self, image_diff):
+ diff_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFF)
+ self._write_file(diff_filename, image_diff)
+
+ diffs_html_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFFS_HTML)
+ # FIXME: old-run-webkit-tests shows the diff percentage as the text contents of the "diff" link.
+ # FIXME: old-run-webkit-tests include a link to the test file.
+ html = """<!DOCTYPE HTML>
+<html>
+<head>
+<title>%(title)s</title>
+<style>.label{font-weight:bold}</style>
+</head>
+<body>
+Difference between images: <a href="%(diff_filename)s">diff</a><br>
+<div class=imageText></div>
+<div class=imageContainer data-prefix="%(prefix)s">Loading...</div>
+<script>
+(function() {
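+ // Once both images have preloaded, alternate the displayed image between
+ // "expected" and "actual" every two seconds.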
+ var preloadedImageCount = 0;
+ function preloadComplete() {
+ ++preloadedImageCount;
+ if (preloadedImageCount < 2)
+ return;
+ toggleImages();
+ setInterval(toggleImages, 2000);
+ }
+
+ function preloadImage(url) {
+ var image = new Image();
+ image.addEventListener('load', preloadComplete);
+ image.src = url;
+ return image;
+ }
+
+ function toggleImages() {
+ if (text.textContent == 'Expected Image') {
+ text.textContent = 'Actual Image';
+ container.replaceChild(actualImage, container.firstChild);
+ } else {
+ text.textContent = 'Expected Image';
+ container.replaceChild(expectedImage, container.firstChild);
+ }
+ }
+
+ var text = document.querySelector('.imageText');
+ var container = document.querySelector('.imageContainer');
+ var actualImage = preloadImage(container.getAttribute('data-prefix') + '-actual.png');
+ var expectedImage = preloadImage(container.getAttribute('data-prefix') + '-expected.png');
+})();
+</script>
+</body>
+</html>
+""" % {
+ 'title': self._test_name,
+ 'diff_filename': self._output_testname(self.FILENAME_SUFFIX_IMAGE_DIFF),
+ 'prefix': self._output_testname(''),
+ }
+ self._write_file(diffs_html_filename, html)
+
+ def write_reftest(self, src_filepath):
+ fs = self._filesystem
+ dst_dir = fs.dirname(fs.join(self._root_output_dir, self._test_name))
+ dst_filepath = fs.join(dst_dir, fs.basename(src_filepath))
+ self._write_file(dst_filepath, fs.read_binary_file(src_filepath))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py
new file mode 100644
index 0000000..213b677
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import optparse
+import unittest
+
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.layout_tests.controllers.test_result_writer import write_test_result
+from webkitpy.layout_tests.port.driver import DriverOutput
+from webkitpy.layout_tests.port.test import TestPort
+from webkitpy.layout_tests.models import test_failures
+
+
+class TestResultWriterTests(unittest.TestCase):
+ def run_test(self, failures=None, files=None):
+ failures = failures or []
+ host = MockSystemHost()
+ host.filesystem.files = files or {}
+ port = TestPort(host=host, port_name='test-mac-snowleopard', options=optparse.Values())
+ actual_output = DriverOutput(text='', image=None, image_hash=None, audio=None)
+ expected_output = DriverOutput(text='', image=None, image_hash=None, audio=None)
+ write_test_result(host.filesystem, port, '/tmp', 'foo.html', actual_output, expected_output, failures)
+ return host.filesystem.written_files
+
+ def test_success(self):
+ # Nothing is written when the test passes.
+ written_files = self.run_test(failures=[])
+ self.assertEqual(written_files, {})
+
+ def test_reference_exists(self):
+ failure = test_failures.FailureReftestMismatch()
+ failure.reference_filename = '/src/exists-expected.html'
+ files = {'/src/exists-expected.html': 'yup'}
+ written_files = self.run_test(failures=[failure], files=files)
+ self.assertEqual(written_files, {'/tmp/exists-expected.html': 'yup'})
+
+ failure = test_failures.FailureReftestMismatchDidNotOccur()
+ failure.reference_filename = '/src/exists-expected-mismatch.html'
+ files = {'/src/exists-expected-mismatch.html': 'yup'}
+ written_files = self.run_test(failures=[failure], files=files)
+ self.assertEqual(written_files, {'/tmp/exists-expected-mismatch.html': 'yup'})
+
+ def test_reference_is_missing(self):
+ failure = test_failures.FailureReftestMismatch()
+ failure.reference_filename = 'notfound.html'
+ written_files = self.run_test(failures=[failure], files={})
+ self.assertEqual(written_files, {})
+
+ failure = test_failures.FailureReftestMismatchDidNotOccur()
+ failure.reference_filename = 'notfound.html'
+ written_files = self.run_test(failures=[failure], files={})
+ self.assertEqual(written_files, {})
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/generate_results_dashboard.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/generate_results_dashboard.py
new file mode 100644
index 0000000..e8a18ca
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/generate_results_dashboard.py
@@ -0,0 +1,152 @@
+# Copyright (C) 2014 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import logging
+
+
+class ProcessJsonData(object):
+
+ def __init__(self, current_result_json_dict, old_failing_results_list, old_full_results_list):
+ self._current_result_json_dict = current_result_json_dict
+ self._old_failing_results_list = old_failing_results_list
+ self._old_full_results_list = old_full_results_list
+ self._final_result = []
+
+ def _get_test_result(self, test_result_data):
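+ # Collapses one test's result entry into a single dashboard value:
+ # skipped tests stay 'SKIP', expected results become 'PASS' (or
+ # 'HASSTDERR' when stderr output was recorded), and anything else is
+ # reported as the actual result.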
+ actual = test_result_data['actual']
+ expected = test_result_data['expected']
+ if actual == 'SKIP':
+ return actual
+ if actual == expected:
+ return 'HASSTDERR' if test_result_data.get('has_stderr') == 'true' else 'PASS'
+ else:
+ return actual
+
+ def _recurse_json_object(self, json_object, key_list):
+ for key in key_list:
+ try:
+ json_object = json_object[key]
+ except KeyError:
+ return 'NOTFOUND'
+ return self._get_test_result(json_object)
+
+ def _process_previous_json_results(self, key_list):
+ row = []
+ length = len(self._old_failing_results_list)
+ for index in range(0, length):
+ result = self._recurse_json_object(self._old_failing_results_list[index]["tests"], key_list)
+ if result == 'NOTFOUND':
+ result = self._recurse_json_object(self._old_full_results_list[index]["tests"], key_list)
+ row.append(result)
+ return row
+
+ def _add_archived_result(self, json_object, result):
+ json_object['archived_results'] = result
+
+ def _process_json_object(self, json_object, keyList):
+ for key, subdict in json_object.iteritems():
+ if isinstance(subdict, dict):
+ self._process_json_object(subdict, keyList + [key])
+ else:
+ row = [self._get_test_result(json_object)]
+ row += self._process_previous_json_results(keyList)
+ json_object.clear()
+ self._add_archived_result(json_object, row)
+ return
+
+ def generate_archived_result(self):
+ for key in self._current_result_json_dict["tests"]:
+ self._process_json_object(self._current_result_json_dict["tests"][key], [key])
+ return self._current_result_json_dict
+
+
+class DashBoardGenerator(object):
+
+ def __init__(self, port):
+ self._port = port
+ self._filesystem = port.host.filesystem
+ self._results_directory = self._port.results_directory()
+ self._results_directory_path = self._filesystem.dirname(self._results_directory)
+ self._current_result_json_dict = {}
+ self._old_failing_results_list = []
+ self._old_full_results_list = []
+ self._final_result = []
+
+ def _add_individual_result_links(self, results_directories):
+ archived_results_file_list = [(file + '/results.html') for file in results_directories]
+ archived_results_file_list.insert(0, 'results.html')
+ self._current_result_json_dict['result_links'] = archived_results_file_list
+
+ def _copy_dashboard_html(self):
+ dashboard_file = self._filesystem.join(self._results_directory, 'dashboard.html')
+ dashboard_html_file_path = self._filesystem.join(self._port.layout_tests_dir(), 'fast/harness/archived-results-dashboard.html')
+ if not self._filesystem.exists(dashboard_file):
+ if self._filesystem.exists(dashboard_html_file_path):
+ self._filesystem.copyfile(dashboard_html_file_path, dashboard_file)
+
+ def _initialize(self):
+ file_list = self._filesystem.listdir(self._results_directory_path)
+ results_directories = []
+ for dir in file_list:
+ full_dir_path = self._filesystem.join(self._results_directory_path, dir)
+ if self._filesystem.isdir(full_dir_path):
+ if self._results_directory in full_dir_path:
+ results_directories.append(full_dir_path)
+ results_directories.sort(reverse=True, key=lambda x: self._filesystem.mtime(x))
+ current_failing_results_json_file = self._filesystem.join(results_directories[0], 'failing_results.json')
+ input_json_string = self._filesystem.read_text_file(current_failing_results_json_file)
+ input_json_string = input_json_string[12:-2] # Strip the leading 'ADD_RESULTS(' and the trailing ');'.
+ self._current_result_json_dict['tests'] = json.loads(input_json_string)['tests']
+ results_directories = results_directories[1:]
+
+ # Add hyperlinks to the individual results.html files.
+ self._add_individual_result_links(results_directories)
+
+ # Load the remaining archived layout test result JSONs to build the dashboard.
+ for json_file in results_directories:
+ failing_json_file_path = self._filesystem.join(json_file, 'failing_results.json')
+ full_json_file_path = self._filesystem.join(json_file, 'full_results.json')
+ json_string = self._filesystem.read_text_file(failing_json_file_path)
+ json_string = json_string[12:-2] # Strip the leading 'ADD_RESULTS(' and the trailing ');'.
+ self._old_failing_results_list.append(json.loads(json_string))
+ json_string_full_result = self._filesystem.read_text_file(full_json_file_path)
+ self._old_full_results_list.append(json.loads(json_string_full_result))
+ self._copy_dashboard_html()
+
+ def generate(self):
+ self._initialize()
+
+ # There must be at least one archived result to process.
+ if self._current_result_json_dict:
+ process_json_data = ProcessJsonData(self._current_result_json_dict, self._old_failing_results_list, self._old_full_results_list)
+ self._final_result = process_json_data.generate_archived_result()
+ final_json = json.dumps(self._final_result)
+ final_json = 'ADD_RESULTS(' + final_json + ');'
+ archived_results_file_path = self._filesystem.join(self._results_directory, 'archived_results.json')
+ self._filesystem.write_text_file(archived_results_file_path, final_json)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_package/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_package/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_package/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations.py
new file mode 100644
index 0000000..9b0f5d5
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations.py
@@ -0,0 +1,255 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Generates a fake TestExpectations file consisting of flaky tests from the bot
+corresponding to the give port."""
+
+import json
+import logging
+import os.path
+import urllib
+import urllib2
+
+from webkitpy.layout_tests.port import builders
+from webkitpy.layout_tests.models.test_expectations import TestExpectations
+from webkitpy.layout_tests.models.test_expectations import TestExpectationLine
+
+
+_log = logging.getLogger(__name__)
+
+
+# results.json v4 format:
+# {
+# 'version': 4,
+# 'builder name' : {
+# 'blinkRevision': [],
+# 'tests': {
+# 'directory' { # Each path component is a dictionary.
+# 'testname.html': {
+# 'expected' : 'FAIL', # expectation name
+# 'results': [], # Run-length encoded result.
+# 'times': [],
+# 'bugs': [], # bug urls
+# }
+# }
+# }
+# 'buildNumbers': [],
+# 'secondsSinceEpoch': [],
+# 'chromeRevision': [],
+# 'failure_map': { } # Map from letter code to expectation name.
+# },
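+#
+# The 'results' arrays are run-length encoded: each item is a
+# [run_length, type_letter] pair, so (for hypothetical values)
+# [[3, 'P'], [1, 'F']] describes three consecutive passes and one text
+# failure, with the letters decoded through 'failure_map'.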
+class ResultsJSON(object):
+ TESTS_KEY = 'tests'
+ FAILURE_MAP_KEY = 'failure_map'
+ RESULTS_KEY = 'results'
+ EXPECTATIONS_KEY = 'expected'
+ BUGS_KEY = 'bugs'
+ RLE_LENGTH = 0
+ RLE_VALUE = 1
+
+ # results.json was originally designed to support
+ # multiple builders in one json file, so the builder_name
+ # is needed to figure out which builder this json file
+ # refers to (and thus where the results are stored)
+ def __init__(self, builder_name, json_dict):
+ self.builder_name = builder_name
+ self._json = json_dict
+
+ def _walk_trie(self, trie, parent_path):
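+ # Depth-first walk over the directory trie; yields (test_path, leaf)
+ # pairs, where a node counts as a leaf once it carries a 'results' key.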
+ for name, value in trie.items():
+ full_path = os.path.join(parent_path, name)
+
+ # FIXME: If we ever have a test directory self.RESULTS_KEY
+ # ("results"), this logic will break!
+ if self.RESULTS_KEY not in value:
+ for path, results in self._walk_trie(value, full_path):
+ yield path, results
+ else:
+ yield full_path, value
+
+ def walk_results(self, full_path=''):
+ tests_trie = self._json[self.builder_name][self.TESTS_KEY]
+ return self._walk_trie(tests_trie, parent_path='')
+
+ def expectation_for_type(self, type_char):
+ return self._json[self.builder_name][self.FAILURE_MAP_KEY][type_char]
+
+ # Knowing how to parse the run-length-encoded values in results.json
+ # is a detail of this class.
+ def occurances_and_type_from_result_item(self, item):
+ return item[self.RLE_LENGTH], item[self.RLE_VALUE]
+
+
+class BotTestExpectationsFactory(object):
+ RESULTS_URL_PREFIX = 'http://test-results.appspot.com/testfile?master=ChromiumWebkit&testtype=layout-tests&name=results-small.json&builder='
+
+ def _results_json_for_port(self, port_name, builder_category):
+ if builder_category == 'deps':
+ builder = builders.deps_builder_name_for_port_name(port_name)
+ else:
+ builder = builders.builder_name_for_port_name(port_name)
+
+ if not builder:
+ return None
+ return self._results_json_for_builder(builder)
+
+ def _results_json_for_builder(self, builder):
+ results_url = self.RESULTS_URL_PREFIX + urllib.quote(builder)
+ try:
+ _log.debug('Fetching flakiness data from appengine.')
+ return ResultsJSON(builder, json.load(urllib2.urlopen(results_url)))
+ except urllib2.URLError as error:
+ _log.warning('Could not retrieve flakiness data from the bot. url: %s', results_url)
+ _log.warning(error)
+
+ def expectations_for_port(self, port_name, builder_category='layout'):
+ # FIXME: This only grabs the release builder's flakiness data. If we're
+ # running debug, we should grab the debug builder's data.
+ # FIXME: What should this do if there is no debug builder for a port, e.g. we have
+ # no debug XP builder? Should it use the release bot or another Windows debug bot?
+ # At the very least, it should log an error.
+ results_json = self._results_json_for_port(port_name, builder_category)
+ if not results_json:
+ return None
+ return BotTestExpectations(results_json)
+
+ def expectations_for_builder(self, builder):
+ results_json = self._results_json_for_builder(builder)
+ if not results_json:
+ return None
+ return BotTestExpectations(results_json)
+
+
+class BotTestExpectations(object):
+ # FIXME: Get this from the json instead of hard-coding it.
+ RESULT_TYPES_TO_IGNORE = ['N', 'X', 'Y']
+
+ # specifiers arg is used in unittests to avoid the static dependency on builders.
+ def __init__(self, results_json, specifiers=None):
+ self.results_json = results_json
+ self.specifiers = specifiers or set(builders.specifiers_for_builder(results_json.builder_name))
+
+ def _line_from_test_and_flaky_types_and_bug_urls(self, test_path, flaky_types, bug_urls):
+ line = TestExpectationLine()
+ line.original_string = test_path
+ line.name = test_path
+ line.filename = test_path
+ line.path = test_path # FIXME: Should this be normpath?
+ line.matching_tests = [test_path]
+ line.bugs = bug_urls if bug_urls else ["Bug(gardener)"]
+ line.expectations = sorted(map(self.results_json.expectation_for_type, flaky_types))
+ line.specifiers = self.specifiers
+ return line
+
+ def flakes_by_path(self, only_ignore_very_flaky):
+ """Sets test expectations to bot results if there are at least two distinct results."""
+ flakes_by_path = {}
+ for test_path, entry in self.results_json.walk_results():
+ results_dict = entry[self.results_json.RESULTS_KEY]
+ flaky_types = self._flaky_types_in_results(results_dict, only_ignore_very_flaky)
+ if len(flaky_types) <= 1:
+ continue
+ flakes_by_path[test_path] = sorted(map(self.results_json.expectation_for_type, flaky_types))
+ return flakes_by_path
+
+ def unexpected_results_by_path(self):
+ """For tests with unexpected results, returns original expectations + results."""
+ def exp_to_string(exp):
+ return TestExpectations.EXPECTATIONS_TO_STRING.get(exp, None).upper()
+
+ def string_to_exp(string):
+ # Needs a bit more logic than the method above,
+ # since a PASS is 0 and evaluates to False.
+ result = TestExpectations.EXPECTATIONS.get(string.lower(), None)
+ if result is not None:
+ return result
+ raise ValueError(string)
+
+ unexpected_results_by_path = {}
+ for test_path, entry in self.results_json.walk_results():
+ # Expectations for this test; a test with no explicit expectation defaults to PASS.
+ exp_string = entry.get(self.results_json.EXPECTATIONS_KEY, u'PASS')
+
+ # All run-length-encoded results for this test.
+ results_dict = entry.get(self.results_json.RESULTS_KEY, {})
+
+ # Set of expectations for this test.
+ expectations = set(map(string_to_exp, exp_string.split(' ')))
+
+ # Set of distinct results for this test.
+ result_types = self._flaky_types_in_results(results_dict)
+
+ # Distinct results as non-encoded strings.
+ result_strings = map(self.results_json.expectation_for_type, result_types)
+
+ # Distinct resulting expectations.
+ result_exp = map(string_to_exp, result_strings)
+
+ expected = lambda e: TestExpectations.result_was_expected(e, expectations, False)
+
+ additional_expectations = set(e for e in result_exp if not expected(e))
+
+ # Test did not have unexpected results.
+ if not additional_expectations:
+ continue
+
+ expectations.update(additional_expectations)
+ unexpected_results_by_path[test_path] = sorted(map(exp_to_string, expectations))
+ return unexpected_results_by_path
+
+ def expectation_lines(self, only_ignore_very_flaky=False):
+ lines = []
+ for test_path, entry in self.results_json.walk_results():
+ results_array = entry[self.results_json.RESULTS_KEY]
+ flaky_types = self._flaky_types_in_results(results_array, only_ignore_very_flaky)
+ if len(flaky_types) > 1:
+ bug_urls = entry.get(self.results_json.BUGS_KEY)
+ line = self._line_from_test_and_flaky_types_and_bug_urls(test_path, flaky_types, bug_urls)
+ lines.append(line)
+ return lines
+
+ def _flaky_types_in_results(self, run_length_encoded_results, only_ignore_very_flaky=False):
+ results_map = {}
+ seen_results = {}
+
+ for result_item in run_length_encoded_results:
+ _, result_type = self.results_json.occurances_and_type_from_result_item(result_item)
+ if result_type in self.RESULT_TYPES_TO_IGNORE:
+ continue
+
+ if only_ignore_very_flaky and result_type not in seen_results:
+ # Only consider a short-lived result if we've seen it more than once.
+ # Otherwise, we include lots of false-positives due to tests that fail
+ # for a couple runs and then start passing.
+ # FIXME: Maybe we should make this more liberal and consider it a flake
+ # even if we only see that failure once.
+ seen_results[result_type] = True
+ continue
+
+ results_map[result_type] = True
+
+ return results_map.keys()
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations_unittest.py
new file mode 100644
index 0000000..85ac851
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations_unittest.py
@@ -0,0 +1,198 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.layout_tests.layout_package import bot_test_expectations
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.port import builders
+
+
+class BotTestExpectationsFactoryTest(unittest.TestCase):
+ def fake_results_json_for_builder(self, builder):
+ return bot_test_expectations.ResultsJSON(builder, 'Dummy content')
+
+ def test_expectations_for_builder(self):
+ factory = bot_test_expectations.BotTestExpectationsFactory()
+ factory._results_json_for_builder = self.fake_results_json_for_builder
+
+ old_builders = builders._exact_matches
+ builders._exact_matches = {
+ "Dummy builder name": {"port_name": "dummy-port", "specifiers": []},
+ }
+
+ try:
+ self.assertIsNotNone(factory.expectations_for_builder('Dummy builder name'))
+ finally:
+ builders._exact_matches = old_builders
+
+ def test_expectations_for_port(self):
+ factory = bot_test_expectations.BotTestExpectationsFactory()
+ factory._results_json_for_builder = self.fake_results_json_for_builder
+
+ old_builders = builders._exact_matches
+ builders._exact_matches = {
+ "Dummy builder name": {"port_name": "dummy-port", "specifiers": []},
+ }
+
+ try:
+ self.assertIsNotNone(factory.expectations_for_port('dummy-port'))
+ finally:
+ builders._exact_matches = old_builders
+
+
+class BotTestExpectationsTest(unittest.TestCase):
+ # FIXME: Find a way to import this map from Tools/TestResultServer/model/jsonresults.py.
+ FAILURE_MAP = {"A": "AUDIO", "C": "CRASH", "F": "TEXT", "I": "IMAGE", "O": "MISSING",
+ "N": "NO DATA", "P": "PASS", "T": "TIMEOUT", "Y": "NOTRUN", "X": "SKIP", "Z": "IMAGE+TEXT", "K": "LEAK"}
+
+ # All result strings in this file put the newest result on the left:
+ # "PFF" means the test just passed after two failures.
+
+ def _assert_is_flaky(self, results_string, should_be_flaky):
+ results_json = self._results_json_from_test_data({})
+ expectations = bot_test_expectations.BotTestExpectations(results_json, set('test'))
+ length_encoded = self._results_from_string(results_string)['results']
+ num_actual_results = len(expectations._flaky_types_in_results(length_encoded, only_ignore_very_flaky=True))
+ if should_be_flaky:
+ self.assertGreater(num_actual_results, 1)
+ else:
+ self.assertEqual(num_actual_results, 1)
+
+ def test_basic_flaky(self):
+ self._assert_is_flaky('PFF', False) # Used to fail, but now passes.
+ self._assert_is_flaky('FFP', False) # Just started failing.
+ self._assert_is_flaky('PFPF', True) # Seen both failures and passes.
+ # self._assert_is_flaky('PPPF', True) # Should be counted as flaky but isn't yet.
+ self._assert_is_flaky('FPPP', False) # Just started failing, not flaky.
+ self._assert_is_flaky('PFFP', True) # Failed twice in a row, still flaky.
+ # Failing 3+ times in a row is unlikely to be flaky, but rather a transient failure on trunk.
+ # self._assert_is_flaky('PFFFP', False)
+ # self._assert_is_flaky('PFFFFP', False)
+
+ def _results_json_from_test_data(self, test_data):
+ test_data[bot_test_expectations.ResultsJSON.FAILURE_MAP_KEY] = self.FAILURE_MAP
+ json_dict = {
+ 'builder': test_data,
+ }
+ return bot_test_expectations.ResultsJSON('builder', json_dict)
+
+ def _results_from_string(self, results_string):
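+ # Builds the run-length encoding used by results.json; e.g. 'PFF'
+ # (newest run on the left) becomes [[2, 'F'], [1, 'P']].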
+ results_list = []
+ last_char = None
+ for char in results_string:
+ if char != last_char:
+ results_list.insert(0, [1, char])
+ else:
+ results_list[0][0] += 1
+ return {'results': results_list}
+
+ def _assert_expectations(self, test_data, expectations_string, only_ignore_very_flaky):
+ results_json = self._results_json_from_test_data(test_data)
+ expectations = bot_test_expectations.BotTestExpectations(results_json, set('test'))
+ self.assertEqual(expectations.flakes_by_path(only_ignore_very_flaky), expectations_string)
+
+ def _assert_unexpected_results(self, test_data, expectations_string):
+ results_json = self._results_json_from_test_data(test_data)
+ expectations = bot_test_expectations.BotTestExpectations(results_json, set('test'))
+ self.assertEqual(expectations.unexpected_results_by_path(), expectations_string)
+
+ def test_basic(self):
+ test_data = {
+ 'tests': {
+ 'foo': {
+ 'veryflaky.html': self._results_from_string('FPFP'),
+ 'maybeflaky.html': self._results_from_string('PPFP'),
+ 'notflakypass.html': self._results_from_string('PPPP'),
+ 'notflakyfail.html': self._results_from_string('FFFF'),
+ }
+ }
+ }
+ self._assert_expectations(test_data, {
+ 'foo/veryflaky.html': sorted(["TEXT", "PASS"]),
+ }, only_ignore_very_flaky=True)
+
+ self._assert_expectations(test_data, {
+ 'foo/veryflaky.html': sorted(["TEXT", "PASS"]),
+ 'foo/maybeflaky.html': sorted(["TEXT", "PASS"]),
+ }, only_ignore_very_flaky=False)
+
+ def test_all_failure_types(self):
+ test_data = {
+ 'tests': {
+ 'foo': {
+ 'allfailures.html': self._results_from_string('FPFPCNCNTXTXIZIZOCOCYKYK'),
+ 'imageplustextflake.html': self._results_from_string('ZPZPPPPPPPPPPPPPPPPP'),
+ }
+ }
+ }
+ self._assert_expectations(test_data, {
+ 'foo/imageplustextflake.html': sorted(["IMAGE+TEXT", "PASS"]),
+ 'foo/allfailures.html': sorted(["TEXT", "PASS", "IMAGE+TEXT", "TIMEOUT", "CRASH", "IMAGE", "MISSING", "LEAK"]),
+ }, only_ignore_very_flaky=True)
+
+ def test_unexpected_results_no_unexpected(self):
+ test_data = {
+ 'tests': {
+ 'foo': {
+ 'pass1.html': {'results': [[4, 'P']]},
+ 'pass2.html': {'results': [[2, 'Z']], 'expected': 'PASS FAIL'},
+ 'fail.html': {'results': [[2, 'P'], [1, 'F']], 'expected': 'PASS FAIL'},
+ 'not_run.html': {'results': []},
+ 'crash.html': {'results': [[2, 'F'], [1, 'C']], 'expected': 'CRASH FAIL WONTFIX'},
+ }
+ }
+ }
+ self._assert_unexpected_results(test_data, {})
+
+ def test_unexpected_results_all_unexpected(self):
+ test_data = {
+ 'tests': {
+ 'foo': {
+ 'pass1.html': {'results': [[4, 'P']], 'expected': 'FAIL'},
+ 'pass2.html': {'results': [[2, 'P']], 'expected': 'IMAGE'},
+ 'fail.html': {'results': [[4, 'F']]},
+ 'f_p.html': {'results': [[1, 'F'], [2, 'P']]},
+ 'crash.html': {'results': [[2, 'F'], [1, 'C']], 'expected': 'WONTFIX'},
+ 'image.html': {'results': [[2, 'F'], [1, 'I']], 'expected': 'CRASH FAIL'},
+ 'i_f.html': {'results': [[1, 'F'], [5, 'I']], 'expected': 'PASS'},
+ 'all.html': self._results_from_string('FPFPCNCNTXTXIZIZOCOCYKYK'),
+ }
+ }
+ }
+ self.maxDiff = None
+ self._assert_unexpected_results(test_data, {
+ 'foo/pass1.html': sorted(["FAIL", "PASS"]),
+ 'foo/pass2.html': sorted(["IMAGE", "PASS"]),
+ 'foo/fail.html': sorted(["TEXT", "PASS"]),
+ 'foo/f_p.html': sorted(["TEXT", "PASS"]),
+ 'foo/crash.html': sorted(["WONTFIX", "CRASH", "TEXT"]),
+ 'foo/image.html': sorted(["CRASH", "FAIL", "IMAGE"]),
+ 'foo/i_f.html': sorted(["PASS", "IMAGE", "TEXT"]),
+ 'foo/all.html': sorted(["TEXT", "PASS", "IMAGE+TEXT", "TIMEOUT", "CRASH", "IMAGE", "MISSING", "LEAK"]),
+ })
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
new file mode 100644
index 0000000..c4faef5
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
@@ -0,0 +1,141 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import logging
+
+_log = logging.getLogger(__name__)
+
+_JSON_PREFIX = "ADD_RESULTS("
+_JSON_SUFFIX = ");"
+
+
+def has_json_wrapper(string):
+ return string.startswith(_JSON_PREFIX) and string.endswith(_JSON_SUFFIX)
+
+
+def strip_json_wrapper(json_content):
+ # FIXME: Kill this code once the server returns json instead of jsonp.
+ if has_json_wrapper(json_content):
+ return json_content[len(_JSON_PREFIX):len(json_content) - len(_JSON_SUFFIX)]
+ return json_content
+
+
+def load_json(filesystem, file_path):
+ content = filesystem.read_text_file(file_path)
+ content = strip_json_wrapper(content)
+ return json.loads(content)
+
+
+def write_json(filesystem, json_object, file_path, callback=None):
+ # Specify separators in order to get compact encoding.
+ json_string = json.dumps(json_object, separators=(',', ':'))
+ if callback:
+ json_string = callback + "(" + json_string + ");"
+ filesystem.write_text_file(file_path, json_string)
+
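+# A minimal usage sketch (hypothetical filesystem and path): writing with a
+# callback produces exactly the jsonp wrapper that strip_json_wrapper removes:
+#
+#   write_json(fs, {'version': 4}, '/tmp/results.json', callback='ADD_RESULTS')
+#   # file contents: ADD_RESULTS({"version":4});
+#   load_json(fs, '/tmp/results.json')  # -> {u'version': 4}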
+
+def convert_trie_to_flat_paths(trie, prefix=None):
+ """Converts the directory structure in the given trie to flat paths, prepending a prefix to each."""
+ result = {}
+ for name, data in trie.iteritems():
+ if prefix:
+ name = prefix + "/" + name
+
+ if len(data) and not "results" in data:
+ result.update(convert_trie_to_flat_paths(data, name))
+ else:
+ result[name] = data
+
+ return result
+
+
+def add_path_to_trie(path, value, trie):
+ """Inserts a single flat directory path and associated value into a directory trie structure."""
+ if not "/" in path:
+ trie[path] = value
+ return
+
+ directory, slash, rest = path.partition("/")
+ if directory not in trie:
+ trie[directory] = {}
+ add_path_to_trie(rest, value, trie[directory])
+
+
+def test_timings_trie(individual_test_timings):
+ """Breaks a test name into chunks by directory and puts the test time as a value in the lowest part, e.g.
+ foo/bar/baz.html: 1ms
+ foo/bar/baz1.html: 3ms
+
+ becomes
+ foo: {
+ bar: {
+ baz.html: 1,
+ baz1.html: 3
+ }
+ }
+ """
+ trie = {}
+ for test_result in individual_test_timings:
+ test = test_result.test_name
+
+ add_path_to_trie(test, int(1000 * test_result.test_run_time), trie)
+
+ return trie
+
+
+# FIXME: We already have a TestResult class in test_results.py
+class TestResult(object):
+ """A simple class that represents a single test result."""
+
+ # Test modifier constants.
+ (NONE, FAILS, FLAKY, DISABLED) = range(4)
+
+ def __init__(self, test, failed=False, elapsed_time=0):
+ self.test_name = test
+ self.failed = failed
+ self.test_run_time = elapsed_time
+
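+ # The FAILS_/FLAKY_/DISABLED_ prefixes follow the GTest convention of
+ # "TestCase.TestName" names; use the part after the '.' when present.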
+ test_name = test
+ try:
+ test_name = test.split('.')[1]
+ except IndexError:
+ _log.warn("Invalid test name: %s.", test)
+
+ if test_name.startswith('FAILS_'):
+ self.modifier = self.FAILS
+ elif test_name.startswith('FLAKY_'):
+ self.modifier = self.FLAKY
+ elif test_name.startswith('DISABLED_'):
+ self.modifier = self.DISABLED
+ else:
+ self.modifier = self.NONE
+
+ def fixable(self):
+ return self.failed or self.modifier == self.DISABLED
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
new file mode 100644
index 0000000..fdb0a1a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
@@ -0,0 +1,82 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import unittest
+
+from webkitpy.layout_tests.layout_package import json_results_generator
+
+
+class JSONGeneratorTest(unittest.TestCase):
+ def setUp(self):
+ self.builder_name = 'DUMMY_BUILDER_NAME'
+ self.build_name = 'DUMMY_BUILD_NAME'
+ self.build_number = 'DUMMY_BUILDER_NUMBER'
+
+ # For archived results.
+ self._json = None
+ self._num_runs = 0
+ self._tests_set = set([])
+ self._test_timings = {}
+ self._failed_count_map = {}
+
+ self._PASS_count = 0
+ self._DISABLED_count = 0
+ self._FLAKY_count = 0
+ self._FAILS_count = 0
+ self._fixable_count = 0
+
+ def test_strip_json_wrapper(self):
+ json = "['contents']"
+ self.assertEqual(json_results_generator.strip_json_wrapper(json_results_generator._JSON_PREFIX + json + json_results_generator._JSON_SUFFIX), json)
+ self.assertEqual(json_results_generator.strip_json_wrapper(json), json)
+
+ def _find_test_in_trie(self, path, trie):
+ nodes = path.split("/")
+ sub_trie = trie
+ for node in nodes:
+ self.assertIn(node, sub_trie)
+ sub_trie = sub_trie[node]
+ return sub_trie
+
+ def test_test_timings_trie(self):
+ individual_test_timings = []
+ individual_test_timings.append(json_results_generator.TestResult('foo/bar/baz.html', elapsed_time=1.2))
+ individual_test_timings.append(json_results_generator.TestResult('bar.html', elapsed_time=0.0001))
+ trie = json_results_generator.test_timings_trie(individual_test_timings)
+
+ expected_trie = {
+ 'bar.html': 0,
+ 'foo': {
+ 'bar': {
+ 'baz.html': 1200,
+ }
+ }
+ }
+
+ self.assertEqual(json.dumps(trie), json.dumps(expected_trie))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_tests_mover.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_tests_mover.py
new file mode 100755
index 0000000..d589925
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_tests_mover.py
@@ -0,0 +1,323 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Moves a directory of LayoutTests.
+
+Given a path to a directory of LayoutTests, moves that directory, including all recursive children,
+to the specified destination path. Updates all references in tests and resources to reflect the new
+location. Also moves any corresponding platform-specific expected results and updates the test
+expectations to reflect the move.
+
+If the destination directory does not exist, it and any missing parent directories are created. If
+the destination directory already exists, the child members of the origin directory are added to the
+destination directory. If any of the child members clash with existing members of the destination
+directory, the move fails.
+
+Note that when new entries are added to the test expectations, no attempt is made to group or merge
+them with existing entries. This should be done manually and with lint-test-expectations.
+"""
+
+import copy
+import logging
+import optparse
+import os
+import re
+import urlparse
+
+from webkitpy.common.checkout.scm.detection import SCMDetector
+from webkitpy.common.host import Host
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.layout_tests.port.base import Port
+from webkitpy.layout_tests.models.test_expectations import TestExpectations
+
+
+logging.basicConfig()
+_log = logging.getLogger(__name__)
+_log.setLevel(logging.INFO)
+
+PLATFORM_DIRECTORY = 'platform'
+
+class LayoutTestsMover(object):
+
+ def __init__(self, port=None):
+ self._port = port
+ if not self._port:
+ host = Host()
+ # Given that we use include_overrides=False and model_all_expectations=True when
+ # constructing the TestExpectations object, it doesn't matter which Port object we use.
+ self._port = host.port_factory.get()
+ self._port.host.initialize_scm()
+ self._filesystem = self._port.host.filesystem
+ self._scm = self._port.host.scm()
+ self._layout_tests_root = self._port.layout_tests_dir()
+
+ def _scm_path(self, *paths):
+ return self._filesystem.join('LayoutTests', *paths)
+
+ def _is_child_path(self, parent, possible_child):
+ normalized_parent = self._filesystem.normpath(parent)
+ normalized_child = self._filesystem.normpath(possible_child)
+ # We need to add a trailing separator to parent to avoid returning true for cases like
+ # parent='/foo/b', and possible_child='/foo/bar/baz'.
+ return normalized_parent == normalized_child or normalized_child.startswith(normalized_parent + self._filesystem.sep)
+
+ def _move_path(self, path, origin, destination):
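+ # e.g. (hypothetical paths) _move_path('a/b/c.html', origin='a/b',
+ # destination='a/x') returns 'a/x/c.html'; paths outside origin are
+ # returned unchanged.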
+ if not self._is_child_path(origin, path):
+ return path
+ return self._filesystem.normpath(self._filesystem.join(destination, self._filesystem.relpath(path, origin)))
+
+ def _validate_input(self):
+ if not self._filesystem.isdir(self._absolute_origin):
+ raise Exception('Source path %s is not a directory' % self._origin)
+ if not self._is_child_path(self._layout_tests_root, self._absolute_origin):
+ raise Exception('Source path %s is not in LayoutTests directory' % self._origin)
+ if self._filesystem.isfile(self._absolute_destination):
+ raise Exception('Destination path %s is a file' % self._destination)
+ if not self._is_child_path(self._layout_tests_root, self._absolute_destination):
+ raise Exception('Destination path %s is not in LayoutTests directory' % self._destination)
+
+ # If destination is an existing directory, we move the children of origin into destination.
+ # However, if any of the children of origin would clash with existing children of
+ # destination, we fail.
+ # FIXME: Consider adding support for recursively moving into an existing directory.
+ if self._filesystem.isdir(self._absolute_destination):
+ for file_path in self._filesystem.listdir(self._absolute_origin):
+ if self._filesystem.exists(self._filesystem.join(self._absolute_destination, file_path)):
+ raise Exception('Origin path %s clashes with existing destination path %s' %
+ (self._filesystem.join(self._origin, file_path), self._filesystem.join(self._destination, file_path)))
+
+ def _get_expectations_for_test(self, model, test_path):
+ """Given a TestExpectationsModel object, finds all expectations that match the specified
+ test, specified as a relative path. Handles the fact that expectations may be keyed by
+ directory.
+ """
+ expectations = set()
+ if model.has_test(test_path):
+ expectations.add(model.get_expectation_line(test_path))
+ test_path = self._filesystem.dirname(test_path)
+ while not test_path == '':
+ # The model requires a trailing slash for directories.
+ test_path_for_model = test_path + '/'
+ if model.has_test(test_path_for_model):
+ expectations.add(model.get_expectation_line(test_path_for_model))
+ test_path = self._filesystem.dirname(test_path)
+ return expectations
+
+ def _get_expectations(self, model, path):
+ """Given a TestExpectationsModel object, finds all expectations for all tests under the
+ specified relative path.
+ """
+ expectations = set()
+ for test in self._filesystem.files_under(self._filesystem.join(self._layout_tests_root, path), dirs_to_skip=['script-tests', 'resources'],
+ file_filter=Port.is_test_file):
+ expectations = expectations.union(self._get_expectations_for_test(model, self._filesystem.relpath(test, self._layout_tests_root)))
+ return expectations
+
+ @staticmethod
+ def _clone_expectation_line_for_path(expectation_line, path):
+ """Clones a TestExpectationLine object and updates the clone to apply to the specified
+ relative path.
+ """
+ clone = copy.copy(expectation_line)
+ clone.original_string = re.compile(expectation_line.name).sub(path, expectation_line.original_string)
+ clone.name = path
+ clone.path = path
+ # FIXME: Should we search existing expectations for matches, like in
+ # TestExpectationsParser._collect_matching_tests()?
+ clone.matching_tests = [path]
+ return clone
+
+ def _update_expectations(self):
+ """Updates all test expectations that are affected by the move.
+ """
+ _log.info('Updating expectations')
+ test_expectations = TestExpectations(self._port, include_overrides=False, model_all_expectations=True)
+
+ for expectation in self._get_expectations(test_expectations.model(), self._origin):
+ path = expectation.path
+ if self._is_child_path(self._origin, path):
+ # If the existing expectation is a child of the moved path, we simply replace it
+ # with an expectation for the updated path.
+ new_path = self._move_path(path, self._origin, self._destination)
+ _log.debug('Updating expectation for %s to %s' % (path, new_path))
+ test_expectations.remove_expectation_line(path)
+ test_expectations.add_expectation_line(LayoutTestsMover._clone_expectation_line_for_path(expectation, new_path))
+ else:
+ # If the existing expectation is not a child of the moved path, we have to leave it
+ # in place. But we also add a new expectation for the destination path.
+ new_path = self._destination
+ _log.warning('Copying expectation for %s to %s. You should check that these expectations are still correct.' %
+ (path, new_path))
+ test_expectations.add_expectation_line(LayoutTestsMover._clone_expectation_line_for_path(expectation, new_path))
+
+ expectations_file = self._port.path_to_generic_test_expectations_file()
+ self._filesystem.write_text_file(expectations_file,
+ TestExpectations.list_to_string(test_expectations._expectations, reconstitute_only_these=[]))
+ self._scm.add(self._filesystem.relpath(expectations_file, self._scm.checkout_root))
+
+ def _find_references(self, input_files):
+ """Attempts to find all references to other files in the supplied list of files. Returns a
+ dictionary that maps from an absolute file path to an array of reference strings.
+ """
+ reference_regex = re.compile(r'(?:(?:src=|href=|importScripts\(|url\()(?:"([^"]+)"|\'([^\']+)\')|url\(([^\)\'"]+)\))')
+ references = {}
+ for input_file in input_files:
+ matches = reference_regex.findall(self._filesystem.read_binary_file(input_file))
+ if matches:
+ references[input_file] = [filter(None, match)[0] for match in matches]
+ return references
+
+ def _get_updated_reference(self, root, reference):
+ """For a reference <reference> in a directory <root>, determines the updated reference.
+        Returns the updated reference, or None if no update is required.
+ """
+ # If the reference is an absolute path or url, it's safe.
+ if reference.startswith('/') or urlparse.urlparse(reference).scheme:
+ return None
+
+        # Both the root path and the target of the reference may be subject to the move, so there are
+ # four cases to consider. In the case where both or neither are subject to the move, the
+ # reference doesn't need updating.
+ #
+ # This is true even if the reference includes superfluous dot segments which mention a moved
+ # directory, as dot segments are collapsed during URL normalization. For example, if
+ # foo.html contains a reference 'bar/../script.js', this remains valid (though ugly) even if
+ # bar/ is moved to baz/, because the reference is always normalized to 'script.js'.
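+        #
+        # Worked example, mirroring the accompanying unit tests: when
+        # 'origin/path' moves to 'destination', the reference
+        # '../origin/path/remote_script.js' in 'unmoved/test.html' has a root
+        # outside the move but a target inside it, so it is rewritten to
+        # '../destination/remote_script.js'.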
+ absolute_reference = self._filesystem.normpath(self._filesystem.join(root, reference))
+ if self._is_child_path(self._absolute_origin, root) == self._is_child_path(self._absolute_origin, absolute_reference):
+            return None
+
+ new_root = self._move_path(root, self._absolute_origin, self._absolute_destination)
+ new_absolute_reference = self._move_path(absolute_reference, self._absolute_origin, self._absolute_destination)
+ return self._filesystem.relpath(new_absolute_reference, new_root)
+
+ def _get_all_updated_references(self, references):
+ """Determines the updated references due to the move. Returns a dictionary that maps from an
+ absolute file path to a dictionary that maps from a reference string to the corresponding
+ updated reference.
+ """
+ updates = {}
+ for file_path in references.keys():
+ root = self._filesystem.dirname(file_path)
+            # script-tests/TEMPLATE.html files contain references which are written as if the file
+ # were in the parent directory. This special-casing is ugly, but there are plans to
+ # remove script-tests.
+ if root.endswith('script-tests') and file_path.endswith('TEMPLATE.html'):
+ root = self._filesystem.dirname(root)
+ local_updates = {}
+ for reference in references[file_path]:
+ update = self._get_updated_reference(root, reference)
+ if update:
+ local_updates[reference] = update
+ if local_updates:
+ updates[file_path] = local_updates
+ return updates
+
+ def _update_file(self, path, updates):
+ contents = self._filesystem.read_binary_file(path)
+ # Note that this regex isn't quite as strict as that used to find the references, but this
+ # avoids the need for alternative match groups, which simplifies things.
+ for target in updates.keys():
+ regex = re.compile(r'((?:src=|href=|importScripts\(|url\()["\']?)%s(["\']?)' % target)
+ contents = regex.sub(r'\1%s\2' % updates[target], contents)
+ self._filesystem.write_binary_file(path, contents)
+ self._scm.add(path)
+
+ def _update_test_source_files(self):
+ def is_test_source_file(filesystem, dirname, basename):
+ pass_regex = re.compile(r'\.(css|js)$')
+ fail_regex = re.compile(r'-expected\.')
+ return (Port.is_test_file(filesystem, dirname, basename) or pass_regex.search(basename)) and not fail_regex.search(basename)
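+        # That is, regular layout tests plus supporting .css/.js files count
+        # as sources, while '-expected.*' baseline files are skipped.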
+
+ test_source_files = self._filesystem.files_under(self._layout_tests_root, file_filter=is_test_source_file)
+ _log.info('Considering %s test source files for references' % len(test_source_files))
+ references = self._find_references(test_source_files)
+ _log.info('Considering references in %s files' % len(references))
+ updates = self._get_all_updated_references(references)
+ _log.info('Updating references in %s files' % len(updates))
+ count = 0
+ for file_path in updates.keys():
+ self._update_file(file_path, updates[file_path])
+ count += 1
+ if count % 1000 == 0 or count == len(updates):
+ _log.debug('Updated references in %s files' % count)
+
+ def _move_directory(self, origin, destination):
+ """Moves the directory <origin> to <destination>. If <destination> is a directory, moves the
+ children of <origin> into <destination>. Uses relative paths.
+ """
+ absolute_origin = self._filesystem.join(self._layout_tests_root, origin)
+ if not self._filesystem.isdir(absolute_origin):
+ return
+ _log.info('Moving directory %s to %s' % (origin, destination))
+ # Note that FileSystem.move() may silently overwrite existing files, but we
+ # check for this in _validate_input().
+ absolute_destination = self._filesystem.join(self._layout_tests_root, destination)
+ self._filesystem.maybe_make_directory(absolute_destination)
+ for directory in self._filesystem.listdir(absolute_origin):
+ self._scm.move(self._scm_path(origin, directory), self._scm_path(destination, directory))
+ self._filesystem.rmtree(absolute_origin)
+
+ def _move_files(self):
+ """Moves the all files that correspond to the move, including platform-specific expected
+ results.
+ """
+ self._move_directory(self._origin, self._destination)
+ for directory in self._filesystem.listdir(self._filesystem.join(self._layout_tests_root, PLATFORM_DIRECTORY)):
+ self._move_directory(self._filesystem.join(PLATFORM_DIRECTORY, directory, self._origin),
+ self._filesystem.join(PLATFORM_DIRECTORY, directory, self._destination))
+
+ def _commit_changes(self):
+ if not self._scm.supports_local_commits():
+ return
+ title = 'Move LayoutTests directory %s to %s' % (self._origin, self._destination)
+ _log.info('Committing change \'%s\'' % title)
+ self._scm.commit_locally_with_message('%s\n\nThis commit was automatically generated by move-layout-tests.' % title,
+ commit_all_working_directory_changes=False)
+
+ def move(self, origin, destination):
+ self._origin = origin
+ self._destination = destination
+ self._absolute_origin = self._filesystem.join(self._layout_tests_root, self._origin)
+ self._absolute_destination = self._filesystem.join(self._layout_tests_root, self._destination)
+ self._validate_input()
+ self._update_expectations()
+ self._update_test_source_files()
+ self._move_files()
+ # FIXME: Handle virtual test suites.
+ self._commit_changes()
+
+def main(argv):
+ parser = optparse.OptionParser(description=__doc__)
+ parser.add_option('--origin',
+ help=('The directory of tests to move, as a relative path from the LayoutTests directory.'))
+ parser.add_option('--destination',
+ help=('The new path for the directory of tests, as a relative path from the LayoutTests directory.'))
+ options, _ = parser.parse_args()
+ LayoutTestsMover().move(options.origin, options.destination)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_tests_mover_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_tests_mover_unittest.py
new file mode 100755
index 0000000..7be0bba
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/layout_tests_mover_unittest.py
@@ -0,0 +1,178 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.checkout.scm.scm_mock import MockSCM
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.layout_tests.layout_tests_mover import LayoutTestsMover
+from webkitpy.layout_tests.port import base
+
+
+class MockPort(base.Port):
+
+ def __init__(self, **kwargs):
+ # This sets up a mock FileSystem and SCM using that FileSystem.
+ host = MockHost()
+ super(MockPort, self).__init__(host, host.port_factory.all_port_names()[0], **kwargs)
+
+ host.filesystem.maybe_make_directory(self._absolute_path('platform'))
+ host.filesystem.maybe_make_directory(self._absolute_path('existing_directory'))
+ host.filesystem.write_text_file(self._absolute_path('existing_file.txt'), '')
+ host.filesystem.write_text_file(self._absolute_path('VirtualTestSuites'), '[]')
+ host.filesystem.write_text_file(self._absolute_path('TestExpectations'), """
+crbug.com/42 [ Debug ] origin/path/test.html [ Pass Timeout Failure ]
+crbug.com/42 [ Win ] origin/path [ Slow ]
+crbug.com/42 [ Release ] origin [ Crash ]
+""")
+ host.filesystem.write_text_file(self._absolute_path('existing_directory_with_contents', 'test.html'), '')
+ host.filesystem.write_text_file(self._absolute_path('origin', 'path', 'test.html'), """
+<script src="local_script.js">
+<script src="../../unmoved/remote_script.js">
+<script src='../../unmoved/remote_script_single_quotes.js'>
+<script href="../../unmoved/remote_script.js">
+<script href='../../unmoved/remote_script_single_quotes.js'>
+<script href="">
+""")
+ host.filesystem.write_text_file(self._absolute_path('origin', 'path', 'test.css'), """
+url('../../unmoved/url_function.js')
+url("../../unmoved/url_function_double_quotes.js")
+url(../../unmoved/url_function_no_quotes.js)
+url('')
+url()
+""")
+ host.filesystem.write_text_file(self._absolute_path('origin', 'path', 'test.js'), """
+importScripts('../../unmoved/import_scripts_function.js')
+importScripts("../../unmoved/import_scripts_function_double_quotes.js")
+importScripts('')
+""")
+ host.filesystem.write_text_file(self._absolute_path('unmoved', 'test.html'), """
+<script src="local_script.js">
+<script src="../origin/path/remote_script.js">
+""")
+
+ def _absolute_path(self, *paths):
+ return self.host.scm().absolute_path('LayoutTests', *paths)
+
+ def layout_tests_dir(self):
+ return self._absolute_path()
+
+
+class LayoutTestsMoverTest(unittest.TestCase):
+
+ def setUp(self):
+ port = MockPort()
+ self._port = port
+ self._filesystem = self._port.host.filesystem
+ self._mover = LayoutTestsMover(port=self._port)
+
+ def test_non_existent_origin_raises(self):
+ self.assertRaises(Exception, self._mover.move, 'non_existent', 'destination')
+
+ def test_origin_outside_layout_tests_directory_raises(self):
+ self.assertRaises(Exception, self._mover.move, '../outside', 'destination')
+
+ def test_file_destination_raises(self):
+ self.assertRaises(Exception, self._mover.move, 'origin/path', 'existing_file.txt')
+
+ def test_destination_outside_layout_tests_directory_raises(self):
+ self.assertRaises(Exception, self._mover.move, 'origin/path', '../outside')
+
+ def test_basic_operation(self):
+ self._mover.move('origin/path', 'destination')
+ self.assertFalse(self._filesystem.exists(self._port._absolute_path('origin/path')))
+ self.assertTrue(self._filesystem.isfile(self._port._absolute_path('destination/test.html')))
+
+ def test_move_to_existing_directory(self):
+ self._mover.move('origin/path', 'existing_directory')
+ self.assertFalse(self._filesystem.exists(self._port._absolute_path('origin', 'path')))
+ self.assertTrue(self._filesystem.isfile(self._port._absolute_path('existing_directory', 'test.html')))
+
+ def test_collision_in_existing_directory_raises(self):
+ self.assertRaises(Exception, self._mover.move, 'origin/path', 'existing_directory_with_contents')
+
+ def test_move_to_layout_tests_root(self):
+ self._mover.move('origin/path', '')
+ self.assertFalse(self._filesystem.exists(self._port._absolute_path('origin', 'path')))
+ self.assertTrue(self._filesystem.isfile(self._port._absolute_path('test.html')))
+
+ def test_moved_reference_in_moved_file_not_updated(self):
+ self._mover.move('origin/path', 'destination')
+ self.assertTrue('src="local_script.js"' in self._filesystem.read_text_file(self._port._absolute_path('destination', 'test.html')))
+
+ def test_unmoved_reference_in_unmoved_file_not_updated(self):
+ self._mover.move('origin/path', 'destination')
+ self.assertTrue('src="local_script.js"' in self._filesystem.read_text_file(self._port._absolute_path('unmoved', 'test.html')))
+
+ def test_moved_reference_in_unmoved_file_is_updated(self):
+ self._mover.move('origin/path', 'destination')
+ self.assertTrue('src="../destination/remote_script.js"' in self._filesystem.read_text_file(self._port._absolute_path('unmoved', 'test.html')))
+
+ def test_unmoved_reference_in_moved_file_is_updated(self):
+ self._mover.move('origin/path', 'destination')
+ self.assertTrue('src="../unmoved/remote_script.js"' in self._filesystem.read_text_file(self._port._absolute_path('destination', 'test.html')))
+
+ def test_references_in_html_file_are_updated(self):
+ self._mover.move('origin/path', 'destination')
+ self.assertTrue('src="../unmoved/remote_script.js"' in self._filesystem.read_text_file(self._port._absolute_path('destination', 'test.html')))
+ self.assertTrue('src=\'../unmoved/remote_script_single_quotes.js\'' in self._filesystem.read_text_file(self._port._absolute_path('destination', 'test.html')))
+ self.assertTrue('href="../unmoved/remote_script.js"' in self._filesystem.read_text_file(self._port._absolute_path('destination', 'test.html')))
+ self.assertTrue('href=\'../unmoved/remote_script_single_quotes.js\'' in self._filesystem.read_text_file(self._port._absolute_path('destination', 'test.html')))
+ self.assertTrue('href=""' in self._filesystem.read_text_file(self._port._absolute_path('destination', 'test.html')))
+
+ def test_references_in_css_file_are_updated(self):
+ self._mover.move('origin/path', 'destination')
+ self.assertTrue('url(\'../unmoved/url_function.js\')' in self._filesystem.read_text_file(self._port._absolute_path('destination', 'test.css')))
+ self.assertTrue('url("../unmoved/url_function_double_quotes.js")' in self._filesystem.read_text_file(self._port._absolute_path('destination', 'test.css')))
+ self.assertTrue('url(../unmoved/url_function_no_quotes.js)' in self._filesystem.read_text_file(self._port._absolute_path('destination', 'test.css')))
+ self.assertTrue('url(\'\')' in self._filesystem.read_text_file(self._port._absolute_path('destination', 'test.css')))
+ self.assertTrue('url()' in self._filesystem.read_text_file(self._port._absolute_path('destination', 'test.css')))
+
+ def test_references_in_javascript_file_are_updated(self):
+ self._mover.move('origin/path', 'destination')
+ self.assertTrue('importScripts(\'../unmoved/import_scripts_function.js\')' in self._filesystem.read_text_file(self._port._absolute_path('destination', 'test.js')))
+ self.assertTrue('importScripts("../unmoved/import_scripts_function_double_quotes.js")' in self._filesystem.read_text_file(self._port._absolute_path('destination', 'test.js')))
+ self.assertTrue('importScripts(\'\')' in self._filesystem.read_text_file(self._port._absolute_path('destination', 'test.js')))
+
+ def test_expectation_is_updated(self):
+ self._mover.move('origin/path', 'destination')
+ self.assertFalse('origin/path/test.html' in self._filesystem.read_text_file(self._port._absolute_path('TestExpectations')))
+ self.assertTrue('crbug.com/42 [ Debug ] destination/test.html [ Pass Timeout Failure ]'
+ in self._filesystem.read_text_file(self._port._absolute_path('TestExpectations')))
+
+ def test_directory_expectation_is_updated(self):
+ self._mover.move('origin/path', 'destination')
+ self.assertFalse('origin/path' in self._filesystem.read_text_file(self._port._absolute_path('TestExpectations')))
+ self.assertTrue('crbug.com/42 [ Win ] destination [ Slow ]' in self._filesystem.read_text_file(self._port._absolute_path('TestExpectations')))
+
+ def test_expectation_is_added_when_subdirectory_moved(self):
+ self._mover.move('origin/path', 'destination')
+ self.assertTrue('crbug.com/42 [ Release ] origin [ Crash ]' in self._filesystem.read_text_file(self._port._absolute_path('TestExpectations')))
+ self.assertTrue('crbug.com/42 [ Release ] destination [ Crash ]' in self._filesystem.read_text_file(self._port._absolute_path('TestExpectations')))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations.py
new file mode 100644
index 0000000..70c04f6
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations.py
@@ -0,0 +1,146 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import optparse
+import signal
+import traceback
+
+from webkitpy.common.host import Host
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.port import platform_options
+
+
+# This mirrors what the shell normally does.
+INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128
+
+# This is a randomly chosen exit code that can be tested against to
+# indicate that an unexpected exception occurred.
+EXCEPTIONAL_EXIT_STATUS = 254
+
+_log = logging.getLogger(__name__)
+
+
+def lint(host, options):
+ # FIXME: Remove this when we remove the --chromium flag (crbug.com/245504).
+ if options.platform == 'chromium':
+ options.platform = None
+
+ ports_to_lint = [host.port_factory.get(name) for name in host.port_factory.all_port_names(options.platform)]
+ files_linted = set()
+ lint_failed = False
+
+ for port_to_lint in ports_to_lint:
+ expectations_dict = port_to_lint.expectations_dict()
+
+ for expectations_file in expectations_dict.keys():
+ if expectations_file in files_linted:
+ continue
+
+ try:
+ test_expectations.TestExpectations(port_to_lint,
+ expectations_dict={expectations_file: expectations_dict[expectations_file]},
+ is_lint_mode=True)
+ except test_expectations.ParseError as e:
+ lint_failed = True
+ _log.error('')
+ for warning in e.warnings:
+ _log.error(warning)
+ _log.error('')
+ files_linted.add(expectations_file)
+ return lint_failed
+
+
+def check_virtual_test_suites(host, options):
+ port = host.port_factory.get(options=options)
+ fs = host.filesystem
+ layout_tests_dir = port.layout_tests_dir()
+ virtual_suites = port.virtual_test_suites()
+
+ check_failed = False
+ for suite in virtual_suites:
+ comps = [layout_tests_dir] + suite.name.split('/') + ['README.txt']
+ path_to_readme = fs.join(*comps)
+ if not fs.exists(path_to_readme):
+ _log.error('LayoutTests/%s/README.txt is missing (each virtual suite must have one).' % suite.name)
+ check_failed = True
+ if check_failed:
+ _log.error('')
+ return check_failed
+
+
+def set_up_logging(logging_stream):
+ logger = logging.getLogger()
+ logger.setLevel(logging.INFO)
+ handler = logging.StreamHandler(logging_stream)
+ logger.addHandler(handler)
+ return (logger, handler)
+
+
+def tear_down_logging(logger, handler):
+ logger.removeHandler(handler)
+
+
+def run_checks(host, options, logging_stream):
+ logger, handler = set_up_logging(logging_stream)
+ try:
+ lint_failed = lint(host, options)
+ check_failed = check_virtual_test_suites(host, options)
+ if lint_failed or check_failed:
+ _log.error('Lint failed.')
+ return 1
+ else:
+ _log.info('Lint succeeded.')
+ return 0
+ finally:
+ logger.removeHandler(handler)
+
+
+def main(argv, _, stderr):
+ parser = optparse.OptionParser(option_list=platform_options(use_globs=True))
+ options, _ = parser.parse_args(argv)
+
+ if options.platform and 'test' in options.platform:
+ # It's a bit lame to import mocks into real code, but this allows the user
+ # to run tests against the test platform interactively, which is useful for
+ # debugging test failures.
+ from webkitpy.common.host_mock import MockHost
+ host = MockHost()
+ else:
+ host = Host()
+
+ try:
+ exit_status = run_checks(host, options, stderr)
+ except KeyboardInterrupt:
+ exit_status = INTERRUPTED_EXIT_STATUS
+ except Exception as e:
+ print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
+ traceback.print_exc(file=stderr)
+ exit_status = EXCEPTIONAL_EXIT_STATUS
+
+ return exit_status
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations_unittest.py
new file mode 100644
index 0000000..28ed16b
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations_unittest.py
@@ -0,0 +1,200 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import optparse
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests import lint_test_expectations
+
+
+class FakePort(object):
+ def __init__(self, host, name, path):
+ self.host = host
+ self.name = name
+ self.path = path
+
+ def test_configuration(self):
+ return None
+
+ def expectations_dict(self):
+ self.host.ports_parsed.append(self.name)
+ return {self.path: ''}
+
+ def bot_expectations(self):
+ return {}
+
+ def skipped_layout_tests(self, _):
+ return set([])
+
+ def all_test_configurations(self):
+ return []
+
+ def configuration_specifier_macros(self):
+ return []
+
+ def get_option(self, _, val):
+ return val
+
+ def path_to_generic_test_expectations_file(self):
+ return ''
+
+class FakeFactory(object):
+ def __init__(self, host, ports):
+ self.host = host
+ self.ports = {}
+ for port in ports:
+ self.ports[port.name] = port
+
+ def get(self, port_name='a', *args, **kwargs): # pylint: disable=W0613,E0202
+ return self.ports[port_name]
+
+ def all_port_names(self, platform=None): # pylint: disable=W0613,E0202
+ return sorted(self.ports.keys())
+
+
+class LintTest(unittest.TestCase):
+ def test_all_configurations(self):
+ host = MockHost()
+ host.ports_parsed = []
+ host.port_factory = FakeFactory(host, (FakePort(host, 'a', 'path-to-a'),
+ FakePort(host, 'b', 'path-to-b'),
+ FakePort(host, 'b-win', 'path-to-b')))
+
+ logging_stream = StringIO.StringIO()
+ options = optparse.Values({'platform': None})
+ logger, handler = lint_test_expectations.set_up_logging(logging_stream)
+ try:
+ res = lint_test_expectations.lint(host, options)
+ finally:
+ lint_test_expectations.tear_down_logging(logger, handler)
+ self.assertEqual(res, 0)
+ self.assertEqual(host.ports_parsed, ['a', 'b', 'b-win'])
+
+ def test_lint_test_files(self):
+ logging_stream = StringIO.StringIO()
+ options = optparse.Values({'platform': 'test-mac-leopard'})
+ host = MockHost()
+
+ # pylint appears to complain incorrectly about the method overrides pylint: disable=E0202,C0322
+ # FIXME: incorrect complaints about spacing pylint: disable=C0322
+ host.port_factory.all_port_names = lambda platform=None: [platform]
+
+ logger, handler = lint_test_expectations.set_up_logging(logging_stream)
+ try:
+ res = lint_test_expectations.lint(host, options)
+ self.assertEqual(res, 0)
+ finally:
+ lint_test_expectations.tear_down_logging(logger, handler)
+
+
+ def test_lint_test_files__errors(self):
+ options = optparse.Values({'platform': 'test', 'debug_rwt_logging': False})
+ host = MockHost()
+
+ # FIXME: incorrect complaints about spacing pylint: disable=C0322
+ port = host.port_factory.get(options.platform, options=options)
+ port.expectations_dict = lambda: {'foo': '-- syntax error1', 'bar': '-- syntax error2'}
+
+ host.port_factory.get = lambda platform, options=None: port
+ host.port_factory.all_port_names = lambda platform=None: [port.name()]
+
+ logging_stream = StringIO.StringIO()
+ logger, handler = lint_test_expectations.set_up_logging(logging_stream)
+ try:
+ res = lint_test_expectations.lint(host, options)
+ finally:
+ lint_test_expectations.tear_down_logging(logger, handler)
+
+ self.assertTrue(res)
+ self.assertIn('foo:1', logging_stream.getvalue())
+ self.assertIn('bar:1', logging_stream.getvalue())
+
+
+class CheckVirtualSuiteTest(unittest.TestCase):
+ def test_check_virtual_test_suites(self):
+ host = MockHost()
+ options = optparse.Values({'platform': 'test', 'debug_rwt_logging': False})
+ orig_get = host.port_factory.get
+ host.port_factory.get = lambda options: orig_get('test', options=options)
+
+ logging_stream = StringIO.StringIO()
+ logger, handler = lint_test_expectations.set_up_logging(logging_stream)
+ try:
+ res = lint_test_expectations.check_virtual_test_suites(host, options)
+ self.assertTrue(res)
+
+ host.filesystem.exists = lambda path: True
+ res = lint_test_expectations.check_virtual_test_suites(host, options)
+ self.assertFalse(res)
+ finally:
+ lint_test_expectations.tear_down_logging(logger, handler)
+
+
+class MainTest(unittest.TestCase):
+ # unused args pylint: disable=W0613
+
+ def setUp(self):
+ self.orig_lint_fn = lint_test_expectations.lint
+ self.orig_check_fn = lint_test_expectations.check_virtual_test_suites
+ lint_test_expectations.check_virtual_test_suites = lambda host, options: False
+
+ self.stdout = StringIO.StringIO()
+ self.stderr = StringIO.StringIO()
+
+ def tearDown(self):
+ lint_test_expectations.lint = self.orig_lint_fn
+ lint_test_expectations.check_virtual_test_suites = self.orig_check_fn
+
+ def test_success(self):
+ lint_test_expectations.lint = lambda host, options: False
+ res = lint_test_expectations.main(['--platform', 'test'], self.stdout, self.stderr)
+ self.assertTrue('Lint succeeded' in self.stderr.getvalue())
+ self.assertEqual(res, 0)
+
+ def test_failure(self):
+ lint_test_expectations.lint = lambda host, options: True
+ res = lint_test_expectations.main(['--platform', 'test'], self.stdout, self.stderr)
+ self.assertTrue('Lint failed' in self.stderr.getvalue())
+ self.assertEqual(res, 1)
+
+ def test_interrupt(self):
+ def interrupting_lint(host, options):
+ raise KeyboardInterrupt
+
+ lint_test_expectations.lint = interrupting_lint
+ res = lint_test_expectations.main([], self.stdout, self.stderr)
+ self.assertEqual(res, lint_test_expectations.INTERRUPTED_EXIT_STATUS)
+
+ def test_exception(self):
+ def exception_raising_lint(host, options):
+ assert False
+ lint_test_expectations.lint = exception_raising_lint
+ res = lint_test_expectations.main([], self.stdout, self.stderr)
+ self.assertEqual(res, lint_test_expectations.EXCEPTIONAL_EXIT_STATUS)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_configuration.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_configuration.py
new file mode 100644
index 0000000..2527c8f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_configuration.py
@@ -0,0 +1,308 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import copy
+
+
+class TestConfiguration(object):
+ def __init__(self, version, architecture, build_type):
+ self.version = version
+ self.architecture = architecture
+ self.build_type = build_type
+
+ @classmethod
+ def category_order(cls):
+ """The most common human-readable order in which the configuration properties are listed."""
+ return ['version', 'architecture', 'build_type']
+
+ def items(self):
+ return self.__dict__.items()
+
+ def keys(self):
+ return self.__dict__.keys()
+
+ def __str__(self):
+ return ("<%(version)s, %(architecture)s, %(build_type)s>" %
+ self.__dict__)
+
+ def __repr__(self):
+ return "TestConfig(version='%(version)s', architecture='%(architecture)s', build_type='%(build_type)s')" % self.__dict__
+
+ def __hash__(self):
+ return hash(self.version + self.architecture + self.build_type)
+
+ def __eq__(self, other):
+ return self.__hash__() == other.__hash__()
+
+ def values(self):
+ """Returns the configuration values of this instance as a tuple."""
+ return self.__dict__.values()
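+
+# A quick sketch of dictionary-key behavior (the values are illustrative):
+# because __eq__ and __hash__ derive from the three properties, two instances
+# built from the same strings, e.g. TestConfiguration('xp', 'x86', 'release'),
+# compare equal, so duplicates collapse in sets and dicts.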
+
+
+class SpecifierSorter(object):
+ def __init__(self, all_test_configurations=None, macros=None):
+ self._specifier_to_category = {}
+
+ if not all_test_configurations:
+ return
+ for test_configuration in all_test_configurations:
+ for category, specifier in test_configuration.items():
+ self.add_specifier(category, specifier)
+
+ self.add_macros(macros)
+
+ def add_specifier(self, category, specifier):
+ self._specifier_to_category[specifier] = category
+
+ def add_macros(self, macros):
+ if not macros:
+ return
+ # Assume well-formed macros.
+ for macro, specifier_list in macros.items():
+ self.add_specifier(self.category_for_specifier(specifier_list[0]), macro)
+
+ @classmethod
+ def category_priority(cls, category):
+ return TestConfiguration.category_order().index(category)
+
+ def specifier_priority(self, specifier):
+ return self.category_priority(self._specifier_to_category[specifier])
+
+ def category_for_specifier(self, specifier):
+ return self._specifier_to_category.get(specifier)
+
+ def sort_specifiers(self, specifiers):
+ category_slots = map(lambda x: [], TestConfiguration.category_order())
+ for specifier in specifiers:
+ category_slots[self.specifier_priority(specifier)].append(specifier)
+
+ def sort_and_return(result, specifier_list):
+ specifier_list.sort()
+ return result + specifier_list
+
+ return reduce(sort_and_return, category_slots, [])
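+
+# Sketch of the resulting order (specifier names mirror the unit tests):
+# sorting set(['x86', 'debug', 'win7']) yields ['win7', 'x86', 'debug'], i.e.
+# version specifiers first, then architecture, then build type, with each
+# category sorted alphabetically within itself.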
+
+
+class TestConfigurationConverter(object):
+ def __init__(self, all_test_configurations, configuration_macros=None):
+ self._all_test_configurations = all_test_configurations
+ self._configuration_macros = configuration_macros or {}
+ self._specifier_to_configuration_set = {}
+ self._specifier_sorter = SpecifierSorter()
+ self._collapsing_sets_by_size = {}
+ self._junk_specifier_combinations = {}
+ self._collapsing_sets_by_category = {}
+ matching_sets_by_category = {}
+ for configuration in all_test_configurations:
+ for category, specifier in configuration.items():
+ self._specifier_to_configuration_set.setdefault(specifier, set()).add(configuration)
+ self._specifier_sorter.add_specifier(category, specifier)
+ self._collapsing_sets_by_category.setdefault(category, set()).add(specifier)
+ # FIXME: This seems extra-awful.
+ for cat2, spec2 in configuration.items():
+ if category == cat2:
+ continue
+ matching_sets_by_category.setdefault(specifier, {}).setdefault(cat2, set()).add(spec2)
+ for collapsing_set in self._collapsing_sets_by_category.values():
+ self._collapsing_sets_by_size.setdefault(len(collapsing_set), set()).add(frozenset(collapsing_set))
+
+ for specifier, sets_by_category in matching_sets_by_category.items():
+ for category, set_by_category in sets_by_category.items():
+ if len(set_by_category) == 1 and self._specifier_sorter.category_priority(category) > self._specifier_sorter.specifier_priority(specifier):
+ self._junk_specifier_combinations[specifier] = set_by_category
+
+ self._specifier_sorter.add_macros(configuration_macros)
+
+ def specifier_sorter(self):
+ return self._specifier_sorter
+
+ def _expand_macros(self, specifier):
+ expanded_specifiers = self._configuration_macros.get(specifier)
+ return expanded_specifiers or [specifier]
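+
+    # For instance, with configuration_macros such as
+    # {'win': ['xp', 'vista', 'win7']} (an assumption mirroring the unit
+    # tests), _expand_macros('win') returns that list, while a plain
+    # specifier like 'xp' comes back unchanged as ['xp'].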
+
+ def to_config_set(self, specifier_set, error_list=None):
+ """Convert a list of specifiers into a set of TestConfiguration instances."""
+ if len(specifier_set) == 0:
+ return copy.copy(self._all_test_configurations)
+
+ matching_sets = {}
+
+ for specifier in specifier_set:
+ for expanded_specifier in self._expand_macros(specifier):
+ configurations = self._specifier_to_configuration_set.get(expanded_specifier)
+ if not configurations:
+ if error_list is not None:
+ error_list.append("Unrecognized specifier '" + expanded_specifier + "'")
+ return set()
+ category = self._specifier_sorter.category_for_specifier(expanded_specifier)
+ matching_sets.setdefault(category, set()).update(configurations)
+
+ return reduce(set.intersection, matching_sets.values())
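+
+    # Example drawn from the unit tests: with the mock configurations used
+    # there, to_config_set(set(['xp', 'release'])) intersects the 'xp' and
+    # 'release' category sets and yields the single configuration
+    # TestConfiguration('xp', 'x86', 'release'); an unknown specifier such
+    # as 'foo' yields the empty set and, when an error_list is supplied,
+    # appends "Unrecognized specifier 'foo'" to it.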
+
+ @classmethod
+ def collapse_macros(cls, macros_dict, specifiers_list):
+ for macro_specifier, macro in macros_dict.items():
+ if len(macro) == 1:
+ continue
+
+ for combination in cls.combinations(specifiers_list, len(macro)):
+ if cls.symmetric_difference(combination) == set(macro):
+ for item in combination:
+ specifiers_list.remove(item)
+ new_specifier_set = cls.intersect_combination(combination)
+ new_specifier_set.add(macro_specifier)
+ specifiers_list.append(frozenset(new_specifier_set))
+
+ def collapse_individual_specifier_set(macro_specifier, macro):
+ specifiers_to_remove = []
+ specifiers_to_add = []
+ for specifier_set in specifiers_list:
+ macro_set = set(macro)
+ if macro_set.intersection(specifier_set) == macro_set:
+ specifiers_to_remove.append(specifier_set)
+ specifiers_to_add.append(frozenset((set(specifier_set) - macro_set) | set([macro_specifier])))
+ for specifier in specifiers_to_remove:
+ specifiers_list.remove(specifier)
+ for specifier in specifiers_to_add:
+ specifiers_list.append(specifier)
+
+ for macro_specifier, macro in macros_dict.items():
+ collapse_individual_specifier_set(macro_specifier, macro)
+
+    # FIXME: itertools.combinations is buggy in Python 2.6.1 (the version that ships on SL).
+ # It seems to be okay in 2.6.5 or later; until then, this is the implementation given
+ # in http://docs.python.org/library/itertools.html (from 2.7).
+ @staticmethod
+ def combinations(iterable, r):
+ # combinations('ABCD', 2) --> AB AC AD BC BD CD
+ # combinations(range(4), 3) --> 012 013 023 123
+ pool = tuple(iterable)
+ n = len(pool)
+ if r > n:
+ return
+ indices = range(r)
+ yield tuple(pool[i] for i in indices)
+ while True:
+ for i in reversed(range(r)):
+ if indices[i] != i + n - r:
+ break
+ else:
+ return
+ indices[i] += 1 # pylint: disable=W0631
+ for j in range(i + 1, r): # pylint: disable=W0631
+ indices[j] = indices[j - 1] + 1
+ yield tuple(pool[i] for i in indices)
+
+ @classmethod
+ def intersect_combination(cls, combination):
+ return reduce(set.intersection, [set(specifiers) for specifiers in combination])
+
+ @classmethod
+ def symmetric_difference(cls, iterable):
+ union = set()
+ intersection = iterable[0]
+ for item in iterable:
+ union = union | item
+ intersection = intersection.intersection(item)
+ return union - intersection
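+
+    # Example from the unit tests:
+    # symmetric_difference([set(['a', 'b']), set(['b', 'c'])]) is
+    # set(['a', 'c']): the union minus the elements common to every set.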
+
+ def to_specifiers_list(self, test_configuration_set):
+ """Convert a set of TestConfiguration instances into one or more list of specifiers."""
+ # Easy out: if the set is all configurations, the specifier is empty.
+ if len(test_configuration_set) == len(self._all_test_configurations):
+ return [[]]
+
+ # 1) Build a list of specifier sets, discarding specifiers that don't add value.
+ specifiers_list = []
+ for config in test_configuration_set:
+ values = set(config.values())
+ for specifier, junk_specifier_set in self._junk_specifier_combinations.items():
+ if specifier in values:
+ values -= junk_specifier_set
+ specifiers_list.append(frozenset(values))
+
+ def try_collapsing(size, collapsing_sets):
+ if len(specifiers_list) < size:
+ return False
+ for combination in self.combinations(specifiers_list, size):
+ if self.symmetric_difference(combination) in collapsing_sets:
+ for item in combination:
+ specifiers_list.remove(item)
+ specifiers_list.append(frozenset(self.intersect_combination(combination)))
+ return True
+ return False
+
+ # 2) Collapse specifier sets with common specifiers:
+ # (xp, release), (xp, debug) --> (xp, x86)
+ for size, collapsing_sets in self._collapsing_sets_by_size.items():
+ while try_collapsing(size, collapsing_sets):
+ pass
+
+ def try_abbreviating(collapsing_sets):
+ if len(specifiers_list) < 2:
+ return False
+ for combination in self.combinations(specifiers_list, 2):
+ for collapsing_set in collapsing_sets:
+ diff = self.symmetric_difference(combination)
+ if diff <= collapsing_set:
+ common = self.intersect_combination(combination)
+ for item in combination:
+ specifiers_list.remove(item)
+ specifiers_list.append(frozenset(common | diff))
+ return True
+ return False
+
+ # 3) Abbreviate specifier sets by combining specifiers across categories.
+ # (xp, release), (win7, release) --> (xp, win7, release)
+ while try_abbreviating(self._collapsing_sets_by_size.values()):
+ pass
+
+        # 4) Substitute specifier subsets that match macros within each set:
+ # (xp, win7, release) -> (win, release)
+ self.collapse_macros(self._configuration_macros, specifiers_list)
+
+ macro_keys = set(self._configuration_macros.keys())
+
+        # 5) Collapsing macros may have created combinations that can now be abbreviated.
+ # (xp, release), (linux, x86, release), (linux, x86_64, release) --> (xp, release), (linux, release) --> (xp, linux, release)
+ while try_abbreviating([self._collapsing_sets_by_category['version'] | macro_keys]):
+ pass
+
+        # 6) Strip the macro specifiers from sets that contain every macro, since together they match all platforms.
+ # (android, win, mac, linux, release) --> (release)
+ specifiers_to_remove = []
+ for specifier_set in specifiers_list:
+ if macro_keys <= specifier_set:
+ specifiers_to_remove.append(specifier_set)
+
+ for specifier_set in specifiers_to_remove:
+ specifiers_list.remove(specifier_set)
+ specifiers_list.append(frozenset(specifier_set - macro_keys))
+
+ return specifiers_list
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py
new file mode 100644
index 0000000..d56d7b5
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py
@@ -0,0 +1,369 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.layout_tests.models.test_configuration import *
+
+
+def make_mock_all_test_configurations_set():
+ all_test_configurations = set()
+ for version, architecture in (('snowleopard', 'x86'), ('xp', 'x86'), ('win7', 'x86'), ('vista', 'x86'), ('lucid', 'x86'), ('lucid', 'x86_64')):
+ for build_type in ('debug', 'release'):
+ all_test_configurations.add(TestConfiguration(version, architecture, build_type))
+ return all_test_configurations
+
+MOCK_MACROS = {
+ 'mac': ['snowleopard'],
+ 'win': ['xp', 'vista', 'win7'],
+ 'linux': ['lucid'],
+}
+
+
+class TestConfigurationTest(unittest.TestCase):
+ def test_items(self):
+ config = TestConfiguration('xp', 'x86', 'release')
+ result_config_dict = {}
+ for category, specifier in config.items():
+ result_config_dict[category] = specifier
+ self.assertEqual({'version': 'xp', 'architecture': 'x86', 'build_type': 'release'}, result_config_dict)
+
+ def test_keys(self):
+ config = TestConfiguration('xp', 'x86', 'release')
+ result_config_keys = []
+ for category in config.keys():
+ result_config_keys.append(category)
+ self.assertEqual(set(['version', 'architecture', 'build_type']), set(result_config_keys))
+
+ def test_str(self):
+ config = TestConfiguration('xp', 'x86', 'release')
+ self.assertEqual('<xp, x86, release>', str(config))
+
+ def test_repr(self):
+ config = TestConfiguration('xp', 'x86', 'release')
+ self.assertEqual("TestConfig(version='xp', architecture='x86', build_type='release')", repr(config))
+
+ def test_hash(self):
+ config_dict = {}
+ config_dict[TestConfiguration('xp', 'x86', 'release')] = True
+ self.assertIn(TestConfiguration('xp', 'x86', 'release'), config_dict)
+ self.assertTrue(config_dict[TestConfiguration('xp', 'x86', 'release')])
+
+ def query_unknown_key():
+ return config_dict[TestConfiguration('xp', 'x86', 'debug')]
+
+ self.assertRaises(KeyError, query_unknown_key)
+ self.assertIn(TestConfiguration('xp', 'x86', 'release'), config_dict)
+ self.assertNotIn(TestConfiguration('xp', 'x86', 'debug'), config_dict)
+ configs_list = [TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug'), TestConfiguration('xp', 'x86', 'debug')]
+ self.assertEqual(len(configs_list), 3)
+ self.assertEqual(len(set(configs_list)), 2)
+
+ def test_eq(self):
+ self.assertEqual(TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'release'))
+ self.assertNotEquals(TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug'))
+
+ def test_values(self):
+ config = TestConfiguration('xp', 'x86', 'release')
+ result_config_values = []
+ for value in config.values():
+ result_config_values.append(value)
+ self.assertEqual(set(['xp', 'x86', 'release']), set(result_config_values))
+
+
+class SpecifierSorterTest(unittest.TestCase):
+ def __init__(self, testFunc):
+ self._all_test_configurations = make_mock_all_test_configurations_set()
+ unittest.TestCase.__init__(self, testFunc)
+
+ def test_init(self):
+ sorter = SpecifierSorter()
+ self.assertIsNone(sorter.category_for_specifier('control'))
+ sorter = SpecifierSorter(self._all_test_configurations)
+ self.assertEqual(sorter.category_for_specifier('xp'), 'version')
+ sorter = SpecifierSorter(self._all_test_configurations, MOCK_MACROS)
+ self.assertEqual(sorter.category_for_specifier('mac'), 'version')
+
+ def test_add_specifier(self):
+ sorter = SpecifierSorter()
+ self.assertIsNone(sorter.category_for_specifier('control'))
+ sorter.add_specifier('version', 'control')
+ self.assertEqual(sorter.category_for_specifier('control'), 'version')
+ sorter.add_specifier('version', 'one')
+ self.assertEqual(sorter.category_for_specifier('one'), 'version')
+ sorter.add_specifier('architecture', 'renaissance')
+ self.assertEqual(sorter.category_for_specifier('one'), 'version')
+ self.assertEqual(sorter.category_for_specifier('renaissance'), 'architecture')
+
+ def test_add_macros(self):
+ sorter = SpecifierSorter(self._all_test_configurations)
+ sorter.add_macros(MOCK_MACROS)
+ self.assertEqual(sorter.category_for_specifier('mac'), 'version')
+ self.assertEqual(sorter.category_for_specifier('win'), 'version')
+ self.assertEqual(sorter.category_for_specifier('x86'), 'architecture')
+
+ def test_category_priority(self):
+ sorter = SpecifierSorter(self._all_test_configurations)
+ self.assertEqual(sorter.category_priority('version'), 0)
+ self.assertEqual(sorter.category_priority('build_type'), 2)
+
+ def test_specifier_priority(self):
+ sorter = SpecifierSorter(self._all_test_configurations)
+ self.assertEqual(sorter.specifier_priority('x86'), 1)
+ self.assertEqual(sorter.specifier_priority('snowleopard'), 0)
+
+ def test_sort_specifiers(self):
+ sorter = SpecifierSorter(self._all_test_configurations, MOCK_MACROS)
+ self.assertEqual(sorter.sort_specifiers(set()), [])
+ self.assertEqual(sorter.sort_specifiers(set(['x86'])), ['x86'])
+ self.assertEqual(sorter.sort_specifiers(set(['x86', 'win7'])), ['win7', 'x86'])
+ self.assertEqual(sorter.sort_specifiers(set(['x86', 'debug', 'win7'])), ['win7', 'x86', 'debug'])
+ self.assertEqual(sorter.sort_specifiers(set(['snowleopard', 'x86', 'debug', 'win7'])), ['snowleopard', 'win7', 'x86', 'debug'])
+ self.assertEqual(sorter.sort_specifiers(set(['x86', 'mac', 'debug', 'win7'])), ['mac', 'win7', 'x86', 'debug'])
+
+
+class TestConfigurationConverterTest(unittest.TestCase):
+ def __init__(self, testFunc):
+ self._all_test_configurations = make_mock_all_test_configurations_set()
+ unittest.TestCase.__init__(self, testFunc)
+
+ def test_symmetric_difference(self):
+ self.assertEqual(TestConfigurationConverter.symmetric_difference([set(['a', 'b']), set(['b', 'c'])]), set(['a', 'c']))
+ self.assertEqual(TestConfigurationConverter.symmetric_difference([set(['a', 'b']), set(['b', 'c']), set(['b', 'd'])]), set(['a', 'c', 'd']))
+
+ def test_to_config_set(self):
+ converter = TestConfigurationConverter(self._all_test_configurations)
+
+ self.assertEqual(converter.to_config_set(set()), self._all_test_configurations)
+
+ self.assertEqual(converter.to_config_set(set(['foo'])), set())
+
+ self.assertEqual(converter.to_config_set(set(['xp', 'foo'])), set())
+
+ errors = []
+ self.assertEqual(converter.to_config_set(set(['xp', 'foo']), errors), set())
+ self.assertEqual(errors, ["Unrecognized specifier 'foo'"])
+
+ self.assertEqual(converter.to_config_set(set(['xp', 'x86_64'])), set())
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ ])
+ self.assertEqual(converter.to_config_set(set(['xp', 'release'])), configs_to_match)
+
+ configs_to_match = set([
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'release'),
+ ])
+ self.assertEqual(converter.to_config_set(set(['release'])), configs_to_match)
+
+ configs_to_match = set([
+ TestConfiguration('lucid', 'x86_64', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'debug'),
+ ])
+ self.assertEqual(converter.to_config_set(set(['x86_64'])), configs_to_match)
+
+ configs_to_match = set([
+ TestConfiguration('lucid', 'x86_64', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'debug'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86', 'debug'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'debug'),
+ ])
+ self.assertEqual(converter.to_config_set(set(['lucid', 'snowleopard'])), configs_to_match)
+
+ configs_to_match = set([
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86', 'debug'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'debug'),
+ ])
+ self.assertEqual(converter.to_config_set(set(['lucid', 'snowleopard', 'x86'])), configs_to_match)
+
+ configs_to_match = set([
+ TestConfiguration('lucid', 'x86_64', 'release'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ ])
+ self.assertEqual(converter.to_config_set(set(['lucid', 'snowleopard', 'release'])), configs_to_match)
+
+ def test_macro_expansion(self):
+ converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ ])
+ self.assertEqual(converter.to_config_set(set(['win', 'release'])), configs_to_match)
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'release'),
+ ])
+ self.assertEqual(converter.to_config_set(set(['win', 'lucid', 'release'])), configs_to_match)
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ ])
+ self.assertEqual(converter.to_config_set(set(['win', 'mac', 'release'])), configs_to_match)
+
+ def test_to_specifier_lists(self):
+ converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)
+
+ self.assertEqual(converter.to_specifiers_list(set(self._all_test_configurations)), [[]])
+ self.assertEqual(converter.to_specifiers_list(set()), [])
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ ])
+ self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['release', 'xp'])])
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('xp', 'x86', 'debug'),
+ ])
+ self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['xp'])])
+
+ configs_to_match = set([
+ TestConfiguration('lucid', 'x86_64', 'debug'),
+ TestConfiguration('xp', 'x86', 'release'),
+ ])
+ self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['release', 'xp']), set(['debug', 'x86_64', 'linux'])])
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'debug'),
+ TestConfiguration('lucid', 'x86', 'debug'),
+ TestConfiguration('lucid', 'x86_64', 'debug'),
+ TestConfiguration('lucid', 'x86', 'debug'),
+ ])
+ self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['release', 'xp']), set(['debug', 'linux'])])
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'release'),
+ ])
+ self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['release'])])
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ ])
+ self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['xp', 'mac', 'release'])])
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'debug'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ ])
+ self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win7']), set(['release', 'linux', 'x86']), set(['release', 'xp', 'mac'])])
+
+ def test_macro_collapsing(self):
+ macros = {'foo': ['bar', 'baz'], 'people': ['bob', 'alice', 'john']}
+
+ specifiers_list = [set(['john', 'godzilla', 'bob', 'alice'])]
+ TestConfigurationConverter.collapse_macros(macros, specifiers_list)
+ self.assertEqual(specifiers_list, [set(['people', 'godzilla'])])
+
+ specifiers_list = [set(['john', 'godzilla', 'alice'])]
+ TestConfigurationConverter.collapse_macros(macros, specifiers_list)
+ self.assertEqual(specifiers_list, [set(['john', 'godzilla', 'alice'])])
+
+ specifiers_list = [set(['bar', 'godzilla', 'baz', 'bob', 'alice', 'john'])]
+ TestConfigurationConverter.collapse_macros(macros, specifiers_list)
+ self.assertEqual(specifiers_list, [set(['foo', 'godzilla', 'people'])])
+
+ specifiers_list = [set(['bar', 'godzilla', 'baz', 'bob']), set(['bar', 'baz']), set(['people', 'alice', 'bob', 'john'])]
+ TestConfigurationConverter.collapse_macros(macros, specifiers_list)
+ self.assertEqual(specifiers_list, [set(['bob', 'foo', 'godzilla']), set(['foo']), set(['people'])])
+
+ def test_converter_macro_collapsing(self):
+ converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ ])
+ self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'release'])])
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'release'),
+ ])
+ self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'linux', 'release'])])
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ ])
+ self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'mac', 'release'])])
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ ])
+ self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'mac', 'release'])])
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ ])
+ self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'release'])])
+
+ def test_specifier_converter_access(self):
+ specifier_sorter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS).specifier_sorter()
+ self.assertEqual(specifier_sorter.category_for_specifier('snowleopard'), 'version')
+ self.assertEqual(specifier_sorter.category_for_specifier('mac'), 'version')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
new file mode 100644
index 0000000..5c02dc2
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
@@ -0,0 +1,1126 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A helper class for reading in and dealing with tests expectations
+for layout tests.
+"""
+
+import logging
+import re
+
+from webkitpy.layout_tests.models.test_configuration import TestConfigurationConverter
+
+_log = logging.getLogger(__name__)
+
+
+# Test expectation and specifier constants.
+#
+# FIXME: range() starts at 0, which makes truthiness checks like
+# "if expectation:" error-prone because PASS is 0.
+(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, TIMEOUT, CRASH, LEAK, SKIP, WONTFIX,
+ SLOW, REBASELINE, NEEDS_REBASELINE, NEEDS_MANUAL_REBASELINE, MISSING, FLAKY, NOW, NONE) = range(19)
+
+# FIXME: Perhaps these two routines should be part of the Port instead?
+BASELINE_SUFFIX_LIST = ('png', 'wav', 'txt')
+
+WEBKIT_BUG_PREFIX = 'webkit.org/b/'
+CHROMIUM_BUG_PREFIX = 'crbug.com/'
+V8_BUG_PREFIX = 'code.google.com/p/v8/issues/detail?id='
+NAMED_BUG_PREFIX = 'Bug('
+
+MISSING_KEYWORD = 'Missing'
+NEEDS_REBASELINE_KEYWORD = 'NeedsRebaseline'
+NEEDS_MANUAL_REBASELINE_KEYWORD = 'NeedsManualRebaseline'
+
+class ParseError(Exception):
+ def __init__(self, warnings):
+ super(ParseError, self).__init__()
+ self.warnings = warnings
+
+ def __str__(self):
+ return '\n'.join(map(str, self.warnings))
+
+ def __repr__(self):
+ return 'ParseError(warnings=%s)' % self.warnings
+
+
+class TestExpectationParser(object):
+ """Provides parsing facilities for lines in the test_expectation.txt file."""
+
+ # FIXME: Rename these to *_KEYWORD as in MISSING_KEYWORD above, but make the case studly caps to match the actual file contents.
+ REBASELINE_MODIFIER = 'rebaseline'
+ NEEDS_REBASELINE_MODIFIER = 'needsrebaseline'
+ NEEDS_MANUAL_REBASELINE_MODIFIER = 'needsmanualrebaseline'
+ PASS_EXPECTATION = 'pass'
+ SKIP_MODIFIER = 'skip'
+ SLOW_MODIFIER = 'slow'
+ WONTFIX_MODIFIER = 'wontfix'
+
+ TIMEOUT_EXPECTATION = 'timeout'
+
+ MISSING_BUG_WARNING = 'Test lacks BUG specifier.'
+
+ def __init__(self, port, full_test_list, is_lint_mode):
+ self._port = port
+ self._test_configuration_converter = TestConfigurationConverter(set(port.all_test_configurations()), port.configuration_specifier_macros())
+ self._full_test_list = full_test_list
+ self._is_lint_mode = is_lint_mode
+
+ def parse(self, filename, expectations_string):
+ expectation_lines = []
+ line_number = 0
+ for line in expectations_string.split("\n"):
+ line_number += 1
+ test_expectation = self._tokenize_line(filename, line, line_number)
+ self._parse_line(test_expectation)
+ expectation_lines.append(test_expectation)
+ return expectation_lines
+
+ def _create_expectation_line(self, test_name, expectations, file_name):
+ expectation_line = TestExpectationLine()
+ expectation_line.original_string = test_name
+ expectation_line.name = test_name
+ expectation_line.filename = file_name
+ expectation_line.expectations = expectations
+ return expectation_line
+
+ def expectation_line_for_test(self, test_name, expectations):
+ expectation_line = self._create_expectation_line(test_name, expectations, '<Bot TestExpectations>')
+ self._parse_line(expectation_line)
+ return expectation_line
+
+ def expectation_for_skipped_test(self, test_name):
+ if not self._port.test_exists(test_name):
+ _log.warning('The following test %s from the Skipped list doesn\'t exist' % test_name)
+ expectation_line = self._create_expectation_line(test_name, [TestExpectationParser.PASS_EXPECTATION], '<Skipped file>')
+ expectation_line.expectations = [TestExpectationParser.SKIP_MODIFIER, TestExpectationParser.WONTFIX_MODIFIER]
+ expectation_line.is_skipped_outside_expectations_file = True
+ self._parse_line(expectation_line)
+ return expectation_line
+
+ def _parse_line(self, expectation_line):
+ if not expectation_line.name:
+ return
+
+ if not self._check_test_exists(expectation_line):
+ return
+
+ expectation_line.is_file = self._port.test_isfile(expectation_line.name)
+ if expectation_line.is_file:
+ expectation_line.path = expectation_line.name
+ else:
+ expectation_line.path = self._port.normalize_test_name(expectation_line.name)
+
+ self._collect_matching_tests(expectation_line)
+
+ self._parse_specifiers(expectation_line)
+ self._parse_expectations(expectation_line)
+
+ def _parse_specifiers(self, expectation_line):
+ if self._is_lint_mode:
+ self._lint_line(expectation_line)
+
+ parsed_specifiers = set([specifier.lower() for specifier in expectation_line.specifiers])
+ expectation_line.matching_configurations = self._test_configuration_converter.to_config_set(parsed_specifiers, expectation_line.warnings)
+
+ def _lint_line(self, expectation_line):
+ expectations = [expectation.lower() for expectation in expectation_line.expectations]
+ if not expectation_line.bugs and self.WONTFIX_MODIFIER not in expectations:
+ expectation_line.warnings.append(self.MISSING_BUG_WARNING)
+ if self.REBASELINE_MODIFIER in expectations:
+ expectation_line.warnings.append('REBASELINE should only be used for running rebaseline.py. Cannot be checked in.')
+
+ if self.NEEDS_REBASELINE_MODIFIER in expectations or self.NEEDS_MANUAL_REBASELINE_MODIFIER in expectations:
+ for test in expectation_line.matching_tests:
+ if self._port.reference_files(test):
+ expectation_line.warnings.append('A reftest cannot be marked as NeedsRebaseline/NeedsManualRebaseline')
+
+ def _parse_expectations(self, expectation_line):
+ result = set()
+ for part in expectation_line.expectations:
+ expectation = TestExpectations.expectation_from_string(part)
+ if expectation is None: # Careful, PASS is currently 0.
+ expectation_line.warnings.append('Unsupported expectation: %s' % part)
+ continue
+ result.add(expectation)
+ expectation_line.parsed_expectations = result
+
+ def _check_test_exists(self, expectation_line):
+ # WebKit's way of skipping tests is to add a -disabled suffix.
+ # So we should consider the path existing if the path or the
+ # -disabled version exists.
+ if not self._port.test_exists(expectation_line.name) and not self._port.test_exists(expectation_line.name + '-disabled'):
+ # Log a warning here since you hit this case any
+ # time you update TestExpectations without syncing
+ # the LayoutTests directory
+ expectation_line.warnings.append('Path does not exist.')
+ return False
+ return True
+
+ def _collect_matching_tests(self, expectation_line):
+ """Convert the test specification to an absolute, normalized
+ path and make sure directories end with the OS path separator."""
+ # FIXME: full_test_list can quickly contain a big amount of
+ # elements. We should consider at some point to use a more
+ # efficient structure instead of a list. Maybe a dictionary of
+ # lists to represent the tree of tests, leaves being test
+ # files and nodes being categories.
+
+ if not self._full_test_list:
+
+
+ if not expectation_line.is_file:
+ # this is a test category, return all the tests of the category.
+ expectation_line.matching_tests = [test for test in self._full_test_list if test.startswith(expectation_line.path)]
+ return
+
+ # this is a test file, do a quick check if it's in the
+ # full test suite.
+ if expectation_line.path in self._full_test_list:
+ expectation_line.matching_tests.append(expectation_line.path)
+
+ # FIXME: Update the original specifiers and remove this once the old syntax is gone.
+ _configuration_tokens_list = [
+ 'Mac', 'SnowLeopard', 'Lion', 'Retina', 'MountainLion', 'Mavericks',
+ 'Win', 'XP', 'Win7',
+ 'Linux',
+ 'Android',
+ 'Release',
+ 'Debug',
+ ]
+
+ _configuration_tokens = dict((token, token.upper()) for token in _configuration_tokens_list)
+ _inverted_configuration_tokens = dict((value, name) for name, value in _configuration_tokens.iteritems())
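+ # For example, _configuration_tokens maps 'SnowLeopard' to 'SNOWLEOPARD', and
+ # _inverted_configuration_tokens maps 'SNOWLEOPARD' back to 'SnowLeopard'.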
+
+ # FIXME: Update the original specifiers list and remove this once the old syntax is gone.
+ _expectation_tokens = {
+ 'Crash': 'CRASH',
+ 'Leak': 'LEAK',
+ 'Failure': 'FAIL',
+ 'ImageOnlyFailure': 'IMAGE',
+ MISSING_KEYWORD: 'MISSING',
+ 'Pass': 'PASS',
+ 'Rebaseline': 'REBASELINE',
+ NEEDS_REBASELINE_KEYWORD: 'NEEDSREBASELINE',
+ NEEDS_MANUAL_REBASELINE_KEYWORD: 'NEEDSMANUALREBASELINE',
+ 'Skip': 'SKIP',
+ 'Slow': 'SLOW',
+ 'Timeout': 'TIMEOUT',
+ 'WontFix': 'WONTFIX',
+ }
+
+ _inverted_expectation_tokens = dict([(value, name) for name, value in _expectation_tokens.iteritems()] +
+ [('TEXT', 'Failure'), ('IMAGE+TEXT', 'Failure'), ('AUDIO', 'Failure')])
+
+ # FIXME: Seems like these should be classmethods on TestExpectationLine instead of TestExpectationParser.
+ @classmethod
+ def _tokenize_line(cls, filename, expectation_string, line_number):
+ """Tokenizes a line from TestExpectations and returns an unparsed TestExpectationLine instance using the old format.
+
+ The new format for a test expectation line is:
+
+ [[bugs] [ "[" <configuration specifiers> "]" <name> [ "[" <expectations> "]" ["#" <comment>]
+
+ Any errant whitespace is not preserved.
+
+ """
+ expectation_line = TestExpectationLine()
+ expectation_line.original_string = expectation_string
+ expectation_line.filename = filename
+ expectation_line.line_numbers = str(line_number)
+
+ comment_index = expectation_string.find("#")
+ if comment_index == -1:
+ comment_index = len(expectation_string)
+ else:
+ expectation_line.comment = expectation_string[comment_index + 1:]
+
+ remaining_string = re.sub(r"\s+", " ", expectation_string[:comment_index].strip())
+ if len(remaining_string) == 0:
+ return expectation_line
+
+ # special-case parsing this so that we fail immediately instead of treating this as a test name
+ if remaining_string.startswith('//'):
+ expectation_line.warnings = ['use "#" instead of "//" for comments']
+ return expectation_line
+
+ bugs = []
+ specifiers = []
+ name = None
+ expectations = []
+ warnings = []
+ has_unrecognized_expectation = False
+
+ tokens = remaining_string.split()
+ state = 'start'
+ for token in tokens:
+ if (token.startswith(WEBKIT_BUG_PREFIX) or
+ token.startswith(CHROMIUM_BUG_PREFIX) or
+ token.startswith(V8_BUG_PREFIX) or
+ token.startswith(NAMED_BUG_PREFIX)):
+ if state != 'start':
+ warnings.append('"%s" is not at the start of the line.' % token)
+ break
+ if token.startswith(WEBKIT_BUG_PREFIX):
+ bugs.append(token)
+ elif token.startswith(CHROMIUM_BUG_PREFIX):
+ bugs.append(token)
+ elif token.startswith(V8_BUG_PREFIX):
+ bugs.append(token)
+ else:
+ match = re.match(r'Bug\((\w+)\)$', token)
+ if not match:
+ warnings.append('unrecognized bug identifier "%s"' % token)
+ break
+ else:
+ bugs.append(token)
+ elif token == '[':
+ if state == 'start':
+ state = 'configuration'
+ elif state == 'name_found':
+ state = 'expectations'
+ else:
+ warnings.append('unexpected "["')
+ break
+ elif token == ']':
+ if state == 'configuration':
+ state = 'name'
+ elif state == 'expectations':
+ state = 'done'
+ else:
+ warnings.append('unexpected "]"')
+ break
+ elif token in ('//', ':', '='):
+ warnings.append('"%s" is not legal in the new TestExpectations syntax.' % token)
+ break
+ elif state == 'configuration':
+ specifiers.append(cls._configuration_tokens.get(token, token))
+ elif state == 'expectations':
+ if token not in cls._expectation_tokens:
+ has_unrecognized_expectation = True
+ warnings.append('Unrecognized expectation "%s"' % token)
+ else:
+ expectations.append(cls._expectation_tokens.get(token, token))
+ elif state == 'name_found':
+ warnings.append('expecting "[", "#", or end of line instead of "%s"' % token)
+ break
+ else:
+ name = token
+ state = 'name_found'
+
+ if not warnings:
+ if not name:
+ warnings.append('Did not find a test name.')
+ elif state not in ('name_found', 'done'):
+ warnings.append('Missing a "]"')
+
+ if 'WONTFIX' in expectations and 'SKIP' not in expectations:
+ expectations.append('SKIP')
+
+ if ('SKIP' in expectations or 'WONTFIX' in expectations) and len(set(expectations) - set(['SKIP', 'WONTFIX'])):
+ warnings.append('A test marked Skip or WontFix must not have other expectations.')
+
+ if not expectations and not has_unrecognized_expectation:
+ warnings.append('Missing expectations.')
+
+ expectation_line.bugs = bugs
+ expectation_line.specifiers = specifiers
+ expectation_line.expectations = expectations
+ expectation_line.name = name
+ expectation_line.warnings = warnings
+ return expectation_line
+
+ @classmethod
+ def _split_space_separated(cls, space_separated_string):
+ """Splits a space-separated string into an array."""
+ return [part.strip() for part in space_separated_string.strip().split(' ')]
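+ # Note that split(' ') preserves empty strings for consecutive spaces, so
+ # _split_space_separated('a  b') returns ['a', '', 'b'].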
+
+
+class TestExpectationLine(object):
+ """Represents a line in test expectations file."""
+
+ def __init__(self):
+ """Initializes a blank-line equivalent of an expectation."""
+ self.original_string = None
+ self.filename = None # this is the path to the expectations file for this line
+ self.line_numbers = "0"
+ self.name = None # this is the path in the line itself
+ self.path = None # this is the normpath of self.name
+ self.bugs = []
+ self.specifiers = []
+ self.parsed_specifiers = []
+ self.matching_configurations = set()
+ self.expectations = []
+ self.parsed_expectations = set()
+ self.comment = None
+ self.matching_tests = []
+ self.warnings = []
+ self.is_skipped_outside_expectations_file = False
+
+ def __eq__(self, other):
+ return (self.original_string == other.original_string
+ and self.filename == other.filename
+ and self.line_numbers == other.line_numbers
+ and self.name == other.name
+ and self.path == other.path
+ and self.bugs == other.bugs
+ and self.specifiers == other.specifiers
+ and self.parsed_specifiers == other.parsed_specifiers
+ and self.matching_configurations == other.matching_configurations
+ and self.expectations == other.expectations
+ and self.parsed_expectations == other.parsed_expectations
+ and self.comment == other.comment
+ and self.matching_tests == other.matching_tests
+ and self.warnings == other.warnings
+ and self.is_skipped_outside_expectations_file == other.is_skipped_outside_expectations_file)
+
+ def is_invalid(self):
+ return bool(self.warnings and self.warnings != [TestExpectationParser.MISSING_BUG_WARNING])
+
+ def is_flaky(self):
+ return len(self.parsed_expectations) > 1
+
+ def is_whitespace_or_comment(self):
+ return bool(re.match("^\s*$", self.original_string.split('#')[0]))
+
+ @staticmethod
+ def create_passing_expectation(test):
+ expectation_line = TestExpectationLine()
+ expectation_line.name = test
+ expectation_line.path = test
+ expectation_line.parsed_expectations = set([PASS])
+ expectation_line.expectations = set(['PASS'])
+ expectation_line.matching_tests = [test]
+ return expectation_line
+
+ @staticmethod
+ def merge_expectation_lines(line1, line2, model_all_expectations):
+ """Merges the expectations of line2 into line1 and returns a fresh object."""
+ if line1 is None:
+ return line2
+ if line2 is None:
+ return line1
+ if model_all_expectations and line1.filename != line2.filename:
+ return line2
+
+ # Don't merge original_string or comment.
+ result = TestExpectationLine()
+ # We only care about filenames when we're linting, in which case the filenames are the same.
+ # Not clear that there's anything better to do when not linting and the filenames are different.
+ if model_all_expectations:
+ result.filename = line2.filename
+ result.line_numbers = line1.line_numbers + "," + line2.line_numbers
+ result.name = line1.name
+ result.path = line1.path
+ result.parsed_expectations = set(line1.parsed_expectations) | set(line2.parsed_expectations)
+ result.expectations = list(set(line1.expectations) | set(line2.expectations))
+ result.bugs = list(set(line1.bugs) | set(line2.bugs))
+ result.specifiers = list(set(line1.specifiers) | set(line2.specifiers))
+ result.parsed_specifiers = list(set(line1.parsed_specifiers) | set(line2.parsed_specifiers))
+ result.matching_configurations = set(line1.matching_configurations) | set(line2.matching_configurations)
+ result.matching_tests = list(list(set(line1.matching_tests) | set(line2.matching_tests)))
+ result.warnings = list(set(line1.warnings) | set(line2.warnings))
+ result.is_skipped_outside_expectations_file = line1.is_skipped_outside_expectations_file or line2.is_skipped_outside_expectations_file
+ return result
+
+ def to_string(self, test_configuration_converter, include_specifiers=True, include_expectations=True, include_comment=True):
+ parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])
+
+ if self.is_invalid():
+ return self.original_string or ''
+
+ if self.name is None:
+ return '' if self.comment is None else "#%s" % self.comment
+
+ if test_configuration_converter and self.bugs:
+ specifiers_list = test_configuration_converter.to_specifiers_list(self.matching_configurations)
+ result = []
+ for specifiers in specifiers_list:
+ # FIXME: this is silly that we join the specifiers and then immediately split them.
+ specifiers = self._serialize_parsed_specifiers(test_configuration_converter, specifiers).split()
+ expectations = self._serialize_parsed_expectations(parsed_expectation_to_string).split()
+ result.append(self._format_line(self.bugs, specifiers, self.name, expectations, self.comment))
+ return "\n".join(result) if result else None
+
+ return self._format_line(self.bugs, self.specifiers, self.name, self.expectations, self.comment,
+ include_specifiers, include_expectations, include_comment)
+
+ def to_csv(self):
+ # Note that this doesn't include the comments.
+ return '%s,%s,%s,%s' % (self.name, ' '.join(self.bugs), ' '.join(self.specifiers), ' '.join(self.expectations))
+
+ def _serialize_parsed_expectations(self, parsed_expectation_to_string):
+ result = []
+ for index in TestExpectations.EXPECTATIONS.values():
+ if index in self.parsed_expectations:
+ result.append(parsed_expectation_to_string[index])
+ return ' '.join(result)
+
+ def _serialize_parsed_specifiers(self, test_configuration_converter, specifiers):
+ result = []
+ result.extend(sorted(self.parsed_specifiers))
+ result.extend(test_configuration_converter.specifier_sorter().sort_specifiers(specifiers))
+ return ' '.join(result)
+
+ @staticmethod
+ def _filter_redundant_expectations(expectations):
+ if set(expectations) == set(['Pass', 'Skip']):
+ return ['Skip']
+ if set(expectations) == set(['Pass', 'Slow']):
+ return ['Slow']
+ return expectations
+
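+ # For example (illustrative values, not from the original source):
+ #   _format_line(['crbug.com/1'], ['WIN'], 'foo.html', ['CRASH'], None)
+ # returns 'crbug.com/1 [ Win ] foo.html [ Crash ]'.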
+ @staticmethod
+ def _format_line(bugs, specifiers, name, expectations, comment, include_specifiers=True, include_expectations=True, include_comment=True):
+ new_specifiers = []
+ new_expectations = []
+ for specifier in specifiers:
+ # FIXME: Make this all work with the mixed-cased specifiers (e.g. WontFix, Slow, etc).
+ specifier = specifier.upper()
+ new_specifiers.append(TestExpectationParser._inverted_configuration_tokens.get(specifier, specifier))
+
+ for expectation in expectations:
+ expectation = expectation.upper()
+ new_expectations.append(TestExpectationParser._inverted_expectation_tokens.get(expectation, expectation))
+
+ result = ''
+ if include_specifiers and (bugs or new_specifiers):
+ if bugs:
+ result += ' '.join(bugs) + ' '
+ if new_specifiers:
+ result += '[ %s ] ' % ' '.join(new_specifiers)
+ result += name
+ if include_expectations and new_expectations:
+ new_expectations = TestExpectationLine._filter_redundant_expectations(new_expectations)
+ result += ' [ %s ]' % ' '.join(sorted(set(new_expectations)))
+ if include_comment and comment is not None:
+ result += " #%s" % comment
+ return result
+
+
+# FIXME: Refactor API to be a proper CRUD.
+class TestExpectationsModel(object):
+ """Represents relational store of all expectations and provides CRUD semantics to manage it."""
+
+ def __init__(self, shorten_filename=None):
+ # Maps a test to its list of expectations.
+ self._test_to_expectations = {}
+
+ # Maps a test to list of its specifiers (string values)
+ self._test_to_specifiers = {}
+
+ # Maps a test to a TestExpectationLine instance.
+ self._test_to_expectation_line = {}
+
+ self._expectation_to_tests = self._dict_of_sets(TestExpectations.EXPECTATIONS)
+ self._timeline_to_tests = self._dict_of_sets(TestExpectations.TIMELINES)
+ self._result_type_to_tests = self._dict_of_sets(TestExpectations.RESULT_TYPES)
+
+ self._shorten_filename = shorten_filename or (lambda x: x)
+
+ def _merge_test_map(self, self_map, other_map):
+ for test in other_map:
+ new_expectations = set(other_map[test])
+ if test in self_map:
+ new_expectations |= set(self_map[test])
+ self_map[test] = list(new_expectations) if isinstance(other_map[test], list) else new_expectations
+
+ def _merge_dict_of_sets(self, self_dict, other_dict):
+ for key in other_dict:
+ self_dict[key] |= other_dict[key]
+
+ def merge_model(self, other):
+ self._merge_test_map(self._test_to_expectations, other._test_to_expectations)
+
+ for test, line in other._test_to_expectation_line.items():
+ if test in self._test_to_expectation_line:
+ line = TestExpectationLine.merge_expectation_lines(self._test_to_expectation_line[test], line, model_all_expectations=False)
+ self._test_to_expectation_line[test] = line
+
+ self._merge_dict_of_sets(self._expectation_to_tests, other._expectation_to_tests)
+ self._merge_dict_of_sets(self._timeline_to_tests, other._timeline_to_tests)
+ self._merge_dict_of_sets(self._result_type_to_tests, other._result_type_to_tests)
+
+ def _dict_of_sets(self, strings_to_constants):
+ """Takes a dict of strings->constants and returns a dict mapping
+ each constant to an empty set."""
+ d = {}
+ for c in strings_to_constants.values():
+ d[c] = set()
+ return d
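+ # For example (illustrative): _dict_of_sets({'pass': PASS}) returns {PASS: set()}.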
+
+ def get_test_set(self, expectation, include_skips=True):
+ tests = self._expectation_to_tests[expectation]
+ if not include_skips:
+ tests = tests - self.get_test_set(SKIP)
+ return tests
+
+ def get_test_set_for_keyword(self, keyword):
+ expectation_enum = TestExpectations.EXPECTATIONS.get(keyword.lower(), None)
+ if expectation_enum is not None:
+ return self._expectation_to_tests[expectation_enum]
+
+ matching_tests = set()
+ for test, specifiers in self._test_to_specifiers.iteritems():
+ if keyword.lower() in specifiers:
+ matching_tests.add(test)
+ return matching_tests
+
+ def get_tests_with_result_type(self, result_type):
+ return self._result_type_to_tests[result_type]
+
+ def get_tests_with_timeline(self, timeline):
+ return self._timeline_to_tests[timeline]
+
+ def has_test(self, test):
+ return test in self._test_to_expectation_line
+
+ def get_expectation_line(self, test):
+ return self._test_to_expectation_line.get(test)
+
+ def get_expectations(self, test):
+ return self._test_to_expectations[test]
+
+ def get_expectations_string(self, test):
+ """Returns the expectatons for the given test as an uppercase string.
+ If there are no expectations for the test, then "PASS" is returned."""
+ if self.get_expectation_line(test).is_skipped_outside_expectations_file:
+ return 'NOTRUN'
+
+ expectations = self.get_expectations(test)
+ retval = []
+
+ # FIXME: WontFix should cause the test to get skipped without artificially adding SKIP to the expectations list.
+ if WONTFIX in expectations and SKIP in expectations:
+ expectations.remove(SKIP)
+
+ for expectation in expectations:
+ retval.append(self.expectation_to_string(expectation))
+
+ return " ".join(retval)
+
+ def expectation_to_string(self, expectation):
+ """Return the uppercased string equivalent of a given expectation."""
+ for item in TestExpectations.EXPECTATIONS.items():
+ if item[1] == expectation:
+ return item[0].upper()
+ raise ValueError(expectation)
+
+ def remove_expectation_line(self, test):
+ if not self.has_test(test):
+ return
+ self._clear_expectations_for_test(test)
+ del self._test_to_expectation_line[test]
+
+ def add_expectation_line(self, expectation_line,
+ model_all_expectations=False):
+ """Returns a list of warnings encountered while matching specifiers."""
+
+ if expectation_line.is_invalid():
+ return
+
+ for test in expectation_line.matching_tests:
+ if self._already_seen_better_match(test, expectation_line):
+ continue
+
+ if model_all_expectations:
+ expectation_line = TestExpectationLine.merge_expectation_lines(self.get_expectation_line(test), expectation_line, model_all_expectations)
+
+ self._clear_expectations_for_test(test)
+ self._test_to_expectation_line[test] = expectation_line
+ self._add_test(test, expectation_line)
+
+ def _add_test(self, test, expectation_line):
+ """Sets the expected state for a given test.
+
+ This routine assumes the test has not been added before. If it has,
+ use _clear_expectations_for_test() to reset the state prior to
+ calling this."""
+ self._test_to_expectations[test] = expectation_line.parsed_expectations
+ for expectation in expectation_line.parsed_expectations:
+ self._expectation_to_tests[expectation].add(test)
+
+ self._test_to_specifiers[test] = expectation_line.specifiers
+
+ if WONTFIX in expectation_line.parsed_expectations:
+ self._timeline_to_tests[WONTFIX].add(test)
+ else:
+ self._timeline_to_tests[NOW].add(test)
+
+ if SKIP in expectation_line.parsed_expectations:
+ self._result_type_to_tests[SKIP].add(test)
+ elif expectation_line.parsed_expectations == set([PASS]):
+ self._result_type_to_tests[PASS].add(test)
+ elif expectation_line.is_flaky():
+ self._result_type_to_tests[FLAKY].add(test)
+ else:
+ # FIXME: What is this?
+ self._result_type_to_tests[FAIL].add(test)
+
+ def _clear_expectations_for_test(self, test):
+ """Remove prexisting expectations for this test.
+ This happens if we are seeing a more precise path
+ than a previous listing.
+ """
+ if self.has_test(test):
+ self._test_to_expectations.pop(test, '')
+ self._remove_from_sets(test, self._expectation_to_tests)
+ self._remove_from_sets(test, self._timeline_to_tests)
+ self._remove_from_sets(test, self._result_type_to_tests)
+
+ def _remove_from_sets(self, test, dict_of_sets_of_tests):
+ """Removes the given test from the sets in the dictionary.
+
+ Args:
+ test: test to look for
+ dict: dict of sets of files"""
+ for set_of_tests in dict_of_sets_of_tests.itervalues():
+ if test in set_of_tests:
+ set_of_tests.remove(test)
+
+ def _already_seen_better_match(self, test, expectation_line):
+ """Returns whether we've seen a better match already in the file.
+
+ Returns True if we've already seen an expectation_line.name that matches more of the test
+ than this path does.
+ """
+ # FIXME: See comment below about matching test configs and specificity.
+ if not self.has_test(test):
+ # We've never seen this test before.
+ return False
+
+ prev_expectation_line = self._test_to_expectation_line[test]
+
+ if prev_expectation_line.filename != expectation_line.filename:
+ # We've moved on to a new expectation file, which overrides older ones.
+ return False
+
+ if len(prev_expectation_line.path) > len(expectation_line.path):
+ # The previous path matched more of the test.
+ return True
+
+ if len(prev_expectation_line.path) < len(expectation_line.path):
+ # This path matches more of the test.
+ return False
+
+ # At this point we know we have seen a previous exact match on this
+ # base path, so we need to check the two sets of specifiers.
+
+ # FIXME: This code was originally designed to allow lines that matched
+ # more specifiers to override lines that matched fewer specifiers.
+ # However, we currently view these as errors.
+ #
+ # To use the "more specifiers wins" policy, change the errors for overrides
+ # to be warnings and return False".
+
+ if prev_expectation_line.matching_configurations == expectation_line.matching_configurations:
+ expectation_line.warnings.append('Duplicate or ambiguous entry lines %s:%s and %s:%s.' % (
+ self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers,
+ self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
+ return True
+
+ if prev_expectation_line.matching_configurations >= expectation_line.matching_configurations:
+ expectation_line.warnings.append('More specific entry for %s on line %s:%s overrides line %s:%s.' % (expectation_line.name,
+ self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers,
+ self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
+ # FIXME: return False if we want more specific to win.
+ return True
+
+ if prev_expectation_line.matching_configurations <= expectation_line.matching_configurations:
+ expectation_line.warnings.append('More specific entry for %s on line %s:%s overrides line %s:%s.' % (expectation_line.name,
+ self._shorten_filename(expectation_line.filename), expectation_line.line_numbers,
+ self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers))
+ return True
+
+ if prev_expectation_line.matching_configurations & expectation_line.matching_configurations:
+ expectation_line.warnings.append('Entries for %s on lines %s:%s and %s:%s match overlapping sets of configurations.' % (expectation_line.name,
+ self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers,
+ self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
+ return True
+
+ # Configuration sets are disjoint, then.
+ return False
+
+
+class TestExpectations(object):
+ """Test expectations consist of lines with specifications of what
+ to expect from layout test cases. The test cases can be directories
+ in which case the expectations apply to all test cases in that
+ directory and any subdirectory. The format is along the lines of:
+
+ LayoutTests/fast/js/fixme.js [ Failure ]
+ LayoutTests/fast/js/flaky.js [ Failure Pass ]
+ LayoutTests/fast/js/crash.js [ Crash Failure Pass Timeout ]
+ ...
+
+ To add specifiers:
+ LayoutTests/fast/js/no-good.js
+ [ Debug ] LayoutTests/fast/js/no-good.js [ Pass Timeout ]
+ [ Debug ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
+ [ Linux Debug ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
+ [ Linux Win ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
+
+ Skip: Doesn't run the test.
+ Slow: The test takes a long time to run, but does not timeout indefinitely.
+ WontFix: For tests that we never intend to pass on a given platform (treated like Skip).
+
+ Notes:
+ -A test cannot be both SLOW and TIMEOUT
+ -A test can be included twice, but not via the same path.
+ -If a test is included twice, then the more precise path wins.
+ -CRASH tests cannot be WONTFIX
+ """
+
+ # FIXME: Update to new syntax once the old format is no longer supported.
+ EXPECTATIONS = {'pass': PASS,
+ 'audio': AUDIO,
+ 'fail': FAIL,
+ 'image': IMAGE,
+ 'image+text': IMAGE_PLUS_TEXT,
+ 'text': TEXT,
+ 'timeout': TIMEOUT,
+ 'crash': CRASH,
+ 'leak': LEAK,
+ 'missing': MISSING,
+ TestExpectationParser.SKIP_MODIFIER: SKIP,
+ TestExpectationParser.NEEDS_REBASELINE_MODIFIER: NEEDS_REBASELINE,
+ TestExpectationParser.NEEDS_MANUAL_REBASELINE_MODIFIER: NEEDS_MANUAL_REBASELINE,
+ TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
+ TestExpectationParser.SLOW_MODIFIER: SLOW,
+ TestExpectationParser.REBASELINE_MODIFIER: REBASELINE,
+ }
+
+ EXPECTATIONS_TO_STRING = dict((value, key) for (key, value) in EXPECTATIONS.iteritems())
+
+ # (aggregated by category, pass/fail/skip, type)
+ EXPECTATION_DESCRIPTIONS = {SKIP: 'skipped',
+ PASS: 'passes',
+ FAIL: 'failures',
+ IMAGE: 'image-only failures',
+ TEXT: 'text-only failures',
+ IMAGE_PLUS_TEXT: 'image and text failures',
+ AUDIO: 'audio failures',
+ CRASH: 'crashes',
+ LEAK: 'leaks',
+ TIMEOUT: 'timeouts',
+ MISSING: 'missing results'}
+
+ NON_TEST_OUTCOME_EXPECTATIONS = (REBASELINE, SKIP, SLOW, WONTFIX)
+
+ BUILD_TYPES = ('debug', 'release')
+
+ TIMELINES = {TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
+ 'now': NOW}
+
+ RESULT_TYPES = {'skip': SKIP,
+ 'pass': PASS,
+ 'fail': FAIL,
+ 'flaky': FLAKY}
+
+ @classmethod
+ def expectation_from_string(cls, string):
+ assert(' ' not in string) # This only handles one expectation at a time.
+ return cls.EXPECTATIONS.get(string.lower())
+
+ @staticmethod
+ def result_was_expected(result, expected_results, test_needs_rebaselining):
+ """Returns whether we got a result we were expecting.
+ Args:
+ result: actual result of a test execution
+ expected_results: set of results listed in test_expectations
+ test_needs_rebaselining: whether test was marked as REBASELINE"""
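+ # For example (illustrative): a TEXT result against an expected_results of
+ # set([FAIL]) is accepted below, because text failures count as FAIL.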
+ if not (set(expected_results) - set(TestExpectations.NON_TEST_OUTCOME_EXPECTATIONS)):
+ expected_results = set([PASS])
+
+ if result in expected_results:
+ return True
+ if result in (PASS, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, MISSING) and (NEEDS_REBASELINE in expected_results or NEEDS_MANUAL_REBASELINE in expected_results):
+ return True
+ if result in (TEXT, IMAGE_PLUS_TEXT, AUDIO) and (FAIL in expected_results):
+ return True
+ if result == MISSING and test_needs_rebaselining:
+ return True
+ if result == SKIP:
+ return True
+ return False
+
+ @staticmethod
+ def remove_pixel_failures(expected_results):
+ """Returns a copy of the expected results for a test, except that we
+ drop any pixel failures and return the remaining expectations. For example,
+ if we're not running pixel tests, then tests expected to fail as IMAGE
+ will PASS."""
+ expected_results = expected_results.copy()
+ if IMAGE in expected_results:
+ expected_results.remove(IMAGE)
+ expected_results.add(PASS)
+ return expected_results
+
+ @staticmethod
+ def remove_non_sanitizer_failures(expected_results):
+ """Returns a copy of the expected results for a test, except that we
+ drop any failures that the sanitizers don't care about."""
+ expected_results = expected_results.copy()
+ for result in (IMAGE, FAIL, IMAGE_PLUS_TEXT):
+ if result in expected_results:
+ expected_results.remove(result)
+ expected_results.add(PASS)
+ return expected_results
+
+ @staticmethod
+ def has_pixel_failures(actual_results):
+ return IMAGE in actual_results or FAIL in actual_results
+
+ @staticmethod
+ def suffixes_for_expectations(expectations):
+ suffixes = set()
+ if IMAGE in expectations:
+ suffixes.add('png')
+ if FAIL in expectations:
+ suffixes.add('txt')
+ suffixes.add('png')
+ suffixes.add('wav')
+ return set(suffixes)
+
+ @staticmethod
+ def suffixes_for_actual_expectations_string(expectations):
+ suffixes = set()
+ if 'TEXT' in expectations:
+ suffixes.add('txt')
+ if 'IMAGE' in expectations:
+ suffixes.add('png')
+ if 'AUDIO' in expectations:
+ suffixes.add('wav')
+ if 'MISSING' in expectations:
+ suffixes.add('txt')
+ suffixes.add('png')
+ suffixes.add('wav')
+ return suffixes
+
+ # FIXME: This constructor does too much work. We should move the actual parsing of
+ # the expectations into separate routines so that linting and handling overrides
+ # can be controlled separately, and the constructor can be more of a no-op.
+ def __init__(self, port, tests=None, include_overrides=True, expectations_dict=None, model_all_expectations=False, is_lint_mode=False):
+ self._full_test_list = tests
+ self._test_config = port.test_configuration()
+ self._is_lint_mode = is_lint_mode
+ self._model_all_expectations = self._is_lint_mode or model_all_expectations
+ self._model = TestExpectationsModel(self._shorten_filename)
+ self._parser = TestExpectationParser(port, tests, self._is_lint_mode)
+ self._port = port
+ self._skipped_tests_warnings = []
+ self._expectations = []
+
+ if not expectations_dict:
+ expectations_dict = port.expectations_dict()
+
+ # Always parse the generic expectations (the generic file is required
+ # to be the first one in the expectations_dict, which must be an OrderedDict).
+ generic_path, generic_exps = expectations_dict.items()[0]
+ expectations = self._parser.parse(generic_path, generic_exps)
+ self._add_expectations(expectations, self._model)
+ self._expectations += expectations
+
+ # Now add the overrides if so requested.
+ if include_overrides:
+ for path, contents in expectations_dict.items()[1:]:
+ expectations = self._parser.parse(path, contents)
+ model = TestExpectationsModel(self._shorten_filename)
+ self._add_expectations(expectations, model)
+ self._expectations += expectations
+ self._model.merge_model(model)
+
+ # FIXME: move ignore_tests into port.skipped_layout_tests()
+ self.add_extra_skipped_tests(port.skipped_layout_tests(tests).union(set(port.get_option('ignore_tests', []))))
+ self.add_expectations_from_bot()
+
+ self._has_warnings = False
+ self._report_warnings()
+ self._process_tests_without_expectations()
+
+ # TODO(ojan): Allow for removing skipped tests when getting the list of
+ # tests to run, but not when getting metrics.
+ def model(self):
+ return self._model
+
+ def get_needs_rebaseline_failures(self):
+ return self._model.get_test_set(NEEDS_REBASELINE)
+
+ def get_rebaselining_failures(self):
+ return self._model.get_test_set(REBASELINE)
+
+ # FIXME: Change the callsites to use TestExpectationsModel and remove.
+ def get_expectations(self, test):
+ return self._model.get_expectations(test)
+
+ # FIXME: Change the callsites to use TestExpectationsModel and remove.
+ def get_tests_with_result_type(self, result_type):
+ return self._model.get_tests_with_result_type(result_type)
+
+ # FIXME: Change the callsites to use TestExpectationsModel and remove.
+ def get_test_set(self, expectation, include_skips=True):
+ return self._model.get_test_set(expectation, include_skips)
+
+ # FIXME: Change the callsites to use TestExpectationsModel and remove.
+ def get_tests_with_timeline(self, timeline):
+ return self._model.get_tests_with_timeline(timeline)
+
+ def get_expectations_string(self, test):
+ return self._model.get_expectations_string(test)
+
+ def expectation_to_string(self, expectation):
+ return self._model.expectation_to_string(expectation)
+
+ def matches_an_expected_result(self, test, result, pixel_tests_are_enabled, sanitizer_is_enabled):
+ expected_results = self._model.get_expectations(test)
+ if sanitizer_is_enabled:
+ expected_results = self.remove_non_sanitizer_failures(expected_results)
+ elif not pixel_tests_are_enabled:
+ expected_results = self.remove_pixel_failures(expected_results)
+ return self.result_was_expected(result, expected_results, self.is_rebaselining(test))
+
+ def is_rebaselining(self, test):
+ return REBASELINE in self._model.get_expectations(test)
+
+ def _shorten_filename(self, filename):
+ if filename.startswith(self._port.path_from_webkit_base()):
+ return self._port.host.filesystem.relpath(filename, self._port.path_from_webkit_base())
+ return filename
+
+ def _report_warnings(self):
+ warnings = []
+ for expectation in self._expectations:
+ for warning in expectation.warnings:
+ warnings.append('%s:%s %s %s' % (self._shorten_filename(expectation.filename), expectation.line_numbers,
+ warning, expectation.name if expectation.expectations else expectation.original_string))
+
+ if warnings:
+ self._has_warnings = True
+ if self._is_lint_mode:
+ raise ParseError(warnings)
+ _log.warning('--lint-test-files warnings:')
+ for warning in warnings:
+ _log.warning(warning)
+ _log.warning('')
+
+ def _process_tests_without_expectations(self):
+ if self._full_test_list:
+ for test in self._full_test_list:
+ if not self._model.has_test(test):
+ self._model.add_expectation_line(TestExpectationLine.create_passing_expectation(test))
+
+ def has_warnings(self):
+ return self._has_warnings
+
+ def remove_configurations(self, removals):
+ expectations_to_remove = []
+ modified_expectations = []
+
+ for test, test_configuration in removals:
+ for expectation in self._expectations:
+ if expectation.name != test or not expectation.parsed_expectations:
+ continue
+ if test_configuration not in expectation.matching_configurations:
+ continue
+
+ expectation.matching_configurations.remove(test_configuration)
+ if expectation.matching_configurations:
+ modified_expectations.append(expectation)
+ else:
+ expectations_to_remove.append(expectation)
+
+ for expectation in expectations_to_remove:
+ index = self._expectations.index(expectation)
+ self._expectations.remove(expectation)
+
+ if index == len(self._expectations) or self._expectations[index].is_whitespace_or_comment():
+ while index and self._expectations[index - 1].is_whitespace_or_comment():
+ index = index - 1
+ self._expectations.pop(index)
+
+ return self.list_to_string(self._expectations, self._parser._test_configuration_converter, modified_expectations)
+
+ def _add_expectations(self, expectation_list, model):
+ for expectation_line in expectation_list:
+ if not expectation_line.expectations:
+ continue
+
+ if self._model_all_expectations or self._test_config in expectation_line.matching_configurations:
+ model.add_expectation_line(expectation_line, model_all_expectations=self._model_all_expectations)
+
+ def add_extra_skipped_tests(self, tests_to_skip):
+ if not tests_to_skip:
+ return
+ for test in self._expectations:
+ if test.name and test.name in tests_to_skip:
+ test.warnings.append('%s:%s %s is also in a Skipped file.' % (test.filename, test.line_numbers, test.name))
+
+ model = TestExpectationsModel(self._shorten_filename)
+ for test_name in tests_to_skip:
+ expectation_line = self._parser.expectation_for_skipped_test(test_name)
+ model.add_expectation_line(expectation_line)
+ self._model.merge_model(model)
+
+ def add_expectations_from_bot(self):
+ # FIXME: With mode 'very-flaky' and 'maybe-flaky', this will show the expectations entry in the flakiness
+ # dashboard rows for each test to be whatever the bot thinks they should be. Is this a good thing?
+ bot_expectations = self._port.bot_expectations()
+ model = TestExpectationsModel(self._shorten_filename)
+ for test_name in bot_expectations:
+ expectation_line = self._parser.expectation_line_for_test(test_name, bot_expectations[test_name])
+
+ # Unexpected results are merged into existing expectations.
+ merge = self._port.get_option('ignore_flaky_tests') == 'unexpected'
+ model.add_expectation_line(expectation_line, model_all_expectations=merge)
+ self._model.merge_model(model)
+
+ def add_expectation_line(self, expectation_line):
+ self._model.add_expectation_line(expectation_line)
+ self._expectations += [expectation_line]
+
+ def remove_expectation_line(self, test):
+ if not self._model.has_test(test):
+ return
+ self._expectations.remove(self._model.get_expectation_line(test))
+ self._model.remove_expectation_line(test)
+
+ @staticmethod
+ def list_to_string(expectation_lines, test_configuration_converter=None, reconstitute_only_these=None):
+ def serialize(expectation_line):
+ # If reconstitute_only_these is an empty list, we want to return original_string.
+ # So we need to compare reconstitute_only_these to None, not just check if it's falsey.
+ if reconstitute_only_these is None or expectation_line in reconstitute_only_these:
+ return expectation_line.to_string(test_configuration_converter)
+ return expectation_line.original_string
+
+ def nones_out(expectation_line):
+ return expectation_line is not None
+
+ return "\n".join(filter(nones_out, map(serialize, expectation_lines)))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
new file mode 100644
index 0000000..fc91620
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
@@ -0,0 +1,906 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.system.outputcapture import OutputCapture
+
+from webkitpy.layout_tests.models.test_configuration import *
+from webkitpy.layout_tests.models.test_expectations import *
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ # Needed for Python < 2.7
+ from webkitpy.thirdparty.ordered_dict import OrderedDict
+
+
+class Base(unittest.TestCase):
+ # Note that all of these tests are written assuming the configuration
+ # being tested is Windows XP, Release build.
+
+ def __init__(self, testFunc):
+ host = MockHost()
+ self._port = host.port_factory.get('test-win-xp', None)
+ self._exp = None
+ unittest.TestCase.__init__(self, testFunc)
+
+ def get_basic_tests(self):
+ return ['failures/expected/text.html',
+ 'failures/expected/image_checksum.html',
+ 'failures/expected/crash.html',
+ 'failures/expected/needsrebaseline.html',
+ 'failures/expected/needsmanualrebaseline.html',
+ 'failures/expected/missing_text.html',
+ 'failures/expected/image.html',
+ 'failures/expected/timeout.html',
+ 'passes/text.html']
+
+
+ def get_basic_expectations(self):
+ return """
+Bug(test) failures/expected/text.html [ Failure ]
+Bug(test) failures/expected/crash.html [ WontFix ]
+Bug(test) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
+Bug(test) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]
+Bug(test) failures/expected/missing_image.html [ Rebaseline Missing ]
+Bug(test) failures/expected/image_checksum.html [ WontFix ]
+Bug(test) failures/expected/image.html [ WontFix Mac ]
+"""
+
+ def parse_exp(self, expectations, overrides=None, is_lint_mode=False):
+ expectations_dict = OrderedDict()
+ expectations_dict['expectations'] = expectations
+ if overrides:
+ expectations_dict['overrides'] = overrides
+ self._port.expectations_dict = lambda: expectations_dict
+ expectations_to_lint = expectations_dict if is_lint_mode else None
+ self._exp = TestExpectations(self._port, self.get_basic_tests(), expectations_dict=expectations_to_lint, is_lint_mode=is_lint_mode)
+
+ def assert_exp_list(self, test, results):
+ self.assertEqual(self._exp.get_expectations(test), set(results))
+
+ def assert_exp(self, test, result):
+ self.assert_exp_list(test, [result])
+
+ def assert_bad_expectations(self, expectations, overrides=None):
+ self.assertRaises(ParseError, self.parse_exp, expectations, is_lint_mode=True, overrides=overrides)
+
+
+class BasicTests(Base):
+ def test_basic(self):
+ self.parse_exp(self.get_basic_expectations())
+ self.assert_exp('failures/expected/text.html', FAIL)
+ self.assert_exp_list('failures/expected/image_checksum.html', [WONTFIX, SKIP])
+ self.assert_exp('passes/text.html', PASS)
+ self.assert_exp('failures/expected/image.html', PASS)
+
+
+class MiscTests(Base):
+ def test_multiple_results(self):
+ self.parse_exp('Bug(x) failures/expected/text.html [ Crash Failure ]')
+ self.assertEqual(self._exp.get_expectations('failures/expected/text.html'), set([FAIL, CRASH]))
+
+ def test_result_was_expected(self):
+ # test basics
+ self.assertEqual(TestExpectations.result_was_expected(PASS, set([PASS]), test_needs_rebaselining=False), True)
+ self.assertEqual(TestExpectations.result_was_expected(FAIL, set([PASS]), test_needs_rebaselining=False), False)
+
+ # test handling of SKIPped tests and results
+ self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False), True)
+ self.assertEqual(TestExpectations.result_was_expected(SKIP, set([LEAK]), test_needs_rebaselining=False), True)
+
+ # test handling of MISSING results and the REBASELINE specifier
+ self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=True), True)
+ self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=False), False)
+
+ self.assertTrue(TestExpectations.result_was_expected(PASS, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
+ self.assertTrue(TestExpectations.result_was_expected(MISSING, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
+ self.assertTrue(TestExpectations.result_was_expected(TEXT, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
+ self.assertTrue(TestExpectations.result_was_expected(IMAGE, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
+ self.assertTrue(TestExpectations.result_was_expected(IMAGE_PLUS_TEXT, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
+ self.assertTrue(TestExpectations.result_was_expected(AUDIO, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
+ self.assertFalse(TestExpectations.result_was_expected(TIMEOUT, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
+ self.assertFalse(TestExpectations.result_was_expected(CRASH, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
+ self.assertFalse(TestExpectations.result_was_expected(LEAK, set([NEEDS_REBASELINE]), test_needs_rebaselining=False))
+
+ def test_remove_pixel_failures(self):
+ self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
+ self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS])), set([PASS]))
+ self.assertEqual(TestExpectations.remove_pixel_failures(set([IMAGE])), set([PASS]))
+ self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
+ self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS, IMAGE, CRASH])), set([PASS, CRASH]))
+
+ def test_suffixes_for_expectations(self):
+ self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL])), set(['txt', 'png', 'wav']))
+ self.assertEqual(TestExpectations.suffixes_for_expectations(set([IMAGE])), set(['png']))
+ self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL, IMAGE, CRASH])), set(['txt', 'png', 'wav']))
+ self.assertEqual(TestExpectations.suffixes_for_expectations(set()), set())
+
+ def test_category_expectations(self):
+ # This test checks that unknown tests are not present in the
+ # expectations and that a known test that is part of a test
+ # category is present in the expectations.
+ exp_str = 'Bug(x) failures/expected [ WontFix ]'
+ self.parse_exp(exp_str)
+ unknown_test = 'failures/expected/unknown-test.html'
+ self.assertRaises(KeyError, self._exp.get_expectations, unknown_test)
+ self.assert_exp_list('failures/expected/crash.html', [WONTFIX, SKIP])
+
+ def test_get_expectations_string(self):
+ self.parse_exp(self.get_basic_expectations())
+ self.assertEqual(self._exp.get_expectations_string('failures/expected/text.html'), 'FAIL')
+
+ def test_expectation_to_string(self):
+ # Normal cases are handled by other tests.
+ self.parse_exp(self.get_basic_expectations())
+ self.assertRaises(ValueError, self._exp.expectation_to_string, -1)
+
+ def test_get_test_set(self):
+ # Handle some corner cases for this routine not covered by other tests.
+ self.parse_exp(self.get_basic_expectations())
+ s = self._exp.get_test_set(WONTFIX)
+ self.assertEqual(s, set(['failures/expected/crash.html', 'failures/expected/image_checksum.html']))
+
+ def test_needs_rebaseline_reftest(self):
+ try:
+ filesystem = self._port.host.filesystem
+ filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'failures/expected/needsrebaseline.html'), 'content')
+ filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'failures/expected/needsrebaseline-expected.html'), 'content')
+ filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'failures/expected/needsmanualrebaseline.html'), 'content')
+ filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'failures/expected/needsmanualrebaseline-expected.html'), 'content')
+ self.parse_exp("""Bug(user) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
+Bug(user) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]""", is_lint_mode=True)
+ self.assertFalse(True, "ParseError wasn't raised")
+ except ParseError, e:
+ warnings = """expectations:1 A reftest cannot be marked as NeedsRebaseline/NeedsManualRebaseline failures/expected/needsrebaseline.html
+expectations:2 A reftest cannot be marked as NeedsRebaseline/NeedsManualRebaseline failures/expected/needsmanualrebaseline.html"""
+ self.assertEqual(str(e), warnings)
+
+ def test_parse_warning(self):
+ try:
+ filesystem = self._port.host.filesystem
+ filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'disabled-test.html-disabled'), 'content')
+ self.parse_exp("Bug(user) [ FOO ] failures/expected/text.html [ Failure ]\n"
+ "Bug(user) non-existent-test.html [ Failure ]\n"
+ "Bug(user) disabled-test.html-disabled [ ImageOnlyFailure ]", is_lint_mode=True)
+ self.assertFalse(True, "ParseError wasn't raised")
+ except ParseError, e:
+ warnings = ("expectations:1 Unrecognized specifier 'foo' failures/expected/text.html\n"
+ "expectations:2 Path does not exist. non-existent-test.html")
+ self.assertEqual(str(e), warnings)
+
+ def test_parse_warnings_are_logged_if_not_in_lint_mode(self):
+ oc = OutputCapture()
+ try:
+ oc.capture_output()
+ self.parse_exp('-- this should be a syntax error', is_lint_mode=False)
+ finally:
+ _, _, logs = oc.restore_output()
+ self.assertNotEquals(logs, '')
+
+ def test_error_on_different_platform(self):
+ # parse_exp uses a Windows port, so assert that errors for lines restricted to a different platform (Mac) still show up in lint mode.
+ self.assertRaises(ParseError, self.parse_exp,
+ 'Bug(test) [ Mac ] failures/expected/text.html [ Failure ]\nBug(test) [ Mac ] failures/expected/text.html [ Failure ]',
+ is_lint_mode=True)
+
+ def test_error_on_different_build_type(self):
+ # parse_exp uses a Release port, so assert that errors for lines restricted to a different build type (Debug) still show up in lint mode.
+ self.assertRaises(ParseError, self.parse_exp,
+ 'Bug(test) [ Debug ] failures/expected/text.html [ Failure ]\nBug(test) [ Debug ] failures/expected/text.html [ Failure ]',
+ is_lint_mode=True)
+
+ def test_overrides(self):
+ self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]",
+ "Bug(override) failures/expected/text.html [ ImageOnlyFailure ]")
+ self.assert_exp_list('failures/expected/text.html', [FAIL, IMAGE])
+
+ def test_overrides__directory(self):
+ self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]",
+ "Bug(override) failures/expected [ Crash ]")
+ self.assert_exp_list('failures/expected/text.html', [FAIL, CRASH])
+ self.assert_exp_list('failures/expected/image.html', [CRASH])
+
+ def test_overrides__duplicate(self):
+ self.assert_bad_expectations("Bug(exp) failures/expected/text.html [ Failure ]",
+ "Bug(override) failures/expected/text.html [ ImageOnlyFailure ]\n"
+ "Bug(override) failures/expected/text.html [ Crash ]\n")
+
+ def test_pixel_tests_flag(self):
+ def match(test, result, pixel_tests_enabled):
+ return self._exp.matches_an_expected_result(
+ test, result, pixel_tests_enabled, sanitizer_is_enabled=False)
+
+ self.parse_exp(self.get_basic_expectations())
+ self.assertTrue(match('failures/expected/text.html', FAIL, True))
+ self.assertTrue(match('failures/expected/text.html', FAIL, False))
+ self.assertFalse(match('failures/expected/text.html', CRASH, True))
+ self.assertFalse(match('failures/expected/text.html', CRASH, False))
+ self.assertTrue(match('failures/expected/image_checksum.html', PASS, True))
+ self.assertTrue(match('failures/expected/image_checksum.html', PASS, False))
+ self.assertTrue(match('failures/expected/crash.html', PASS, False))
+ self.assertTrue(match('failures/expected/needsrebaseline.html', TEXT, True))
+ self.assertFalse(match('failures/expected/needsrebaseline.html', CRASH, True))
+ self.assertTrue(match('failures/expected/needsmanualrebaseline.html', TEXT, True))
+ self.assertFalse(match('failures/expected/needsmanualrebaseline.html', CRASH, True))
+ self.assertTrue(match('passes/text.html', PASS, False))
+
+ def test_sanitizer_flag(self):
+ def match(test, result):
+ return self._exp.matches_an_expected_result(
+ test, result, pixel_tests_are_enabled=False, sanitizer_is_enabled=True)
+
+ self.parse_exp("""
+Bug(test) failures/expected/crash.html [ Crash ]
+Bug(test) failures/expected/image.html [ ImageOnlyFailure ]
+Bug(test) failures/expected/text.html [ Failure ]
+Bug(test) failures/expected/timeout.html [ Timeout ]
+""")
+ self.assertTrue(match('failures/expected/crash.html', CRASH))
+ self.assertTrue(match('failures/expected/image.html', PASS))
+ self.assertTrue(match('failures/expected/text.html', PASS))
+ self.assertTrue(match('failures/expected/timeout.html', TIMEOUT))
+
+ def test_more_specific_override_resets_skip(self):
+ self.parse_exp("Bug(x) failures/expected [ Skip ]\n"
+ "Bug(x) failures/expected/text.html [ ImageOnlyFailure ]\n")
+ self.assert_exp('failures/expected/text.html', IMAGE)
+ self.assertFalse(self._port._filesystem.join(self._port.layout_tests_dir(),
+ 'failures/expected/text.html') in
+ self._exp.get_tests_with_result_type(SKIP))
+
+ def test_bot_test_expectations(self):
+ """Test that expectations are merged rather than overridden when using flaky option 'unexpected'."""
+ test_name1 = 'failures/expected/text.html'
+ test_name2 = 'passes/text.html'
+
+ expectations_dict = OrderedDict()
+ expectations_dict['expectations'] = "Bug(x) %s [ ImageOnlyFailure ]\nBug(x) %s [ Slow ]\n" % (test_name1, test_name2)
+ self._port.expectations_dict = lambda: expectations_dict
+
+ expectations = TestExpectations(self._port, self.get_basic_tests())
+ self.assertEqual(expectations.get_expectations(test_name1), set([IMAGE]))
+ self.assertEqual(expectations.get_expectations(test_name2), set([SLOW]))
+
+ def bot_expectations():
+ return {test_name1: ['PASS', 'TIMEOUT'], test_name2: ['CRASH']}
+ self._port.bot_expectations = bot_expectations
+ self._port._options.ignore_flaky_tests = 'unexpected'
+
+ expectations = TestExpectations(self._port, self.get_basic_tests())
+ self.assertEqual(expectations.get_expectations(test_name1), set([PASS, IMAGE, TIMEOUT]))
+ self.assertEqual(expectations.get_expectations(test_name2), set([CRASH, SLOW]))
+
+class SkippedTests(Base):
+ def check(self, expectations, overrides, skips, lint=False, expected_results=[WONTFIX, SKIP, FAIL]):
+ port = MockHost().port_factory.get('test-win-xp')
+ port._filesystem.write_text_file(port._filesystem.join(port.layout_tests_dir(), 'failures/expected/text.html'), 'foo')
+ expectations_dict = OrderedDict()
+ expectations_dict['expectations'] = expectations
+ if overrides:
+ expectations_dict['overrides'] = overrides
+ port.expectations_dict = lambda: expectations_dict
+ port.skipped_layout_tests = lambda tests: set(skips)
+ expectations_to_lint = expectations_dict if lint else None
+ exp = TestExpectations(port, ['failures/expected/text.html'], expectations_dict=expectations_to_lint, is_lint_mode=lint)
+ self.assertEqual(exp.get_expectations('failures/expected/text.html'), set(expected_results))
+
+ def test_skipped_tests_work(self):
+ self.check(expectations='', overrides=None, skips=['failures/expected/text.html'], expected_results=[WONTFIX, SKIP])
+
+ def test_duplicate_skipped_test_fails_lint(self):
+ self.assertRaises(ParseError, self.check, expectations='Bug(x) failures/expected/text.html [ Failure ]\n',
+ overrides=None, skips=['failures/expected/text.html'], lint=True)
+
+ def test_skipped_file_overrides_expectations(self):
+ self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
+ skips=['failures/expected/text.html'])
+
+ def test_skipped_dir_overrides_expectations(self):
+ self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
+ skips=['failures/expected'])
+
+ def test_skipped_file_overrides_overrides(self):
+ self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
+ skips=['failures/expected/text.html'])
+
+ def test_skipped_dir_overrides_overrides(self):
+ self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
+ skips=['failures/expected'])
+
+ def test_skipped_entry_dont_exist(self):
+ port = MockHost().port_factory.get('test-win-xp')
+ expectations_dict = OrderedDict()
+ expectations_dict['expectations'] = ''
+ port.expectations_dict = lambda: expectations_dict
+ port.skipped_layout_tests = lambda tests: set(['foo/bar/baz.html'])
+ capture = OutputCapture()
+ capture.capture_output()
+ exp = TestExpectations(port)
+ _, _, logs = capture.restore_output()
+ self.assertEqual('The following test foo/bar/baz.html from the Skipped list doesn\'t exist\n', logs)
+
+ def test_expectations_string(self):
+ self.parse_exp(self.get_basic_expectations())
+ notrun = 'failures/expected/text.html'
+ self._exp.add_extra_skipped_tests([notrun])
+ self.assertEqual('NOTRUN', self._exp.get_expectations_string(notrun))
+
+
+class ExpectationSyntaxTests(Base):
+ def test_unrecognized_expectation(self):
+ self.assert_bad_expectations('Bug(test) failures/expected/text.html [ Unknown ]')
+
+ def test_macro(self):
+ exp_str = 'Bug(test) [ Win ] failures/expected/text.html [ Failure ]'
+ self.parse_exp(exp_str)
+ self.assert_exp('failures/expected/text.html', FAIL)
+
+ def assert_tokenize_exp(self, line, bugs=None, specifiers=None, expectations=None, warnings=None, comment=None, name='foo.html'):
+ bugs = bugs or []
+ specifiers = specifiers or []
+ expectations = expectations or []
+ warnings = warnings or []
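+ # Note: the comment argument documents intent at the call sites below, but
+ # this helper does not currently assert on the parsed comment.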
+ filename = 'TestExpectations'
+ line_number = '1'
+ expectation_line = TestExpectationParser._tokenize_line(filename, line, line_number)
+ self.assertEqual(expectation_line.warnings, warnings)
+ self.assertEqual(expectation_line.name, name)
+ self.assertEqual(expectation_line.filename, filename)
+ self.assertEqual(expectation_line.line_numbers, line_number)
+ if not warnings:
+ self.assertEqual(expectation_line.specifiers, specifiers)
+ self.assertEqual(expectation_line.expectations, expectations)
+
+ def test_comments(self):
+ self.assert_tokenize_exp("# comment", name=None, comment="# comment")
+ self.assert_tokenize_exp("foo.html [ Pass ] # comment", comment="# comment", expectations=['PASS'], specifiers=[])
+
+ def test_config_specifiers(self):
+ self.assert_tokenize_exp('[ Mac ] foo.html [ Failure ] ', specifiers=['MAC'], expectations=['FAIL'])
+
+ def test_unknown_config(self):
+ self.assert_tokenize_exp('[ Foo ] foo.html [ Pass ]', specifiers=['Foo'], expectations=['PASS'])
+
+ def test_unknown_expectation(self):
+ self.assert_tokenize_exp('foo.html [ Audio ]', warnings=['Unrecognized expectation "Audio"'])
+
+ def test_skip(self):
+ self.assert_tokenize_exp('foo.html [ Skip ]', specifiers=[], expectations=['SKIP'])
+
+ def test_slow(self):
+ self.assert_tokenize_exp('foo.html [ Slow ]', specifiers=[], expectations=['SLOW'])
+
+ def test_wontfix(self):
+ self.assert_tokenize_exp('foo.html [ WontFix ]', specifiers=[], expectations=['WONTFIX', 'SKIP'])
+ self.assert_tokenize_exp('foo.html [ WontFix ImageOnlyFailure ]', specifiers=[], expectations=['WONTFIX', 'SKIP'],
+ warnings=['A test marked Skip or WontFix must not have other expectations.'])
+
+ def test_blank_line(self):
+ self.assert_tokenize_exp('', name=None)
+
+ def test_warnings(self):
+ self.assert_tokenize_exp('[ Mac ]', warnings=['Did not find a test name.', 'Missing expectations.'], name=None)
+ self.assert_tokenize_exp('[ [', warnings=['unexpected "["', 'Missing expectations.'], name=None)
+ self.assert_tokenize_exp('crbug.com/12345 ]', warnings=['unexpected "]"', 'Missing expectations.'], name=None)
+
+ self.assert_tokenize_exp('foo.html crbug.com/12345 ]', warnings=['"crbug.com/12345" is not at the start of the line.', 'Missing expectations.'])
+ self.assert_tokenize_exp('foo.html', warnings=['Missing expectations.'])
+
+
+class SemanticTests(Base):
+ def test_bug_format(self):
+ self.assertRaises(ParseError, self.parse_exp, 'BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
+
+ def test_bad_bugid(self):
+ try:
+ self.parse_exp('crbug/1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
+ self.fail('should have raised an error about a bad bug identifier')
+ except ParseError, exp:
+ self.assertEqual(len(exp.warnings), 3)
+
+ def test_missing_bugid(self):
+ self.parse_exp('failures/expected/text.html [ Failure ]', is_lint_mode=False)
+ self.assertFalse(self._exp.has_warnings())
+
+ try:
+ self.parse_exp('failures/expected/text.html [ Failure ]', is_lint_mode=True)
+ except ParseError, exp:
+ self.assertEqual(exp.warnings, ['expectations:1 Test lacks BUG specifier. failures/expected/text.html'])
+
+ def test_skip_and_wontfix(self):
+ # Skip is not allowed to be combined with other expectations, because
+ # those expectations won't be exercised and may become stale.
+ self.parse_exp('failures/expected/text.html [ Failure Skip ]')
+ self.assertTrue(self._exp.has_warnings())
+
+ self.parse_exp('failures/expected/text.html [ Crash WontFix ]')
+ self.assertTrue(self._exp.has_warnings())
+
+ self.parse_exp('failures/expected/text.html [ Pass WontFix ]')
+ self.assertTrue(self._exp.has_warnings())
+
+ def test_rebaseline(self):
+ # Can't lint a file w/ 'REBASELINE' in it.
+ self.assertRaises(ParseError, self.parse_exp,
+ 'Bug(test) failures/expected/text.html [ Failure Rebaseline ]',
+ is_lint_mode=True)
+
+ def test_duplicates(self):
+ self.assertRaises(ParseError, self.parse_exp, """
+Bug(exp) failures/expected/text.html [ Failure ]
+Bug(exp) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True)
+
+ self.assertRaises(ParseError, self.parse_exp,
+ self.get_basic_expectations(), overrides="""
+Bug(override) failures/expected/text.html [ Failure ]
+Bug(override) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True)
+
+ def test_duplicate_with_line_before_preceding_line(self):
+ self.assert_bad_expectations("""Bug(exp) [ Debug ] failures/expected/text.html [ Failure ]
+Bug(exp) [ Release ] failures/expected/text.html [ Failure ]
+Bug(exp) [ Debug ] failures/expected/text.html [ Failure ]
+""")
+
+ def test_missing_file(self):
+ self.parse_exp('Bug(test) missing_file.html [ Failure ]')
+ self.assertTrue(self._exp.has_warnings())
+
+
+class PrecedenceTests(Base):
+ def test_file_over_directory(self):
+ # This tests handling precedence of specific lines over directories
+ # and tests expectations covering entire directories.
+ exp_str = """
+Bug(x) failures/expected/text.html [ Failure ]
+Bug(y) failures/expected [ WontFix ]
+"""
+ self.parse_exp(exp_str)
+ self.assert_exp('failures/expected/text.html', FAIL)
+ self.assert_exp_list('failures/expected/crash.html', [WONTFIX, SKIP])
+
+ exp_str = """
+Bug(x) failures/expected [ WontFix ]
+Bug(y) failures/expected/text.html [ Failure ]
+"""
+ self.parse_exp(exp_str)
+ self.assert_exp('failures/expected/text.html', FAIL)
+ self.assert_exp_list('failures/expected/crash.html', [WONTFIX, SKIP])
+
+ def test_ambiguous(self):
+ self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n"
+ "Bug(test) [ Win ] passes/text.html [ Failure ]\n")
+
+ def test_more_specifiers(self):
+ self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n"
+ "Bug(test) [ Win Release ] passes/text.html [ Failure ]\n")
+
+ def test_order_in_file(self):
+ self.assert_bad_expectations("Bug(test) [ Win Release ] : passes/text.html [ Failure ]\n"
+ "Bug(test) [ Release ] : passes/text.html [ Pass ]\n")
+
+ def test_macro_overrides(self):
+ self.assert_bad_expectations("Bug(test) [ Win ] passes/text.html [ Pass ]\n"
+ "Bug(test) [ XP ] passes/text.html [ Failure ]\n")
+
+
+class RemoveConfigurationsTest(Base):
+ def test_remove(self):
+ host = MockHost()
+ test_port = host.port_factory.get('test-win-xp', None)
+ test_port.test_exists = lambda test: True
+ test_port.test_isfile = lambda test: True
+
+ test_config = test_port.test_configuration()
+ test_port.expectations_dict = lambda: {"expectations": """Bug(x) [ Linux Win Release ] failures/expected/foo.html [ Failure ]
+Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
+"""}
+ expectations = TestExpectations(test_port, self.get_basic_tests())
+
+ actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
+
+ self.assertEqual("""Bug(x) [ Linux Win7 Release ] failures/expected/foo.html [ Failure ]
+Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
+""", actual_expectations)
+
+ def test_remove_needs_rebaseline(self):
+ host = MockHost()
+ test_port = host.port_factory.get('test-win-xp', None)
+ test_port.test_exists = lambda test: True
+ test_port.test_isfile = lambda test: True
+
+ test_config = test_port.test_configuration()
+ test_port.expectations_dict = lambda: {"expectations": """Bug(x) [ Win ] failures/expected/foo.html [ NeedsRebaseline ]
+"""}
+ expectations = TestExpectations(test_port, self.get_basic_tests())
+
+ actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
+
+ self.assertEqual("""Bug(x) [ XP Debug ] failures/expected/foo.html [ NeedsRebaseline ]
+Bug(x) [ Win7 ] failures/expected/foo.html [ NeedsRebaseline ]
+""", actual_expectations)
+
+ def test_remove_multiple_configurations(self):
+ host = MockHost()
+ test_port = host.port_factory.get('test-win-xp', None)
+ test_port.test_exists = lambda test: True
+ test_port.test_isfile = lambda test: True
+
+ test_config = test_port.test_configuration()
+ test_port.expectations_dict = lambda: {'expectations': """Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
+"""}
+ expectations = TestExpectations(test_port)
+
+ actual_expectations = expectations.remove_configurations([
+ ('failures/expected/foo.html', test_config),
+ ('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration()),
+ ])
+
+ self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+""", actual_expectations)
+
+ def test_remove_line_with_comments(self):
+ host = MockHost()
+ test_port = host.port_factory.get('test-win-xp', None)
+ test_port.test_exists = lambda test: True
+ test_port.test_isfile = lambda test: True
+
+ test_config = test_port.test_configuration()
+ test_port.expectations_dict = lambda: {'expectations': """Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+
+ # This comment line should get stripped. As should the preceding line.
+Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
+"""}
+ expectations = TestExpectations(test_port)
+
+ actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
+ actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())])
+
+ self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+""", actual_expectations)
+
+ def test_remove_line_with_comments_at_start(self):
+ host = MockHost()
+ test_port = host.port_factory.get('test-win-xp', None)
+ test_port.test_exists = lambda test: True
+ test_port.test_isfile = lambda test: True
+
+ test_config = test_port.test_configuration()
+ test_port.expectations_dict = lambda: {'expectations': """
+ # This comment line should get stripped. As should the preceding line.
+Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
+
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+"""}
+ expectations = TestExpectations(test_port)
+
+ actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
+ actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())])
+
+ self.assertEqual("""
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+""", actual_expectations)
+
+ def test_remove_line_with_comments_at_end_with_no_trailing_newline(self):
+ host = MockHost()
+ test_port = host.port_factory.get('test-win-xp', None)
+ test_port.test_exists = lambda test: True
+ test_port.test_isfile = lambda test: True
+
+ test_config = test_port.test_configuration()
+ test_port.expectations_dict = lambda: {'expectations': """Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+
+ # This comment line should get stripped. As should the preceding line.
+Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]"""}
+ expectations = TestExpectations(test_port)
+
+ actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
+ actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())])
+
+ self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]""", actual_expectations)
+
+ def test_remove_line_leaves_comments_for_next_line(self):
+ host = MockHost()
+ test_port = host.port_factory.get('test-win-xp', None)
+ test_port.test_exists = lambda test: True
+ test_port.test_isfile = lambda test: True
+
+ test_config = test_port.test_configuration()
+ test_port.expectations_dict = lambda: {'expectations': """
+ # This comment line should not get stripped.
+Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+"""}
+ expectations = TestExpectations(test_port)
+
+ actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
+ actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())])
+
+ self.assertEqual("""
+ # This comment line should not get stripped.
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+""", actual_expectations)
+
+ def test_remove_line_no_whitespace_lines(self):
+ host = MockHost()
+ test_port = host.port_factory.get('test-win-xp', None)
+ test_port.test_exists = lambda test: True
+ test_port.test_isfile = lambda test: True
+
+ test_config = test_port.test_configuration()
+ test_port.expectations_dict = lambda: {'expectations': """
+ # This comment line should get stripped.
+Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
+ # This comment line should not get stripped.
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+"""}
+ expectations = TestExpectations(test_port)
+
+ actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
+ actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())])
+
+ self.assertEqual(""" # This comment line should not get stripped.
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+""", actual_expectations)
+
+ def test_remove_first_line(self):
+ host = MockHost()
+ test_port = host.port_factory.get('test-win-xp', None)
+ test_port.test_exists = lambda test: True
+ test_port.test_isfile = lambda test: True
+
+ test_config = test_port.test_configuration()
+ test_port.expectations_dict = lambda: {'expectations': """Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
+ # This comment line should not get stripped.
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+"""}
+ expectations = TestExpectations(test_port)
+
+ actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
+ actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())])
+
+ self.assertEqual(""" # This comment line should not get stripped.
+Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
+""", actual_expectations)
+
+ def test_remove_flaky_line(self):
+ host = MockHost()
+ test_port = host.port_factory.get('test-win-xp', None)
+ test_port.test_exists = lambda test: True
+ test_port.test_isfile = lambda test: True
+
+ test_config = test_port.test_configuration()
+ test_port.expectations_dict = lambda: {'expectations': """Bug(x) [ Win ] failures/expected/foo.html [ Failure Timeout ]
+Bug(y) [ Mac ] failures/expected/foo.html [ Crash ]
+"""}
+ expectations = TestExpectations(test_port)
+
+ actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', test_config)])
+ actual_expectations = expectations.remove_configurations([('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())])
+
+ self.assertEqual("""Bug(x) [ Win Debug ] failures/expected/foo.html [ Failure Timeout ]
+Bug(y) [ Mac ] failures/expected/foo.html [ Crash ]
+""", actual_expectations)
+
+
+class RebaseliningTest(Base):
+ def test_get_rebaselining_failures(self):
+ # Make sure we find a test as needing a rebaseline even if it is not marked as a failure.
+ self.parse_exp('Bug(x) failures/expected/text.html [ Rebaseline ]\n')
+ self.assertEqual(len(self._exp.get_rebaselining_failures()), 1)
+
+ self.parse_exp(self.get_basic_expectations())
+ self.assertEqual(len(self._exp.get_rebaselining_failures()), 0)
+
+
+class TestExpectationsParserTests(unittest.TestCase):
+ def __init__(self, testFunc):
+ host = MockHost()
+ test_port = host.port_factory.get('test-win-xp', None)
+ self._converter = TestConfigurationConverter(test_port.all_test_configurations(), test_port.configuration_specifier_macros())
+ unittest.TestCase.__init__(self, testFunc)
+ self._parser = TestExpectationParser(host.port_factory.get('test-win-xp', None), [], is_lint_mode=False)
+
+ def test_expectation_line_for_test(self):
+ # This is kind of a silly test, but it at least ensures that we don't throw an error.
+ test_name = 'foo/test.html'
+ expectations = set(["PASS", "IMAGE"])
+
+ expectation_line = TestExpectationLine()
+ expectation_line.original_string = test_name
+ expectation_line.name = test_name
+ expectation_line.filename = '<Bot TestExpectations>'
+ expectation_line.line_numbers = '0'
+ expectation_line.expectations = expectations
+ self._parser._parse_line(expectation_line)
+
+ self.assertEqual(self._parser.expectation_line_for_test(test_name, expectations), expectation_line)
+
+
+class TestExpectationSerializationTests(unittest.TestCase):
+ def __init__(self, testFunc):
+ host = MockHost()
+ test_port = host.port_factory.get('test-win-xp', None)
+ self._converter = TestConfigurationConverter(test_port.all_test_configurations(), test_port.configuration_specifier_macros())
+ unittest.TestCase.__init__(self, testFunc)
+
+ def _tokenize(self, line):
+ return TestExpectationParser._tokenize_line('path', line, 0)
+
+ def assert_round_trip(self, in_string, expected_string=None):
+ expectation = self._tokenize(in_string)
+ if expected_string is None:
+ expected_string = in_string
+ self.assertEqual(expected_string, expectation.to_string(self._converter))
+
+ def assert_list_round_trip(self, in_string, expected_string=None):
+ host = MockHost()
+ parser = TestExpectationParser(host.port_factory.get('test-win-xp', None), [], is_lint_mode=False)
+ expectations = parser.parse('path', in_string)
+ if expected_string is None:
+ expected_string = in_string
+ self.assertEqual(expected_string, TestExpectations.list_to_string(expectations, self._converter))
+
+ def test_unparsed_to_string(self):
+ expectation = TestExpectationLine()
+
+ self.assertEqual(expectation.to_string(self._converter), '')
+ expectation.comment = ' Qux.'
+ self.assertEqual(expectation.to_string(self._converter), '# Qux.')
+ expectation.name = 'bar'
+ self.assertEqual(expectation.to_string(self._converter), 'bar # Qux.')
+ expectation.specifiers = ['foo']
+ # FIXME: case should be preserved here but we can't until we drop the old syntax.
+ self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar # Qux.')
+ expectation.expectations = ['bAz']
+ self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ ] # Qux.')
+ expectation.expectations = ['bAz1', 'baZ2']
+ self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ1 BAZ2 ] # Qux.')
+ expectation.specifiers = ['foo1', 'foO2']
+ self.assertEqual(expectation.to_string(self._converter), '[ FOO1 FOO2 ] bar [ BAZ1 BAZ2 ] # Qux.')
+ expectation.warnings.append('Oh the horror.')
+ self.assertEqual(expectation.to_string(self._converter), '')
+ expectation.original_string = 'Yes it is!'
+ self.assertEqual(expectation.to_string(self._converter), 'Yes it is!')
+
+ def test_unparsed_list_to_string(self):
+ expectation = TestExpectationLine()
+ expectation.comment = 'Qux.'
+ expectation.name = 'bar'
+ expectation.specifiers = ['foo']
+ expectation.expectations = ['bAz1', 'baZ2']
+ # FIXME: case should be preserved here but we can't until we drop the old syntax.
+ self.assertEqual(TestExpectations.list_to_string([expectation]), '[ FOO ] bar [ BAZ1 BAZ2 ] #Qux.')
+
+ def test_parsed_to_string(self):
+ expectation_line = TestExpectationLine()
+ expectation_line.bugs = ['Bug(x)']
+ expectation_line.name = 'test/name/for/realz.html'
+ expectation_line.parsed_expectations = set([IMAGE])
+ self.assertEqual(expectation_line.to_string(self._converter), None)
+ expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release')])
+ self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP Release ] test/name/for/realz.html [ ImageOnlyFailure ]')
+ expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')])
+ self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP ] test/name/for/realz.html [ ImageOnlyFailure ]')
+
+ def test_serialize_parsed_expectations(self):
+ expectation_line = TestExpectationLine()
+ expectation_line.parsed_expectations = set([])
+ parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])
+ self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), '')
+ expectation_line.parsed_expectations = set([FAIL])
+ self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'fail')
+ expectation_line.parsed_expectations = set([PASS, IMAGE])
+ self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'image pass')
+ expectation_line.parsed_expectations = set([FAIL, PASS])
+ self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'pass fail')
+
+ def test_serialize_parsed_specifier_string(self):
+ expectation_line = TestExpectationLine()
+ expectation_line.bugs = ['garden-o-matic']
+ expectation_line.parsed_specifiers = ['the', 'for']
+ self.assertEqual(expectation_line._serialize_parsed_specifiers(self._converter, []), 'for the')
+ self.assertEqual(expectation_line._serialize_parsed_specifiers(self._converter, ['win']), 'for the win')
+ expectation_line.bugs = []
+ expectation_line.parsed_specifiers = []
+ self.assertEqual(expectation_line._serialize_parsed_specifiers(self._converter, []), '')
+ self.assertEqual(expectation_line._serialize_parsed_specifiers(self._converter, ['win']), 'win')
+
+ def test_format_line(self):
+ self.assertEqual(TestExpectationLine._format_line([], ['MODIFIERS'], 'name', ['EXPECTATIONS'], 'comment'), '[ MODIFIERS ] name [ EXPECTATIONS ] #comment')
+ self.assertEqual(TestExpectationLine._format_line([], ['MODIFIERS'], 'name', ['EXPECTATIONS'], None), '[ MODIFIERS ] name [ EXPECTATIONS ]')
+
+ def test_string_roundtrip(self):
+ self.assert_round_trip('')
+ self.assert_round_trip('[')
+ self.assert_round_trip('FOO [')
+ self.assert_round_trip('FOO ] bar')
+ self.assert_round_trip(' FOO [')
+ self.assert_round_trip(' [ FOO ] ')
+ self.assert_round_trip('[ FOO ] bar [ BAZ ]')
+ self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.')
+ self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.')
+ self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ')
+ self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ')
+ self.assert_round_trip('[ FOO ] ] ] bar BAZ')
+ self.assert_round_trip('[ FOO ] ] ] bar [ BAZ ]')
+ self.assert_round_trip('FOO ] ] bar ==== BAZ')
+ self.assert_round_trip('=')
+ self.assert_round_trip('#')
+ self.assert_round_trip('# ')
+ self.assert_round_trip('# Foo')
+ self.assert_round_trip('# Foo')
+ self.assert_round_trip('# Foo :')
+ self.assert_round_trip('# Foo : =')
+
+ def test_list_roundtrip(self):
+ self.assert_list_round_trip('')
+ self.assert_list_round_trip('\n')
+ self.assert_list_round_trip('\n\n')
+ self.assert_list_round_trip('bar')
+ self.assert_list_round_trip('bar\n# Qux.')
+ self.assert_list_round_trip('bar\n# Qux.\n')
+
+ def test_reconstitute_only_these(self):
+ lines = []
+ reconstitute_only_these = []
+
+ def add_line(matching_configurations, reconstitute):
+ expectation_line = TestExpectationLine()
+ expectation_line.original_string = "Nay"
+ expectation_line.bugs = ['Bug(x)']
+ expectation_line.name = 'Yay'
+ expectation_line.parsed_expectations = set([IMAGE])
+ expectation_line.matching_configurations = matching_configurations
+ lines.append(expectation_line)
+ if reconstitute:
+ reconstitute_only_these.append(expectation_line)
+
+ add_line(set([TestConfiguration('xp', 'x86', 'release')]), True)
+ add_line(set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')]), False)
+ serialized = TestExpectations.list_to_string(lines, self._converter)
+ self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nBug(x) [ XP ] Yay [ ImageOnlyFailure ]")
+ serialized = TestExpectations.list_to_string(lines, self._converter, reconstitute_only_these=reconstitute_only_these)
+ self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nNay")
+
+ def disabled_test_string_whitespace_stripping(self):
+ # FIXME: Re-enable this test once we rework the code to no longer support the old syntax.
+ self.assert_round_trip('\n', '')
+ self.assert_round_trip(' [ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
+ self.assert_round_trip('[ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
+ self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
+ self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
+ self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
+ self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_failures.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_failures.py
new file mode 100644
index 0000000..5c16b94
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_failures.py
@@ -0,0 +1,239 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import cPickle
+
+from webkitpy.layout_tests.models import test_expectations
+
+
+def is_reftest_failure(failure_list):
+ failure_types = [type(f) for f in failure_list]
+ return set((FailureReftestMismatch, FailureReftestMismatchDidNotOccur, FailureReftestNoImagesGenerated)).intersection(failure_types)
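+
+# Note that is_reftest_failure returns the (possibly empty) intersection set
+# rather than a bool; callers rely on the set's truthiness.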
+
+# FIXME: This is backwards. Each TestFailure subclass should know what
+# test_expectation type it corresponds to. Then this method just
+# collects them all from the failure list and returns the worst one.
+def determine_result_type(failure_list):
+ """Takes a set of test_failures and returns which result type best fits
+ the list of failures. "Best fits" means we use the worst type of failure.
+
+ Returns:
+ one of the test_expectations result types - PASS, FAIL, CRASH, etc."""
+
+ if not failure_list:
+ return test_expectations.PASS
+
+ failure_types = [type(f) for f in failure_list]
+ if FailureCrash in failure_types:
+ return test_expectations.CRASH
+ elif FailureLeak in failure_types:
+ return test_expectations.LEAK
+ elif FailureTimeout in failure_types:
+ return test_expectations.TIMEOUT
+ elif FailureEarlyExit in failure_types:
+ return test_expectations.SKIP
+ elif (FailureMissingResult in failure_types or
+ FailureMissingImage in failure_types or
+ FailureMissingImageHash in failure_types or
+ FailureMissingAudio in failure_types):
+ return test_expectations.MISSING
+ else:
+ is_text_failure = (FailureTextMismatch in failure_types or
+ FailureTestHarnessAssertion in failure_types)
+ is_image_failure = (FailureImageHashIncorrect in failure_types or
+ FailureImageHashMismatch in failure_types)
+ is_audio_failure = (FailureAudioMismatch in failure_types)
+ if is_text_failure and is_image_failure:
+ return test_expectations.IMAGE_PLUS_TEXT
+ elif is_text_failure:
+ return test_expectations.TEXT
+ elif is_image_failure or is_reftest_failure(failure_list):
+ return test_expectations.IMAGE
+ elif is_audio_failure:
+ return test_expectations.AUDIO
+ else:
+ raise ValueError("unclassifiable set of failures: "
+ + str(failure_types))
+
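+# Illustrative sketch of the precedence above (these example calls are
+# illustrative only and are not exercised by this module):
+#   determine_result_type([FailureTextMismatch(), FailureImageHashMismatch()])
+#     -> test_expectations.IMAGE_PLUS_TEXT
+#   determine_result_type([FailureTextMismatch(), FailureCrash()])
+#     -> test_expectations.CRASH (a crash outranks everything else)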
+
+class TestFailure(object):
+ """Abstract base class that defines the failure interface."""
+
+ @staticmethod
+ def loads(s):
+ """Creates a TestFailure object from the specified string."""
+ return cPickle.loads(s)
+
+ def message(self):
+ """Returns a string describing the failure in more detail."""
+ raise NotImplementedError
+
+ def __eq__(self, other):
+ return self.__class__.__name__ == other.__class__.__name__
+
+ def __ne__(self, other):
+ return self.__class__.__name__ != other.__class__.__name__
+
+ def __hash__(self):
+ return hash(self.__class__.__name__)
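+
+ # Note: equality and hashing are keyed on the class name alone, so two
+ # instances of the same failure class compare equal and collapse in sets.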
+
+ def dumps(self):
+ """Returns the string/JSON representation of a TestFailure."""
+ return cPickle.dumps(self)
+
+ def driver_needs_restart(self):
+ """Returns True if we should kill the driver before the next test."""
+ return False
+
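+# A minimal round-trip sketch (illustrative): failures are serialized with
+# cPickle so they can be stored or passed between processes, e.g.
+#   s = FailureTimeout().dumps()
+#   assert TestFailure.loads(s) == FailureTimeout()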
+
+class FailureTimeout(TestFailure):
+ def __init__(self, is_reftest=False):
+ super(FailureTimeout, self).__init__()
+ self.is_reftest = is_reftest
+
+ def message(self):
+ return "test timed out"
+
+ def driver_needs_restart(self):
+ return True
+
+
+class FailureCrash(TestFailure):
+ def __init__(self, is_reftest=False, process_name='content_shell', pid=None, has_log=False):
+ super(FailureCrash, self).__init__()
+ self.process_name = process_name
+ self.pid = pid
+ self.is_reftest = is_reftest
+ self.has_log = has_log
+
+ def message(self):
+ if self.pid:
+ return "%s crashed [pid=%d]" % (self.process_name, self.pid)
+ return self.process_name + " crashed"
+
+ def driver_needs_restart(self):
+ return True
+
+
+class FailureLeak(TestFailure):
+ def __init__(self, is_reftest=False, log=''):
+ super(FailureLeak, self).__init__()
+ self.is_reftest = is_reftest
+ self.log = log
+
+ def message(self):
+ return "leak detected: %s" % (self.log)
+
+
+class FailureMissingResult(TestFailure):
+ def message(self):
+ return "-expected.txt was missing"
+
+
+class FailureTestHarnessAssertion(TestFailure):
+ def message(self):
+ return "asserts failed"
+
+
+class FailureTextMismatch(TestFailure):
+ def message(self):
+ return "text diff"
+
+
+class FailureMissingImageHash(TestFailure):
+ def message(self):
+ return "-expected.png was missing an embedded checksum"
+
+
+class FailureMissingImage(TestFailure):
+ def message(self):
+ return "-expected.png was missing"
+
+
+class FailureImageHashMismatch(TestFailure):
+ def message(self):
+ return "image diff"
+
+
+class FailureImageHashIncorrect(TestFailure):
+ def message(self):
+ return "-expected.png embedded checksum is incorrect"
+
+
+class FailureReftestMismatch(TestFailure):
+ def __init__(self, reference_filename=None):
+ super(FailureReftestMismatch, self).__init__()
+ self.reference_filename = reference_filename
+
+ def message(self):
+ return "reference mismatch"
+
+
+class FailureReftestMismatchDidNotOccur(TestFailure):
+ def __init__(self, reference_filename=None):
+ super(FailureReftestMismatchDidNotOccur, self).__init__()
+ self.reference_filename = reference_filename
+
+ def message(self):
+ return "reference mismatch didn't happen"
+
+
+class FailureReftestNoImagesGenerated(TestFailure):
+ def __init__(self, reference_filename=None):
+ super(FailureReftestNoImagesGenerated, self).__init__()
+ self.reference_filename = reference_filename
+
+ def message(self):
+ return "reference didn't generate pixel results."
+
+
+class FailureMissingAudio(TestFailure):
+ def message(self):
+ return "expected audio result was missing"
+
+
+class FailureAudioMismatch(TestFailure):
+ def message(self):
+ return "audio mismatch"
+
+
+class FailureEarlyExit(TestFailure):
+ def message(self):
+ return "skipped due to early exit"
+
+
+# Convenient collection of all failure classes for anything that might
+# need to enumerate over them all.
+ALL_FAILURE_CLASSES = (FailureTimeout, FailureCrash, FailureMissingResult,
+ FailureTestHarnessAssertion,
+ FailureTextMismatch, FailureMissingImageHash,
+ FailureMissingImage, FailureImageHashMismatch,
+ FailureImageHashIncorrect, FailureReftestMismatch,
+ FailureReftestMismatchDidNotOccur, FailureReftestNoImagesGenerated,
+ FailureMissingAudio, FailureAudioMismatch,
+ FailureEarlyExit)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py
new file mode 100644
index 0000000..55a9b2c
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py
@@ -0,0 +1,73 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.layout_tests.models.test_failures import *
+
+
+class TestFailuresTest(unittest.TestCase):
+ def assert_loads(self, cls):
+ failure_obj = cls()
+ s = failure_obj.dumps()
+ new_failure_obj = TestFailure.loads(s)
+ self.assertIsInstance(new_failure_obj, cls)
+
+ self.assertEqual(failure_obj, new_failure_obj)
+
+ # Also test that != is implemented.
+ self.assertFalse(failure_obj != new_failure_obj)
+
+ def test_unknown_failure_type(self):
+ class UnknownFailure(TestFailure):
+ def message(self):
+ return ''
+
+ failure_obj = UnknownFailure()
+ self.assertRaises(ValueError, determine_result_type, [failure_obj])
+
+ def test_message_is_virtual(self):
+ failure_obj = TestFailure()
+ self.assertRaises(NotImplementedError, failure_obj.message)
+
+ def test_loads(self):
+ for c in ALL_FAILURE_CLASSES:
+ self.assert_loads(c)
+
+ def test_equals(self):
+ self.assertEqual(FailureCrash(), FailureCrash())
+ self.assertNotEqual(FailureCrash(), FailureTimeout())
+ crash_set = set([FailureCrash(), FailureCrash()])
+ self.assertEqual(len(crash_set), 1)
+ # The hash is based on the class name, so it collides with the plain
+ # string, but equality still distinguishes them:
+ crash_set = set([FailureCrash(), "FailureCrash"])
+ self.assertEqual(len(crash_set), 2)
+
+ def test_crashes(self):
+ self.assertEqual(FailureCrash().message(), 'content_shell crashed')
+ self.assertEqual(FailureCrash(process_name='foo', pid=1234).message(), 'foo crashed [pid=1234]')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_input.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_input.py
new file mode 100644
index 0000000..1a3030f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_input.py
@@ -0,0 +1,46 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class TestInput(object):
+ """Groups information about a test for easy passing of data."""
+
+ def __init__(self, test_name, timeout=None, requires_lock=None, reference_files=None, should_run_pixel_tests=None, should_add_missing_baselines=True):
+ # TestInput objects are normally constructed by the manager and passed
+ # to the workers, but some fields are set lazily in the workers where
+ # possible, because computing them requires looking at the filesystem
+ # and we want to be able to do that in parallel.
+ self.test_name = test_name
+ self.timeout = timeout # in msecs; should rename this for consistency
+ self.requires_lock = requires_lock
+ self.reference_files = reference_files
+ self.should_run_pixel_tests = should_run_pixel_tests
+ self.should_add_missing_baselines = should_add_missing_baselines
+
+ def __repr__(self):
+ return "TestInput('%s', timeout=%s, requires_lock=%s, reference_files=%s, should_run_pixel_tests=%s, should_add_missing_baselines%s)" % (self.test_name, self.timeout, self.requires_lock, self.reference_files, self.should_run_pixel_tests, self.should_add_missing_baselines)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_results.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_results.py
new file mode 100644
index 0000000..2e313ca
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_results.py
@@ -0,0 +1,76 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import cPickle
+
+from webkitpy.layout_tests.models import test_failures
+
+
+class TestResult(object):
+ """Data object containing the results of a single test."""
+
+ @staticmethod
+ def loads(string):
+ return cPickle.loads(string)
+
+ def __init__(self, test_name, failures=None, test_run_time=None, has_stderr=False, reftest_type=None, pid=None, references=None, device_failed=False, has_repaint_overlay=False):
+ self.test_name = test_name
+ self.failures = failures or []
+ self.test_run_time = test_run_time or 0 # The time taken to execute the test itself.
+ self.has_stderr = has_stderr
+ self.reftest_type = reftest_type or []
+ self.pid = pid
+ self.references = references or []
+ self.device_failed = device_failed
+ self.has_repaint_overlay = has_repaint_overlay
+
+ # FIXME: Setting this in the constructor makes this class hard to mutate.
+ self.type = test_failures.determine_result_type(failures)
+
+ # These are set by the worker, not by the driver, so they are not passed to the constructor.
+ self.worker_name = ''
+ self.shard_name = ''
+ self.total_run_time = 0 # The time taken to run the test plus any references, compute diffs, etc.
+ self.test_number = None
+
+ def __eq__(self, other):
+ return (self.test_name == other.test_name and
+ self.failures == other.failures and
+ self.test_run_time == other.test_run_time)
+
+ def __ne__(self, other):
+ return not (self == other)
+
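+ # Illustrative usage: callers can probe for specific failure kinds, e.g.
+ # result.has_failure_matching_types(test_failures.FailureCrash,
+ # test_failures.FailureTimeout).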
+ def has_failure_matching_types(self, *failure_classes):
+ for failure in self.failures:
+ if type(failure) in failure_classes:
+ return True
+ return False
+
+ def dumps(self):
+ return cPickle.dumps(self)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_results_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_results_unittest.py
new file mode 100644
index 0000000..8d46315
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_results_unittest.py
@@ -0,0 +1,52 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.layout_tests.models.test_results import TestResult
+
+
+class TestResultsTest(unittest.TestCase):
+ def test_defaults(self):
+ result = TestResult("foo")
+ self.assertEqual(result.test_name, 'foo')
+ self.assertEqual(result.failures, [])
+ self.assertEqual(result.test_run_time, 0)
+
+ def test_loads(self):
+ result = TestResult(test_name='foo',
+ failures=[],
+ test_run_time=1.1)
+ s = result.dumps()
+ new_result = TestResult.loads(s)
+ self.assertIsInstance(new_result, TestResult)
+
+ self.assertEqual(new_result, result)
+
+ # Also check that != is implemented.
+ self.assertFalse(new_result != result)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
new file mode 100644
index 0000000..1e729f8
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
@@ -0,0 +1,339 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import re
+import signal
+import time
+
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+
+
+_log = logging.getLogger(__name__)
+
+OK_EXIT_STATUS = 0
+
+# This matches what the shell does on POSIX.
+INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128
+
+# POSIX limits exit status codes to 0-255. Normally run-webkit-tests returns the
+# number of tests that failed. The codes below indicate exceptional conditions
+# triggered by the script itself, so we count backwards from 255 (aka -1) to enumerate them.
+#
+# FIXME: crbug.com/357866. We really shouldn't return the number of failures
+# in the exit code at all.
+EARLY_EXIT_STATUS = 251
+SYS_DEPS_EXIT_STATUS = 252
+NO_TESTS_EXIT_STATUS = 253
+NO_DEVICES_EXIT_STATUS = 254
+UNEXPECTED_ERROR_EXIT_STATUS = 255
+
+ERROR_CODES = (
+ INTERRUPTED_EXIT_STATUS,
+ EARLY_EXIT_STATUS,
+ SYS_DEPS_EXIT_STATUS,
+ NO_TESTS_EXIT_STATUS,
+ NO_DEVICES_EXIT_STATUS,
+ UNEXPECTED_ERROR_EXIT_STATUS,
+)
+
+# In order to avoid colliding with the above codes, we put a ceiling on
+# the value returned by num_regressions.
+MAX_FAILURES_EXIT_STATUS = 101
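+
+# Editor's sketch (not part of the original import): how a runner might clamp
+# its failure count so the resulting exit code never collides with the error
+# codes above.
+def _example_failure_exit_code(num_regressions):
+    # Any count at or above the cap is reported as the cap itself.
+    return min(num_regressions, MAX_FAILURES_EXIT_STATUS)
+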
+
+class TestRunException(Exception):
+ def __init__(self, code, msg):
+ self.code = code
+ self.msg = msg
+
+
+class TestRunResults(object):
+ def __init__(self, expectations, num_tests):
+ self.total = num_tests
+ self.remaining = self.total
+ self.expectations = expectations
+ self.expected = 0
+ self.expected_failures = 0
+ self.unexpected = 0
+ self.unexpected_failures = 0
+ self.unexpected_crashes = 0
+ self.unexpected_timeouts = 0
+ self.tests_by_expectation = {}
+ self.tests_by_timeline = {}
+ self.results_by_name = {} # Map of test name to the last result for the test.
+ self.all_results = [] # All results from a run, including every iteration of every test.
+ self.unexpected_results_by_name = {}
+ self.failures_by_name = {}
+ self.total_failures = 0
+ self.expected_skips = 0
+ for expectation in test_expectations.TestExpectations.EXPECTATIONS.values():
+ self.tests_by_expectation[expectation] = set()
+ for timeline in test_expectations.TestExpectations.TIMELINES.values():
+ self.tests_by_timeline[timeline] = expectations.get_tests_with_timeline(timeline)
+ self.slow_tests = set()
+ self.interrupted = False
+ self.keyboard_interrupted = False
+ self.run_time = 0 # The wall clock time spent running the tests (layout_test_runner.run()).
+
+ def add(self, test_result, expected, test_is_slow):
+ result_type_for_stats = test_result.type
+ if test_expectations.WONTFIX in self.expectations.model().get_expectations(test_result.test_name):
+ result_type_for_stats = test_expectations.WONTFIX
+ self.tests_by_expectation[result_type_for_stats].add(test_result.test_name)
+
+ self.results_by_name[test_result.test_name] = test_result
+ if test_result.type != test_expectations.SKIP:
+ self.all_results.append(test_result)
+ self.remaining -= 1
+ if len(test_result.failures):
+ self.total_failures += 1
+ self.failures_by_name[test_result.test_name] = test_result.failures
+ if expected:
+ self.expected += 1
+ if test_result.type == test_expectations.SKIP:
+ self.expected_skips += 1
+ elif test_result.type != test_expectations.PASS:
+ self.expected_failures += 1
+ else:
+ self.unexpected_results_by_name[test_result.test_name] = test_result
+ self.unexpected += 1
+ if len(test_result.failures):
+ self.unexpected_failures += 1
+ if test_result.type == test_expectations.CRASH:
+ self.unexpected_crashes += 1
+ elif test_result.type == test_expectations.TIMEOUT:
+ self.unexpected_timeouts += 1
+ if test_is_slow:
+ self.slow_tests.add(test_result.test_name)
+
+
+class RunDetails(object):
+ def __init__(self, exit_code, summarized_full_results=None, summarized_failing_results=None, initial_results=None, retry_results=None, enabled_pixel_tests_in_retry=False):
+ self.exit_code = exit_code
+ self.summarized_full_results = summarized_full_results
+ self.summarized_failing_results = summarized_failing_results
+ self.initial_results = initial_results
+ self.retry_results = retry_results
+ self.enabled_pixel_tests_in_retry = enabled_pixel_tests_in_retry
+
+
+def _interpret_test_failures(failures):
+ test_dict = {}
+ failure_types = [type(failure) for failure in failures]
+ # FIXME: get rid of all this is_* values once there is a 1:1 map between
+ # TestFailure type and test_expectations.EXPECTATION.
+ if test_failures.FailureMissingAudio in failure_types:
+ test_dict['is_missing_audio'] = True
+
+ if test_failures.FailureMissingResult in failure_types:
+ test_dict['is_missing_text'] = True
+
+ if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
+ test_dict['is_missing_image'] = True
+
+ if test_failures.FailureTestHarnessAssertion in failure_types:
+ test_dict['is_testharness_test'] = True
+
+ return test_dict
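+
+# Editor's sketch (not part of the original import): for example,
+#   _interpret_test_failures([test_failures.FailureMissingAudio()])
+# returns {'is_missing_audio': True}.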
+
+
+def _chromium_commit_position(scm, path):
+ log = scm.most_recent_log_matching('Cr-Commit-Position:', path)
+ match = re.search('^\s*Cr-Commit-Position:.*@\{#(?P<commit_position>\d+)\}', log, re.MULTILINE)
+ if not match:
+ return ""
+ return str(match.group('commit_position'))
+
+
+def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=False):
+ """Returns a dictionary containing a summary of the test runs, with the following fields:
+ 'version': a version indicator
+ 'fixable': The number of fixable tests (NOW - PASS)
+ 'skipped': The number of skipped tests (NOW & SKIPPED)
+ 'num_regressions': The number of non-flaky failures
+ 'num_flaky': The number of flaky failures
+ 'num_passes': The number of unexpected passes
+ 'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
+ """
+ results = {}
+ results['version'] = 3
+
+ tbe = initial_results.tests_by_expectation
+ tbt = initial_results.tests_by_timeline
+ results['fixable'] = len(tbt[test_expectations.NOW] - tbe[test_expectations.PASS])
+ # FIXME: Remove this. It is redundant with results['num_failures_by_type'].
+ results['skipped'] = len(tbt[test_expectations.NOW] & tbe[test_expectations.SKIP])
+
+ num_passes = 0
+ num_flaky = 0
+ num_regressions = 0
+ keywords = {}
+    for expectation_string, expectation_enum in test_expectations.TestExpectations.EXPECTATIONS.iteritems():
+        keywords[expectation_enum] = expectation_string.upper()
+
+ num_failures_by_type = {}
+ for expectation in initial_results.tests_by_expectation:
+ tests = initial_results.tests_by_expectation[expectation]
+ if expectation != test_expectations.WONTFIX:
+ tests &= tbt[test_expectations.NOW]
+ num_failures_by_type[keywords[expectation]] = len(tests)
+ # The number of failures by type.
+ results['num_failures_by_type'] = num_failures_by_type
+
+ tests = {}
+
+ for test_name, result in initial_results.results_by_name.iteritems():
+ expected = expectations.get_expectations_string(test_name)
+ result_type = result.type
+ actual = [keywords[result_type]]
+
+ if only_include_failing and result.type == test_expectations.SKIP:
+ continue
+
+ if result_type == test_expectations.PASS:
+ num_passes += 1
+ if not result.has_stderr and only_include_failing:
+ continue
+ elif result_type != test_expectations.SKIP and test_name in initial_results.unexpected_results_by_name:
+ if retry_results:
+ if test_name not in retry_results.unexpected_results_by_name:
+ # The test failed unexpectedly at first, but ran as expected the second time -> flaky.
+ actual.extend(expectations.get_expectations_string(test_name).split(" "))
+ num_flaky += 1
+ else:
+ retry_result_type = retry_results.unexpected_results_by_name[test_name].type
+ if retry_result_type == test_expectations.PASS:
+ # The test failed unexpectedly at first, then passed unexpectedly -> unexpected pass.
+ num_passes += 1
+ if not result.has_stderr and only_include_failing:
+ continue
+ else:
+ # The test failed unexpectedly both times -> regression.
+ num_regressions += 1
+                    if keywords[retry_result_type] not in actual:
+ actual.append(keywords[retry_result_type])
+ else:
+ # The test failed unexpectedly, but we didn't do any retries -> regression.
+ num_regressions += 1
+
+ test_dict = {}
+
+ rounded_run_time = round(result.test_run_time, 1)
+ if rounded_run_time:
+ test_dict['time'] = rounded_run_time
+
+ if result.has_stderr:
+ test_dict['has_stderr'] = True
+
+ bugs = expectations.model().get_expectation_line(test_name).bugs
+ if bugs:
+ test_dict['bugs'] = bugs
+
+ if result.reftest_type:
+ test_dict.update(reftest_type=list(result.reftest_type))
+
+ test_dict['expected'] = expected
+ test_dict['actual'] = " ".join(actual)
+
+ def is_expected(actual_result):
+ return expectations.matches_an_expected_result(test_name, result_type,
+ port_obj.get_option('pixel_tests') or result.reftest_type,
+ port_obj.get_option('enable_sanitizer'))
+
+ # To avoid bloating the output results json too much, only add an entry for whether the failure is unexpected.
+ if not all(is_expected(actual_result) for actual_result in actual):
+ test_dict['is_unexpected'] = True
+
+ test_dict.update(_interpret_test_failures(result.failures))
+
+ if retry_results:
+ retry_result = retry_results.unexpected_results_by_name.get(test_name)
+ if retry_result:
+ test_dict.update(_interpret_test_failures(retry_result.failures))
+
+        if result.has_repaint_overlay:
+ test_dict['has_repaint_overlay'] = True
+
+ # Store test hierarchically by directory. e.g.
+ # foo/bar/baz.html: test_dict
+ # foo/bar/baz1.html: test_dict
+ #
+ # becomes
+ # foo: {
+ # bar: {
+ # baz.html: test_dict,
+ # baz1.html: test_dict
+ # }
+ # }
+ parts = test_name.split('/')
+ current_map = tests
+ for i, part in enumerate(parts):
+ if i == (len(parts) - 1):
+ current_map[part] = test_dict
+ break
+ if part not in current_map:
+ current_map[part] = {}
+ current_map = current_map[part]
+
+ results['tests'] = tests
+ # FIXME: Remove this. It is redundant with results['num_failures_by_type'].
+ results['num_passes'] = num_passes
+ results['num_flaky'] = num_flaky
+ # FIXME: Remove this. It is redundant with results['num_failures_by_type'].
+ results['num_regressions'] = num_regressions
+ results['interrupted'] = initial_results.interrupted # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
+ results['layout_tests_dir'] = port_obj.layout_tests_dir()
+ results['has_wdiff'] = port_obj.wdiff_available()
+ results['has_pretty_patch'] = port_obj.pretty_patch_available()
+ results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')
+ results['seconds_since_epoch'] = int(time.time())
+ results['build_number'] = port_obj.get_option('build_number')
+ results['builder_name'] = port_obj.get_option('builder_name')
+
+ # Don't do this by default since it takes >100ms.
+ # It's only used for uploading data to the flakiness dashboard.
+ results['chromium_revision'] = ''
+ results['blink_revision'] = ''
+ if port_obj.get_option('builder_name'):
+ for (name, path) in port_obj.repository_paths():
+ scm = port_obj.host.scm_for_path(path)
+ if scm:
+ if name.lower() == 'chromium':
+ rev = _chromium_commit_position(scm, path)
+ else:
+ rev = scm.svn_revision(path)
+ if rev:
+ results[name.lower() + '_revision'] = rev
+ else:
+ _log.warn('Failed to determine svn revision for %s, '
+ 'leaving "%s_revision" key blank in full_results.json.'
+ % (path, name))
+
+ return results
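+
+# Editor's sketch (not part of the original import): the rough shape of the
+# dictionary summarize_results() returns, with hypothetical values; the real
+# dictionary also carries keys such as 'interrupted', 'layout_tests_dir' and
+# 'seconds_since_epoch' (see above).
+_EXAMPLE_SUMMARY = {
+    'version': 3,
+    'fixable': 2,
+    'skipped': 1,
+    'num_passes': 10,
+    'num_flaky': 1,
+    'num_regressions': 2,
+    'num_failures_by_type': {'PASS': 10, 'TIMEOUT': 1, 'CRASH': 1},
+    'tests': {
+        'foo': {
+            'bar': {
+                'baz.html': {'expected': 'PASS', 'actual': 'TIMEOUT', 'is_unexpected': True},
+            },
+        },
+    },
+}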
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py
new file mode 100644
index 0000000..00b7577
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py
@@ -0,0 +1,209 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models import test_run_results
+
+
+def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
+ failures = []
+ if result_type == test_expectations.TIMEOUT:
+ failures = [test_failures.FailureTimeout()]
+ elif result_type == test_expectations.AUDIO:
+ failures = [test_failures.FailureAudioMismatch()]
+ elif result_type == test_expectations.CRASH:
+ failures = [test_failures.FailureCrash()]
+ elif result_type == test_expectations.LEAK:
+ failures = [test_failures.FailureLeak()]
+ return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
+
+
+def run_results(port, extra_skipped_tests=None):
+ tests = ['passes/text.html', 'failures/expected/timeout.html', 'failures/expected/crash.html', 'failures/expected/leak.html', 'failures/expected/keyboard.html',
+ 'failures/expected/audio.html', 'passes/skipped/skip.html']
+ expectations = test_expectations.TestExpectations(port, tests)
+ if extra_skipped_tests:
+ expectations.add_extra_skipped_tests(extra_skipped_tests)
+ return test_run_results.TestRunResults(expectations, len(tests))
+
+
+def summarized_results(port, expected, passing, flaky, only_include_failing=False, extra_skipped_tests=None, fail_on_retry=False):
+ test_is_slow = False
+
+ initial_results = run_results(port, extra_skipped_tests)
+ if expected:
+ initial_results.add(get_result('passes/text.html', test_expectations.PASS), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/audio.html', test_expectations.AUDIO), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/crash.html', test_expectations.CRASH), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/leak.html', test_expectations.LEAK), expected, test_is_slow)
+ elif passing:
+ skipped_result = get_result('passes/skipped/skip.html')
+ skipped_result.type = test_expectations.SKIP
+ initial_results.add(skipped_result, expected, test_is_slow)
+
+ initial_results.add(get_result('passes/text.html', run_time=1), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/audio.html'), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/timeout.html'), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/crash.html'), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/leak.html'), expected, test_is_slow)
+ else:
+ initial_results.add(get_result('passes/text.html', test_expectations.TIMEOUT, run_time=1), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/audio.html', test_expectations.AUDIO, run_time=0.049), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/timeout.html', test_expectations.CRASH, run_time=0.05), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/leak.html', test_expectations.TIMEOUT), expected, test_is_slow)
+
+    # We only list keyboard.html here, since it is normally marked WontFix.
+ initial_results.add(get_result('failures/expected/keyboard.html', test_expectations.SKIP), expected, test_is_slow)
+
+ if flaky:
+ retry_results = run_results(port, extra_skipped_tests)
+ retry_results.add(get_result('passes/text.html'), True, test_is_slow)
+ if fail_on_retry:
+ retry_results.add(get_result('failures/expected/timeout.html', test_expectations.AUDIO), False, test_is_slow)
+ else:
+ retry_results.add(get_result('failures/expected/timeout.html'), True, test_is_slow)
+ retry_results.add(get_result('failures/expected/crash.html'), True, test_is_slow)
+ retry_results.add(get_result('failures/expected/leak.html'), True, test_is_slow)
+ else:
+ retry_results = None
+
+ return test_run_results.summarize_results(port, initial_results.expectations, initial_results, retry_results, enabled_pixel_tests_in_retry=False, only_include_failing=only_include_failing)
+
+
+class InterpretTestFailuresTest(unittest.TestCase):
+ def setUp(self):
+ host = MockHost()
+ self.port = host.port_factory.get(port_name='test')
+
+ def test_interpret_test_failures(self):
+ test_dict = test_run_results._interpret_test_failures([test_failures.FailureReftestMismatchDidNotOccur(self.port.abspath_for_test('foo/reftest-expected-mismatch.html'))])
+ self.assertEqual(len(test_dict), 0)
+
+ test_dict = test_run_results._interpret_test_failures([test_failures.FailureMissingAudio()])
+ self.assertIn('is_missing_audio', test_dict)
+
+ test_dict = test_run_results._interpret_test_failures([test_failures.FailureMissingResult()])
+ self.assertIn('is_missing_text', test_dict)
+
+ test_dict = test_run_results._interpret_test_failures([test_failures.FailureMissingImage()])
+ self.assertIn('is_missing_image', test_dict)
+
+ test_dict = test_run_results._interpret_test_failures([test_failures.FailureMissingImageHash()])
+ self.assertIn('is_missing_image', test_dict)
+
+
+class SummarizedResultsTest(unittest.TestCase):
+ def setUp(self):
+ host = MockHost(initialize_scm_by_default=False)
+ self.port = host.port_factory.get(port_name='test')
+
+ def test_no_svn_revision(self):
+ summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
+ self.assertNotIn('revision', summary)
+
+ def test_num_failures_by_type(self):
+ summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
+ self.assertEquals(summary['num_failures_by_type'], {'CRASH': 1, 'MISSING': 0, 'TEXT': 0, 'IMAGE': 0, 'NEEDSREBASELINE': 0, 'NEEDSMANUALREBASELINE': 0, 'PASS': 0, 'REBASELINE': 0, 'SKIP': 0, 'SLOW': 0, 'TIMEOUT': 3, 'IMAGE+TEXT': 0, 'LEAK': 0, 'FAIL': 0, 'AUDIO': 1, 'WONTFIX': 1})
+
+ summary = summarized_results(self.port, expected=True, passing=False, flaky=False)
+ self.assertEquals(summary['num_failures_by_type'], {'CRASH': 1, 'MISSING': 0, 'TEXT': 0, 'IMAGE': 0, 'NEEDSREBASELINE': 0, 'NEEDSMANUALREBASELINE': 0, 'PASS': 1, 'REBASELINE': 0, 'SKIP': 0, 'SLOW': 0, 'TIMEOUT': 1, 'IMAGE+TEXT': 0, 'LEAK': 1, 'FAIL': 0, 'AUDIO': 1, 'WONTFIX': 0})
+
+ summary = summarized_results(self.port, expected=False, passing=True, flaky=False)
+ self.assertEquals(summary['num_failures_by_type'], {'CRASH': 0, 'MISSING': 0, 'TEXT': 0, 'IMAGE': 0, 'NEEDSREBASELINE': 0, 'NEEDSMANUALREBASELINE': 0, 'PASS': 5, 'REBASELINE': 0, 'SKIP': 1, 'SLOW': 0, 'TIMEOUT': 0, 'IMAGE+TEXT': 0, 'LEAK': 0, 'FAIL': 0, 'AUDIO': 0, 'WONTFIX': 0})
+
+ def test_svn_revision(self):
+ self.port._options.builder_name = 'dummy builder'
+ summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
+ self.assertNotEquals(summary['blink_revision'], '')
+
+ def test_bug_entry(self):
+ self.port._options.builder_name = 'dummy builder'
+ summary = summarized_results(self.port, expected=False, passing=True, flaky=False)
+ self.assertEquals(summary['tests']['passes']['skipped']['skip.html']['bugs'], ['Bug(test)'])
+
+ def test_extra_skipped_tests(self):
+ self.port._options.builder_name = 'dummy builder'
+ summary = summarized_results(self.port, expected=False, passing=True, flaky=False, extra_skipped_tests=['passes/text.html'])
+ self.assertEquals(summary['tests']['passes']['text.html']['expected'], 'NOTRUN')
+
+ def test_summarized_results_wontfix(self):
+ self.port._options.builder_name = 'dummy builder'
+ summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
+ self.assertEquals(summary['tests']['failures']['expected']['keyboard.html']['expected'], 'WONTFIX')
+ self.assertTrue(summary['tests']['passes']['text.html']['is_unexpected'])
+
+ def test_summarized_results_expected_pass(self):
+ self.port._options.builder_name = 'dummy builder'
+ summary = summarized_results(self.port, expected=False, passing=True, flaky=False)
+ self.assertTrue(summary['tests']['passes']['text.html'])
+ self.assertTrue('is_unexpected' not in summary['tests']['passes']['text.html'])
+
+ def test_summarized_results_expected_only_include_failing(self):
+ self.port._options.builder_name = 'dummy builder'
+ summary = summarized_results(self.port, expected=True, passing=False, flaky=False, only_include_failing=True)
+ self.assertNotIn('passes', summary['tests'])
+ self.assertTrue(summary['tests']['failures']['expected']['audio.html'])
+ self.assertTrue(summary['tests']['failures']['expected']['timeout.html'])
+ self.assertTrue(summary['tests']['failures']['expected']['crash.html'])
+ self.assertTrue(summary['tests']['failures']['expected']['leak.html'])
+
+ def test_summarized_results_skipped(self):
+ self.port._options.builder_name = 'dummy builder'
+ summary = summarized_results(self.port, expected=False, passing=True, flaky=False)
+ self.assertEquals(summary['tests']['passes']['skipped']['skip.html']['expected'], 'SKIP')
+
+    def test_summarized_results_only_include_failing(self):
+ self.port._options.builder_name = 'dummy builder'
+ summary = summarized_results(self.port, expected=False, passing=True, flaky=False, only_include_failing=True)
+ self.assertTrue('passes' not in summary['tests'])
+
+ def test_rounded_run_times(self):
+ summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
+ self.assertEquals(summary['tests']['passes']['text.html']['time'], 1)
+ self.assertTrue('time' not in summary['tests']['failures']['expected']['audio.html'])
+ self.assertEquals(summary['tests']['failures']['expected']['timeout.html']['time'], 0.1)
+ self.assertTrue('time' not in summary['tests']['failures']['expected']['crash.html'])
+ self.assertTrue('time' not in summary['tests']['failures']['expected']['leak.html'])
+
+ def test_timeout_then_unexpected_pass(self):
+ tests = ['failures/expected/image.html']
+ expectations = test_expectations.TestExpectations(self.port, tests)
+ initial_results = test_run_results.TestRunResults(expectations, len(tests))
+ initial_results.add(get_result('failures/expected/image.html', test_expectations.TIMEOUT, run_time=1), False, False)
+ retry_results = test_run_results.TestRunResults(expectations, len(tests))
+ retry_results.add(get_result('failures/expected/image.html', test_expectations.PASS, run_time=0.1), False, False)
+ summary = test_run_results.summarize_results(self.port, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry=True, only_include_failing=True)
+ self.assertEquals(summary['num_regressions'], 0)
+ self.assertEquals(summary['num_passes'], 1)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/testharness_results.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/testharness_results.py
new file mode 100644
index 0000000..72c8a35
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/testharness_results.py
@@ -0,0 +1,72 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utility module for testharness."""
+
+
+# const definitions
+TESTHARNESSREPORT_HEADER = 'This is a testharness.js-based test.'
+TESTHARNESSREPORT_FOOTER = 'Harness: the test ran to completion.'
+
+
+def is_testharness_output(content_text):
+ """
+    Returns whether |content_text| is the output of a testharness.js-based test.
+ """
+
+    # Leading and trailing whitespace is accepted.
+ lines = content_text.strip().splitlines()
+ lines = [line.strip() for line in lines]
+
+ # A testharness output is defined as containing the header and the footer.
+ found_header = False
+ found_footer = False
+ for line in lines:
+ if line == TESTHARNESSREPORT_HEADER:
+ found_header = True
+ elif line == TESTHARNESSREPORT_FOOTER:
+ found_footer = True
+
+ return found_header and found_footer
+
+
+def is_testharness_output_passing(content_text):
+ """
+    Returns whether |content_text| is the output of a passing testharness.js-based test.
+
+    Note:
+        |content_text| is expected to already be testharness output.
+ """
+
+    # Leading and trailing whitespace is accepted.
+ lines = content_text.strip().splitlines()
+ lines = [line.strip() for line in lines]
+
+ # The check is very conservative and rejects any unexpected content in the output.
+ for line in lines:
+ # There should be no empty lines.
+ if len(line) == 0:
+ return False
+
+        # These lines must match the header or footer exactly.
+ if line == TESTHARNESSREPORT_HEADER or \
+ line == TESTHARNESSREPORT_FOOTER:
+ continue
+
+        # These prefixes indicate passing output.
+ if line.startswith('CONSOLE') or \
+ line.startswith('PASS'):
+ continue
+
+        # These prefixes indicate failing output.
+ if line.startswith('FAIL') or \
+ line.startswith('TIMEOUT') or \
+ line.startswith('NOTRUN') or \
+ line.startswith('Harness Error. harness_status = '):
+ return False
+
+        # Any other, unexpected output is treated as a failure.
+ return False
+
+ return True
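+
+
+# Editor's sketch (not part of the original import): typical use of the two
+# checks above on captured text output.
+def _example_usage():
+    example = ('This is a testharness.js-based test.\n'
+               'PASS: things are fine.\n'
+               'Harness: the test ran to completion.\n')
+    assert is_testharness_output(example)
+    assert is_testharness_output_passing(example)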
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/testharness_results_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/testharness_results_unittest.py
new file mode 100644
index 0000000..6ec58cf
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/models/testharness_results_unittest.py
@@ -0,0 +1,46 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from webkitpy.layout_tests.models import testharness_results
+
+
+class TestHarnessResultCheckerTest(unittest.TestCase):
+
+ def test_is_testharness_output(self):
+ test_data = [
+ {'content': 'foo', 'result': False},
+ {'content': '', 'result': False},
+ {'content': ' ', 'result': False},
+ {'content': 'This is a testharness.js-based test.\nHarness: the test ran to completion.', 'result': True},
+ {'content': '\n \r This is a testharness.js-based test. \n \r \n \rHarness: the test ran to completion. \n\n', 'result': True},
+ {'content': ' This \nis a testharness.js-based test.\nHarness: the test ran to completion.', 'result': False},
+ {'content': 'This is a testharness.js-based test. Harness: the test ran to completion.', 'result': False},
+ {'content': 'This is a testharness.js-based test.\nFoo bar \n Harness: the test ran to completion.', 'result': True},
+ {'content': 'This is a testharness.js-based test.\nFAIL: bah \n Harness: the test ran to completion.\n\n\n', 'result': True},
+ ]
+
+ for data in test_data:
+ self.assertEqual(data['result'], testharness_results.is_testharness_output(data['content']))
+
+ def test_is_testharness_output_passing(self):
+ test_data = [
+ {'content': 'This is a testharness.js-based test.\n Harness: the test ran to completion.', 'result': True},
+ {'content': 'This is a testharness.js-based test.\n \n Harness: the test ran to completion.', 'result': False},
+ {'content': 'This is a testharness.js-based test.\n PASS: foo bar \n Harness: the test ran to completion.', 'result': True},
+ {'content': 'This is a testharness.js-based test.\n PASS: foo bar FAIL \n Harness: the test ran to completion.', 'result': True},
+ {'content': 'This is a testharness.js-based test.\n PASS: foo bar \nFAIL \n Harness: the test ran to completion.', 'result': False},
+ {'content': 'This is a testharness.js-based test.\n CONSOLE ERROR: BLAH \n Harness: the test ran to completion.', 'result': True},
+ {'content': 'This is a testharness.js-based test.\n Foo bar \n Harness: the test ran to completion.', 'result': False},
+ {'content': 'This is a testharness.js-based test.\n FAIL: bah \n Harness: the test ran to completion.', 'result': False},
+ {'content': 'This is a testharness.js-based test.\n TIMEOUT: bah \n Harness: the test ran to completion.', 'result': False},
+ {'content': 'This is a testharness.js-based test.\n NOTRUN: bah \n Harness: the test ran to completion.', 'result': False},
+ {'content': 'CONSOLE LOG: error.\nThis is a testharness.js-based test.\nPASS: things are fine.\nHarness: the test ran to completion.\n\n', 'result': True},
+ {'content': 'CONSOLE ERROR: error.\nThis is a testharness.js-based test.\nPASS: things are fine.\nHarness: the test ran to completion.\n\n', 'result': True},
+ {'content': 'RANDOM TEXT.\nThis is a testharness.js-based test.\nPASS: things are fine.\n.Harness: the test ran to completion.\n\n', 'result': False},
+ ]
+
+ for data in test_data:
+ self.assertEqual(data['result'], testharness_results.is_testharness_output_passing(data['content']))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/__init__.py
new file mode 100644
index 0000000..cc7fa86
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/__init__.py
@@ -0,0 +1,35 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Port-specific entrypoints for the layout tests test infrastructure."""
+
+import builders # Why is this in port?
+
+from base import Port # It's possible we don't need to export this virtual baseclass outside the module.
+from driver import DeviceFailure, Driver, DriverInput, DriverOutput
+from factory import platform_options, configuration_options
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/android.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/android.py
new file mode 100644
index 0000000..8a84b11
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/android.py
@@ -0,0 +1,1279 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import copy
+import logging
+import os
+import re
+import signal
+import sys
+import subprocess
+import threading
+import time
+
+from multiprocessing.pool import ThreadPool
+
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.layout_tests.breakpad.dump_reader_multipart import DumpReaderAndroid
+from webkitpy.layout_tests.models import test_run_results
+from webkitpy.layout_tests.port import base
+from webkitpy.layout_tests.port import linux
+from webkitpy.layout_tests.port import driver
+from webkitpy.layout_tests.port import factory
+from webkitpy.layout_tests.port import server_process
+from webkitpy.common.system.profiler import SingleFileOutputProfiler
+
+_log = logging.getLogger(__name__)
+
+# The root directory for test resources, which has the same structure as the
+# source root directory of Chromium.
+# This path is defined in Chromium's base/test/test_support_android.cc.
+DEVICE_SOURCE_ROOT_DIR = '/data/local/tmp/'
+
+# The layout tests directory on the device, which has two uses:
+# 1. as a virtual path in file URLs that will be bridged to HTTP.
+# 2. pointing to some files that are pushed to the device for tests that
+#    don't work over file-to-HTTP (e.g. blob protocol tests).
+DEVICE_WEBKIT_BASE_DIR = DEVICE_SOURCE_ROOT_DIR + 'third_party/WebKit/'
+DEVICE_LAYOUT_TESTS_DIR = DEVICE_WEBKIT_BASE_DIR + 'LayoutTests/'
+
+SCALING_GOVERNORS_PATTERN = "/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor"
+KPTR_RESTRICT_PATH = "/proc/sys/kernel/kptr_restrict"
+
+# All the test cases are still served to the test runner through the file
+# protocol, but we use a file-to-HTTP feature to bridge each file request to
+# the host's HTTP server, which serves the real test files and their resources.
+# See webkit/support/platform_support_android.cc for the other side of this bridge.
+PERF_TEST_PATH_PREFIX = '/all-perf-tests'
+LAYOUT_TEST_PATH_PREFIX = '/all-tests'
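+
+# Editor's sketch (not part of the original import): an approximation of the
+# bridging described above. The exact scheme, host and port live on the C++
+# side (platform_support_android.cc); the values below are assumptions for
+# illustration only.
+def _example_bridged_layout_test_url(device_file_path):
+    # e.g. '/data/local/tmp/third_party/WebKit/LayoutTests/fast/foo.html'
+    # maps to 'http://127.0.0.1:8000/all-tests/fast/foo.html' (hypothetical).
+    assert device_file_path.startswith(DEVICE_LAYOUT_TESTS_DIR)
+    relative_path = device_file_path[len(DEVICE_LAYOUT_TESTS_DIR):]
+    return 'http://127.0.0.1:8000' + LAYOUT_TEST_PATH_PREFIX + '/' + relative_path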
+
+# All ports that the Android forwarder needs to forward.
+# 8000, 8080 and 8443 are for http/https tests.
+# 8880 and 9323 are for websocket tests
+# (see http_server.py, apache_http_server.py and websocket_server.py).
+FORWARD_PORTS = '8000 8080 8443 8880 9323'
+
+MS_TRUETYPE_FONTS_DIR = '/usr/share/fonts/truetype/msttcorefonts/'
+MS_TRUETYPE_FONTS_PACKAGE = 'ttf-mscorefonts-installer'
+
+# Timeout in seconds to wait for starting/stopping the driver.
+DRIVER_START_STOP_TIMEOUT_SECS = 10
+
+HOST_FONT_FILES = [
+ [[MS_TRUETYPE_FONTS_DIR], 'Arial.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Arial_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Arial_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Arial_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Comic_Sans_MS.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Comic_Sans_MS_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Courier_New.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Courier_New_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Courier_New_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Courier_New_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Georgia.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Georgia_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Georgia_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Georgia_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Impact.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Trebuchet_MS.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Trebuchet_MS_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Trebuchet_MS_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Trebuchet_MS_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Times_New_Roman.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Times_New_Roman_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Times_New_Roman_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Times_New_Roman_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Verdana.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Verdana_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Verdana_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ [[MS_TRUETYPE_FONTS_DIR], 'Verdana_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
+ # The Microsoft font EULA
+ [['/usr/share/doc/ttf-mscorefonts-installer/'], 'READ_ME!.gz', MS_TRUETYPE_FONTS_PACKAGE],
+ # Other fonts: Arabic, CJK, Indic, Thai, etc.
+ [['/usr/share/fonts/truetype/ttf-dejavu/'], 'DejaVuSans.ttf', 'ttf-dejavu'],
+ [['/usr/share/fonts/truetype/kochi/'], 'kochi-mincho.ttf', 'ttf-kochi-mincho'],
+ [['/usr/share/fonts/truetype/ttf-indic-fonts-core/'], 'lohit_hi.ttf', 'ttf-indic-fonts-core'],
+ [['/usr/share/fonts/truetype/ttf-indic-fonts-core/'], 'lohit_ta.ttf', 'ttf-indic-fonts-core'],
+ [['/usr/share/fonts/truetype/ttf-indic-fonts-core/'], 'MuktiNarrow.ttf', 'ttf-indic-fonts-core'],
+ [['/usr/share/fonts/truetype/thai/', '/usr/share/fonts/truetype/tlwg/'], 'Garuda.ttf', 'fonts-tlwg-garuda'],
+ [['/usr/share/fonts/truetype/ttf-indic-fonts-core/', '/usr/share/fonts/truetype/ttf-punjabi-fonts/'], 'lohit_pa.ttf', 'ttf-indic-fonts-core'],
+]
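+
+# Editor's note (not part of the original import): each entry above is
+# ([candidate directories], font file name, package that provides it);
+# check_sys_deps() below searches the candidate directories in order.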
+
+# Test resources that need to be accessed as files directly.
+# Each item can be the relative path of a directory or a file.
+TEST_RESOURCES_TO_PUSH = [
+ # Blob tests need to access files directly.
+ 'editing/pasteboard/resources',
+ 'fast/files/resources',
+ 'http/tests/local/resources',
+ 'http/tests/local/formdata/resources',
+ # User style URLs are accessed as local files in webkit_support.
+ 'http/tests/security/resources/cssStyle.css',
+ # Media tests need to access audio/video as files.
+ 'media/content',
+ 'compositing/resources/video.mp4',
+]
+
+MD5SUM_DEVICE_FILE_NAME = 'md5sum_bin'
+MD5SUM_HOST_FILE_NAME = 'md5sum_bin_host'
+MD5SUM_DEVICE_PATH = '/data/local/tmp/' + MD5SUM_DEVICE_FILE_NAME
+
+
+# Information required when running layout tests using content_shell as the test runner.
+class ContentShellDriverDetails(object):
+ def device_cache_directory(self):
+ return self.device_directory() + 'cache/'
+
+ def device_fonts_directory(self):
+ return self.device_directory() + 'fonts/'
+
+ def device_forwarder_path(self):
+ return self.device_directory() + 'forwarder'
+
+ def device_fifo_directory(self):
+ return '/data/data/' + self.package_name() + '/files/'
+
+ def apk_name(self):
+ return 'apks/ContentShell.apk'
+
+ def package_name(self):
+ return 'org.chromium.content_shell_apk'
+
+ def activity_name(self):
+ return self.package_name() + '/.ContentShellActivity'
+
+ def library_name(self):
+ return 'libcontent_shell_content_view.so'
+
+ def additional_resources(self):
+ return ['content_resources.pak', 'content_shell.pak', 'shell_resources.pak']
+
+ def command_line_file(self):
+ return '/data/local/tmp/content-shell-command-line'
+
+ def device_crash_dumps_directory(self):
+ return '/data/local/tmp/content-shell-crash-dumps'
+
+ def additional_command_line_flags(self, use_breakpad):
+ flags = ['--dump-render-tree', '--encode-binary']
+ if use_breakpad:
+ flags.extend(['--enable-crash-reporter', '--crash-dumps-dir=%s' % self.device_crash_dumps_directory()])
+ return flags
+
+ def device_directory(self):
+ return DEVICE_SOURCE_ROOT_DIR + 'content_shell/'
+
+
+# The AndroidCommands class encapsulates commands to communicate with an attached device.
+class AndroidCommands(object):
+ _adb_command_path = None
+ _adb_command_path_options = []
+
+ def __init__(self, executive, device_serial, debug_logging):
+ self._executive = executive
+ self._device_serial = device_serial
+ self._debug_logging = debug_logging
+
+ # Local public methods.
+
+ def file_exists(self, full_path):
+ assert full_path.startswith('/')
+ return self.run(['shell', 'ls', '-d', full_path]).strip() == full_path
+
+ def push(self, host_path, device_path, ignore_error=False):
+ return self.run(['push', host_path, device_path], ignore_error=ignore_error)
+
+ def pull(self, device_path, host_path, ignore_error=False):
+ return self.run(['pull', device_path, host_path], ignore_error=ignore_error)
+
+ def mkdir(self, device_path, chmod=None):
+ self.run(['shell', 'mkdir', '-p', device_path])
+ if chmod:
+ self.run(['shell', 'chmod', chmod, device_path])
+
+ def restart_adb(self):
+ pids = self.extract_pids('adbd')
+ if pids:
+ output = self.run(['shell', 'kill', '-' + str(signal.SIGTERM)] + pids)
+ self.run(['wait-for-device'])
+
+ def restart_as_root(self):
+ output = self.run(['root'])
+ if 'adbd is already running as root' in output:
+ return
+
+        elif 'restarting adbd as root' not in output:
+ self._log_error('Unrecognized output from adb root: %s' % output)
+
+ self.run(['wait-for-device'])
+
+ def extract_pids(self, process_name):
+ pids = []
+ output = self.run(['shell', 'ps'])
+ for line in output.splitlines():
+ data = line.split()
+ try:
+ if process_name in data[-1]: # name is in the last column
+ if process_name == data[-1]:
+ pids.insert(0, data[1]) # PID is in the second column
+ else:
+ pids.append(data[1])
+ except IndexError:
+ pass
+ return pids
+
+ def run(self, command, ignore_error=False):
+ self._log_debug('Run adb command: ' + str(command))
+ if ignore_error:
+ error_handler = self._executive.ignore_error
+ else:
+ error_handler = None
+
+ result = self._executive.run_command(self.adb_command() + command, error_handler=error_handler, debug_logging=self._debug_logging)
+
+        # We limit the length to avoid logging overly verbose output from
+        # commands such as "adb logcat". Also make sure that the output is
+        # ascii-encoded to avoid confusing other parts of the system.
+ self._log_debug('Run adb result: ' + result[:80].encode('ascii', errors='replace'))
+ return result
+
+ def get_serial(self):
+ return self._device_serial
+
+ def adb_command(self):
+ return [AndroidCommands.adb_command_path(self._executive, self._debug_logging), '-s', self._device_serial]
+
+ @staticmethod
+ def set_adb_command_path_options(paths):
+ AndroidCommands._adb_command_path_options = paths
+
+ @staticmethod
+ def adb_command_path(executive, debug_logging):
+ if AndroidCommands._adb_command_path:
+ return AndroidCommands._adb_command_path
+
+ assert AndroidCommands._adb_command_path_options, 'No commands paths have been set to look for the "adb" command.'
+
+ command_path = None
+ command_version = None
+ for path_option in AndroidCommands._adb_command_path_options:
+ path_version = AndroidCommands._determine_adb_version(path_option, executive, debug_logging)
+ if not path_version:
+ continue
+            if command_version is not None and path_version < command_version:
+ continue
+
+ command_path = path_option
+ command_version = path_version
+
+ assert command_path, 'Unable to locate the "adb" command. Are you using an Android checkout of Chromium?'
+
+ AndroidCommands._adb_command_path = command_path
+ return command_path
+
+ # Local private methods.
+
+ def _log_error(self, message):
+ _log.error('[%s] %s' % (self._device_serial, message))
+
+ def _log_info(self, message):
+ _log.info('[%s] %s' % (self._device_serial, message))
+
+ def _log_debug(self, message):
+ if self._debug_logging:
+ _log.debug('[%s] %s' % (self._device_serial, message))
+
+ @staticmethod
+ def _determine_adb_version(adb_command_path, executive, debug_logging):
+ re_version = re.compile('^.*version ([\d\.]+)$')
+ try:
+ output = executive.run_command([adb_command_path, 'version'], error_handler=executive.ignore_error,
+ debug_logging=debug_logging)
+ except OSError:
+ return None
+
+ result = re_version.match(output)
+ if not output or not result:
+ return None
+
+ return [int(n) for n in result.group(1).split('.')]
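+
+    # Editor's note (not part of the original import): returning the version as
+    # a list of ints lets adb_command_path() above pick the newest adb with a
+    # plain list comparison, e.g. [1, 0, 32] < [1, 0, 39].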
+
+
+# A class to encapsulate device status and information, such as the AndroidCommands
+# instances and whether the device has been set up.
+class AndroidDevices(object):
+    # Minimum battery percentage a device must have for it to be considered
+    # usable for running the layout tests.
+ MINIMUM_BATTERY_PERCENTAGE = 30
+
+ def __init__(self, executive, default_device=None, debug_logging=False):
+ self._usable_devices = []
+ self._default_device = default_device
+ self._prepared_devices = []
+ self._debug_logging = debug_logging
+
+ def prepared_devices(self):
+ return self._prepared_devices
+
+ def usable_devices(self, executive):
+ if self._usable_devices:
+ return self._usable_devices
+
+ if self._default_device:
+ self._usable_devices = [AndroidCommands(executive, self._default_device, self._debug_logging)]
+ return self._usable_devices
+
+ # Example "adb devices" command output:
+ # List of devices attached
+ # 0123456789ABCDEF device
+ re_device = re.compile('^([a-zA-Z0-9_:.-]+)\tdevice$', re.MULTILINE)
+
+ result = executive.run_command([AndroidCommands.adb_command_path(executive, debug_logging=self._debug_logging), 'devices'],
+ error_handler=executive.ignore_error, debug_logging=self._debug_logging)
+ devices = re_device.findall(result)
+ if not devices:
+ return []
+
+ for device_serial in sorted(devices):
+ commands = AndroidCommands(executive, device_serial, self._debug_logging)
+ if self._battery_level_for_device(commands) < AndroidDevices.MINIMUM_BATTERY_PERCENTAGE:
+ _log.warning('Device with serial "%s" skipped because it has less than %d percent battery.'
+ % (commands.get_serial(), AndroidDevices.MINIMUM_BATTERY_PERCENTAGE))
+ continue
+
+ if not self._is_device_screen_on(commands):
+ _log.warning('Device with serial "%s" skipped because the screen must be on.' % commands.get_serial())
+ continue
+
+ self._usable_devices.append(commands)
+
+ return self._usable_devices
+
+ def get_device(self, executive, device_index):
+ devices = self.usable_devices(executive)
+ if device_index >= len(devices):
+ raise AssertionError('Device index exceeds number of usable devices.')
+
+ return devices[device_index]
+
+ def is_device_prepared(self, device_serial):
+ return device_serial in self._prepared_devices
+
+ def set_device_prepared(self, device_serial):
+ self._prepared_devices.append(device_serial)
+
+ # Private methods
+ def _battery_level_for_device(self, commands):
+ battery_status = commands.run(['shell', 'dumpsys', 'battery'])
+ if 'Error' in battery_status or "Can't find service: battery" in battery_status:
+ _log.warning('Unable to read the battery level from device with serial "%s".' % commands.get_serial())
+ return 0
+
+ return int(re.findall('level: (\d+)', battery_status)[0])
+
+ def _is_device_screen_on(self, commands):
+ power_status = commands.run(['shell', 'dumpsys', 'power'])
+ return 'mScreenOn=true' in power_status or 'mScreenOn=SCREEN_ON_BIT' in power_status or 'Display Power: state=ON' in power_status
+
+
+class AndroidPort(base.Port):
+ port_name = 'android'
+
+ # Avoid initializing the adb path [worker count]+1 times by storing it as a static member.
+ _adb_path = None
+
+    SUPPORTED_VERSIONS = ('android',)
+
+ FALLBACK_PATHS = {'icecreamsandwich': ['android'] + linux.LinuxPort.latest_platform_fallback_path()}
+
+ # Android has aac and mp3 codecs built in.
+ PORT_HAS_AUDIO_CODECS_BUILT_IN = True
+
+ BUILD_REQUIREMENTS_URL = 'https://code.google.com/p/chromium/wiki/AndroidBuildInstructions'
+
+ def __init__(self, host, port_name, **kwargs):
+ super(AndroidPort, self).__init__(host, port_name, **kwargs)
+
+ self._operating_system = 'android'
+ self._version = 'icecreamsandwich'
+
+ self._host_port = factory.PortFactory(host).get('chromium', **kwargs)
+ self._server_process_constructor = self._android_server_process_constructor
+
+ if not self.get_option('disable_breakpad'):
+ self._dump_reader = DumpReaderAndroid(host, self._build_path())
+
+ if self.driver_name() != self.CONTENT_SHELL_NAME:
+ raise AssertionError('Layout tests on Android only support content_shell as the driver.')
+
+ self._driver_details = ContentShellDriverDetails()
+
+ # Initialize the AndroidDevices class which tracks available devices.
+ default_device = None
+ if hasattr(self._options, 'adb_device') and len(self._options.adb_device):
+ default_device = self._options.adb_device
+
+ self._debug_logging = self.get_option('android_logging')
+ self._devices = AndroidDevices(self._executive, default_device, self._debug_logging)
+
+ # Tell AndroidCommands where to search for the "adb" command.
+ AndroidCommands.set_adb_command_path_options(['adb',
+ self.path_from_chromium_base('third_party', 'android_tools', 'sdk', 'platform-tools', 'adb')])
+
+ prepared_devices = self.get_option('prepared_devices', [])
+ for serial in prepared_devices:
+ self._devices.set_device_prepared(serial)
+
+ def default_smoke_test_only(self):
+ return True
+
+ # Local public methods.
+ def path_to_forwarder(self):
+ return self._build_path('forwarder')
+
+ def path_to_md5sum(self):
+ return self._build_path(MD5SUM_DEVICE_FILE_NAME)
+
+ def path_to_md5sum_host(self):
+ return self._build_path(MD5SUM_HOST_FILE_NAME)
+
+ def additional_drt_flag(self):
+ return self._driver_details.additional_command_line_flags(use_breakpad=not self.get_option('disable_breakpad'))
+
+ def default_timeout_ms(self):
+ # Android platform has less computing power than desktop platforms.
+ # Using 10 seconds allows us to pass most slow tests which are not
+ # marked as slow tests on desktop platforms.
+ return 10 * 1000
+
+ def driver_stop_timeout(self):
+ # The driver doesn't respond to closing stdin, so we might as well stop the driver immediately.
+ return 0.0
+
+ def default_child_processes(self):
+ usable_devices = self._devices.usable_devices(self._executive)
+ if not usable_devices:
+ raise test_run_results.TestRunException(test_run_results.NO_DEVICES_EXIT_STATUS, "Unable to find any attached Android devices.")
+ return len(usable_devices)
+
+ def check_wdiff(self, logging=True):
+ return self._host_port.check_wdiff(logging)
+
+ def check_build(self, needs_http, printer):
+ exit_status = super(AndroidPort, self).check_build(needs_http, printer)
+ if exit_status:
+ return exit_status
+
+ result = self._check_file_exists(self.path_to_md5sum(), 'md5sum utility')
+ result = self._check_file_exists(self.path_to_md5sum_host(), 'md5sum host utility') and result
+ result = self._check_file_exists(self.path_to_forwarder(), 'forwarder utility') and result
+
+ if not result:
+ # There is a race condition in adb at least <= 4.3 on Linux that causes it to go offline periodically
+ # We set the processor affinity for any running adb process to attempt to work around this.
+ # See crbug.com/268450
+ if self.host.platform.is_linux():
+ pids = self._executive.running_pids(lambda name: 'adb' in name)
+ if not pids:
+ # Apparently adb is not running, which is unusual. Running any adb command should start it.
+ self._executive.run_command(['adb', 'devices'])
+ pids = self._executive.running_pids(lambda name: 'adb' in name)
+ if not pids:
+ _log.error("The adb daemon does not appear to be running.")
+ return False
+
+ for pid in pids:
+ self._executive.run_command(['taskset', '-p', '-c', '0', str(pid)])
+
+ if not result:
+ _log.error('For complete Android build requirements, please see:')
+ _log.error('')
+ _log.error(' http://code.google.com/p/chromium/wiki/AndroidBuildInstructions')
+ return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
+
+ return self._check_devices(printer)
+
+ def _check_devices(self, printer):
+ # Printer objects aren't threadsafe, so we need to protect calls to them.
+ lock = threading.Lock()
+ pool = None
+
+        # Push the executables and other files to the devices; doing this now
+        # means we can do it in parallel in the manager process and not mix
+        # it in with starting and stopping workers.
+ def setup_device(worker_number):
+ d = self.create_driver(worker_number)
+ serial = d._android_commands.get_serial()
+
+ def log_safely(msg, throttled=True):
+ if throttled:
+ callback = printer.write_throttled_update
+ else:
+ callback = printer.write_update
+ lock.acquire()
+ try:
+ callback("[%s] %s" % (serial, msg))
+ finally:
+ lock.release()
+
+ log_safely("preparing device", throttled=False)
+ try:
+ d._setup_test(log_safely)
+ log_safely("device prepared", throttled=False)
+ except (ScriptError, driver.DeviceFailure) as e:
+ lock.acquire()
+ _log.warning("[%s] failed to prepare_device: %s" % (serial, str(e)))
+ lock.release()
+ except KeyboardInterrupt:
+ if pool:
+ pool.terminate()
+
+ # FIXME: It would be nice if we knew how many workers we needed.
+ num_workers = self.default_child_processes()
+ num_child_processes = int(self.get_option('child_processes'))
+ if num_child_processes:
+ num_workers = min(num_workers, num_child_processes)
+ if num_workers > 1:
+ pool = ThreadPool(num_workers)
+ try:
+ pool.map(setup_device, range(num_workers))
+ except KeyboardInterrupt:
+ pool.terminate()
+ raise
+ else:
+ setup_device(0)
+
+ if not self._devices.prepared_devices():
+ _log.error('Could not prepare any devices for testing.')
+ return test_run_results.NO_DEVICES_EXIT_STATUS
+ return test_run_results.OK_EXIT_STATUS
+
+ def setup_test_run(self):
+ super(AndroidPort, self).setup_test_run()
+
+ # By setting this on the options object, we can propagate the list
+ # of prepared devices to the workers (it is read in __init__()).
+ if self._devices._prepared_devices:
+ self._options.prepared_devices = self._devices.prepared_devices()
+ else:
+ # We were called with --no-build, so assume the devices are up to date.
+ self._options.prepared_devices = [d.get_serial() for d in self._devices.usable_devices(self.host.executive)]
+
+ def num_workers(self, requested_num_workers):
+ return min(len(self._options.prepared_devices), requested_num_workers)
+
+ def check_sys_deps(self, needs_http):
+ for (font_dirs, font_file, package) in HOST_FONT_FILES:
+ exists = False
+ for font_dir in font_dirs:
+ font_path = font_dir + font_file
+ if self._check_file_exists(font_path, '', logging=False):
+ exists = True
+ break
+ if not exists:
+ _log.error('You are missing %s under %s. Try installing %s. See build instructions.' % (font_file, font_dirs, package))
+ return test_run_results.SYS_DEPS_EXIT_STATUS
+ return test_run_results.OK_EXIT_STATUS
+
+ def requires_http_server(self):
+ """Chromium Android runs tests on devices, and uses the HTTP server to
+ serve the actual layout tests to the test driver."""
+ return True
+
+ def start_http_server(self, additional_dirs, number_of_drivers):
+ additional_dirs[PERF_TEST_PATH_PREFIX] = self.perf_tests_dir()
+ additional_dirs[LAYOUT_TEST_PATH_PREFIX] = self.layout_tests_dir()
+ super(AndroidPort, self).start_http_server(additional_dirs, number_of_drivers)
+
+ def create_driver(self, worker_number, no_timeout=False):
+ return ChromiumAndroidDriver(self, worker_number, pixel_tests=self.get_option('pixel_tests'),
+ driver_details=self._driver_details,
+ android_devices=self._devices,
+ # Force no timeout to avoid test driver timeouts before NRWT.
+ no_timeout=True)
+
+ def driver_cmd_line(self):
+ # Override to return the actual test driver's command line.
+ return self.create_driver(0)._android_driver_cmd_line(self.get_option('pixel_tests'), [])
+
+ def clobber_old_port_specific_results(self):
+ if not self.get_option('disable_breakpad'):
+ self._dump_reader.clobber_old_results()
+
+ # Overridden protected methods.
+
+ def _build_path(self, *comps):
+ return self._host_port._build_path(*comps)
+
+ def _build_path_with_configuration(self, configuration, *comps):
+ return self._host_port._build_path_with_configuration(configuration, *comps)
+
+ def path_to_apache(self):
+ return self._host_port.path_to_apache()
+
+ def path_to_apache_config_file(self):
+ return self._host_port.path_to_apache_config_file()
+
+ def _path_to_driver(self, configuration=None):
+ return self._build_path_with_configuration(configuration, self._driver_details.apk_name())
+
+ def _path_to_helper(self):
+ return None
+
+ def _path_to_image_diff(self):
+ return self._host_port._path_to_image_diff()
+
+ def _path_to_wdiff(self):
+ return self._host_port._path_to_wdiff()
+
+ def _shut_down_http_server(self, pid):
+ return self._host_port._shut_down_http_server(pid)
+
+ def _driver_class(self):
+ return ChromiumAndroidDriver
+
+ # Local private methods.
+
+ @staticmethod
+ def _android_server_process_constructor(port, server_name, cmd_line, env=None, logging=False):
+ return server_process.ServerProcess(port, server_name, cmd_line, env,
+ universal_newlines=True, treat_no_data_as_crash=True, logging=logging)
+
+
+class AndroidPerf(SingleFileOutputProfiler):
+ _cached_perf_host_path = None
+ _have_searched_for_perf_host = False
+
+ def __init__(self, host, executable_path, output_dir, android_commands, symfs_path, kallsyms_path, identifier=None):
+ super(AndroidPerf, self).__init__(host, executable_path, output_dir, "data", identifier)
+ self._android_commands = android_commands
+ self._perf_process = None
+ self._symfs_path = symfs_path
+ self._kallsyms_path = kallsyms_path
+
+ def check_configuration(self):
+ # Check that perf is installed
+ if not self._android_commands.file_exists('/system/bin/perf'):
+ print "Cannot find /system/bin/perf on device %s" % self._android_commands.get_serial()
+ return False
+
+ # Check that the device is a userdebug build (or at least has the necessary libraries).
+ if self._android_commands.run(['shell', 'getprop', 'ro.build.type']).strip() != 'userdebug':
+ print "Device %s is not flashed with a userdebug build of Android" % self._android_commands.get_serial()
+ return False
+
+ # FIXME: Check that the binary actually is perf-able (has stackframe pointers)?
+ # objdump -s a function and make sure it modifies the fp?
+ # Instruct users to rebuild after export GYP_DEFINES="profiling=1 $GYP_DEFINES"
+ return True
+
+ def print_setup_instructions(self):
+ print """
+perf on Android requires a 'userdebug' build of Android, see:
+http://source.android.com/source/building-devices.html
+
+The perf command can be built from:
+https://android.googlesource.com/platform/external/linux-tools-perf/
+and requires libelf, libebl, libdw, and libdwfl, available in:
+https://android.googlesource.com/platform/external/elfutils/
+
+The test driver must be built with profiling=1, make sure you've done:
+export GYP_DEFINES="profiling=1 $GYP_DEFINES"
+update-webkit --chromium-android
+build-webkit --chromium-android
+
+Googlers should read:
+http://goto.google.com/cr-android-perf-howto
+"""
+
+ def attach_to_pid(self, pid):
+ assert(pid)
+ assert(self._perf_process is None)
+ # FIXME: This can't be a fixed timeout!
+ cmd = self._android_commands.adb_command() + ['shell', 'perf', 'record', '-g', '-p', pid, 'sleep', 30]
+ self._perf_process = self._host.executive.popen(cmd)
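+ # For illustration: with a device serial of 123456789ABCDEF0 the command
+ # assembled above is roughly
+ # adb -s 123456789ABCDEF0 shell perf record -g -p <pid> sleep 30
+ # i.e. sample the target process with call graphs (-g) for the 30 seconds
+ # that the dummy 'sleep' keeps perf recording.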
+
+ def _perf_version_string(self, perf_path):
+ try:
+ return self._host.executive.run_command([perf_path, '--version'])
+ except Exception:
+ return None
+
+ def _find_perfhost_binary(self):
+ perfhost_version = self._perf_version_string('perfhost_linux')
+ if perfhost_version:
+ return 'perfhost_linux'
+ perf_version = self._perf_version_string('perf')
+ if perf_version:
+ return 'perf'
+ return None
+
+ def _perfhost_path(self):
+ if self._have_searched_for_perf_host:
+ return self._cached_perf_host_path
+ self._have_searched_for_perf_host = True
+ self._cached_perf_host_path = self._find_perfhost_binary()
+ return self._cached_perf_host_path
+
+ def _first_ten_lines_of_profile(self, perf_output):
+ match = re.search("^#[^\n]*\n((?: [^\n]*\n){1,10})", perf_output, re.MULTILINE)
+ return match.group(1) if match else None
+
+ def profile_after_exit(self):
+ perf_exitcode = self._perf_process.wait()
+ if perf_exitcode != 0:
+ print "Perf failed (exit code: %i), can't process results." % perf_exitcode
+ return
+
+ self._android_commands.pull('/data/perf.data', self._output_path)
+
+ perfhost_path = self._perfhost_path()
+ perfhost_report_command = [
+ 'report',
+ '--input', self._output_path,
+ '--symfs', self._symfs_path,
+ '--kallsyms', self._kallsyms_path,
+ ]
+ if perfhost_path:
+ perfhost_args = [perfhost_path] + perfhost_report_command + ['--call-graph', 'none']
+ perf_output = self._host.executive.run_command(perfhost_args)
+ # We could save off the full -g report to a file if users found that useful.
+ print self._first_ten_lines_of_profile(perf_output)
+ else:
+ print """
+Failed to find perfhost_linux binary, can't process samples from the device.
+
+perfhost_linux can be built from:
+https://android.googlesource.com/platform/external/linux-tools-perf/
+Alternatively, modern versions of perf (available from apt-get install goobuntu-kernel-tools-common)
+may be able to process the perf.data files from the device.
+
+Googlers should read:
+http://goto.google.com/cr-android-perf-howto
+for instructions on installing pre-built copies of perfhost_linux
+http://crbug.com/165250 discusses making these pre-built binaries externally available.
+"""
+
+ perfhost_display_path = perfhost_path if perfhost_path else 'perfhost_linux'
+ print "To view the full profile, run:"
+ print ' '.join([perfhost_display_path] + perfhost_report_command)
+
+
+class ChromiumAndroidDriver(driver.Driver):
+ def __init__(self, port, worker_number, pixel_tests, driver_details, android_devices, no_timeout=False):
+ super(ChromiumAndroidDriver, self).__init__(port, worker_number, pixel_tests, no_timeout)
+ self._in_fifo_path = driver_details.device_fifo_directory() + 'stdin.fifo'
+ self._out_fifo_path = driver_details.device_fifo_directory() + 'test.fifo'
+ self._err_fifo_path = driver_details.device_fifo_directory() + 'stderr.fifo'
+ self._read_stdout_process = None
+ self._read_stderr_process = None
+ self._forwarder_process = None
+ self._original_governors = {}
+ self._original_kptr_restrict = None
+
+ self._android_devices = android_devices
+ self._android_commands = android_devices.get_device(port._executive, worker_number)
+ self._driver_details = driver_details
+ self._debug_logging = self._port._debug_logging
+ self._created_cmd_line = False
+ self._device_failed = False
+
+ # FIXME: If we taught ProfileFactory about "target" devices we could
+ # just use the logic in Driver instead of duplicating it here.
+ if self._port.get_option("profile"):
+ # FIXME: This should be done once, instead of per-driver!
+ symfs_path = self._find_or_create_symfs()
+ kallsyms_path = self._update_kallsyms_cache(symfs_path)
+ # FIXME: We should pass this some sort of "Bridge" object abstraction around ADB instead of a path/device pair.
+ self._profiler = AndroidPerf(self._port.host, self._port._path_to_driver(), self._port.results_directory(),
+ self._android_commands, symfs_path, kallsyms_path)
+ # FIXME: This is a layering violation and should be moved to Port.check_sys_deps
+ # once we have an abstraction around an adb_path/device_serial pair to make it
+ # easy to make these class methods on AndroidPerf.
+ if not self._profiler.check_configuration():
+ self._profiler.print_setup_instructions()
+ sys.exit(1)
+ else:
+ self._profiler = None
+
+ def __del__(self):
+ self._teardown_performance()
+ self._clean_up_cmd_line()
+ super(ChromiumAndroidDriver, self).__del__()
+
+ def _update_kallsyms_cache(self, output_dir):
+ kallsyms_name = "%s-kallsyms" % self._android_commands.get_serial()
+ kallsyms_cache_path = self._port.host.filesystem.join(output_dir, kallsyms_name)
+
+ self._android_commands.restart_as_root()
+
+ saved_kptr_restrict = self._android_commands.run(['shell', 'cat', KPTR_RESTRICT_PATH]).strip()
+ self._android_commands.run(['shell', 'echo', '0', '>', KPTR_RESTRICT_PATH])
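+ # A non-zero kptr_restrict value makes the kernel zero out the addresses
+ # reported in /proc/kallsyms, so it is cleared here and the saved value is
+ # restored after the pull below.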
+
+ print "Updating kallsyms file (%s) from device" % kallsyms_cache_path
+ self._android_commands.pull("/proc/kallsyms", kallsyms_cache_path)
+
+ self._android_commands.run(['shell', 'echo', saved_kptr_restrict, '>', KPTR_RESTRICT_PATH])
+
+ return kallsyms_cache_path
+
+ def _find_or_create_symfs(self):
+ environment = self._port.host.copy_current_environment()
+ env = environment.to_dictionary()
+ fs = self._port.host.filesystem
+
+ if 'ANDROID_SYMFS' in env:
+ symfs_path = env['ANDROID_SYMFS']
+ else:
+ symfs_path = fs.join(self._port.results_directory(), 'symfs')
+ print "ANDROID_SYMFS not set, using %s" % symfs_path
+
+ # Find the installed path, and the path of the built library containing symbols.
+ # FIXME: We should get the install path from the device!
+ symfs_library_path = fs.join(symfs_path, "data/app-lib/%s-1/%s" % (self._driver_details.package_name(), self._driver_details.library_name()))
+ built_library_path = self._port._build_path('lib', self._driver_details.library_name())
+ assert(fs.exists(built_library_path))
+
+ # FIXME: Ideally we'd check the SHA-1s first and make a soft-link instead of copying (since we probably never care about Windows).
+ print "Updating symfs library (%s) from built copy (%s)" % (symfs_library_path, built_library_path)
+ fs.maybe_make_directory(fs.dirname(symfs_library_path))
+ fs.copyfile(built_library_path, symfs_library_path)
+
+ return symfs_path
+
+ def _setup_md5sum_and_push_data_if_needed(self, log_callback):
+ self._md5sum_path = self._port.path_to_md5sum()
+ if not self._android_commands.file_exists(MD5SUM_DEVICE_PATH):
+ if not self._android_commands.push(self._md5sum_path, MD5SUM_DEVICE_PATH):
+ self._abort('Could not push md5sum to device')
+
+ self._push_executable(log_callback)
+ self._push_fonts(log_callback)
+ self._push_test_resources(log_callback)
+
+ def _setup_test(self, log_callback):
+ # FIXME: Move this routine and its subroutines off of the AndroidDriver
+ # class and onto AndroidCommands or some other helper class, so that we
+ # can initialize the device without needing to create a driver.
+
+ if self._android_devices.is_device_prepared(self._android_commands.get_serial()):
+ return
+
+ self._android_commands.restart_adb()
+ self._android_commands.restart_as_root()
+ self._setup_md5sum_and_push_data_if_needed(log_callback)
+ self._setup_performance()
+
+ # Required by webkit_support::GetWebKitRootDirFilePath().
+ # Other directories will be created automatically by adb push.
+ self._android_commands.mkdir(DEVICE_SOURCE_ROOT_DIR + 'chrome')
+
+ # Allow the test driver full read and write access to its directory on the device,
+ # as well as to the FIFOs. This requires world-writable directories.
+ self._android_commands.mkdir(self._driver_details.device_directory(), chmod='777')
+ self._android_commands.mkdir(self._driver_details.device_fifo_directory(), chmod='777')
+
+ # Make sure that the disk cache on the device resets to a clean state.
+ self._android_commands.run(['shell', 'rm', '-r', self._driver_details.device_cache_directory()])
+
+ # Mark this device as having been set up.
+ self._android_devices.set_device_prepared(self._android_commands.get_serial())
+
+ def _log_error(self, message):
+ _log.error('[%s] %s' % (self._android_commands.get_serial(), message))
+
+ def _log_warning(self, message):
+ _log.warning('[%s] %s' % (self._android_commands.get_serial(), message))
+
+ def _log_debug(self, message):
+ if self._debug_logging:
+ _log.debug('[%s] %s' % (self._android_commands.get_serial(), message))
+
+ def _abort(self, message):
+ self._device_failed = True
+ raise driver.DeviceFailure('[%s] %s' % (self._android_commands.get_serial(), message))
+
+ @staticmethod
+ def _extract_hashes_from_md5sum_output(md5sum_output):
+ assert md5sum_output
+ return [line.split(' ')[0] for line in md5sum_output]
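+ # md5sum emits one line per file, e.g. "d41d8cd98f00b204e9800998ecf8427e <path>",
+ # so the first space-separated token of each line is the hash.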
+
+ def _files_match(self, host_file, device_file):
+ assert self._port.host.filesystem.exists(host_file)
+ device_hashes = self._extract_hashes_from_md5sum_output(
+ self._port.host.executive.popen(self._android_commands.adb_command() + ['shell', MD5SUM_DEVICE_PATH, device_file],
+ stdout=subprocess.PIPE).stdout)
+ host_hashes = self._extract_hashes_from_md5sum_output(
+ self._port.host.executive.popen(args=['%s_host' % self._md5sum_path, host_file],
+ stdout=subprocess.PIPE).stdout)
+ return host_hashes and device_hashes == host_hashes
+
+ def _push_file_if_needed(self, host_file, device_file, log_callback):
+ basename = self._port.host.filesystem.basename(host_file)
+ log_callback("checking %s" % basename)
+ if not self._files_match(host_file, device_file):
+ log_callback("pushing %s" % basename)
+ self._android_commands.push(host_file, device_file)
+
+ def _push_executable(self, log_callback):
+ self._push_file_if_needed(self._port.path_to_forwarder(), self._driver_details.device_forwarder_path(), log_callback)
+ for resource in self._driver_details.additional_resources():
+ self._push_file_if_needed(self._port._build_path(resource), self._driver_details.device_directory() + resource, log_callback)
+
+ self._push_file_if_needed(self._port._build_path('android_main_fonts.xml'), self._driver_details.device_directory() + 'android_main_fonts.xml', log_callback)
+ self._push_file_if_needed(self._port._build_path('android_fallback_fonts.xml'), self._driver_details.device_directory() + 'android_fallback_fonts.xml', log_callback)
+
+ log_callback("checking apk")
+ if self._files_match(self._port._build_path('apks', 'ContentShell.apk'),
+ '/data/app/org.chromium.content_shell_apk-1.apk'):
+ return
+
+ log_callback("uninstalling apk")
+ self._android_commands.run(['uninstall', self._driver_details.package_name()])
+ driver_host_path = self._port._path_to_driver()
+ log_callback("installing apk")
+ install_result = self._android_commands.run(['install', driver_host_path])
+ if install_result.find('Success') == -1:
+ self._abort('Failed to install %s onto device: %s' % (driver_host_path, install_result))
+
+ def _push_fonts(self, log_callback):
+ path_to_ahem_font = self._port._build_path('AHEM____.TTF')
+ self._push_file_if_needed(path_to_ahem_font, self._driver_details.device_fonts_directory() + 'AHEM____.TTF', log_callback)
+ for (host_dirs, font_file, package) in HOST_FONT_FILES:
+ for host_dir in host_dirs:
+ host_font_path = host_dir + font_file
+ if self._port._check_file_exists(host_font_path, '', logging=False):
+ self._push_file_if_needed(host_font_path, self._driver_details.device_fonts_directory() + font_file, log_callback)
+
+ def _push_test_resources(self, log_callback):
+ for resource in TEST_RESOURCES_TO_PUSH:
+ self._push_file_if_needed(self._port.layout_tests_dir() + '/' + resource, DEVICE_LAYOUT_TESTS_DIR + resource, log_callback)
+
+ def _get_last_stacktrace(self):
+ tombstones = self._android_commands.run(['shell', 'ls', '-n', '/data/tombstones/tombstone_*'])
+ if not tombstones or tombstones.startswith('/data/tombstones/tombstone_*: No such file or directory'):
+ self._log_error('The driver crashed, but no tombstone found!')
+ return ''
+
+ if tombstones.startswith('/data/tombstones/tombstone_*: Permission denied'):
+ # FIXME: crbug.com/321489 ... figure out why this happens.
+ self._log_error('The driver crashed, but we could not read the tombstones!')
+ return ''
+
+ tombstones = tombstones.rstrip().split('\n')
+ last_tombstone = None
+ for tombstone in tombstones:
+ # Format of fields:
+ # 0 1 2 3 4 5 6
+ # permission uid gid size date time filename
+ # -rw------- 1000 1000 45859 2011-04-13 06:00 tombstone_00
+ fields = tombstone.split()
+ if len(fields) != 7:
+ self._log_warning("unexpected line in tombstone output, skipping: '%s'" % tombstone)
+ continue
+
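+ # fields[4] and fields[5] hold the ISO date and time (e.g. '2011-04-13'
+ # and '06:00'), so their concatenation orders chronologically under plain
+ # string comparison.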
+ if not last_tombstone or fields[4] + fields[5] >= last_tombstone[4] + last_tombstone[5]:
+ last_tombstone = fields
+ else:
+ break
+
+ if not last_tombstone:
+ self._log_error('The driver crashed, but we could not find any valid tombstone!')
+ return ''
+
+ # Use Android tool vendor/google/tools/stack to convert the raw
+ # stack trace into a human readable format, if needed.
+ # It takes a long time, so don't do it here.
+ return '%s\n%s' % (' '.join(last_tombstone),
+ self._android_commands.run(['shell', 'cat', '/data/tombstones/' + last_tombstone[6]]))
+
+ def _get_logcat(self):
+ return self._android_commands.run(['logcat', '-d', '-v', 'threadtime'])
+
+ def _setup_performance(self):
+ # Pin the CPU frequency scaling governors to 'performance' to reduce noise in tests.
+ if not self._original_governors:
+ governor_files = self._android_commands.run(['shell', 'ls', SCALING_GOVERNORS_PATTERN])
+ if governor_files.find('No such file or directory') == -1:
+ for governor_file in governor_files.split():
+ self._original_governors[governor_file] = self._android_commands.run(['shell', 'cat', governor_file]).strip()
+ self._android_commands.run(['shell', 'echo', 'performance', '>', governor_file])
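+ # SCALING_GOVERNORS_PATTERN is expected to glob the per-CPU governor files
+ # (typically /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor); the
+ # 'performance' governor pins each core at its highest frequency until
+ # _teardown_performance() restores the saved settings.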
+
+ def _teardown_performance(self):
+ for governor_file, original_content in self._original_governors.items():
+ self._android_commands.run(['shell', 'echo', original_content, '>', governor_file])
+ self._original_governors = {}
+
+ def _get_crash_log(self, stdout, stderr, newer_than):
+ if not stdout:
+ stdout = ''
+ stdout += '********* [%s] Logcat:\n%s' % (self._android_commands.get_serial(), self._get_logcat())
+ if not stderr:
+ stderr = ''
+ stderr += '********* [%s] Tombstone file:\n%s' % (self._android_commands.get_serial(), self._get_last_stacktrace())
+
+ if not self._port.get_option('disable_breakpad'):
+ crashes = self._pull_crash_dumps_from_device()
+ for crash in crashes:
+ stderr += '********* [%s] breakpad minidump %s:\n%s' % (self._android_commands.get_serial(), self._port.host.filesystem.basename(crash), self._port._dump_reader._get_stack_from_dump(crash))
+
+ return super(ChromiumAndroidDriver, self)._get_crash_log(stdout, stderr, newer_than)
+
+ def cmd_line(self, pixel_tests, per_test_args):
+ # The returned command line is used to start _server_process. In our case, it's an interactive 'adb shell'.
+ # The command line passed to the driver process is returned by _driver_cmd_line() instead.
+ return self._android_commands.adb_command() + ['shell']
+
+ def _android_driver_cmd_line(self, pixel_tests, per_test_args):
+ return driver.Driver.cmd_line(self, pixel_tests, per_test_args)
+
+ @staticmethod
+ def _loop_with_timeout(condition, timeout_secs):
+ deadline = time.time() + timeout_secs
+ while time.time() < deadline:
+ if condition():
+ return True
+ return False
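+ # For example, _loop_with_timeout(self._all_pipes_created, 10) busy-polls
+ # the condition and returns False if ~10 wall-clock seconds elapse without
+ # it becoming true.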
+
+ def _all_pipes_created(self):
+ return (self._android_commands.file_exists(self._in_fifo_path) and
+ self._android_commands.file_exists(self._out_fifo_path) and
+ self._android_commands.file_exists(self._err_fifo_path))
+
+ def _remove_all_pipes(self):
+ for fifo_path in [self._in_fifo_path, self._out_fifo_path, self._err_fifo_path]:
+ self._android_commands.run(['shell', 'rm', fifo_path])
+
+ return (not self._android_commands.file_exists(self._in_fifo_path) and
+ not self._android_commands.file_exists(self._out_fifo_path) and
+ not self._android_commands.file_exists(self._err_fifo_path))
+
+ def start(self, pixel_tests, per_test_args, deadline):
+ # We override the default start() so that we can call _android_driver_cmd_line()
+ # instead of cmd_line().
+ new_cmd_line = self._android_driver_cmd_line(pixel_tests, per_test_args)
+
+ # Since _android_driver_cmd_line() is different from cmd_line(), we need to provide
+ # our own mechanism for detecting when the process should be stopped.
+ if self._current_cmd_line is None:
+ self._current_android_cmd_line = None
+ if new_cmd_line != self._current_android_cmd_line:
+ self.stop()
+ self._current_android_cmd_line = new_cmd_line
+
+ super(ChromiumAndroidDriver, self).start(pixel_tests, per_test_args, deadline)
+
+ def _start(self, pixel_tests, per_test_args):
+ if not self._android_devices.is_device_prepared(self._android_commands.get_serial()):
+ raise driver.DeviceFailure("%s is not prepared in _start()" % self._android_commands.get_serial())
+
+ for retries in range(3):
+ try:
+ if self._start_once(pixel_tests, per_test_args):
+ return
+ except ScriptError as e:
+ self._abort('ScriptError("%s") in _start()' % str(e))
+
+ self._log_error('Failed to start the content_shell application. Retries=%d. Log:%s' % (retries, self._get_logcat()))
+ self.stop()
+ time.sleep(2)
+ self._abort('Failed to start the content_shell application multiple times. Giving up.')
+
+ def _start_once(self, pixel_tests, per_test_args):
+ super(ChromiumAndroidDriver, self)._start(pixel_tests, per_test_args, wait_for_ready=False)
+
+ self._log_debug('Starting forwarder')
+ self._forwarder_process = self._port._server_process_constructor(
+ self._port, 'Forwarder', self._android_commands.adb_command() + ['shell', '%s -D %s' % (self._driver_details.device_forwarder_path(), FORWARD_PORTS)])
+ self._forwarder_process.start()
+
+ deadline = time.time() + DRIVER_START_STOP_TIMEOUT_SECS
+ if not self._wait_for_server_process_output(self._forwarder_process, deadline, 'Forwarding device port'):
+ return False
+
+ self._android_commands.run(['logcat', '-c'])
+
+ cmd_line_file_path = self._driver_details.command_line_file()
+ original_cmd_line_file_path = cmd_line_file_path + '.orig'
+ if self._android_commands.file_exists(cmd_line_file_path) and not self._android_commands.file_exists(original_cmd_line_file_path):
+ # We check for both the normal path and the backup because we do not want to step
+ # on the backup. Otherwise, we'd clobber the backup whenever we changed the
+ # command line during the run.
+ self._android_commands.run(['shell', 'mv', cmd_line_file_path, original_cmd_line_file_path])
+
+ self._android_commands.run(['shell', 'echo'] + self._android_driver_cmd_line(pixel_tests, per_test_args) + ['>', self._driver_details.command_line_file()])
+ self._created_cmd_line = True
+
+ self._android_commands.run(['shell', 'rm', '-rf', self._driver_details.device_crash_dumps_directory()])
+ self._android_commands.mkdir(self._driver_details.device_crash_dumps_directory(), chmod='777')
+
+ start_result = self._android_commands.run(['shell', 'am', 'start', '-e', 'RunInSubThread', '-n', self._driver_details.activity_name()])
+ if start_result.find('Exception') != -1:
+ self._log_error('Failed to start the content_shell application. Exception:\n' + start_result)
+ return False
+
+ if not ChromiumAndroidDriver._loop_with_timeout(self._all_pipes_created, DRIVER_START_STOP_TIMEOUT_SECS):
+ return False
+
+ # Read back the shell prompt to ensure the adb shell is ready.
+ deadline = time.time() + DRIVER_START_STOP_TIMEOUT_SECS
+ self._server_process.start()
+ self._read_prompt(deadline)
+ self._log_debug('Interactive shell started')
+
+ # Start a process to read from the stdout fifo of the test driver and print to stdout.
+ self._log_debug('Redirecting stdout to ' + self._out_fifo_path)
+ self._read_stdout_process = self._port._server_process_constructor(
+ self._port, 'ReadStdout', self._android_commands.adb_command() + ['shell', 'cat', self._out_fifo_path])
+ self._read_stdout_process.start()
+
+ # Start a process to read from the stderr fifo of the test driver and print to stdout.
+ self._log_debug('Redirecting stderr to ' + self._err_fifo_path)
+ self._read_stderr_process = self._port._server_process_constructor(
+ self._port, 'ReadStderr', self._android_commands.adb_command() + ['shell', 'cat', self._err_fifo_path])
+ self._read_stderr_process.start()
+
+ self._log_debug('Redirecting stdin to ' + self._in_fifo_path)
+ self._server_process.write('cat >%s\n' % self._in_fifo_path)
+
+ # Combine the stdout and stderr pipes into self._server_process.
+ self._server_process.replace_outputs(self._read_stdout_process._proc.stdout, self._read_stderr_process._proc.stdout)
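+ # The resulting plumbing, with each leg carried over its own adb shell:
+ # NRWT stdin --('cat > stdin.fifo')--> driver stdin
+ # driver stdout --('cat test.fifo')--> NRWT stdout
+ # driver stderr --('cat stderr.fifo')--> NRWT stderr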
+
+ def deadlock_detector(processes, normal_startup_event):
+ if not ChromiumAndroidDriver._loop_with_timeout(lambda: normal_startup_event.is_set(), DRIVER_START_STOP_TIMEOUT_SECS):
+ # If normal_startup_event is not set in time, the main thread must be blocked at
+ # reading/writing the fifo. Kill the fifo reading/writing processes to let the
+ # main thread escape from the deadlocked state. After that, the main thread will
+ # treat this as a crash.
+ self._log_error('Deadlock detected. Processes killed.')
+ for i in processes:
+ i.kill()
+
+ # Start a thread to kill the pipe reading/writing processes on deadlock of the fifos during startup.
+ normal_startup_event = threading.Event()
+ threading.Thread(name='DeadlockDetector', target=deadlock_detector,
+ args=([self._server_process, self._read_stdout_process, self._read_stderr_process], normal_startup_event)).start()
+
+ # The test driver might crash during startup or when the deadlock detector hits
+ # a deadlock and kills the fifo reading/writing processes.
+ if not self._wait_for_server_process_output(self._server_process, deadline, '#READY'):
+ return False
+
+ # Inform the deadlock detector that the startup is successful without deadlock.
+ normal_startup_event.set()
+ self._log_debug("content_shell is ready")
+ return True
+
+ def _pid_from_android_ps_output(self, ps_output, package_name):
+ # ps output seems to be fixed width; we only care about the name and the pid:
+ # u0_a72 21630 125 947920 59364 ffffffff 400beee4 S org.chromium.native_test
+ for line in ps_output.split('\n'):
+ if line.find(package_name) != -1:
+ match = re.match(r'\S+\s+(\d+)', line)
+ return int(match.group(1))
+
+ def _pid_on_target(self):
+ # FIXME: There must be a better way to do this than grepping ps output!
+ ps_output = self._android_commands.run(['shell', 'ps'])
+ return self._pid_from_android_ps_output(ps_output, self._driver_details.package_name())
+
+ def stop(self):
+ if not self._device_failed:
+ # Do not try to stop the application if there's something wrong with the device; adb may hang.
+ # FIXME: crbug.com/305040. Figure out if it's really hanging (and why).
+ self._android_commands.run(['shell', 'am', 'force-stop', self._driver_details.package_name()])
+
+ if self._read_stdout_process:
+ self._read_stdout_process.kill()
+ self._read_stdout_process = None
+
+ if self._read_stderr_process:
+ self._read_stderr_process.kill()
+ self._read_stderr_process = None
+
+ super(ChromiumAndroidDriver, self).stop()
+
+ if self._forwarder_process:
+ self._forwarder_process.kill()
+ self._forwarder_process = None
+
+ if self._android_devices.is_device_prepared(self._android_commands.get_serial()):
+ if not ChromiumAndroidDriver._loop_with_timeout(self._remove_all_pipes, DRIVER_START_STOP_TIMEOUT_SECS):
+ self._abort('Failed to remove fifo files. May be locked.')
+
+ self._clean_up_cmd_line()
+
+ def _pull_crash_dumps_from_device(self):
+ result = []
+ if not self._android_commands.file_exists(self._driver_details.device_crash_dumps_directory()):
+ return result
+ dumps = self._android_commands.run(['shell', 'ls', self._driver_details.device_crash_dumps_directory()])
+ for dump in dumps.splitlines():
+ device_dump = '%s/%s' % (self._driver_details.device_crash_dumps_directory(), dump)
+ local_dump = self._port._filesystem.join(self._port._dump_reader.crash_dumps_directory(), dump)
+
+ # FIXME: crbug.com/321489. Figure out why these commands would fail ...
+ err = self._android_commands.run(['shell', 'chmod', '777', device_dump])
+ if not err:
+ self._android_commands.pull(device_dump, local_dump)
+ if not err:
+ self._android_commands.run(['shell', 'rm', '-f', device_dump])
+
+ if self._port._filesystem.exists(local_dump):
+ result.append(local_dump)
+ return result
+
+ def _clean_up_cmd_line(self):
+ if not self._created_cmd_line:
+ return
+
+ cmd_line_file_path = self._driver_details.command_line_file()
+ original_cmd_line_file_path = cmd_line_file_path + '.orig'
+ if self._android_commands.file_exists(original_cmd_line_file_path):
+ self._android_commands.run(['shell', 'mv', original_cmd_line_file_path, cmd_line_file_path])
+ elif self._android_commands.file_exists(cmd_line_file_path):
+ self._android_commands.run(['shell', 'rm', cmd_line_file_path])
+ self._created_cmd_line = False
+
+ def _command_from_driver_input(self, driver_input):
+ command = super(ChromiumAndroidDriver, self)._command_from_driver_input(driver_input)
+ if command.startswith('/'):
+ fs = self._port._filesystem
+ # FIXME: what happens if command lies outside of the layout_tests_dir on the host?
+ relative_test_filename = fs.relpath(command, fs.dirname(self._port.layout_tests_dir()))
+ command = DEVICE_WEBKIT_BASE_DIR + relative_test_filename
+ return command
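+ # For example, an absolute host path ending in LayoutTests/fast/dom/test.html
+ # is rewritten to DEVICE_WEBKIT_BASE_DIR + 'LayoutTests/fast/dom/test.html',
+ # since relpath() is taken against the parent of layout_tests_dir().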
+
+ def _read_prompt(self, deadline):
+ last_char = ''
+ while True:
+ current_char = self._server_process.read_stdout(deadline, 1)
+ if current_char == ' ':
+ if last_char in ('#', '$'):
+ return
+ last_char = current_char
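+ # An Android shell prompt ends in '# ' (root) or '$ ' (unprivileged), so a
+ # space preceded by either marker means the prompt has been fully consumed.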
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/android_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/android_unittest.py
new file mode 100644
index 0000000..9a2fd61
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/android_unittest.py
@@ -0,0 +1,320 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import optparse
+import sys
+import time
+import unittest
+
+from webkitpy.common.system import executive_mock
+from webkitpy.common.system.executive_mock import MockExecutive2
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+from webkitpy.layout_tests.port import android
+from webkitpy.layout_tests.port import port_testcase
+from webkitpy.layout_tests.port import driver
+from webkitpy.layout_tests.port import driver_unittest
+from webkitpy.tool.mocktool import MockOptions
+
+# Type of tombstone test which the mocked Android Debug Bridge should execute.
+VALID_TOMBSTONE_TEST_TYPE = 0
+NO_FILES_TOMBSTONE_TEST_TYPE = 1
+NO_PERMISSION_TOMBSTONE_TEST_TYPE = 2
+INVALID_ENTRY_TOMBSTONE_TEST_TYPE = 3
+INVALID_ENTRIES_TOMBSTONE_TEST_TYPE = 4
+
+# Any "adb" commands will be interpret by this class instead of executing actual
+# commansd on the file system, which we don't want to do.
+class MockAndroidDebugBridge:
+ def __init__(self, device_count):
+ self._device_count = device_count
+ self._last_command = None
+ self._tombstone_output = None
+
+ # Local public methods.
+
+ def run_command(self, args):
+ self._last_command = ' '.join(args)
+ if args[0].startswith('path'):
+ if args[0] == 'path1':
+ return ''
+ if args[0] == 'path2':
+ return 'version 1.1'
+
+ return 'version 1.0'
+
+ if args[0] == 'adb':
+ if len(args) > 1 and args[1] == 'version':
+ return 'version 1.0'
+ if len(args) > 1 and args[1] == 'devices':
+ return self._get_device_output()
+ if len(args) > 3 and args[3] == 'command':
+ return 'mockoutput'
+ if len(args) > 3 and args[3] == 'install':
+ return 'Success'
+ if len(args) > 3 and args[3] in ('push', 'wait-for-device'):
+ return 'mockoutput'
+ if len(args) > 5 and args[5] == 'battery':
+ return 'level: 99'
+ if len(args) > 5 and args[5] == 'force-stop':
+ return 'mockoutput'
+ if len(args) > 5 and args[5] == 'power':
+ return 'mScreenOn=true'
+ if len(args) > 5 and args[4] == 'cat' and args[5].find('tombstone') != -1:
+ return 'tombstone content'
+ if len(args) > 6 and args[4] == 'ls' and args[6].find('tombstone') != -1:
+ assert self._tombstone_output, 'Tombstone output needs to have been set by the test.'
+ return self._tombstone_output
+
+ return ''
+
+ def last_command(self):
+ return self._last_command
+
+ def set_tombstone_output(self, output):
+ self._tombstone_output = output
+
+ # Local private methods.
+
+ def _get_device_output(self):
+ serials = ['123456789ABCDEF0', '123456789ABCDEF1', '123456789ABCDEF2',
+ '123456789ABCDEF3', '123456789ABCDEF4', '123456789ABCDEF5']
+ output = 'List of devices attached\n'
+ for serial in serials[:self._device_count]:
+ output += '%s\tdevice\n' % serial
+ return output
+
+
+class AndroidCommandsTest(unittest.TestCase):
+ def setUp(self):
+ android.AndroidCommands._adb_command_path = None
+ android.AndroidCommands._adb_command_path_options = ['adb']
+
+ def make_executive(self, device_count):
+ self._mock_executive = MockAndroidDebugBridge(device_count)
+ return MockExecutive2(run_command_fn=self._mock_executive.run_command)
+
+ def make_android_commands(self, device_count, serial):
+ return android.AndroidCommands(self.make_executive(device_count), serial, debug_logging=False)
+
+ # The adb command used should include the device's serial number, and get_serial() should reflect this.
+ def test_adb_command_and_get_serial(self):
+ android_commands = self.make_android_commands(1, '123456789ABCDEF0')
+ self.assertEquals(['adb', '-s', '123456789ABCDEF0'], android_commands.adb_command())
+ self.assertEquals('123456789ABCDEF0', android_commands.get_serial())
+
+ # Running an adb command should return the command's output.
+ def test_run_command(self):
+ android_commands = self.make_android_commands(1, '123456789ABCDEF0')
+
+ output = android_commands.run(['command'])
+ self.assertEquals('adb -s 123456789ABCDEF0 command', self._mock_executive.last_command())
+ self.assertEquals('mockoutput', output)
+
+ # Test that the convenience methods create the expected commands.
+ def test_convenience_methods(self):
+ android_commands = self.make_android_commands(1, '123456789ABCDEF0')
+
+ android_commands.file_exists('/some_directory')
+ self.assertEquals('adb -s 123456789ABCDEF0 shell ls -d /some_directory', self._mock_executive.last_command())
+
+ android_commands.push('foo', 'bar')
+ self.assertEquals('adb -s 123456789ABCDEF0 push foo bar', self._mock_executive.last_command())
+
+ android_commands.pull('bar', 'foo')
+ self.assertEquals('adb -s 123456789ABCDEF0 pull bar foo', self._mock_executive.last_command())
+
+
+class AndroidPortTest(port_testcase.PortTestCase):
+ port_name = 'android'
+ port_maker = android.AndroidPort
+
+ def make_port(self, **kwargs):
+ port = super(AndroidPortTest, self).make_port(**kwargs)
+ port._mock_adb = MockAndroidDebugBridge(kwargs.get('device_count', 1))
+ port._executive = MockExecutive2(run_command_fn=port._mock_adb.run_command)
+ return port
+
+ def test_check_build(self):
+ host = MockSystemHost()
+ host.filesystem.exists = lambda p: True
+ port = self.make_port(host=host, options=MockOptions(child_processes=1))
+ port.check_build(needs_http=True, printer=port_testcase.FakePrinter())
+
+ def test_check_sys_deps(self):
+ # FIXME: Do something useful here, but testing the full logic would be hard.
+ pass
+
+ def make_wdiff_available(self, port):
+ port._wdiff_available = True
+ port._host_port._wdiff_available = True
+
+ # Test that content_shell currently is the only supported driver.
+ def test_non_content_shell_driver(self):
+ self.assertRaises(self.make_port, options=optparse.Values({'driver_name': 'foobar'}))
+
+ # Test that the number of child processes to create depends on the devices.
+ def test_default_child_processes(self):
+ port_default = self.make_port(device_count=5)
+ port_fixed_device = self.make_port(device_count=5, options=optparse.Values({'adb_device': '123456789ABCDEF9'}))
+
+ self.assertEquals(5, port_default.default_child_processes())
+ self.assertEquals(1, port_fixed_device.default_child_processes())
+
+ # Test that an HTTP server is indeed required by Android (as we serve all tests over HTTP).
+ def test_requires_http_server(self):
+ self.assertTrue(self.make_port(device_count=1).requires_http_server())
+
+ # Tests the default timeouts for Android, which differ from those of the rest of Chromium.
+ def test_default_timeout_ms(self):
+ self.assertEqual(self.make_port(options=optparse.Values({'configuration': 'Release'})).default_timeout_ms(), 10000)
+ self.assertEqual(self.make_port(options=optparse.Values({'configuration': 'Debug'})).default_timeout_ms(), 10000)
+
+
+class ChromiumAndroidDriverTest(unittest.TestCase):
+ def setUp(self):
+ self._mock_adb = MockAndroidDebugBridge(1)
+ self._mock_executive = MockExecutive2(run_command_fn=self._mock_adb.run_command)
+
+ android_commands = android.AndroidCommands(self._mock_executive, '123456789ABCDEF0', debug_logging=False)
+ self._port = android.AndroidPort(MockSystemHost(executive=self._mock_executive), 'android')
+ self._driver = android.ChromiumAndroidDriver(self._port, worker_number=0,
+ pixel_tests=True, driver_details=android.ContentShellDriverDetails(), android_devices=self._port._devices)
+
+ # The cmd_line() method in the Android port is used for starting a shell, not the test runner.
+ def test_cmd_line(self):
+ self.assertEquals(['adb', '-s', '123456789ABCDEF0', 'shell'], self._driver.cmd_line(False, []))
+
+ # Test that the Chromium Android port can interpret Android's shell output.
+ def test_read_prompt(self):
+ self._driver._server_process = driver_unittest.MockServerProcess(lines=['root@android:/ # '])
+ self.assertIsNone(self._driver._read_prompt(time.time() + 1))
+ self._driver._server_process = driver_unittest.MockServerProcess(lines=['$ '])
+ self.assertIsNone(self._driver._read_prompt(time.time() + 1))
+
+
+class ChromiumAndroidDriverTwoDriversTest(unittest.TestCase):
+ # Test two drivers getting the right serial numbers, and that we disregard per-test arguments.
+ def test_two_drivers(self):
+ mock_adb = MockAndroidDebugBridge(2)
+ mock_executive = MockExecutive2(run_command_fn=mock_adb.run_command)
+
+ port = android.AndroidPort(MockSystemHost(executive=mock_executive), 'android')
+ driver0 = android.ChromiumAndroidDriver(port, worker_number=0, pixel_tests=True,
+ driver_details=android.ContentShellDriverDetails(), android_devices=port._devices)
+ driver1 = android.ChromiumAndroidDriver(port, worker_number=1, pixel_tests=True,
+ driver_details=android.ContentShellDriverDetails(), android_devices=port._devices)
+
+ self.assertEqual(['adb', '-s', '123456789ABCDEF0', 'shell'], driver0.cmd_line(True, []))
+ self.assertEqual(['adb', '-s', '123456789ABCDEF1', 'shell'], driver1.cmd_line(True, ['anything']))
+
+
+class ChromiumAndroidTwoPortsTest(unittest.TestCase):
+ # Test that the driver's command line indeed goes through to the driver.
+ def test_options_with_two_ports(self):
+ mock_adb = MockAndroidDebugBridge(2)
+ mock_executive = MockExecutive2(run_command_fn=mock_adb.run_command)
+
+ port0 = android.AndroidPort(MockSystemHost(executive=mock_executive),
+ 'android', options=MockOptions(additional_drt_flag=['--foo=bar']))
+ port1 = android.AndroidPort(MockSystemHost(executive=mock_executive),
+ 'android', options=MockOptions(driver_name='content_shell'))
+
+ self.assertEqual(1, port0.driver_cmd_line().count('--foo=bar'))
+ self.assertEqual(0, port1.driver_cmd_line().count('--create-stdin-fifo'))
+
+
+class ChromiumAndroidDriverTombstoneTest(unittest.TestCase):
+ EXPECTED_STACKTRACE = '-rw------- 1000 1000 3604 2013-11-19 16:16 tombstone_10\ntombstone content'
+
+ def setUp(self):
+ self._mock_adb = MockAndroidDebugBridge(1)
+ self._mock_executive = MockExecutive2(run_command_fn=self._mock_adb.run_command)
+
+ self._port = android.AndroidPort(MockSystemHost(executive=self._mock_executive), 'android')
+ self._driver = android.ChromiumAndroidDriver(self._port, worker_number=0,
+ pixel_tests=True, driver_details=android.ContentShellDriverDetails(), android_devices=self._port._devices)
+
+ self._errors = []
+ self._driver._log_error = lambda msg: self._errors.append(msg)
+
+ self._warnings = []
+ self._driver._log_warning = lambda msg: self._warnings.append(msg)
+
+ # Tests that we return an empty string and log an error when no tombstones could be found.
+ def test_no_tombstones_found(self):
+ self._mock_adb.set_tombstone_output('/data/tombstones/tombstone_*: No such file or directory')
+ stacktrace = self._driver._get_last_stacktrace()
+
+ self.assertEqual(1, len(self._errors))
+ self.assertEqual('The driver crashed, but no tombstone found!', self._errors[0])
+ self.assertEqual('', stacktrace)
+
+ # Tests that an empty string will be returned if we cannot read the tombstone files.
+ def test_insufficient_tombstone_permission(self):
+ self._mock_adb.set_tombstone_output('/data/tombstones/tombstone_*: Permission denied')
+ stacktrace = self._driver._get_last_stacktrace()
+
+ self.assertEqual(1, len(self._errors))
+ self.assertEqual('The driver crashed, but we could not read the tombstones!', self._errors[0])
+ self.assertEqual('', stacktrace)
+
+ # Tests that an invalid "ls" entry logs a warning when listing the tombstone files.
+ def test_invalid_tombstone_list_entry_format(self):
+ self._mock_adb.set_tombstone_output('-rw------- 1000 1000 3604 2013-11-19 16:15 tombstone_00\n' +
+ '-- invalid entry --\n' +
+ '-rw------- 1000 1000 3604 2013-11-19 16:16 tombstone_10')
+ stacktrace = self._driver._get_last_stacktrace()
+
+ self.assertEqual(1, len(self._warnings))
+ self.assertEqual(ChromiumAndroidDriverTombstoneTest.EXPECTED_STACKTRACE, stacktrace)
+
+ # Tests the case in which we can't find any valid tombstone entries at all. The
+ # tombstone output used for the mock omits the permissions field.
+ def test_invalid_tombstone_list(self):
+ self._mock_adb.set_tombstone_output('1000 1000 3604 2013-11-19 16:15 tombstone_00\n' +
+ '1000 1000 3604 2013-11-19 16:15 tombstone_01\n' +
+ '1000 1000 3604 2013-11-19 16:15 tombstone_02')
+ stacktrace = self._driver._get_last_stacktrace()
+
+ self.assertEqual(3, len(self._warnings))
+ self.assertEqual(1, len(self._errors))
+ self.assertEqual('The driver crashed, but we could not find any valid tombstone!', self._errors[0])
+ self.assertEqual('', stacktrace)
+
+ # Tests that valid tombstone listings will return the contents of the most recent file.
+ def test_read_valid_tombstone_file(self):
+ self._mock_adb.set_tombstone_output('-rw------- 1000 1000 3604 2013-11-19 16:15 tombstone_00\n' +
+ '-rw------- 1000 1000 3604 2013-11-19 16:16 tombstone_10\n' +
+ '-rw------- 1000 1000 3604 2013-11-19 16:15 tombstone_02')
+ stacktrace = self._driver._get_last_stacktrace()
+
+ self.assertEqual(0, len(self._warnings))
+ self.assertEqual(0, len(self._errors))
+ self.assertEqual(ChromiumAndroidDriverTombstoneTest.EXPECTED_STACKTRACE, stacktrace)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/base.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/base.py
new file mode 100644
index 0000000..ea4b366
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/base.py
@@ -0,0 +1,1737 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Abstract base class of Port-specific entry points for the layout tests
+test infrastructure (the Port and Driver classes)."""
+
+import cgi
+import difflib
+import errno
+import itertools
+import json
+import logging
+import os
+import operator
+import optparse
+import re
+import sys
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ # Needed for Python < 2.7
+ from webkitpy.thirdparty.ordered_dict import OrderedDict
+
+
+from webkitpy.common import find_files
+from webkitpy.common import read_checksum_from_png
+from webkitpy.common.memoized import memoized
+from webkitpy.common.system import path
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.common.system.path import cygpath
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.webkit_finder import WebKitFinder
+from webkitpy.layout_tests.layout_package.bot_test_expectations import BotTestExpectationsFactory
+from webkitpy.layout_tests.models import test_run_results
+from webkitpy.layout_tests.models.test_configuration import TestConfiguration
+from webkitpy.layout_tests.port import config as port_config
+from webkitpy.layout_tests.port import driver
+from webkitpy.layout_tests.port import server_process
+from webkitpy.layout_tests.port.factory import PortFactory
+from webkitpy.layout_tests.servers import apache_http
+from webkitpy.layout_tests.servers import pywebsocket
+
+_log = logging.getLogger(__name__)
+
+
+# FIXME: This class should merge with WebKitPort now that Chromium behaves mostly like other webkit ports.
+class Port(object):
+ """Abstract class for Port-specific hooks for the layout_test package."""
+
+ # Subclasses override this. This should indicate the basic implementation
+ # part of the port name, e.g., 'mac', 'win', 'gtk'; there is probably (?)
+ # one unique value per class.
+
+ # FIXME: We should probably rename this to something like 'implementation_name'.
+ port_name = None
+
+ # Test names resemble unix relative paths, and use '/' as a directory separator.
+ TEST_PATH_SEPARATOR = '/'
+
+ ALL_BUILD_TYPES = ('debug', 'release')
+
+ CONTENT_SHELL_NAME = 'content_shell'
+
+ # True if the port has AAC and MP3 codecs built in.
+ PORT_HAS_AUDIO_CODECS_BUILT_IN = False
+
+ ALL_SYSTEMS = (
+ ('snowleopard', 'x86'),
+ ('lion', 'x86'),
+
+ # FIXME: We treat Retina (High-DPI) devices as if they are running
+ # a different operating system version. This isn't accurate, but will work until
+ # we need to test and support baselines across multiple O/S versions.
+ ('retina', 'x86'),
+
+ ('mountainlion', 'x86'),
+ ('mavericks', 'x86'),
+ ('xp', 'x86'),
+ ('win7', 'x86'),
+ ('lucid', 'x86'),
+ ('lucid', 'x86_64'),
+ # FIXME: Technically this should be 'arm', but adding a third architecture type breaks TestConfigurationConverter.
+ # If we need this to be 'arm' in the future, then we first have to fix TestConfigurationConverter.
+ ('icecreamsandwich', 'x86'),
+ )
+
+ ALL_BASELINE_VARIANTS = [
+ 'mac-mavericks', 'mac-mountainlion', 'mac-retina', 'mac-lion', 'mac-snowleopard',
+ 'win-win7', 'win-xp',
+ 'linux-x86_64', 'linux-x86',
+ ]
+
+ CONFIGURATION_SPECIFIER_MACROS = {
+ 'mac': ['snowleopard', 'lion', 'retina', 'mountainlion', 'mavericks'],
+ 'win': ['xp', 'win7'],
+ 'linux': ['lucid'],
+ 'android': ['icecreamsandwich'],
+ }
+
+ DEFAULT_BUILD_DIRECTORIES = ('out',)
+
+ # overridden in subclasses.
+ FALLBACK_PATHS = {}
+
+ SUPPORTED_VERSIONS = []
+
+ # URL to the build requirements page.
+ BUILD_REQUIREMENTS_URL = ''
+
+ @classmethod
+ def latest_platform_fallback_path(cls):
+ return cls.FALLBACK_PATHS[cls.SUPPORTED_VERSIONS[-1]]
+
+ @classmethod
+ def _static_build_path(cls, filesystem, build_directory, chromium_base, configuration, comps):
+ if build_directory:
+ return filesystem.join(build_directory, configuration, *comps)
+
+ hits = []
+ for directory in cls.DEFAULT_BUILD_DIRECTORIES:
+ base_dir = filesystem.join(chromium_base, directory, configuration)
+ path = filesystem.join(base_dir, *comps)
+ if filesystem.exists(path):
+ hits.append((filesystem.mtime(path), path))
+
+ if hits:
+ hits.sort(reverse=True)
+ return hits[0][1] # Return the newest file found.
+
+ # We have to default to something, so pick the last one.
+ return filesystem.join(base_dir, *comps)
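+ # For example, with chromium_base '/src' and comps ('content_shell',), this
+ # probes /src/out/<configuration>/content_shell; if several default build
+ # directories existed, the most recently modified hit would win.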
+
+ @classmethod
+ def determine_full_port_name(cls, host, options, port_name):
+ """Return a fully-specified port name that can be used to construct objects."""
+ # Subclasses will usually override this.
+ assert port_name.startswith(cls.port_name)
+ return port_name
+
+ def __init__(self, host, port_name, options=None, **kwargs):
+
+ # This value may be different from cls.port_name by having version modifiers
+ # and other fields appended to it (for example, 'qt-arm' or 'mac-wk2').
+ self._name = port_name
+
+ # These are default values that should be overridden in subclasses.
+ self._version = ''
+ self._architecture = 'x86'
+
+ # FIXME: Ideally we'd have a package-wide way to get a
+ # well-formed options object that had all of the necessary
+ # options defined on it.
+ self._options = options or optparse.Values()
+
+ self.host = host
+ self._executive = host.executive
+ self._filesystem = host.filesystem
+ self._webkit_finder = WebKitFinder(host.filesystem)
+ self._config = port_config.Config(self._executive, self._filesystem, self.port_name)
+
+ self._helper = None
+ self._http_server = None
+ self._websocket_server = None
+ self._image_differ = None
+ self._server_process_constructor = server_process.ServerProcess # overridable for testing
+ self._http_lock = None # FIXME: Why does this live on the port object?
+ self._dump_reader = None
+
+ # Python's Popen has a bug that causes any pipes opened to a
+ # process that can't be executed to be leaked. Since this
+ # code is specifically designed to tolerate exec failures
+ # to gracefully handle cases where wdiff is not installed,
+ # the bug results in a massive file descriptor leak. As a
+ # workaround, if an exec failure is ever experienced for
+ # wdiff, assume it's not available. This will leak one
+ # file descriptor but that's better than leaking each time
+ # wdiff would be run.
+ #
+ # http://mail.python.org/pipermail/python-list/
+ # 2008-August/505753.html
+ # http://bugs.python.org/issue3210
+ self._wdiff_available = None
+
+ # FIXME: prettypatch.py knows this path, why is it copied here?
+ self._pretty_patch_path = self.path_from_webkit_base("Tools", "Scripts", "webkitruby", "PrettyPatch", "prettify.rb")
+ self._pretty_patch_available = None
+
+ if not hasattr(options, 'configuration') or not options.configuration:
+ self.set_option_default('configuration', self.default_configuration())
+ self._test_configuration = None
+ self._reftest_list = {}
+ self._results_directory = None
+ self._virtual_test_suites = None
+
+ def buildbot_archives_baselines(self):
+ return True
+
+ def additional_drt_flag(self):
+ if self.driver_name() == self.CONTENT_SHELL_NAME:
+ return ['--dump-render-tree']
+ return []
+
+ def supports_per_test_timeout(self):
+ return False
+
+ def default_pixel_tests(self):
+ return True
+
+ def default_smoke_test_only(self):
+ return False
+
+ def default_timeout_ms(self):
+ timeout_ms = 6 * 1000
+ if self.get_option('configuration') == 'Debug':
+ # Debug is usually 2x-3x slower than Release.
+ return 3 * timeout_ms
+ return timeout_ms
+
+ def driver_stop_timeout(self):
+ """ Returns the amount of time in seconds to wait before killing the process in driver.stop()."""
+ # We want to wait for at least 3 seconds, but if we are really slow, we want to be slow on cleanup as
+ # well (for things like ASAN, Valgrind, etc.)
+ return 3.0 * float(self.get_option('time_out_ms', '0')) / self.default_timeout_ms()
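+ # For example, with --time-out-ms=18000 on a port whose default timeout is
+ # 6000ms, drivers get 3.0 * 18000 / 6000 = 9.0 seconds to shut down.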
+
+ def wdiff_available(self):
+ if self._wdiff_available is None:
+ self._wdiff_available = self.check_wdiff(logging=False)
+ return self._wdiff_available
+
+ def pretty_patch_available(self):
+ if self._pretty_patch_available is None:
+ self._pretty_patch_available = self.check_pretty_patch(logging=False)
+ return self._pretty_patch_available
+
+ def default_child_processes(self):
+ """Return the number of drivers to use for this port."""
+ if self.get_option('enable_sanitizer'):
+ # ASAN/MSAN/TSAN are more CPU- and memory-intensive than regular
+ # content_shell, and so we need to run fewer of them in parallel.
+ return max(int(self._executive.cpu_count() * 0.75), 1)
+ return self._executive.cpu_count()
+
+ def default_max_locked_shards(self):
+ """Return the number of "locked" shards to run in parallel (like the http tests)."""
+ max_locked_shards = int(self.default_child_processes()) / 4
+ if not max_locked_shards:
+ return 1
+ return max_locked_shards
+
+ def baseline_path(self):
+ """Return the absolute path to the directory to store new baselines in for this port."""
+ # FIXME: remove once all callers are calling either baseline_version_dir() or baseline_platform_dir()
+ return self.baseline_version_dir()
+
+ def baseline_platform_dir(self):
+ """Return the absolute path to the default (version-independent) platform-specific results."""
+ return self._filesystem.join(self.layout_tests_dir(), 'platform', self.port_name)
+
+ def baseline_version_dir(self):
+ """Return the absolute path to the platform-and-version-specific results."""
+ baseline_search_paths = self.baseline_search_path()
+ return baseline_search_paths[0]
+
+ def virtual_baseline_search_path(self, test_name):
+ suite = self.lookup_virtual_suite(test_name)
+ if not suite:
+ return None
+ return [self._filesystem.join(path, suite.name) for path in self.default_baseline_search_path()]
+
+ def baseline_search_path(self):
+ return self.get_option('additional_platform_directory', []) + self._compare_baseline() + self.default_baseline_search_path()
+
+ def default_baseline_search_path(self):
+ """Return a list of absolute paths to directories to search under for
+ baselines. The directories are searched in order."""
+ return map(self._webkit_baseline_path, self.FALLBACK_PATHS[self.version()])
+
+ @memoized
+ def _compare_baseline(self):
+ factory = PortFactory(self.host)
+ target_port = self.get_option('compare_port')
+ if target_port:
+ return factory.get(target_port).default_baseline_search_path()
+ return []
+
+ def _check_file_exists(self, path_to_file, file_description,
+ override_step=None, logging=True):
+ """Verify the file is present where expected or log an error.
+
+ Args:
+            path_to_file: The path to the file whose existence is checked.
+            file_description: The (human friendly) name or description of the file
+                you're looking for (e.g., "HTTP Server"). Used for error logging.
+            override_step: An optional string to be logged if the check fails.
+            logging: Whether or not to log the error messages."""
+ if not self._filesystem.exists(path_to_file):
+ if logging:
+ _log.error('Unable to find %s' % file_description)
+ _log.error(' at %s' % path_to_file)
+ if override_step:
+ _log.error(' %s' % override_step)
+ _log.error('')
+ return False
+ return True
+
+ def check_build(self, needs_http, printer):
+ result = True
+
+ dump_render_tree_binary_path = self._path_to_driver()
+ result = self._check_file_exists(dump_render_tree_binary_path,
+ 'test driver') and result
+ if not result and self.get_option('build'):
+ result = self._check_driver_build_up_to_date(
+ self.get_option('configuration'))
+ else:
+ _log.error('')
+
+ helper_path = self._path_to_helper()
+ if helper_path:
+ result = self._check_file_exists(helper_path,
+ 'layout test helper') and result
+
+ if self.get_option('pixel_tests'):
+ result = self.check_image_diff(
+ 'To override, invoke with --no-pixel-tests') and result
+
+ # It's okay if pretty patch and wdiff aren't available, but we will at least log messages.
+ self._pretty_patch_available = self.check_pretty_patch()
+ self._wdiff_available = self.check_wdiff()
+
+ if self._dump_reader:
+ result = self._dump_reader.check_is_functional() and result
+
+ if needs_http:
+ result = self.check_httpd() and result
+
+ return test_run_results.OK_EXIT_STATUS if result else test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
+
+ def _check_driver(self):
+ driver_path = self._path_to_driver()
+ if not self._filesystem.exists(driver_path):
+ _log.error("%s was not found at %s" % (self.driver_name(), driver_path))
+ return False
+ return True
+
+ def _check_port_build(self):
+ # Ports can override this method to do additional checks.
+ return True
+
+ def check_sys_deps(self, needs_http):
+ """If the port needs to do some runtime checks to ensure that the
+ tests can be run successfully, it should override this routine.
+ This step can be skipped with --nocheck-sys-deps.
+
+ Returns whether the system is properly configured."""
+ cmd = [self._path_to_driver(), '--check-layout-test-sys-deps']
+
+ local_error = ScriptError()
+
+ def error_handler(script_error):
+ local_error.exit_code = script_error.exit_code
+
+ output = self._executive.run_command(cmd, error_handler=error_handler)
+ if local_error.exit_code:
+ _log.error('System dependencies check failed.')
+ _log.error('To override, invoke with --nocheck-sys-deps')
+ _log.error('')
+ _log.error(output)
+            if self.BUILD_REQUIREMENTS_URL != '':
+ _log.error('')
+ _log.error('For complete build requirements, please see:')
+ _log.error(self.BUILD_REQUIREMENTS_URL)
+ return test_run_results.SYS_DEPS_EXIT_STATUS
+ return test_run_results.OK_EXIT_STATUS
+
+ def check_image_diff(self, override_step=None, logging=True):
+ """This routine is used to check whether image_diff binary exists."""
+ image_diff_path = self._path_to_image_diff()
+ if not self._filesystem.exists(image_diff_path):
+ _log.error("image_diff was not found at %s" % image_diff_path)
+ return False
+ return True
+
+ def check_pretty_patch(self, logging=True):
+ """Checks whether we can use the PrettyPatch ruby script."""
+ try:
+ _ = self._executive.run_command(['ruby', '--version'])
+ except OSError, e:
+ if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
+ if logging:
+ _log.warning("Ruby is not installed; can't generate pretty patches.")
+ _log.warning('')
+ return False
+
+ if not self._filesystem.exists(self._pretty_patch_path):
+ if logging:
+ _log.warning("Unable to find %s; can't generate pretty patches." % self._pretty_patch_path)
+ _log.warning('')
+ return False
+
+ return True
+
+ def check_wdiff(self, logging=True):
+ if not self._path_to_wdiff():
+ # Don't need to log here since this is the port choosing not to use wdiff.
+ return False
+
+ try:
+ _ = self._executive.run_command([self._path_to_wdiff(), '--help'])
+ except OSError:
+ if logging:
+ message = self._wdiff_missing_message()
+ if message:
+ for line in message.splitlines():
+ _log.warning(' ' + line)
+ _log.warning('')
+ return False
+
+ return True
+
+ def _wdiff_missing_message(self):
+ return 'wdiff is not installed; please install it to generate word-by-word diffs.'
+
+ def check_httpd(self):
+ httpd_path = self.path_to_apache()
+ try:
+ server_name = self._filesystem.basename(httpd_path)
+ env = self.setup_environ_for_server(server_name)
+ if self._executive.run_command([httpd_path, "-v"], env=env, return_exit_code=True) != 0:
+ _log.error("httpd seems broken. Cannot run http tests.")
+ return False
+ return True
+ except OSError:
+ _log.error("No httpd found. Cannot run http tests.")
+ return False
+
+ def do_text_results_differ(self, expected_text, actual_text):
+ return expected_text != actual_text
+
+ def do_audio_results_differ(self, expected_audio, actual_audio):
+ return expected_audio != actual_audio
+
+ def diff_image(self, expected_contents, actual_contents):
+ """Compare two images and return a tuple of an image diff, and an error string.
+
+        If an error occurs (e.g., image_diff isn't found or it crashes), we log an error and return an error string instead of a diff.
+ """
+ # If only one of them exists, return that one.
+ if not actual_contents and not expected_contents:
+ return (None, None)
+ if not actual_contents:
+ return (expected_contents, None)
+ if not expected_contents:
+ return (actual_contents, None)
+
+ tempdir = self._filesystem.mkdtemp()
+
+ expected_filename = self._filesystem.join(str(tempdir), "expected.png")
+ self._filesystem.write_binary_file(expected_filename, expected_contents)
+
+ actual_filename = self._filesystem.join(str(tempdir), "actual.png")
+ self._filesystem.write_binary_file(actual_filename, actual_contents)
+
+ diff_filename = self._filesystem.join(str(tempdir), "diff.png")
+
+ # image_diff needs native win paths as arguments, so we need to convert them if running under cygwin.
+ native_expected_filename = self._convert_path(expected_filename)
+ native_actual_filename = self._convert_path(actual_filename)
+ native_diff_filename = self._convert_path(diff_filename)
+
+ executable = self._path_to_image_diff()
+ # Note that although we are handed 'old', 'new', image_diff wants 'new', 'old'.
+        command = [executable, '--diff', native_actual_filename, native_expected_filename, native_diff_filename]
+
+ result = None
+ err_str = None
+ try:
+            exit_code = self._executive.run_command(command, return_exit_code=True)
+ if exit_code == 0:
+ # The images are the same.
+ result = None
+ elif exit_code == 1:
+ result = self._filesystem.read_binary_file(native_diff_filename)
+ else:
+ err_str = "Image diff returned an exit code of %s. See http://crbug.com/278596" % exit_code
+ except OSError, e:
+ err_str = 'error running image diff: %s' % str(e)
+ finally:
+ self._filesystem.rmtree(str(tempdir))
+
+ return (result, err_str or None)
+
+ def diff_text(self, expected_text, actual_text, expected_filename, actual_filename):
+ """Returns a string containing the diff of the two text strings
+ in 'unified diff' format."""
+
+ # The filenames show up in the diff output, make sure they're
+ # raw bytes and not unicode, so that they don't trigger join()
+ # trying to decode the input.
+ def to_raw_bytes(string_value):
+ if isinstance(string_value, unicode):
+ return string_value.encode('utf-8')
+ return string_value
+ expected_filename = to_raw_bytes(expected_filename)
+ actual_filename = to_raw_bytes(actual_filename)
+ diff = difflib.unified_diff(expected_text.splitlines(True),
+ actual_text.splitlines(True),
+ expected_filename,
+ actual_filename)
+
+ # The diff generated by the difflib is incorrect if one of the files
+ # does not have a newline at the end of the file and it is present in
+ # the diff. Relevant Python issue: http://bugs.python.org/issue2142
+ def diff_fixup(diff):
+ for line in diff:
+ yield line
+ if not line.endswith('\n'):
+ yield '\n\ No newline at end of file\n'
+
+ return ''.join(diff_fixup(diff))
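+        # A sketch of the fixup (hypothetical input): if the final diff line is
+        # '+last line' with no trailing newline, diff_fixup() follows it with
+        # '\ No newline at end of file', mirroring GNU diff's marker.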
+
+ def driver_name(self):
+ if self.get_option('driver_name'):
+ return self.get_option('driver_name')
+ return self.CONTENT_SHELL_NAME
+
+ def expected_baselines_by_extension(self, test_name):
+ """Returns a dict mapping baseline suffix to relative path for each baseline in
+ a test. For reftests, it returns ".==" or ".!=" instead of the suffix."""
+ # FIXME: The name similarity between this and expected_baselines() below, is unfortunate.
+ # We should probably rename them both.
+ baseline_dict = {}
+ reference_files = self.reference_files(test_name)
+ if reference_files:
+ # FIXME: How should this handle more than one type of reftest?
+ baseline_dict['.' + reference_files[0][0]] = self.relative_test_filename(reference_files[0][1])
+
+ for extension in self.baseline_extensions():
+ path = self.expected_filename(test_name, extension, return_default=False)
+ baseline_dict[extension] = self.relative_test_filename(path) if path else path
+
+ return baseline_dict
+
+ def baseline_extensions(self):
+ """Returns a tuple of all of the non-reftest baseline extensions we use. The extensions include the leading '.'."""
+ return ('.wav', '.txt', '.png')
+
+ def expected_baselines(self, test_name, suffix, all_baselines=False):
+ """Given a test name, finds where the baseline results are located.
+
+ Args:
+ test_name: name of test file (usually a relative path under LayoutTests/)
+ suffix: file suffix of the expected results, including dot; e.g.
+ '.txt' or '.png'. This should not be None, but may be an empty
+ string.
+ all_baselines: If True, return an ordered list of all baseline paths
+ for the given platform. If False, return only the first one.
+        Returns:
+ a list of ( platform_dir, results_filename ), where
+ platform_dir - abs path to the top of the results tree (or test
+ tree)
+ results_filename - relative path from top of tree to the results
+ file
+ (port.join() of the two gives you the full path to the file,
+ unless None was returned.)
+ Return values will be in the format appropriate for the current
+ platform (e.g., "\\" for path separators on Windows). If the results
+ file is not found, then None will be returned for the directory,
+ but the expected relative pathname will still be returned.
+
+ This routine is generic but lives here since it is used in
+ conjunction with the other baseline and filename routines that are
+ platform specific.
+ """
+ baseline_filename = self._filesystem.splitext(test_name)[0] + '-expected' + suffix
+ baseline_search_path = self.baseline_search_path()
+
+ baselines = []
+ for platform_dir in baseline_search_path:
+ if self._filesystem.exists(self._filesystem.join(platform_dir, baseline_filename)):
+ baselines.append((platform_dir, baseline_filename))
+
+ if not all_baselines and baselines:
+ return baselines
+
+ # If it wasn't found in a platform directory, return the expected
+ # result in the test directory, even if no such file actually exists.
+ platform_dir = self.layout_tests_dir()
+ if self._filesystem.exists(self._filesystem.join(platform_dir, baseline_filename)):
+ baselines.append((platform_dir, baseline_filename))
+
+ if baselines:
+ return baselines
+
+ return [(None, baseline_filename)]
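+        # Example return values (hypothetical paths):
+        #   expected_baselines('fast/dom/foo.html', '.txt') might return
+        #   [('/abs/LayoutTests/platform/linux', 'fast/dom/foo-expected.txt')],
+        #   or [(None, 'fast/dom/foo-expected.txt')] if no baseline exists yet.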
+
+ def expected_filename(self, test_name, suffix, return_default=True):
+ """Given a test name, returns an absolute path to its expected results.
+
+ If no expected results are found in any of the searched directories,
+ the directory in which the test itself is located will be returned.
+ The return value is in the format appropriate for the platform
+ (e.g., "\\" for path separators on windows).
+
+ Args:
+ test_name: name of test file (usually a relative path under LayoutTests/)
+ suffix: file suffix of the expected results, including dot; e.g. '.txt'
+ or '.png'. This should not be None, but may be an empty string.
+ return_default: if True, returns the path to the generic expectation if nothing
+ else is found; if False, returns None.
+
+ This routine is generic but is implemented here to live alongside
+ the other baseline and filename manipulation routines.
+ """
+ # FIXME: The [0] here is very mysterious, as is the destructured return.
+ platform_dir, baseline_filename = self.expected_baselines(test_name, suffix)[0]
+ if platform_dir:
+ return self._filesystem.join(platform_dir, baseline_filename)
+
+ actual_test_name = self.lookup_virtual_test_base(test_name)
+ if actual_test_name:
+ return self.expected_filename(actual_test_name, suffix)
+
+ if return_default:
+ return self._filesystem.join(self.layout_tests_dir(), baseline_filename)
+ return None
+
+ def expected_checksum(self, test_name):
+ """Returns the checksum of the image we expect the test to produce, or None if it is a text-only test."""
+ png_path = self.expected_filename(test_name, '.png')
+
+ if self._filesystem.exists(png_path):
+ with self._filesystem.open_binary_file_for_reading(png_path) as filehandle:
+ return read_checksum_from_png.read_checksum(filehandle)
+
+ return None
+
+ def expected_image(self, test_name):
+ """Returns the image we expect the test to produce."""
+ baseline_path = self.expected_filename(test_name, '.png')
+ if not self._filesystem.exists(baseline_path):
+ return None
+ return self._filesystem.read_binary_file(baseline_path)
+
+ def expected_audio(self, test_name):
+ baseline_path = self.expected_filename(test_name, '.wav')
+ if not self._filesystem.exists(baseline_path):
+ return None
+ return self._filesystem.read_binary_file(baseline_path)
+
+ def expected_text(self, test_name):
+ """Returns the text output we expect the test to produce, or None
+ if we don't expect there to be any text output.
+ End-of-line characters are normalized to '\n'."""
+ # FIXME: DRT output is actually utf-8, but since we don't decode the
+ # output from DRT (instead treating it as a binary string), we read the
+ # baselines as a binary string, too.
+ baseline_path = self.expected_filename(test_name, '.txt')
+ if not self._filesystem.exists(baseline_path):
+ return None
+ text = self._filesystem.read_binary_file(baseline_path)
+ return text.replace("\r\n", "\n")
+
+ def _get_reftest_list(self, test_name):
+ dirname = self._filesystem.join(self.layout_tests_dir(), self._filesystem.dirname(test_name))
+ if dirname not in self._reftest_list:
+ self._reftest_list[dirname] = Port._parse_reftest_list(self._filesystem, dirname)
+ return self._reftest_list[dirname]
+
+ @staticmethod
+ def _parse_reftest_list(filesystem, test_dirpath):
+ reftest_list_path = filesystem.join(test_dirpath, 'reftest.list')
+ if not filesystem.isfile(reftest_list_path):
+ return None
+ reftest_list_file = filesystem.read_text_file(reftest_list_path)
+
+ parsed_list = {}
+ for line in reftest_list_file.split('\n'):
+ line = re.sub('#.+$', '', line)
+ split_line = line.split()
+ if len(split_line) == 4:
+ # FIXME: Probably one of mozilla's extensions in the reftest.list format. Do we need to support this?
+ _log.warning("unsupported reftest.list line '%s' in %s" % (line, reftest_list_path))
+ continue
+ if len(split_line) < 3:
+ continue
+ expectation_type, test_file, ref_file = split_line
+ parsed_list.setdefault(filesystem.join(test_dirpath, test_file), []).append((expectation_type, filesystem.join(test_dirpath, ref_file)))
+ return parsed_list
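+        # Example (hypothetical reftest.list line): '== foo.html foo-ref.html'
+        # in directory <dir> yields
+        #   {'<dir>/foo.html': [('==', '<dir>/foo-ref.html')]}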
+
+ def reference_files(self, test_name):
+ """Return a list of expectation (== or !=) and filename pairs"""
+
+ reftest_list = self._get_reftest_list(test_name)
+ if not reftest_list:
+ reftest_list = []
+ for expectation, prefix in (('==', ''), ('!=', '-mismatch')):
+                for extension in Port._supported_file_extensions:
+                    path = self.expected_filename(test_name, prefix + extension)
+ if self._filesystem.exists(path):
+ reftest_list.append((expectation, path))
+ return reftest_list
+
+ return reftest_list.get(self._filesystem.join(self.layout_tests_dir(), test_name), []) # pylint: disable=E1103
+
+ def tests(self, paths):
+ """Return the list of tests found matching paths."""
+ tests = self._real_tests(paths)
+
+ suites = self.virtual_test_suites()
+ if paths:
+ tests.extend(self._virtual_tests_matching_paths(paths, suites))
+ else:
+ tests.extend(self._all_virtual_tests(suites))
+ return tests
+
+ def _real_tests(self, paths):
+ # When collecting test cases, skip these directories
+ skipped_directories = set(['.svn', '_svn', 'platform', 'resources', 'support', 'script-tests', 'reference', 'reftest'])
+ files = find_files.find(self._filesystem, self.layout_tests_dir(), paths, skipped_directories, Port.is_test_file, self.test_key)
+ return [self.relative_test_filename(f) for f in files]
+
+ # When collecting test cases, we include any file with these extensions.
+ _supported_file_extensions = set(['.html', '.xml', '.xhtml', '.xht', '.pl',
+ '.htm', '.php', '.svg', '.mht', '.pdf'])
+
+ @staticmethod
+ # If any changes are made here be sure to update the isUsedInReftest method in old-run-webkit-tests as well.
+ def is_reference_html_file(filesystem, dirname, filename):
+ if filename.startswith('ref-') or filename.startswith('notref-'):
+ return True
+        filename_without_ext, unused = filesystem.splitext(filename)
+        for suffix in ['-expected', '-expected-mismatch', '-ref', '-notref']:
+            if filename_without_ext.endswith(suffix):
+ return True
+ return False
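+        # For example (hypothetical filenames): 'ref-foo.html' and
+        # 'foo-expected.html' are treated as references; 'foo.html' is not.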
+
+ @staticmethod
+ def _has_supported_extension(filesystem, filename):
+ """Return true if filename is one of the file extensions we want to run a test on."""
+ extension = filesystem.splitext(filename)[1]
+ return extension in Port._supported_file_extensions
+
+ @staticmethod
+ def is_test_file(filesystem, dirname, filename):
+ return Port._has_supported_extension(filesystem, filename) and not Port.is_reference_html_file(filesystem, dirname, filename)
+
+ ALL_TEST_TYPES = ['audio', 'harness', 'pixel', 'ref', 'text', 'unknown']
+
+ def test_type(self, test_name):
+ fs = self._filesystem
+ if fs.exists(self.expected_filename(test_name, '.png')):
+ return 'pixel'
+ if fs.exists(self.expected_filename(test_name, '.wav')):
+ return 'audio'
+ if self.reference_files(test_name):
+ return 'ref'
+ txt = self.expected_text(test_name)
+ if txt:
+ if 'layer at (0,0) size 800x600' in txt:
+ return 'pixel'
+ for line in txt.splitlines():
+ if line.startswith('FAIL') or line.startswith('TIMEOUT') or line.startswith('PASS'):
+ return 'harness'
+ return 'text'
+ return 'unknown'
+
+ def test_key(self, test_name):
+ """Turns a test name into a list with two sublists, the natural key of the
+ dirname, and the natural key of the basename.
+
+ This can be used when sorting paths so that files in a directory.
+ directory are kept together rather than being mixed in with files in
+ subdirectories."""
+ dirname, basename = self.split_test(test_name)
+ return (self._natural_sort_key(dirname + self.TEST_PATH_SEPARATOR), self._natural_sort_key(basename))
+
+ def _natural_sort_key(self, string_to_split):
+ """ Turns a string into a list of string and number chunks, i.e. "z23a" -> ["z", 23, "a"]
+
+ This can be used to implement "natural sort" order. See:
+ http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
+ http://nedbatchelder.com/blog/200712.html#e20071211T054956
+ """
+ def tryint(val):
+ try:
+ return int(val)
+ except ValueError:
+ return val
+
+ return [tryint(chunk) for chunk in re.split('(\d+)', string_to_split)]
+
+ def test_dirs(self):
+ """Returns the list of top-level test directories."""
+ layout_tests_dir = self.layout_tests_dir()
+ return filter(lambda x: self._filesystem.isdir(self._filesystem.join(layout_tests_dir, x)),
+ self._filesystem.listdir(layout_tests_dir))
+
+ @memoized
+ def test_isfile(self, test_name):
+ """Return True if the test name refers to a directory of tests."""
+ # Used by test_expectations.py to apply rules to whole directories.
+ if self._filesystem.isfile(self.abspath_for_test(test_name)):
+ return True
+ base = self.lookup_virtual_test_base(test_name)
+ return base and self._filesystem.isfile(self.abspath_for_test(base))
+
+ @memoized
+ def test_isdir(self, test_name):
+ """Return True if the test name refers to a directory of tests."""
+ # Used by test_expectations.py to apply rules to whole directories.
+ if self._filesystem.isdir(self.abspath_for_test(test_name)):
+ return True
+ base = self.lookup_virtual_test_base(test_name)
+ return base and self._filesystem.isdir(self.abspath_for_test(base))
+
+ @memoized
+ def test_exists(self, test_name):
+ """Return True if the test name refers to an existing test or baseline."""
+ # Used by test_expectations.py to determine if an entry refers to a
+ # valid test and by printing.py to determine if baselines exist.
+ return self.test_isfile(test_name) or self.test_isdir(test_name)
+
+ def split_test(self, test_name):
+ """Splits a test name into the 'directory' part and the 'basename' part."""
+ index = test_name.rfind(self.TEST_PATH_SEPARATOR)
+ if index < 1:
+ return ('', test_name)
+ return (test_name[0:index], test_name[index:])
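+        # Example: split_test('fast/dom/foo.html') returns ('fast/dom', '/foo.html');
+        # the basename keeps the leading separator, so the two halves
+        # concatenate back to the original test name.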
+
+ def normalize_test_name(self, test_name):
+ """Returns a normalized version of the test name or test directory."""
+ if test_name.endswith('/'):
+ return test_name
+ if self.test_isdir(test_name):
+ return test_name + '/'
+ return test_name
+
+ def driver_cmd_line(self):
+ """Prints the DRT command line that will be used."""
+ driver = self.create_driver(0)
+ return driver.cmd_line(self.get_option('pixel_tests'), [])
+
+ def update_baseline(self, baseline_path, data):
+ """Updates the baseline for a test.
+
+ Args:
+ baseline_path: the actual path to use for baseline, not the path to
+ the test. This function is used to update either generic or
+ platform-specific baselines, but we can't infer which here.
+ data: contents of the baseline.
+ """
+ self._filesystem.write_binary_file(baseline_path, data)
+
+ # FIXME: update callers to create a finder and call it instead of these next five routines (which should be protected).
+ def webkit_base(self):
+ return self._webkit_finder.webkit_base()
+
+ def path_from_webkit_base(self, *comps):
+ return self._webkit_finder.path_from_webkit_base(*comps)
+
+ def path_from_chromium_base(self, *comps):
+ return self._webkit_finder.path_from_chromium_base(*comps)
+
+ def path_to_script(self, script_name):
+ return self._webkit_finder.path_to_script(script_name)
+
+ def layout_tests_dir(self):
+ return self._webkit_finder.layout_tests_dir()
+
+ def perf_tests_dir(self):
+ return self._webkit_finder.perf_tests_dir()
+
+ def skipped_layout_tests(self, test_list):
+ """Returns tests skipped outside of the TestExpectations files."""
+ tests = set(self._skipped_tests_for_unsupported_features(test_list))
+
+ # We explicitly skip any tests in LayoutTests/w3c if need be to avoid running any tests
+ # left over from the old DEPS-pulled repos.
+ # We also will warn at the end of the test run if these directories still exist.
+ #
+ # TODO(dpranke): Remove this check after 1/1/2015 and let people deal with the warnings.
+ # Remove the check in controllers/manager.py as well.
+ if self._filesystem.isdir(self._filesystem.join(self.layout_tests_dir(), 'w3c')):
+ tests.add('w3c')
+
+ return tests
+
+ def _tests_from_skipped_file_contents(self, skipped_file_contents):
+ tests_to_skip = []
+ for line in skipped_file_contents.split('\n'):
+ line = line.strip()
+ line = line.rstrip('/') # Best to normalize directory names to not include the trailing slash.
+ if line.startswith('#') or not len(line):
+ continue
+ tests_to_skip.append(line)
+ return tests_to_skip
+
+ def _expectations_from_skipped_files(self, skipped_file_paths):
+ tests_to_skip = []
+ for search_path in skipped_file_paths:
+ filename = self._filesystem.join(self._webkit_baseline_path(search_path), "Skipped")
+ if not self._filesystem.exists(filename):
+ _log.debug("Skipped does not exist: %s" % filename)
+ continue
+ _log.debug("Using Skipped file: %s" % filename)
+ skipped_file_contents = self._filesystem.read_text_file(filename)
+ tests_to_skip.extend(self._tests_from_skipped_file_contents(skipped_file_contents))
+ return tests_to_skip
+
+ @memoized
+ def skipped_perf_tests(self):
+ return self._expectations_from_skipped_files([self.perf_tests_dir()])
+
+ def skips_perf_test(self, test_name):
+ for test_or_category in self.skipped_perf_tests():
+ if test_or_category == test_name:
+ return True
+ category = self._filesystem.join(self.perf_tests_dir(), test_or_category)
+ if self._filesystem.isdir(category) and test_name.startswith(test_or_category):
+ return True
+ return False
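+        # For example (hypothetical Skipped entry): 'Bindings' skips a test named
+        # exactly 'Bindings' and, if it names a directory, 'Bindings/foo.html' too.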
+
+ def is_chromium(self):
+ return True
+
+ def name(self):
+ """Returns a name that uniquely identifies this particular type of port
+ (e.g., "mac-snowleopard" or "linux-x86_x64" and can be passed
+ to factory.get() to instantiate the port."""
+ return self._name
+
+ def operating_system(self):
+ # Subclasses should override this default implementation.
+ return 'mac'
+
+ def version(self):
+ """Returns a string indicating the version of a given platform, e.g.
+ 'leopard' or 'xp'.
+
+ This is used to help identify the exact port when parsing test
+ expectations, determining search paths, and logging information."""
+ return self._version
+
+ def architecture(self):
+ return self._architecture
+
+ def get_option(self, name, default_value=None):
+ return getattr(self._options, name, default_value)
+
+ def set_option_default(self, name, default_value):
+ return self._options.ensure_value(name, default_value)
+
+ @memoized
+ def path_to_generic_test_expectations_file(self):
+ return self._filesystem.join(self.layout_tests_dir(), 'TestExpectations')
+
+ def relative_test_filename(self, filename):
+ """Returns a test_name a relative unix-style path for a filename under the LayoutTests
+ directory. Ports may legitimately return abspaths here if no relpath makes sense."""
+ # Ports that run on windows need to override this method to deal with
+ # filenames with backslashes in them.
+ if filename.startswith(self.layout_tests_dir()):
+ return self.host.filesystem.relpath(filename, self.layout_tests_dir())
+ else:
+ return self.host.filesystem.abspath(filename)
+
+ @memoized
+ def abspath_for_test(self, test_name):
+ """Returns the full path to the file for a given test name. This is the
+ inverse of relative_test_filename()."""
+ return self._filesystem.join(self.layout_tests_dir(), test_name)
+
+ def results_directory(self):
+ """Absolute path to the place to store the test results (uses --results-directory)."""
+ if not self._results_directory:
+ option_val = self.get_option('results_directory') or self.default_results_directory()
+ self._results_directory = self._filesystem.abspath(option_val)
+ return self._results_directory
+
+ def perf_results_directory(self):
+ return self._build_path()
+
+ def default_results_directory(self):
+ """Absolute path to the default place to store the test results."""
+ try:
+ return self.path_from_chromium_base('webkit', self.get_option('configuration'), 'layout-test-results')
+ except AssertionError:
+ return self._build_path('layout-test-results')
+
+ def setup_test_run(self):
+ """Perform port-specific work at the beginning of a test run."""
+ # Delete the disk cache if any to ensure a clean test run.
+ dump_render_tree_binary_path = self._path_to_driver()
+ cachedir = self._filesystem.dirname(dump_render_tree_binary_path)
+ cachedir = self._filesystem.join(cachedir, "cache")
+ if self._filesystem.exists(cachedir):
+ self._filesystem.rmtree(cachedir)
+
+ if self._dump_reader:
+ self._filesystem.maybe_make_directory(self._dump_reader.crash_dumps_directory())
+
+ def num_workers(self, requested_num_workers):
+ """Returns the number of available workers (possibly less than the number requested)."""
+ return requested_num_workers
+
+ def clean_up_test_run(self):
+ """Perform port-specific work at the end of a test run."""
+ if self._image_differ:
+ self._image_differ.stop()
+ self._image_differ = None
+
+ # FIXME: os.environ access should be moved to onto a common/system class to be more easily mockable.
+ def _value_or_default_from_environ(self, name, default=None):
+ if name in os.environ:
+ return os.environ[name]
+ return default
+
+ def _copy_value_from_environ_if_set(self, clean_env, name):
+ if name in os.environ:
+ clean_env[name] = os.environ[name]
+
+ def setup_environ_for_server(self, server_name=None):
+ # We intentionally copy only a subset of os.environ when
+ # launching subprocesses to ensure consistent test results.
+ clean_env = {
+ 'LOCAL_RESOURCE_ROOT': self.layout_tests_dir(), # FIXME: Is this used?
+ }
+ variables_to_copy = [
+ 'WEBKIT_TESTFONTS', # FIXME: Is this still used?
+ 'WEBKITOUTPUTDIR', # FIXME: Is this still used?
+ 'CHROME_DEVEL_SANDBOX',
+ 'CHROME_IPC_LOGGING',
+ 'ASAN_OPTIONS',
+ 'TSAN_OPTIONS',
+ 'MSAN_OPTIONS',
+ 'LSAN_OPTIONS',
+ 'UBSAN_OPTIONS',
+ 'VALGRIND_LIB',
+ 'VALGRIND_LIB_INNER',
+ ]
+ if self.host.platform.is_linux() or self.host.platform.is_freebsd():
+ variables_to_copy += [
+ 'XAUTHORITY',
+ 'HOME',
+ 'LANG',
+ 'LD_LIBRARY_PATH',
+ 'DBUS_SESSION_BUS_ADDRESS',
+ 'XDG_DATA_DIRS',
+ ]
+ clean_env['DISPLAY'] = self._value_or_default_from_environ('DISPLAY', ':1')
+ if self.host.platform.is_mac():
+ clean_env['DYLD_LIBRARY_PATH'] = self._build_path()
+ clean_env['DYLD_FRAMEWORK_PATH'] = self._build_path()
+ variables_to_copy += [
+ 'HOME',
+ ]
+ if self.host.platform.is_win():
+ variables_to_copy += [
+ 'PATH',
+ 'GYP_DEFINES', # Required to locate win sdk.
+ ]
+ if self.host.platform.is_cygwin():
+ variables_to_copy += [
+ 'HOMEDRIVE',
+ 'HOMEPATH',
+ '_NT_SYMBOL_PATH',
+ ]
+
+ for variable in variables_to_copy:
+ self._copy_value_from_environ_if_set(clean_env, variable)
+
+ for string_variable in self.get_option('additional_env_var', []):
+ [name, value] = string_variable.split('=', 1)
+ clean_env[name] = value
+
+ return clean_env
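+        # For example (hypothetical flag): --additional-env-var=LANG=C splits on
+        # the first '=' and results in clean_env['LANG'] == 'C'.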
+
+ def show_results_html_file(self, results_filename):
+ """This routine should display the HTML file pointed at by
+ results_filename in a users' browser."""
+ return self.host.user.open_url(path.abspath_to_uri(self.host.platform, results_filename))
+
+ def create_driver(self, worker_number, no_timeout=False):
+ """Return a newly created Driver subclass for starting/stopping the test driver."""
+ return self._driver_class()(self, worker_number, pixel_tests=self.get_option('pixel_tests'), no_timeout=no_timeout)
+
+ def start_helper(self):
+ """If a port needs to reconfigure graphics settings or do other
+ things to ensure a known test configuration, it should override this
+ method."""
+ helper_path = self._path_to_helper()
+ if helper_path:
+ _log.debug("Starting layout helper %s" % helper_path)
+ # Note: Not thread safe: http://bugs.python.org/issue2320
+ self._helper = self._executive.popen([helper_path],
+ stdin=self._executive.PIPE, stdout=self._executive.PIPE, stderr=None)
+ is_ready = self._helper.stdout.readline()
+ if not is_ready.startswith('ready'):
+ _log.error("layout_test_helper failed to be ready")
+
+ def requires_http_server(self):
+ """Does the port require an HTTP server for running tests? This could
+ be the case when the tests aren't run on the host platform."""
+ return False
+
+ def start_http_server(self, additional_dirs, number_of_drivers):
+ """Start a web server. Raise an error if it can't start or is already running.
+
+ Ports can stub this out if they don't need a web server to be running."""
+ assert not self._http_server, 'Already running an http server.'
+
+ server = apache_http.ApacheHTTP(self, self.results_directory(),
+ additional_dirs=additional_dirs,
+ number_of_servers=(number_of_drivers * 4))
+ server.start()
+ self._http_server = server
+
+ def start_websocket_server(self):
+ """Start a web server. Raise an error if it can't start or is already running.
+
+ Ports can stub this out if they don't need a websocket server to be running."""
+ assert not self._websocket_server, 'Already running a websocket server.'
+
+ server = pywebsocket.PyWebSocket(self, self.results_directory())
+ server.start()
+ self._websocket_server = server
+
+ def http_server_supports_ipv6(self):
+ # Apache < 2.4 on win32 does not support IPv6, nor does cygwin apache.
+ if self.host.platform.is_cygwin() or self.host.platform.is_win():
+ return False
+ return True
+
+ def stop_helper(self):
+ """Shut down the test helper if it is running. Do nothing if
+ it isn't, or it isn't available. If a port overrides start_helper()
+ it must override this routine as well."""
+ if self._helper:
+ _log.debug("Stopping layout test helper")
+ try:
+ self._helper.stdin.write("x\n")
+ self._helper.stdin.close()
+ self._helper.wait()
+ except IOError, e:
+ pass
+ finally:
+ self._helper = None
+
+ def stop_http_server(self):
+ """Shut down the http server if it is running. Do nothing if it isn't."""
+ if self._http_server:
+ self._http_server.stop()
+ self._http_server = None
+
+ def stop_websocket_server(self):
+ """Shut down the websocket server if it is running. Do nothing if it isn't."""
+ if self._websocket_server:
+ self._websocket_server.stop()
+ self._websocket_server = None
+
+ #
+ # TEST EXPECTATION-RELATED METHODS
+ #
+
+ def test_configuration(self):
+ """Returns the current TestConfiguration for the port."""
+ if not self._test_configuration:
+ self._test_configuration = TestConfiguration(self._version, self._architecture, self._options.configuration.lower())
+ return self._test_configuration
+
+ # FIXME: Belongs on a Platform object.
+ @memoized
+ def all_test_configurations(self):
+ """Returns a list of TestConfiguration instances, representing all available
+ test configurations for this port."""
+ return self._generate_all_test_configurations()
+
+ # FIXME: Belongs on a Platform object.
+ def configuration_specifier_macros(self):
+ """Ports may provide a way to abbreviate configuration specifiers to conveniently
+ refer to them as one term or alias specific values to more generic ones. For example:
+
+ (xp, vista, win7) -> win # Abbreviate all Windows versions into one namesake.
+ (lucid) -> linux # Change specific name of the Linux distro to a more generic term.
+
+ Returns a dictionary, each key representing a macro term ('win', for example),
+ and value being a list of valid configuration specifiers (such as ['xp', 'vista', 'win7'])."""
+ return self.CONFIGURATION_SPECIFIER_MACROS
+
+ def all_baseline_variants(self):
+ """Returns a list of platform names sufficient to cover all the baselines.
+
+ The list should be sorted so that a later platform will reuse
+ an earlier platform's baselines if they are the same (e.g.,
+ 'snowleopard' should precede 'leopard')."""
+ return self.ALL_BASELINE_VARIANTS
+
+ def _generate_all_test_configurations(self):
+ """Returns a sequence of the TestConfigurations the port supports."""
+ # By default, we assume we want to test every graphics type in
+ # every configuration on every system.
+ test_configurations = []
+ for version, architecture in self.ALL_SYSTEMS:
+ for build_type in self.ALL_BUILD_TYPES:
+ test_configurations.append(TestConfiguration(version, architecture, build_type))
+ return test_configurations
+
+ try_builder_names = frozenset([
+ 'linux_layout',
+ 'mac_layout',
+ 'win_layout',
+ 'linux_layout_rel',
+ 'mac_layout_rel',
+ 'win_layout_rel',
+ ])
+
+ def warn_if_bug_missing_in_test_expectations(self):
+ return True
+
+ def _port_specific_expectations_files(self):
+ paths = []
+ paths.append(self.path_from_chromium_base('skia', 'skia_test_expectations.txt'))
+ paths.append(self._filesystem.join(self.layout_tests_dir(), 'NeverFixTests'))
+ paths.append(self._filesystem.join(self.layout_tests_dir(), 'StaleTestExpectations'))
+ paths.append(self._filesystem.join(self.layout_tests_dir(), 'SlowTests'))
+ paths.append(self._filesystem.join(self.layout_tests_dir(), 'FlakyTests'))
+
+ return paths
+
+ def expectations_dict(self):
+ """Returns an OrderedDict of name -> expectations strings.
+ The names are expected to be (but not required to be) paths in the filesystem.
+        If the name is a path, the file can be considered updatable for things like rebaselining,
+        so don't use a path-like name for an entry that isn't actually a file.
+ Generally speaking the ordering should be files in the filesystem in cascade order
+ (TestExpectations followed by Skipped, if the port honors both formats),
+ then any built-in expectations (e.g., from compile-time exclusions), then --additional-expectations options."""
+ # FIXME: rename this to test_expectations() once all the callers are updated to know about the ordered dict.
+ expectations = OrderedDict()
+
+ for path in self.expectations_files():
+ if self._filesystem.exists(path):
+ expectations[path] = self._filesystem.read_text_file(path)
+
+ for path in self.get_option('additional_expectations', []):
+ expanded_path = self._filesystem.expanduser(path)
+ if self._filesystem.exists(expanded_path):
+ _log.debug("reading additional_expectations from path '%s'" % path)
+ expectations[path] = self._filesystem.read_text_file(expanded_path)
+ else:
+ _log.warning("additional_expectations path '%s' does not exist" % path)
+ return expectations
+
+ def bot_expectations(self):
+ if not self.get_option('ignore_flaky_tests'):
+ return {}
+
+ full_port_name = self.determine_full_port_name(self.host, self._options, self.port_name)
+ builder_category = self.get_option('ignore_builder_category', 'layout')
+ factory = BotTestExpectationsFactory()
+        # FIXME: This only grabs the release builder's flakiness data. If we're
+        # running debug, we should grab the debug builder's data instead.
+ expectations = factory.expectations_for_port(full_port_name, builder_category)
+
+ if not expectations:
+ return {}
+
+ ignore_mode = self.get_option('ignore_flaky_tests')
+ if ignore_mode == 'very-flaky' or ignore_mode == 'maybe-flaky':
+ return expectations.flakes_by_path(ignore_mode == 'very-flaky')
+ if ignore_mode == 'unexpected':
+ return expectations.unexpected_results_by_path()
+ _log.warning("Unexpected ignore mode: '%s'." % ignore_mode)
+ return {}
+
+ def expectations_files(self):
+ return [self.path_to_generic_test_expectations_file()] + self._port_specific_expectations_files()
+
+ def repository_paths(self):
+ """Returns a list of (repository_name, repository_path) tuples of its depending code base."""
+ return [('blink', self.layout_tests_dir()),
+ ('chromium', self.path_from_chromium_base('build'))]
+
+ _WDIFF_DEL = '##WDIFF_DEL##'
+ _WDIFF_ADD = '##WDIFF_ADD##'
+ _WDIFF_END = '##WDIFF_END##'
+
+ def _format_wdiff_output_as_html(self, wdiff):
+ wdiff = cgi.escape(wdiff)
+ wdiff = wdiff.replace(self._WDIFF_DEL, "<span class=del>")
+ wdiff = wdiff.replace(self._WDIFF_ADD, "<span class=add>")
+ wdiff = wdiff.replace(self._WDIFF_END, "</span>")
+ html = "<head><style>.del { background: #faa; } "
+ html += ".add { background: #afa; }</style></head>"
+ html += "<pre>%s</pre>" % wdiff
+ return html
+
+ def _wdiff_command(self, actual_filename, expected_filename):
+ executable = self._path_to_wdiff()
+ return [executable,
+ "--start-delete=%s" % self._WDIFF_DEL,
+ "--end-delete=%s" % self._WDIFF_END,
+ "--start-insert=%s" % self._WDIFF_ADD,
+ "--end-insert=%s" % self._WDIFF_END,
+ actual_filename,
+ expected_filename]
+
+ @staticmethod
+ def _handle_wdiff_error(script_error):
+        # Exit 1 means the files differed; any other exit code is an error.
+ if script_error.exit_code != 1:
+ raise script_error
+
+ def _run_wdiff(self, actual_filename, expected_filename):
+ """Runs wdiff and may throw exceptions.
+ This is mostly a hook for unit testing."""
+ # Diffs are treated as binary as they may include multiple files
+ # with conflicting encodings. Thus we do not decode the output.
+ command = self._wdiff_command(actual_filename, expected_filename)
+ wdiff = self._executive.run_command(command, decode_output=False,
+ error_handler=self._handle_wdiff_error)
+ return self._format_wdiff_output_as_html(wdiff)
+
+ _wdiff_error_html = "Failed to run wdiff, see error log."
+
+ def wdiff_text(self, actual_filename, expected_filename):
+ """Returns a string of HTML indicating the word-level diff of the
+ contents of the two filenames. Returns an empty string if word-level
+ diffing isn't available."""
+ if not self.wdiff_available():
+ return ""
+ try:
+            # It's possible to raise a ScriptError if we pass wdiff invalid paths.
+ return self._run_wdiff(actual_filename, expected_filename)
+ except OSError as e:
+ if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
+ # Silently ignore cases where wdiff is missing.
+ self._wdiff_available = False
+ return ""
+ raise
+ except ScriptError as e:
+ _log.error("Failed to run wdiff: %s" % e)
+ self._wdiff_available = False
+ return self._wdiff_error_html
+
+ # This is a class variable so we can test error output easily.
+ _pretty_patch_error_html = "Failed to run PrettyPatch, see error log."
+
+ def pretty_patch_text(self, diff_path):
+ if self._pretty_patch_available is None:
+ self._pretty_patch_available = self.check_pretty_patch(logging=False)
+ if not self._pretty_patch_available:
+ return self._pretty_patch_error_html
+ command = ("ruby", "-I", self._filesystem.dirname(self._pretty_patch_path),
+ self._pretty_patch_path, diff_path)
+ try:
+ # Diffs are treated as binary (we pass decode_output=False) as they
+ # may contain multiple files of conflicting encodings.
+ return self._executive.run_command(command, decode_output=False)
+ except OSError, e:
+ # If the system is missing ruby log the error and stop trying.
+ self._pretty_patch_available = False
+ _log.error("Failed to run PrettyPatch (%s): %s" % (command, e))
+ return self._pretty_patch_error_html
+ except ScriptError, e:
+ # If ruby failed to run for some reason, log the command
+ # output and stop trying.
+ self._pretty_patch_available = False
+ _log.error("Failed to run PrettyPatch (%s):\n%s" % (command, e.message_with_output()))
+ return self._pretty_patch_error_html
+
+ def default_configuration(self):
+ return self._config.default_configuration()
+
+ def clobber_old_port_specific_results(self):
+ pass
+
+ # FIXME: This does not belong on the port object.
+ @memoized
+ def path_to_apache(self):
+ """Returns the full path to the apache binary.
+
+ This is needed only by ports that use the apache_http_server module."""
+ raise NotImplementedError('Port.path_to_apache')
+
+ def path_to_apache_config_file(self):
+ """Returns the full path to the apache configuration file.
+
+ If the WEBKIT_HTTP_SERVER_CONF_PATH environment variable is set, its
+ contents will be used instead.
+
+ This is needed only by ports that use the apache_http_server module."""
+ config_file_from_env = os.environ.get('WEBKIT_HTTP_SERVER_CONF_PATH')
+ if config_file_from_env:
+ if not self._filesystem.exists(config_file_from_env):
+ raise IOError('%s was not found on the system' % config_file_from_env)
+ return config_file_from_env
+
+ config_file_name = self._apache_config_file_name_for_platform(sys.platform)
+ return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', config_file_name)
+
+ #
+ # PROTECTED ROUTINES
+ #
+ # The routines below should only be called by routines in this class
+ # or any of its subclasses.
+ #
+
+ # FIXME: This belongs on some platform abstraction instead of Port.
+ def _is_redhat_based(self):
+ return self._filesystem.exists('/etc/redhat-release')
+
+ def _is_debian_based(self):
+ return self._filesystem.exists('/etc/debian_version')
+
+ def _apache_version(self):
+ config = self._executive.run_command([self.path_to_apache(), '-v'])
+ return re.sub(r'(?:.|\n)*Server version: Apache/(\d+\.\d+)(?:.|\n)*', r'\1', config)
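+        # For example (hypothetical output): 'Server version: Apache/2.4.7 (Ubuntu)'
+        # reduces to '2.4', which selects e.g. 'debian-httpd-2.4.conf' below.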
+
+ # We pass sys_platform into this method to make it easy to unit test.
+ def _apache_config_file_name_for_platform(self, sys_platform):
+ if sys_platform == 'cygwin':
+ return 'cygwin-httpd.conf' # CYGWIN is the only platform to still use Apache 1.3.
+ if sys_platform.startswith('linux'):
+ if self._is_redhat_based():
+ return 'fedora-httpd-' + self._apache_version() + '.conf'
+ if self._is_debian_based():
+ return 'debian-httpd-' + self._apache_version() + '.conf'
+ # All platforms use apache2 except for CYGWIN (and Mac OS X Tiger and prior, which we no longer support).
+ return "apache2-httpd.conf"
+
+ def _path_to_driver(self, configuration=None):
+ """Returns the full path to the test driver."""
+ return self._build_path(self.driver_name())
+
+ def _path_to_webcore_library(self):
+ """Returns the full path to a built copy of WebCore."""
+ return None
+
+ def _path_to_helper(self):
+ """Returns the full path to the layout_test_helper binary, which
+ is used to help configure the system for the test run, or None
+ if no helper is needed.
+
+ This is likely only used by start/stop_helper()."""
+ return None
+
+ def _path_to_image_diff(self):
+ """Returns the full path to the image_diff binary, or None if it is not available.
+
+ This is likely used only by diff_image()"""
+ return self._build_path('image_diff')
+
+ @memoized
+ def _path_to_wdiff(self):
+ """Returns the full path to the wdiff binary, or None if it is not available.
+
+ This is likely used only by wdiff_text()"""
+ for path in ("/usr/bin/wdiff", "/usr/bin/dwdiff"):
+ if self._filesystem.exists(path):
+ return path
+ return None
+
+ def _webkit_baseline_path(self, platform):
+ """Return the full path to the top of the baseline tree for a
+ given platform."""
+ return self._filesystem.join(self.layout_tests_dir(), 'platform', platform)
+
+ def _driver_class(self):
+ """Returns the port's driver implementation."""
+ return driver.Driver
+
+ def _output_contains_sanitizer_messages(self, output):
+ if not output:
+ return None
+ if 'AddressSanitizer' in output:
+ return 'AddressSanitizer'
+ if 'MemorySanitizer' in output:
+ return 'MemorySanitizer'
+ return None
+
+ def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
+ if self._output_contains_sanitizer_messages(stderr):
+ # Running the symbolizer script can take a lot of memory, so we need to
+ # serialize access to it across all the concurrently running drivers.
+
+ llvm_symbolizer_path = self.path_from_chromium_base('third_party', 'llvm-build', 'Release+Asserts', 'bin', 'llvm-symbolizer')
+ if self._filesystem.exists(llvm_symbolizer_path):
+ env = os.environ.copy()
+ env['LLVM_SYMBOLIZER_PATH'] = llvm_symbolizer_path
+ else:
+ env = None
+ sanitizer_filter_path = self.path_from_chromium_base('tools', 'valgrind', 'asan', 'asan_symbolize.py')
+ sanitizer_strip_path_prefix = 'Release/../../'
+ if self._filesystem.exists(sanitizer_filter_path):
+ stderr = self._executive.run_command(['flock', sys.executable, sanitizer_filter_path, sanitizer_strip_path_prefix], input=stderr, decode_output=False, env=env)
+
+ name_str = name or '<unknown process name>'
+ pid_str = str(pid or '<unknown>')
+ stdout_lines = (stdout or '<empty>').decode('utf8', 'replace').splitlines()
+ stderr_lines = (stderr or '<empty>').decode('utf8', 'replace').splitlines()
+ return (stderr, 'crash log for %s (pid %s):\n%s\n%s\n' % (name_str, pid_str,
+ '\n'.join(('STDOUT: ' + l) for l in stdout_lines),
+ '\n'.join(('STDERR: ' + l) for l in stderr_lines)))
+
+ def look_for_new_crash_logs(self, crashed_processes, start_time):
+ pass
+
+ def look_for_new_samples(self, unresponsive_processes, start_time):
+ pass
+
+ def sample_process(self, name, pid):
+ pass
+
+ def physical_test_suites(self):
+ return [
+ # For example, to turn on force-compositing-mode in the svg/ directory:
+ # PhysicalTestSuite('svg',
+ # ['--force-compositing-mode']),
+ ]
+
+ def virtual_test_suites(self):
+ if self._virtual_test_suites is None:
+ path_to_virtual_test_suites = self._filesystem.join(self.layout_tests_dir(), 'VirtualTestSuites')
+ assert self._filesystem.exists(path_to_virtual_test_suites), 'LayoutTests/VirtualTestSuites not found'
+ try:
+ test_suite_json = json.loads(self._filesystem.read_text_file(path_to_virtual_test_suites))
+ self._virtual_test_suites = [VirtualTestSuite(**d) for d in test_suite_json]
+ except ValueError as e:
+ raise ValueError("LayoutTests/VirtualTestSuites is not a valid JSON file: %s" % str(e))
+ return self._virtual_test_suites
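+        # A sketch of the expected JSON (hypothetical entry):
+        #   {"prefix": "gpu", "base": "fast/canvas", "args": ["--foo"]}
+        # becomes a suite named 'virtual/gpu/fast/canvas' that runs the tests
+        # under fast/canvas with the extra flag.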
+
+ def _all_virtual_tests(self, suites):
+ tests = []
+ for suite in suites:
+ self._populate_virtual_suite(suite)
+ tests.extend(suite.tests.keys())
+ return tests
+
+ def _virtual_tests_matching_paths(self, paths, suites):
+ tests = []
+ for suite in suites:
+ if any(p.startswith(suite.name) for p in paths):
+ self._populate_virtual_suite(suite)
+ for test in suite.tests:
+ if any(test.startswith(p) for p in paths):
+ tests.append(test)
+ return tests
+
+ def _populate_virtual_suite(self, suite):
+ if not suite.tests:
+ base_tests = self._real_tests([suite.base])
+ suite.tests = {}
+ for test in base_tests:
+ suite.tests[test.replace(suite.base, suite.name, 1)] = test
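+        # For example (hypothetical suite): with base 'fast/canvas' and name
+        # 'virtual/gpu/fast/canvas', the real test 'fast/canvas/foo.html' maps
+        # to the virtual test 'virtual/gpu/fast/canvas/foo.html'.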
+
+ def is_virtual_test(self, test_name):
+ return bool(self.lookup_virtual_suite(test_name))
+
+ def lookup_virtual_suite(self, test_name):
+ for suite in self.virtual_test_suites():
+ if test_name.startswith(suite.name):
+ return suite
+ return None
+
+ def lookup_virtual_test_base(self, test_name):
+ suite = self.lookup_virtual_suite(test_name)
+ if not suite:
+ return None
+ return test_name.replace(suite.name, suite.base, 1)
+
+ def lookup_virtual_test_args(self, test_name):
+ for suite in self.virtual_test_suites():
+ if test_name.startswith(suite.name):
+ return suite.args
+ return []
+
+ def lookup_physical_test_args(self, test_name):
+ for suite in self.physical_test_suites():
+ if test_name.startswith(suite.name):
+ return suite.args
+ return []
+
+ def should_run_as_pixel_test(self, test_input):
+ if not self._options.pixel_tests:
+ return False
+ if self._options.pixel_test_directories:
+ return any(test_input.test_name.startswith(directory) for directory in self._options.pixel_test_directories)
+ return True
+
+ def _modules_to_search_for_symbols(self):
+ path = self._path_to_webcore_library()
+ if path:
+ return [path]
+ return []
+
+ def _symbols_string(self):
+ symbols = ''
+ for path_to_module in self._modules_to_search_for_symbols():
+ try:
+ symbols += self._executive.run_command(['nm', path_to_module], error_handler=self._executive.ignore_error)
+ except OSError, e:
+ _log.warn("Failed to run nm: %s. Can't determine supported features correctly." % e)
+ return symbols
+
+ # Ports which use compile-time feature detection should define this method and return
+ # a dictionary mapping from symbol substrings to possibly disabled test directories.
+ # When the symbol substrings are not matched, the directories will be skipped.
+ # If ports don't ever enable certain features, then those directories can just be
+ # in the Skipped list instead of compile-time-checked here.
+ def _missing_symbol_to_skipped_tests(self):
+ if self.PORT_HAS_AUDIO_CODECS_BUILT_IN:
+ return {}
+ else:
+ return {
+ "ff_mp3_decoder": ["webaudio/codec-tests/mp3"],
+ "ff_aac_decoder": ["webaudio/codec-tests/aac"],
+ }
+
+ def _has_test_in_directories(self, directory_lists, test_list):
+ if not test_list:
+ return False
+
+ directories = itertools.chain.from_iterable(directory_lists)
+ for directory, test in itertools.product(directories, test_list):
+ if test.startswith(directory):
+ return True
+ return False
+
+ def _skipped_tests_for_unsupported_features(self, test_list):
+        # Only check the symbols if there are tests in the test_list that might get skipped.
+        # This is a performance optimization to avoid calling nm.
+        # Runtime feature detection is not supported, so fall back to static detection:
+ # Disable any tests for symbols missing from the executable or libraries.
+ if self._has_test_in_directories(self._missing_symbol_to_skipped_tests().values(), test_list):
+ symbols_string = self._symbols_string()
+ if symbols_string is not None:
+ return reduce(operator.add, [directories for symbol_substring, directories in self._missing_symbol_to_skipped_tests().items() if symbol_substring not in symbols_string], [])
+ return []
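+        # Example (hypothetical build): if 'ff_mp3_decoder' is absent from the
+        # nm output, every test under 'webaudio/codec-tests/mp3' is skipped.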
+
+ def _convert_path(self, path):
+ """Handles filename conversion for subprocess command line args."""
+ # See note above in diff_image() for why we need this.
+ if sys.platform == 'cygwin':
+ return cygpath(path)
+ return path
+
+ def _build_path(self, *comps):
+ return self._build_path_with_configuration(None, *comps)
+
+ def _build_path_with_configuration(self, configuration, *comps):
+ # Note that we don't do the option caching that the
+ # base class does, because finding the right directory is relatively
+ # fast.
+ configuration = configuration or self.get_option('configuration')
+ return self._static_build_path(self._filesystem, self.get_option('build_directory'),
+ self.path_from_chromium_base(), configuration, comps)
+
+ def _check_driver_build_up_to_date(self, configuration):
+ if configuration in ('Debug', 'Release'):
+ try:
+ debug_path = self._path_to_driver('Debug')
+ release_path = self._path_to_driver('Release')
+
+ debug_mtime = self._filesystem.mtime(debug_path)
+ release_mtime = self._filesystem.mtime(release_path)
+
+ if (debug_mtime > release_mtime and configuration == 'Release' or
+ release_mtime > debug_mtime and configuration == 'Debug'):
+ most_recent_binary = 'Release' if configuration == 'Debug' else 'Debug'
+ _log.warning('You are running the %s binary. However the %s binary appears to be more recent. '
+ 'Please pass --%s.', configuration, most_recent_binary, most_recent_binary.lower())
+ _log.warning('')
+ # This will fail if we don't have both a debug and release binary.
+ # That's fine because, in this case, we must already be running the
+ # most up-to-date one.
+ except OSError:
+ pass
+ return True
+
+ def _chromium_baseline_path(self, platform):
+ if platform is None:
+ platform = self.name()
+ return self.path_from_webkit_base('LayoutTests', 'platform', platform)
+
+class VirtualTestSuite(object):
+ def __init__(self, prefix=None, base=None, args=None):
+ assert base
+ assert args
+ assert prefix.find('/') == -1, "Virtual test suites prefixes cannot contain /'s: %s" % prefix
+ self.name = 'virtual/' + prefix + '/' + base
+ self.base = base
+ self.args = args
+ self.tests = {}
+
+ def __repr__(self):
+ return "VirtualTestSuite('%s', '%s', %s)" % (self.name, self.base, self.args)
+
+
+class PhysicalTestSuite(object):
+ def __init__(self, base, args):
+ self.name = base
+ self.base = base
+ self.args = args
+ self.tests = set()
+
+ def __repr__(self):
+ return "PhysicalTestSuite('%s', '%s', %s)" % (self.name, self.base, self.args)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
new file mode 100644
index 0000000..0e2f81b
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
@@ -0,0 +1,489 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import optparse
+import sys
+import tempfile
+import unittest
+
+from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.common.system import executive_mock
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.path import abspath_to_uri
+from webkitpy.tool.mocktool import MockOptions
+from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+from webkitpy.layout_tests.port import Port, Driver, DriverOutput
+from webkitpy.layout_tests.port.base import VirtualTestSuite
+from webkitpy.layout_tests.port.test import add_unit_tests_to_mock_filesystem, TestPort
+
+class PortTest(unittest.TestCase):
+ def make_port(self, executive=None, with_tests=False, port_name=None, **kwargs):
+ host = MockSystemHost()
+ if executive:
+ host.executive = executive
+ if with_tests:
+ add_unit_tests_to_mock_filesystem(host.filesystem)
+ return TestPort(host, **kwargs)
+ return Port(host, port_name or 'baseport', **kwargs)
+
+ def test_format_wdiff_output_as_html(self):
+ output = "OUTPUT %s %s %s" % (Port._WDIFF_DEL, Port._WDIFF_ADD, Port._WDIFF_END)
+ html = self.make_port()._format_wdiff_output_as_html(output)
+ expected_html = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre>OUTPUT <span class=del> <span class=add> </span></pre>"
+ self.assertEqual(html, expected_html)
+
+ def test_wdiff_command(self):
+ port = self.make_port()
+ port._path_to_wdiff = lambda: "/path/to/wdiff"
+ command = port._wdiff_command("/actual/path", "/expected/path")
+ expected_command = [
+ "/path/to/wdiff",
+ "--start-delete=##WDIFF_DEL##",
+ "--end-delete=##WDIFF_END##",
+ "--start-insert=##WDIFF_ADD##",
+ "--end-insert=##WDIFF_END##",
+ "/actual/path",
+ "/expected/path",
+ ]
+ self.assertEqual(command, expected_command)
+
+ def _file_with_contents(self, contents, encoding="utf-8"):
+ new_file = tempfile.NamedTemporaryFile()
+ new_file.write(contents.encode(encoding))
+ new_file.flush()
+ return new_file
+
+ def test_pretty_patch_os_error(self):
+ port = self.make_port(executive=executive_mock.MockExecutive2(exception=OSError))
+ oc = OutputCapture()
+ oc.capture_output()
+ self.assertEqual(port.pretty_patch_text("patch.txt"),
+ port._pretty_patch_error_html)
+
+ # This tests repeated calls to make sure we cache the result.
+ self.assertEqual(port.pretty_patch_text("patch.txt"),
+ port._pretty_patch_error_html)
+ oc.restore_output()
+
+ def test_pretty_patch_script_error(self):
+ # FIXME: This is some ugly white-box test hacking ...
+ port = self.make_port(executive=executive_mock.MockExecutive2(exception=ScriptError))
+ port._pretty_patch_available = True
+ self.assertEqual(port.pretty_patch_text("patch.txt"),
+ port._pretty_patch_error_html)
+
+ # This tests repeated calls to make sure we cache the result.
+ self.assertEqual(port.pretty_patch_text("patch.txt"),
+ port._pretty_patch_error_html)
+
+ def test_wdiff_text(self):
+ port = self.make_port()
+ port.wdiff_available = lambda: True
+ port._run_wdiff = lambda a, b: 'PASS'
+ self.assertEqual('PASS', port.wdiff_text(None, None))
+
+ def test_diff_text(self):
+ port = self.make_port()
+ # Make sure that we don't run into decoding exceptions when the
+ # filenames are unicode, with regular or malformed input (expected or
+ # actual input is always raw bytes, not unicode).
+ port.diff_text('exp', 'act', 'exp.txt', 'act.txt')
+ port.diff_text('exp', 'act', u'exp.txt', 'act.txt')
+ port.diff_text('exp', 'act', u'a\xac\u1234\u20ac\U00008000', 'act.txt')
+
+ port.diff_text('exp' + chr(255), 'act', 'exp.txt', 'act.txt')
+ port.diff_text('exp' + chr(255), 'act', u'exp.txt', 'act.txt')
+
+ # Though expected and actual files should always be read in with no
+ # encoding (and be stored as str objects), test unicode inputs just to
+ # be safe.
+ port.diff_text(u'exp', 'act', 'exp.txt', 'act.txt')
+ port.diff_text(
+ u'a\xac\u1234\u20ac\U00008000', 'act', 'exp.txt', 'act.txt')
+
+ # And make sure we actually get diff output.
+ diff = port.diff_text('foo', 'bar', 'exp.txt', 'act.txt')
+ self.assertIn('foo', diff)
+ self.assertIn('bar', diff)
+ self.assertIn('exp.txt', diff)
+ self.assertIn('act.txt', diff)
+ self.assertNotIn('nosuchthing', diff)
+
+ # Test for missing newline at end of file diff output.
+ content_a = "Hello\n\nWorld"
+ content_b = "Hello\n\nWorld\n\n\n"
+ expected = "--- exp.txt\n+++ act.txt\n@@ -1,3 +1,5 @@\n Hello\n \n-World\n\ No newline at end of file\n+World\n+\n+\n"
+ self.assertEqual(expected, port.diff_text(content_a, content_b, 'exp.txt', 'act.txt'))
+
+ def test_setup_test_run(self):
+ port = self.make_port()
+ # This routine is a no-op. We just test it for coverage.
+ port.setup_test_run()
+
+ def test_test_dirs(self):
+ port = self.make_port()
+ port.host.filesystem.write_text_file(port.layout_tests_dir() + '/canvas/test', '')
+ port.host.filesystem.write_text_file(port.layout_tests_dir() + '/css2.1/test', '')
+ dirs = port.test_dirs()
+ self.assertIn('canvas', dirs)
+ self.assertIn('css2.1', dirs)
+
+ def test_skipped_perf_tests(self):
+ port = self.make_port()
+
+ def add_text_file(dirname, filename, content='some content'):
+ dirname = port.host.filesystem.join(port.perf_tests_dir(), dirname)
+ port.host.filesystem.maybe_make_directory(dirname)
+ port.host.filesystem.write_text_file(port.host.filesystem.join(dirname, filename), content)
+
+ add_text_file('inspector', 'test1.html')
+ add_text_file('inspector', 'unsupported_test1.html')
+ add_text_file('inspector', 'test2.html')
+ add_text_file('inspector/resources', 'resource_file.html')
+ add_text_file('unsupported', 'unsupported_test2.html')
+ add_text_file('', 'Skipped', '\n'.join(['Layout', '', 'SunSpider', 'Supported/some-test.html']))
+ self.assertEqual(port.skipped_perf_tests(), ['Layout', 'SunSpider', 'Supported/some-test.html'])
+
+ def test_get_option__set(self):
+ options, args = optparse.OptionParser().parse_args([])
+ options.foo = 'bar'
+ port = self.make_port(options=options)
+ self.assertEqual(port.get_option('foo'), 'bar')
+
+ def test_get_option__unset(self):
+ port = self.make_port()
+ self.assertIsNone(port.get_option('foo'))
+
+ def test_get_option__default(self):
+ port = self.make_port()
+ self.assertEqual(port.get_option('foo', 'bar'), 'bar')
+
+ def test_additional_platform_directory(self):
+ port = self.make_port(port_name='foo')
+ port.default_baseline_search_path = lambda: ['LayoutTests/platform/foo']
+ layout_test_dir = port.layout_tests_dir()
+ test_file = 'fast/test.html'
+
+ # No additional platform directory
+ self.assertEqual(
+ port.expected_baselines(test_file, '.txt'),
+ [(None, 'fast/test-expected.txt')])
+ self.assertEqual(port.baseline_path(), 'LayoutTests/platform/foo')
+
+ # Simple additional platform directory
+ port._options.additional_platform_directory = ['/tmp/local-baselines']
+ port._filesystem.write_text_file('/tmp/local-baselines/fast/test-expected.txt', 'foo')
+ self.assertEqual(
+ port.expected_baselines(test_file, '.txt'),
+ [('/tmp/local-baselines', 'fast/test-expected.txt')])
+ self.assertEqual(port.baseline_path(), '/tmp/local-baselines')
+
+ # Multiple additional platform directories
+ port._options.additional_platform_directory = ['/foo', '/tmp/local-baselines']
+ self.assertEqual(
+ port.expected_baselines(test_file, '.txt'),
+ [('/tmp/local-baselines', 'fast/test-expected.txt')])
+ self.assertEqual(port.baseline_path(), '/foo')
+
+    def test_nonexistent_expectations(self):
+        port = self.make_port(port_name='foo')
+        port.expectations_files = lambda: ['/mock-checkout/third_party/WebKit/LayoutTests/platform/exists/TestExpectations', '/mock-checkout/third_party/WebKit/LayoutTests/platform/nonexistent/TestExpectations']
+ port._filesystem.write_text_file('/mock-checkout/third_party/WebKit/LayoutTests/platform/exists/TestExpectations', '')
+ self.assertEqual('\n'.join(port.expectations_dict().keys()), '/mock-checkout/third_party/WebKit/LayoutTests/platform/exists/TestExpectations')
+
+ def test_additional_expectations(self):
+ port = self.make_port(port_name='foo')
+ port.port_name = 'foo'
+ port._filesystem.write_text_file('/mock-checkout/third_party/WebKit/LayoutTests/platform/foo/TestExpectations', '')
+ port._filesystem.write_text_file(
+ '/tmp/additional-expectations-1.txt', 'content1\n')
+ port._filesystem.write_text_file(
+ '/tmp/additional-expectations-2.txt', 'content2\n')
+
+ self.assertEqual('\n'.join(port.expectations_dict().values()), '')
+
+ port._options.additional_expectations = [
+ '/tmp/additional-expectations-1.txt']
+ self.assertEqual('\n'.join(port.expectations_dict().values()), 'content1\n')
+
+ port._options.additional_expectations = [
+ '/tmp/nonexistent-file', '/tmp/additional-expectations-1.txt']
+ self.assertEqual('\n'.join(port.expectations_dict().values()), 'content1\n')
+
+ port._options.additional_expectations = [
+ '/tmp/additional-expectations-1.txt', '/tmp/additional-expectations-2.txt']
+ self.assertEqual('\n'.join(port.expectations_dict().values()), 'content1\n\ncontent2\n')
+
+ def test_additional_env_var(self):
+ port = self.make_port(options=optparse.Values({'additional_env_var': ['FOO=BAR', 'BAR=FOO']}))
+ self.assertEqual(port.get_option('additional_env_var'), ['FOO=BAR', 'BAR=FOO'])
+ environment = port.setup_environ_for_server()
+        self.assertIn('FOO', environment)
+        self.assertIn('BAR', environment)
+ self.assertEqual(environment['FOO'], 'BAR')
+ self.assertEqual(environment['BAR'], 'FOO')
+
+ def test_find_no_paths_specified(self):
+ port = self.make_port(with_tests=True)
+ tests = port.tests([])
+ self.assertNotEqual(len(tests), 0)
+
+ def test_find_one_test(self):
+ port = self.make_port(with_tests=True)
+ tests = port.tests(['failures/expected/image.html'])
+ self.assertEqual(len(tests), 1)
+
+ def test_find_glob(self):
+ port = self.make_port(with_tests=True)
+ tests = port.tests(['failures/expected/im*'])
+ self.assertEqual(len(tests), 2)
+
+ def test_find_with_skipped_directories(self):
+ port = self.make_port(with_tests=True)
+ tests = port.tests(['userscripts'])
+ self.assertNotIn('userscripts/resources/iframe.html', tests)
+
+ def test_find_with_skipped_directories_2(self):
+ port = self.make_port(with_tests=True)
+ tests = port.tests(['userscripts/resources'])
+ self.assertEqual(tests, [])
+
+ def test_is_test_file(self):
+ filesystem = MockFileSystem()
+ self.assertTrue(Port.is_test_file(filesystem, '', 'foo.html'))
+ self.assertTrue(Port.is_test_file(filesystem, '', 'foo.svg'))
+ self.assertTrue(Port.is_test_file(filesystem, '', 'test-ref-test.html'))
+ self.assertFalse(Port.is_test_file(filesystem, '', 'foo.png'))
+ self.assertFalse(Port.is_test_file(filesystem, '', 'foo-expected.html'))
+ self.assertFalse(Port.is_test_file(filesystem, '', 'foo-expected.svg'))
+ self.assertFalse(Port.is_test_file(filesystem, '', 'foo-expected.xht'))
+ self.assertFalse(Port.is_test_file(filesystem, '', 'foo-expected-mismatch.html'))
+ self.assertFalse(Port.is_test_file(filesystem, '', 'foo-expected-mismatch.svg'))
+ self.assertFalse(Port.is_test_file(filesystem, '', 'foo-expected-mismatch.xhtml'))
+ self.assertFalse(Port.is_test_file(filesystem, '', 'foo-ref.html'))
+ self.assertFalse(Port.is_test_file(filesystem, '', 'foo-notref.html'))
+ self.assertFalse(Port.is_test_file(filesystem, '', 'foo-notref.xht'))
+ self.assertFalse(Port.is_test_file(filesystem, '', 'foo-ref.xhtml'))
+ self.assertFalse(Port.is_test_file(filesystem, '', 'ref-foo.html'))
+ self.assertFalse(Port.is_test_file(filesystem, '', 'notref-foo.xhr'))
+
+ def test_parse_reftest_list(self):
+ port = self.make_port(with_tests=True)
+ port.host.filesystem.files['bar/reftest.list'] = "\n".join(["== test.html test-ref.html",
+ "",
+ "# some comment",
+ "!= test-2.html test-notref.html # more comments",
+ "== test-3.html test-ref.html",
+ "== test-3.html test-ref2.html",
+ "!= test-3.html test-notref.html",
+ "fuzzy(80,500) == test-3 test-ref.html"])
+
+ # Note that we don't support the syntax in the last line; the code should ignore it, rather than crashing.
+
+ reftest_list = Port._parse_reftest_list(port.host.filesystem, 'bar')
+ self.assertEqual(reftest_list, {'bar/test.html': [('==', 'bar/test-ref.html')],
+ 'bar/test-2.html': [('!=', 'bar/test-notref.html')],
+ 'bar/test-3.html': [('==', 'bar/test-ref.html'), ('==', 'bar/test-ref2.html'), ('!=', 'bar/test-notref.html')]})
+
+ def test_reference_files(self):
+ port = self.make_port(with_tests=True)
+ self.assertEqual(port.reference_files('passes/svgreftest.svg'), [('==', port.layout_tests_dir() + '/passes/svgreftest-expected.svg')])
+ self.assertEqual(port.reference_files('passes/xhtreftest.svg'), [('==', port.layout_tests_dir() + '/passes/xhtreftest-expected.html')])
+ self.assertEqual(port.reference_files('passes/phpreftest.php'), [('!=', port.layout_tests_dir() + '/passes/phpreftest-expected-mismatch.svg')])
+
+ def test_operating_system(self):
+ self.assertEqual('mac', self.make_port().operating_system())
+
+ def test_http_server_supports_ipv6(self):
+ port = self.make_port()
+ self.assertTrue(port.http_server_supports_ipv6())
+ port.host.platform.os_name = 'cygwin'
+ self.assertFalse(port.http_server_supports_ipv6())
+ port.host.platform.os_name = 'win'
+ self.assertFalse(port.http_server_supports_ipv6())
+
+ def test_check_httpd_success(self):
+ port = self.make_port(executive=MockExecutive2())
+ port.path_to_apache = lambda: '/usr/sbin/httpd'
+ capture = OutputCapture()
+ capture.capture_output()
+ self.assertTrue(port.check_httpd())
+ _, _, logs = capture.restore_output()
+ self.assertEqual('', logs)
+
+ def test_httpd_returns_error_code(self):
+ port = self.make_port(executive=MockExecutive2(exit_code=1))
+ port.path_to_apache = lambda: '/usr/sbin/httpd'
+ capture = OutputCapture()
+ capture.capture_output()
+ self.assertFalse(port.check_httpd())
+ _, _, logs = capture.restore_output()
+ self.assertEqual('httpd seems broken. Cannot run http tests.\n', logs)
+
+ def test_test_exists(self):
+ port = self.make_port(with_tests=True)
+ self.assertTrue(port.test_exists('passes'))
+ self.assertTrue(port.test_exists('passes/text.html'))
+ self.assertFalse(port.test_exists('passes/does_not_exist.html'))
+
+ self.assertTrue(port.test_exists('virtual'))
+ self.assertFalse(port.test_exists('virtual/does_not_exist.html'))
+ self.assertTrue(port.test_exists('virtual/virtual_passes/passes/text.html'))
+
+ def test_test_isfile(self):
+ port = self.make_port(with_tests=True)
+ self.assertFalse(port.test_isfile('passes'))
+ self.assertTrue(port.test_isfile('passes/text.html'))
+ self.assertFalse(port.test_isfile('passes/does_not_exist.html'))
+
+ self.assertFalse(port.test_isfile('virtual'))
+ self.assertTrue(port.test_isfile('virtual/virtual_passes/passes/text.html'))
+ self.assertFalse(port.test_isfile('virtual/does_not_exist.html'))
+
+ def test_test_isdir(self):
+ port = self.make_port(with_tests=True)
+ self.assertTrue(port.test_isdir('passes'))
+ self.assertFalse(port.test_isdir('passes/text.html'))
+ self.assertFalse(port.test_isdir('passes/does_not_exist.html'))
+ self.assertFalse(port.test_isdir('passes/does_not_exist/'))
+
+ self.assertTrue(port.test_isdir('virtual'))
+ self.assertFalse(port.test_isdir('virtual/does_not_exist.html'))
+ self.assertFalse(port.test_isdir('virtual/does_not_exist/'))
+ self.assertFalse(port.test_isdir('virtual/virtual_passes/passes/text.html'))
+
+ def test_tests(self):
+ port = self.make_port(with_tests=True)
+ tests = port.tests([])
+ self.assertIn('passes/text.html', tests)
+ self.assertIn('virtual/virtual_passes/passes/text.html', tests)
+
+ tests = port.tests(['passes'])
+ self.assertIn('passes/text.html', tests)
+ self.assertIn('passes/virtual_passes/test-virtual-passes.html', tests)
+ self.assertNotIn('virtual/virtual_passes/passes/text.html', tests)
+
+ tests = port.tests(['virtual/virtual_passes/passes'])
+ self.assertNotIn('passes/text.html', tests)
+ self.assertIn('virtual/virtual_passes/passes/test-virtual-passes.html', tests)
+ self.assertNotIn('passes/test-virtual-passes.html', tests)
+ self.assertNotIn('virtual/virtual_passes/passes/test-virtual-virtual/passes.html', tests)
+ self.assertNotIn('virtual/virtual_passes/passes/virtual_passes/passes/test-virtual-passes.html', tests)
+
+ def test_build_path(self):
+ port = self.make_port(options=optparse.Values({'build_directory': '/my-build-directory/'}))
+ self.assertEqual(port._build_path(), '/my-build-directory/Release')
+
+ def test_dont_require_http_server(self):
+ port = self.make_port()
+ self.assertEqual(port.requires_http_server(), False)
+
+ def test_can_load_actual_virtual_test_suite_file(self):
+ port = Port(SystemHost(), 'baseport')
+
+ # If this call returns successfully, we found and loaded the LayoutTests/VirtualTestSuites.
+ _ = port.virtual_test_suites()
+
+ def test_good_virtual_test_suite_file(self):
+ port = self.make_port()
+ fs = port._filesystem
+ fs.write_text_file(fs.join(port.layout_tests_dir(), 'VirtualTestSuites'),
+ '[{"prefix": "bar", "base": "fast/bar", "args": ["--bar"]}]')
+
+ # If this call returns successfully, we found and loaded the LayoutTests/VirtualTestSuites.
+ _ = port.virtual_test_suites()
+
+ def test_virtual_test_suite_file_is_not_json(self):
+ port = self.make_port()
+ fs = port._filesystem
+ fs.write_text_file(fs.join(port.layout_tests_dir(), 'VirtualTestSuites'),
+ '{[{[')
+ self.assertRaises(ValueError, port.virtual_test_suites)
+
+ def test_missing_virtual_test_suite_file(self):
+ port = self.make_port()
+ self.assertRaises(AssertionError, port.virtual_test_suites)
+
+
+class NaturalCompareTest(unittest.TestCase):
+ def setUp(self):
+ self._port = TestPort(MockSystemHost())
+
+ def assert_cmp(self, x, y, result):
+ self.assertEqual(cmp(self._port._natural_sort_key(x), self._port._natural_sort_key(y)), result)
+
+ def test_natural_compare(self):
+ self.assert_cmp('a', 'a', 0)
+ self.assert_cmp('ab', 'a', 1)
+ self.assert_cmp('a', 'ab', -1)
+ self.assert_cmp('', '', 0)
+ self.assert_cmp('', 'ab', -1)
+ self.assert_cmp('1', '2', -1)
+ self.assert_cmp('2', '1', 1)
+ self.assert_cmp('1', '10', -1)
+ self.assert_cmp('2', '10', -1)
+ self.assert_cmp('foo_1.html', 'foo_2.html', -1)
+ self.assert_cmp('foo_1.1.html', 'foo_2.html', -1)
+ self.assert_cmp('foo_1.html', 'foo_10.html', -1)
+ self.assert_cmp('foo_2.html', 'foo_10.html', -1)
+ self.assert_cmp('foo_23.html', 'foo_10.html', 1)
+ self.assert_cmp('foo_23.html', 'foo_100.html', -1)
+
+
+class KeyCompareTest(unittest.TestCase):
+ def setUp(self):
+ self._port = TestPort(MockSystemHost())
+
+ def assert_cmp(self, x, y, result):
+ self.assertEqual(cmp(self._port.test_key(x), self._port.test_key(y)), result)
+
+ def test_test_key(self):
+ self.assert_cmp('/a', '/a', 0)
+ self.assert_cmp('/a', '/b', -1)
+ self.assert_cmp('/a2', '/a10', -1)
+ self.assert_cmp('/a2/foo', '/a10/foo', -1)
+ self.assert_cmp('/a/foo11', '/a/foo2', 1)
+ self.assert_cmp('/ab', '/a/a/b', -1)
+ self.assert_cmp('/a/a/b', '/ab', 1)
+ self.assert_cmp('/foo-bar/baz', '/foo/baz', -1)
+
+
+class VirtualTestSuiteTest(unittest.TestCase):
+ def test_basic(self):
+ suite = VirtualTestSuite(prefix='suite', base='base/foo', args=['--args'])
+ self.assertEqual(suite.name, 'virtual/suite/base/foo')
+ self.assertEqual(suite.base, 'base/foo')
+ self.assertEqual(suite.args, ['--args'])
+
+ def test_no_slash(self):
+ self.assertRaises(AssertionError, VirtualTestSuite, prefix='suite/bar', base='base/foo', args=['--args'])
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/browser_test.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/browser_test.py
new file mode 100644
index 0000000..8fffb79
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/browser_test.py
@@ -0,0 +1,100 @@
+# Copyright (C) 2014 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.layout_tests.models import test_run_results
+from webkitpy.layout_tests.port import linux
+from webkitpy.layout_tests.port import mac
+from webkitpy.layout_tests.port import win
+from webkitpy.layout_tests.port import browser_test_driver
+
+
+def get_port_class_name(port_name):
+ if 'linux' in port_name:
+ return 'BrowserTestLinuxPort'
+ elif 'mac' in port_name:
+ return 'BrowserTestMacPort'
+ elif 'win' in port_name:
+ return 'BrowserTestWinPort'
+ return None
+
+
+class BrowserTestPortOverrides(object):
+ """Set of overrides that every browser test platform port should have. This
+ class should not be instantiated as certain functions depend on base. Port
+ to work."""
+ def _driver_class(self):
+ return browser_test_driver.BrowserTestDriver
+
+ def layout_tests_dir(self):
+ """Overriden function from the base port class. Redirects everything
+ to src/chrome/test/data/printing/layout_tests.
+ """
+ return self.path_from_chromium_base('chrome', 'test', 'data', 'printing', 'layout_tests') # pylint: disable=E1101
+
+ def check_sys_deps(self, needs_http):
+ """This function is meant to be a no-op since we don't want to actually
+ check for system dependencies."""
+ return test_run_results.OK_EXIT_STATUS
+
+ def driver_name(self):
+ return 'browser_tests'
+
+ def default_timeout_ms(self):
+ timeout_ms = 10 * 1000
+ if self.get_option('configuration') == 'Debug': # pylint: disable=E1101
+ # Debug is usually 2x-3x slower than Release.
+ return 3 * timeout_ms
+ return timeout_ms
+
+ def virtual_test_suites(self):
+ return []
+
+
+class BrowserTestLinuxPort(BrowserTestPortOverrides, linux.LinuxPort):
+ pass
+
+
+class BrowserTestMacPort(BrowserTestPortOverrides, mac.MacPort):
+ def _path_to_driver(self, configuration=None):
+ return self._build_path_with_configuration(configuration, self.driver_name())
+
+ def default_timeout_ms(self):
+ timeout_ms = 20 * 1000
+ if self.get_option('configuration') == 'Debug': # pylint: disable=E1101
+ # Debug is usually 2x-3x slower than Release.
+ return 3 * timeout_ms
+ return timeout_ms
+
+
+class BrowserTestWinPort(BrowserTestPortOverrides, win.WinPort):
+ def default_timeout_ms(self):
+ timeout_ms = 20 * 1000
+ if self.get_option('configuration') == 'Debug': # pylint: disable=E1101
+ # Debug is usually 2x-3x slower than Release.
+ return 3 * timeout_ms
+ return timeout_ms
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/browser_test_driver.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/browser_test_driver.py
new file mode 100644
index 0000000..e8e6537
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/browser_test_driver.py
@@ -0,0 +1,91 @@
+# Copyright (C) 2014 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.layout_tests.port import driver
+import time
+import shutil
+
+
+class BrowserTestDriver(driver.Driver):
+ """Object for running print preview test(s) using browser_tests."""
+ def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
+ """Invokes the constructor of driver.Driver."""
+ super(BrowserTestDriver, self).__init__(port, worker_number, pixel_tests, no_timeout)
+
+ def start(self, pixel_tests, per_test_args, deadline):
+ """Same as Driver.start() however, it has an extra step. It waits for
+ a path to a file to be used for stdin to be printed by the browser test.
+ If a path is found by the deadline test test will open the file and
+ assign it to the stdin of the process that is owned by this driver's
+ server process.
+ """
+ # FIXME(ivandavid): Need to handle case where the layout test doesn't
+ # get a file name.
+ new_cmd_line = self.cmd_line(pixel_tests, per_test_args)
+ if not self._server_process or new_cmd_line != self._current_cmd_line:
+ self._start(pixel_tests, per_test_args)
+ self._run_post_start_tasks()
+ self._open_stdin_path(deadline)
+
+    # Gets the path of the directory that holds the file used for stdin
+    # communication. Since the browser test cannot clean that directory up,
+    # the layout test framework does it instead. Everything the browser test
+    # uses is stored in the same directory as the stdin file, so deleting that
+    # directory recursively also removes all of the other temp data, such as
+    # the printed pdf. This code assumes the correct file path is sent; it
+    # won't delete paths with only one component, to avoid accidentally
+    # deleting directories like /tmp.
+ def _open_stdin_path(self, deadline, test=False):
+ # FIXME(ivandavid): Come up with a way to test & see what happens when
+ # the file can't be opened.
+ path, found = self._read_stdin_path(deadline)
+ if found:
+            if not test:
+ self._server_process._proc.stdin = open(path, 'wb', 0)
+
+ def _read_stdin_path(self, deadline):
+        # Returns a (stdin_path, found) tuple.
+ block = self._read_block(deadline)
+ if block.stdin_path:
+ return (block.stdin_path, True)
+ return (None, False)
+
+ def cmd_line(self, pixel_tests, per_test_args):
+ """Command line arguments to run the browser test."""
+ cmd = self._command_wrapper(self._port.get_option('wrapper'))
+ cmd.append(self._port._path_to_driver())
+ cmd.append('--gtest_filter=PrintPreviewPdfGeneratedBrowserTest.MANUAL_LayoutTestDriver')
+ cmd.append('--run-manual')
+ cmd.append('--single_process')
+ cmd.extend(per_test_args)
+ cmd.extend(self._port.get_option('additional_drt_flag', []))
+ return cmd
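+        # The resulting invocation looks roughly like this (wrapper and driver
+        # paths vary by port; the flags shown are the ones appended above):
+        #   <driver> --gtest_filter=PrintPreviewPdfGeneratedBrowserTest.MANUAL_LayoutTestDriver
+        #            --run-manual --single_process <per-test args> <additional drt flags>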
+
+ def stop(self):
+ if self._server_process:
+ self._server_process.write('QUIT')
+ super(BrowserTestDriver, self).stop(self._port.driver_stop_timeout())
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/browser_test_driver_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/browser_test_driver_unittest.py
new file mode 100644
index 0000000..e24e738
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/browser_test_driver_unittest.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2014 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+from webkitpy.layout_tests.port import Port, Driver, DriverOutput
+from webkitpy.layout_tests.port import browser_test, browser_test_driver
+from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
+
+from webkitpy.layout_tests.port.port_testcase import TestWebKitPort
+
+from webkitpy.tool.mocktool import MockOptions
+
+
+class BrowserTestDriverTest(unittest.TestCase):
+ def test_read_stdin_path(self):
+ port = TestWebKitPort()
+ driver = browser_test_driver.BrowserTestDriver(port, 0, pixel_tests=True)
+ driver._server_process = MockServerProcess(lines=[
+ 'StdinPath: /foo/bar', '#EOF'])
+ content_block = driver._read_block(0)
+ self.assertEqual(content_block.stdin_path, '/foo/bar')
+ driver._stdin_directory = None
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/browser_test_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/browser_test_unittest.py
new file mode 100644
index 0000000..a2bf641
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/browser_test_unittest.py
@@ -0,0 +1,95 @@
+# Copyright (C) 2014 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.executive_mock import MockExecutive2
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.tool.mocktool import MockOptions
+
+from webkitpy.layout_tests.models import test_run_results
+from webkitpy.layout_tests.port import browser_test
+from webkitpy.layout_tests.port import port_testcase
+from webkitpy.layout_tests.port import browser_test_driver
+
+
+class _BrowserTestTestCaseMixin(object):
+
+ def test_check_sys_deps(self):
+ port = self.make_port()
+ port._executive = MockExecutive2(exit_code=0)
+ self.assertEqual(port.check_sys_deps(needs_http=False), test_run_results.OK_EXIT_STATUS)
+
+ def test_driver_name_option(self):
+ self.assertTrue(self.make_port()._path_to_driver().endswith(self.driver_name_endswith))
+
+ def test_default_timeout_ms(self):
+ self.assertEqual(self.make_port(options=MockOptions(configuration='Release')).default_timeout_ms(),
+ self.timeout_ms)
+ self.assertEqual(self.make_port(options=MockOptions(configuration='Debug')).default_timeout_ms(),
+ 3 * self.timeout_ms)
+
+ def test_driver_type(self):
+        self.assertIsInstance(self.make_port(options=MockOptions(driver_name='browser_tests')).create_driver(1),
+                              browser_test_driver.BrowserTestDriver)
+
+ def test_layout_tests_dir(self):
+ self.assertTrue(self.make_port().layout_tests_dir().endswith('chrome/test/data/printing/layout_tests'))
+
+ def test_virtual_test_suites(self):
+        # The browser_tests port does not use virtual test suites, so we are just testing the stub.
+ port = self.make_port()
+ self.assertEqual(port.virtual_test_suites(), [])
+
+
+class BrowserTestLinuxTest(_BrowserTestTestCaseMixin, port_testcase.PortTestCase):
+ port_name = 'linux'
+ port_maker = browser_test.BrowserTestLinuxPort
+ driver_name_endswith = 'browser_tests'
+ timeout_ms = 10 * 1000
+
+
+class BrowserTestWinTest(_BrowserTestTestCaseMixin, port_testcase.PortTestCase):
+ port_name = 'win'
+ port_maker = browser_test.BrowserTestWinPort
+ os_name = 'win'
+ os_version = 'xp'
+ driver_name_endswith = 'browser_tests.exe'
+ timeout_ms = 20 * 1000
+
+
+class BrowserTestMacTest(_BrowserTestTestCaseMixin, port_testcase.PortTestCase):
+ os_name = 'mac'
+ os_version = 'snowleopard'
+ port_name = 'mac'
+ port_maker = browser_test.BrowserTestMacPort
+ driver_name_endswith = 'browser_tests'
+ timeout_ms = 20 * 1000
+
+ def test_driver_path(self):
+ test_port = self.make_port(options=MockOptions(driver_name='browser_tests'))
+        self.assertNotIn('.app/Contents/MacOS', test_port._path_to_driver())
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/builders.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/builders.py
new file mode 100644
index 0000000..be0daf6
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/builders.py
@@ -0,0 +1,119 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+
+from webkitpy.common.memoized import memoized
+
+
+# In this dictionary, each item stores:
+# * port_name -- a fully qualified port name
+# * rebaseline_override_dir -- (optional) directory to put baselines in instead of where you would normally put them.
+# This is useful when we don't have bots that cover particular configurations; so, e.g., you might
+# support mac-mountainlion but not have a mac-mountainlion bot yet, so you'd want to put the mac-lion
+# results into platform/mac temporarily.
+# * specifiers -- TestExpectation specifiers for that config. Valid values are found in
+# TestExpectationsParser._configuration_tokens_list
+
+_exact_matches = {
+ "WebKit XP": {"port_name": "win-xp", "specifiers": ['XP', 'Release']},
+ "WebKit Win7": {"port_name": "win-win7", "specifiers": ['Win7', 'Release']},
+ "WebKit Win7 (dbg)": {"port_name": "win-win7", "specifiers": ['Win7', 'Debug']},
+ "WebKit Linux": {"port_name": "linux-x86_64", "specifiers": ['Linux', 'Release']},
+ "WebKit Linux 32": {"port_name": "linux-x86", "specifiers": ['Linux', 'Release']},
+ "WebKit Linux (dbg)": {"port_name": "linux-x86_64", "specifiers": ['Linux', 'Debug']},
+ "WebKit Mac10.6": {"port_name": "mac-snowleopard", "specifiers": ['SnowLeopard', 'Release']},
+ "WebKit Mac10.6 (dbg)": {"port_name": "mac-snowleopard", "specifiers": ['SnowLeopard', 'Debug']},
+ "WebKit Mac10.7": {"port_name": "mac-lion", "specifiers": ['Lion', 'Release']},
+ "WebKit Mac10.7 (dbg)": {"port_name": "mac-lion", "specifiers": ['Lion', 'Debug']},
+ "WebKit Mac10.8": {"port_name": "mac-mountainlion", "specifiers": ['MountainLion', 'Release']},
+ "WebKit Mac10.8 (retina)": {"port_name": "mac-retina", "specifiers": ['Retina', 'Release']},
+ "WebKit Mac10.9": {"port_name": "mac-mavericks", "specifiers": ['Mavericks', 'Release']},
+ "WebKit Android (Nexus4)": {"port_name": "android", "specifiers": ['Android', 'Release']},
+}
+
+
+# Mapping from port name to the deps builder of the same os:
+_deps_builders = {
+ "linux-x86": "WebKit Linux (deps)",
+ "linux-x86_64": "WebKit Linux (deps)",
+ "win-xp": "WebKit XP (deps)",
+ "win-win7": "WebKit XP (deps)",
+ "mac-snowleopard": "WebKit Mac10.6 (deps)",
+ "mac-lion": "WebKit Mac10.6 (deps)",
+ "mac-mountainlion": "WebKit Mac10.6 (deps)",
+ "mac-mavericks": "WebKit Mac10.6 (deps)",
+ "mac-retina": "WebKit Mac10.6 (deps)",
+}
+
+
+_ports_without_builders = [
+]
+
+
+def builder_path_from_name(builder_name):
+ return re.sub(r'[\s().]', '_', builder_name)
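+# For example, builder_path_from_name('WebKit Mac10.6 (dbg)') returns
+# 'WebKit_Mac10_6__dbg_': spaces, parentheses and dots all become underscores.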
+
+
+def all_builder_names():
+ return sorted(set(_exact_matches.keys()))
+
+
+def all_port_names():
+ return sorted(set(map(lambda x: x["port_name"], _exact_matches.values()) + _ports_without_builders))
+
+
+def rebaseline_override_dir(builder_name):
+ return _exact_matches[builder_name].get("rebaseline_override_dir", None)
+
+
+def port_name_for_builder_name(builder_name):
+ return _exact_matches[builder_name]["port_name"]
+
+
+def specifiers_for_builder(builder_name):
+ return _exact_matches[builder_name]["specifiers"]
+
+
+def builder_name_for_port_name(target_port_name):
+ debug_builder_name = None
+ for builder_name, builder_info in _exact_matches.items():
+ if builder_info['port_name'] == target_port_name:
+ if 'dbg' in builder_name:
+ debug_builder_name = builder_name
+ else:
+ return builder_name
+ return debug_builder_name
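+
+# A quick illustration of the lookups above, mirroring the entries in the
+# _exact_matches table earlier in this file:
+#   port_name_for_builder_name('WebKit Win7')    -> 'win-win7'
+#   specifiers_for_builder('WebKit Linux (dbg)') -> ['Linux', 'Debug']
+#   builder_name_for_port_name('win-win7')       -> 'WebKit Win7'
+# (builder_name_for_port_name prefers a non-debug builder and falls back to
+# the debug one only if no other builder matches.)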
+
+
+def builder_path_for_port_name(port_name):
+    return builder_path_from_name(builder_name_for_port_name(port_name))
+
+
+def deps_builder_name_for_port_name(target_port_name):
+ return _deps_builders.get(target_port_name, None)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/builders_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/builders_unittest.py
new file mode 100644
index 0000000..d5ba2f2
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/builders_unittest.py
@@ -0,0 +1,42 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+import builders
+
+
+class BuildersTest(unittest.TestCase):
+ def test_path_from_name(self):
+ tests = {
+ 'test': 'test',
+ 'Mac 10.6 (dbg)(1)': 'Mac_10_6__dbg__1_',
+ '(.) ': '____',
+ }
+ for name, expected in tests.items():
+ self.assertEqual(expected, builders.builder_path_from_name(name))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/config.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/config.py
new file mode 100644
index 0000000..85e517f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/config.py
@@ -0,0 +1,73 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# FIXME: Remove this file altogether. It's useless in a Blink checkout.
+
+import logging
+
+from webkitpy.common import webkit_finder
+
+
+_log = logging.getLogger(__name__)
+
+
+class Config(object):
+ _FLAGS_FROM_CONFIGURATIONS = {
+ "Debug": "--debug",
+ "Release": "--release",
+ }
+
+ def __init__(self, executive, filesystem, port_implementation=None):
+ self._executive = executive
+ self._filesystem = filesystem
+ self._webkit_finder = webkit_finder.WebKitFinder(self._filesystem)
+ self._default_configuration = None
+ self._build_directories = {}
+ self._port_implementation = port_implementation
+
+ def build_directory(self, configuration):
+ """Returns the path to the build directory for the configuration."""
+ if configuration:
+ flags = ["--configuration", self.flag_for_configuration(configuration)]
+ else:
+ configuration = ""
+ flags = []
+
+ if self._port_implementation:
+ flags.append('--' + self._port_implementation)
+
+ if not self._build_directories.get(configuration):
+ self._build_directories[configuration] = self._webkit_finder.path_from_webkit_base('out', configuration)
+
+ return self._build_directories[configuration]
+
+ def flag_for_configuration(self, configuration):
+ return self._FLAGS_FROM_CONFIGURATIONS[configuration]
+
+ def default_configuration(self):
+ return 'Release'
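+
+
+# Minimal usage sketch. The executive and filesystem arguments are whatever
+# host objects the caller already has; the names here are illustrative only:
+#   config = Config(executive, filesystem)
+#   config.build_directory('Release')       # -> <webkit base>/out/Release
+#   config.flag_for_configuration('Debug')  # -> '--debug'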
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/driver.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/driver.py
new file mode 100644
index 0000000..bddd09c
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/driver.py
@@ -0,0 +1,525 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import base64
+import copy
+import logging
+import re
+import shlex
+import sys
+import time
+import os
+
+from webkitpy.common.system import path
+from webkitpy.common.system.profiler import ProfilerFactory
+
+
+_log = logging.getLogger(__name__)
+
+
+DRIVER_START_TIMEOUT_SECS = 30
+
+
+class DriverInput(object):
+ def __init__(self, test_name, timeout, image_hash, should_run_pixel_test, args):
+ self.test_name = test_name
+ self.timeout = timeout # in ms
+ self.image_hash = image_hash
+ self.should_run_pixel_test = should_run_pixel_test
+ self.args = args
+
+
+class DriverOutput(object):
+ """Groups information about a output from driver for easy passing
+ and post-processing of data."""
+
+ def __init__(self, text, image, image_hash, audio, crash=False,
+ test_time=0, measurements=None, timeout=False, error='', crashed_process_name='??',
+ crashed_pid=None, crash_log=None, leak=False, leak_log=None, pid=None):
+ # FIXME: Args could be renamed to better clarify what they do.
+ self.text = text
+ self.image = image # May be empty-string if the test crashes.
+ self.image_hash = image_hash
+ self.image_diff = None # image_diff gets filled in after construction.
+ self.audio = audio # Binary format is port-dependent.
+ self.crash = crash
+ self.crashed_process_name = crashed_process_name
+ self.crashed_pid = crashed_pid
+ self.crash_log = crash_log
+ self.leak = leak
+ self.leak_log = leak_log
+ self.test_time = test_time
+ self.measurements = measurements
+ self.timeout = timeout
+ self.error = error # stderr output
+ self.pid = pid
+
+ def has_stderr(self):
+ return bool(self.error)
+
+
+class DeviceFailure(Exception):
+ pass
+
+
+class Driver(object):
+ """object for running test(s) using content_shell or other driver."""
+
+ def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
+ """Initialize a Driver to subsequently run tests.
+
+ Typically this routine will spawn content_shell in a config
+ ready for subsequent input.
+
+ port - reference back to the port object.
+ worker_number - identifier for a particular worker/driver instance
+ """
+ self._port = port
+ self._worker_number = worker_number
+ self._no_timeout = no_timeout
+
+ self._driver_tempdir = None
+ # content_shell can report back subprocess crashes by printing
+ # "#CRASHED - PROCESSNAME". Since those can happen at any time
+ # and ServerProcess won't be aware of them (since the actual tool
+ # didn't crash, just a subprocess) we record the crashed subprocess name here.
+ self._crashed_process_name = None
+ self._crashed_pid = None
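+        # (For example, a line like "#CRASHED - renderer" would be recorded as
+        # a crash of the renderer subprocess; the exact process names are
+        # whatever content_shell reports.)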
+
+        # content_shell can report back subprocesses that became unresponsive.
+        # This could mean they crashed.
+ self._subprocess_was_unresponsive = False
+
+ # content_shell can report back subprocess DOM-object leaks by printing
+ # "#LEAK". This leak detection is enabled only when the flag
+ # --enable-leak-detection is passed to content_shell.
+ self._leaked = False
+
+        # stderr reading is scoped on a per-test (not per-block) basis, so we store the accumulated
+        # stderr output, as well as whether we've seen #EOF on this driver instance.
+ # FIXME: We should probably remove _read_first_block and _read_optional_image_block and
+ # instead scope these locally in run_test.
+ self.error_from_test = str()
+ self.err_seen_eof = False
+ self._server_process = None
+ self._current_cmd_line = None
+
+ self._measurements = {}
+ if self._port.get_option("profile"):
+ profiler_name = self._port.get_option("profiler")
+ self._profiler = ProfilerFactory.create_profiler(self._port.host,
+ self._port._path_to_driver(), self._port.results_directory(), profiler_name)
+ else:
+ self._profiler = None
+
+ def __del__(self):
+ self.stop()
+
+ def run_test(self, driver_input, stop_when_done):
+ """Run a single test and return the results.
+
+ Note that it is okay if a test times out or crashes and leaves
+ the driver in an indeterminate state. The upper layers of the program
+ are responsible for cleaning up and ensuring things are okay.
+
+ Returns a DriverOutput object.
+ """
+ start_time = time.time()
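+        # driver_input.timeout is in milliseconds; allow the driver up to half
+        # of the test timeout (converted to seconds) to start and provide stdin.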
+ stdin_deadline = start_time + int(driver_input.timeout) / 2000.0
+ self.start(driver_input.should_run_pixel_test, driver_input.args, stdin_deadline)
+ test_begin_time = time.time()
+ self.error_from_test = str()
+ self.err_seen_eof = False
+
+ command = self._command_from_driver_input(driver_input)
+ deadline = test_begin_time + int(driver_input.timeout) / 1000.0
+
+ self._server_process.write(command)
+ text, audio = self._read_first_block(deadline) # First block is either text or audio
+ image, actual_image_hash = self._read_optional_image_block(deadline) # The second (optional) block is image data.
+
+ crashed = self.has_crashed()
+ timed_out = self._server_process.timed_out
+ pid = self._server_process.pid()
+ leaked = self._leaked
+
+ if not crashed:
+ sanitizer = self._port._output_contains_sanitizer_messages(self.error_from_test)
+ if sanitizer:
+ self.error_from_test = 'OUTPUT CONTAINS "' + sanitizer + '", so we are treating this test as if it crashed, even though it did not.\n\n' + self.error_from_test
+ crashed = True
+ self._crashed_process_name = "unknown process name"
+ self._crashed_pid = 0
+
+ if stop_when_done or crashed or timed_out or leaked:
+ # We call stop() even if we crashed or timed out in order to get any remaining stdout/stderr output.
+ # In the timeout case, we kill the hung process as well.
+ out, err = self._server_process.stop(self._port.driver_stop_timeout() if stop_when_done else 0.0)
+ if out:
+ text += out
+ if err:
+ self.error_from_test += err
+ self._server_process = None
+
+ crash_log = None
+ if crashed:
+ self.error_from_test, crash_log = self._get_crash_log(text, self.error_from_test, newer_than=start_time)
+
+ # If we don't find a crash log use a placeholder error message instead.
+ if not crash_log:
+ pid_str = str(self._crashed_pid) if self._crashed_pid else "unknown pid"
+ crash_log = 'No crash log found for %s:%s.\n' % (self._crashed_process_name, pid_str)
+                # If we were unresponsive, append a message noting that there may not have been a crash.
+ if self._subprocess_was_unresponsive:
+ crash_log += 'Process failed to become responsive before timing out.\n'
+
+ # Print stdout and stderr to the placeholder crash log; we want as much context as possible.
+ if self.error_from_test:
+ crash_log += '\nstdout:\n%s\nstderr:\n%s\n' % (text, self.error_from_test)
+
+ return DriverOutput(text, image, actual_image_hash, audio,
+ crash=crashed, test_time=time.time() - test_begin_time, measurements=self._measurements,
+ timeout=timed_out, error=self.error_from_test,
+ crashed_process_name=self._crashed_process_name,
+ crashed_pid=self._crashed_pid, crash_log=crash_log,
+ leak=leaked, leak_log=self._leak_log,
+ pid=pid)
+
+ def _get_crash_log(self, stdout, stderr, newer_than):
+ return self._port._get_crash_log(self._crashed_process_name, self._crashed_pid, stdout, stderr, newer_than)
+
+ # FIXME: Seems this could just be inlined into callers.
+ @classmethod
+ def _command_wrapper(cls, wrapper_option):
+ # Hook for injecting valgrind or other runtime instrumentation,
+ # used by e.g. tools/valgrind/valgrind_tests.py.
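+        # For example, _command_wrapper('valgrind --leak-check=full') returns
+        # ['valgrind', '--leak-check=full'] (the flag is illustrative).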
+ return shlex.split(wrapper_option) if wrapper_option else []
+
+ HTTP_DIR = "http/tests/"
+ HTTP_LOCAL_DIR = "http/tests/local/"
+
+ def is_http_test(self, test_name):
+ return test_name.startswith(self.HTTP_DIR) and not test_name.startswith(self.HTTP_LOCAL_DIR)
+
+ def test_to_uri(self, test_name):
+ """Convert a test name to a URI.
+
+ Tests which have an 'https' directory in their paths (e.g.
+ '/http/tests/security/mixedContent/https/test1.html') or '.https.' in
+ their name (e.g. 'http/tests/security/mixedContent/test1.https.html') will
+ be loaded over HTTPS; all other tests over HTTP.
+ """
+ if not self.is_http_test(test_name):
+ return path.abspath_to_uri(self._port.host.platform, self._port.abspath_for_test(test_name))
+
+ relative_path = test_name[len(self.HTTP_DIR):]
+
+ if "/https/" in test_name or ".https." in test_name:
+ return "https://127.0.0.1:8443/" + relative_path
+ return "http://127.0.0.1:8000/" + relative_path
+
+ def uri_to_test(self, uri):
+ """Return the base layout test name for a given URI.
+
+ This returns the test name for a given URI, e.g., if you passed in
+ "file:///src/LayoutTests/fast/html/keygen.html" it would return
+ "fast/html/keygen.html".
+ """
+ if uri.startswith("file:///"):
+ prefix = path.abspath_to_uri(self._port.host.platform, self._port.layout_tests_dir())
+ if not prefix.endswith('/'):
+ prefix += '/'
+ return uri[len(prefix):]
+ if uri.startswith("http://"):
+ return uri.replace('http://127.0.0.1:8000/', self.HTTP_DIR)
+ if uri.startswith("https://"):
+ return uri.replace('https://127.0.0.1:8443/', self.HTTP_DIR)
+ raise NotImplementedError('unknown url type: %s' % uri)
+
+ def has_crashed(self):
+ if self._server_process is None:
+ return False
+ if self._crashed_process_name:
+ return True
+ if self._server_process.has_crashed():
+ self._crashed_process_name = self._server_process.name()
+ self._crashed_pid = self._server_process.pid()
+ return True
+ return False
+
+ def start(self, pixel_tests, per_test_args, deadline):
+ new_cmd_line = self.cmd_line(pixel_tests, per_test_args)
+ if not self._server_process or new_cmd_line != self._current_cmd_line:
+ self._start(pixel_tests, per_test_args)
+ self._run_post_start_tasks()
+
+ def _setup_environ_for_driver(self, environment):
+ if self._profiler:
+ environment = self._profiler.adjusted_environment(environment)
+ return environment
+
+ def _start(self, pixel_tests, per_test_args, wait_for_ready=True):
+ self.stop()
+ self._driver_tempdir = self._port._filesystem.mkdtemp(prefix='%s-' % self._port.driver_name())
+ server_name = self._port.driver_name()
+ environment = self._port.setup_environ_for_server(server_name)
+ environment = self._setup_environ_for_driver(environment)
+ self._crashed_process_name = None
+ self._crashed_pid = None
+ self._leaked = False
+ self._leak_log = None
+ cmd_line = self.cmd_line(pixel_tests, per_test_args)
+ self._server_process = self._port._server_process_constructor(self._port, server_name, cmd_line, environment, logging=self._port.get_option("driver_logging"))
+ self._server_process.start()
+ self._current_cmd_line = cmd_line
+
+ if wait_for_ready:
+ deadline = time.time() + DRIVER_START_TIMEOUT_SECS
+ if not self._wait_for_server_process_output(self._server_process, deadline, '#READY'):
+ _log.error("content_shell took too long to startup.")
+
+ def _wait_for_server_process_output(self, server_process, deadline, text):
+ output = ''
+ line = server_process.read_stdout_line(deadline)
+ while not server_process.timed_out and not server_process.has_crashed() and text not in line.rstrip():
+ output += line
+ line = server_process.read_stdout_line(deadline)
+
+ if server_process.timed_out or server_process.has_crashed():
+ _log.error('Failed to start the %s process: \n%s' % (server_process.name(), output))
+ return False
+
+ return True
+
+ def _run_post_start_tasks(self):
+ # Remote drivers may override this to delay post-start tasks until the server has ack'd.
+ if self._profiler:
+ self._profiler.attach_to_pid(self._pid_on_target())
+
+ def _pid_on_target(self):
+ # Remote drivers will override this method to return the pid on the device.
+ return self._server_process.pid()
+
+ def stop(self, timeout_secs=0.0):
+ if self._server_process:
+ self._server_process.stop(timeout_secs)
+ self._server_process = None
+ if self._profiler:
+ self._profiler.profile_after_exit()
+
+ if self._driver_tempdir:
+ self._port._filesystem.rmtree(str(self._driver_tempdir))
+ self._driver_tempdir = None
+
+ self._current_cmd_line = None
+
+ def cmd_line(self, pixel_tests, per_test_args):
+ cmd = self._command_wrapper(self._port.get_option('wrapper'))
+ cmd.append(self._port._path_to_driver())
+ if self._no_timeout:
+ cmd.append('--no-timeout')
+ cmd.extend(self._port.get_option('additional_drt_flag', []))
+ cmd.extend(self._port.additional_drt_flag())
+ if self._port.get_option('enable_leak_detection'):
+ cmd.append('--enable-leak-detection')
+ cmd.extend(per_test_args)
+ cmd.append('-')
+ return cmd
+
+ def _check_for_driver_crash(self, error_line):
+ if error_line == "#CRASHED\n":
+ # This is used on Windows to report that the process has crashed.
+ # See http://trac.webkit.org/changeset/65537.
+ self._crashed_process_name = self._server_process.name()
+ self._crashed_pid = self._server_process.pid()
+ elif (error_line.startswith("#CRASHED - ")
+ or error_line.startswith("#PROCESS UNRESPONSIVE - ")):
+ # WebKitTestRunner uses this to report that the WebProcess subprocess crashed.
+ match = re.match(r'#(?:CRASHED|PROCESS UNRESPONSIVE) - (\S+)', error_line)
+ self._crashed_process_name = match.group(1) if match else 'WebProcess'
+ match = re.search(r'pid (\d+)', error_line)
+ pid = int(match.group(1)) if match else None
+ self._crashed_pid = pid
+ # FIXME: delete this after we're sure this code is working :)
+ _log.debug('%s crash, pid = %s, error_line = %s' % (self._crashed_process_name, str(pid), error_line))
+ if error_line.startswith("#PROCESS UNRESPONSIVE - "):
+ self._subprocess_was_unresponsive = True
+ self._port.sample_process(self._crashed_process_name, self._crashed_pid)
+ # We want to show this since it's not a regular crash and we probably don't have a crash log.
+ self.error_from_test += error_line
+ return True
+ return self.has_crashed()
+
+ def _check_for_leak(self, error_line):
+ if error_line.startswith("#LEAK - "):
+ self._leaked = True
+ match = re.match(r'#LEAK - (\S+) pid (\d+) (.+)\n', error_line)
+ self._leak_log = match.group(3)
+ return self._leaked
+
+ def _command_from_driver_input(self, driver_input):
+ # FIXME: performance tests pass in full URLs instead of test names.
+ if driver_input.test_name.startswith('http://') or driver_input.test_name.startswith('https://') or driver_input.test_name == 'about:blank':
+ command = driver_input.test_name
+ elif self.is_http_test(driver_input.test_name):
+ command = self.test_to_uri(driver_input.test_name)
+ else:
+ command = self._port.abspath_for_test(driver_input.test_name)
+ if sys.platform == 'cygwin':
+ command = path.cygpath(command)
+
+ assert not driver_input.image_hash or driver_input.should_run_pixel_test
+
+ # ' is the separator between arguments.
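+ # For example, a pixel test with a hash and a 6000ms per-test timeout would
+ # be sent as (a sketch; the timeout part is only included when supported):
+ # /absolute/path/to/test.html'--timeout'6000'--pixel-test'<image_hash>\n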
+ if self._port.supports_per_test_timeout():
+ command += "'--timeout'%s" % driver_input.timeout
+ if driver_input.should_run_pixel_test:
+ command += "'--pixel-test"
+ if driver_input.image_hash:
+ command += "'" + driver_input.image_hash
+ return command + "\n"
+
+ def _read_first_block(self, deadline):
+ # returns (text_content, audio_content)
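+ # A text block on stdout typically looks like (a sketch of the protocol;
+ # see _process_stdout_line and driver_unittest.py):
+ # Content-Type: text/plain
+ # <test output...>
+ # #EOF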
+ block = self._read_block(deadline)
+ if block.malloc:
+ self._measurements['Malloc'] = float(block.malloc)
+ if block.js_heap:
+ self._measurements['JSHeap'] = float(block.js_heap)
+ if block.content_type == 'audio/wav':
+ return (None, block.decoded_content)
+ return (block.decoded_content, None)
+
+ def _read_optional_image_block(self, deadline):
+ # returns (image, actual_image_hash)
+ block = self._read_block(deadline, wait_for_stderr_eof=True)
+ if block.content and block.content_type == 'image/png':
+ return (block.decoded_content, block.content_hash)
+ return (None, block.content_hash)
+
+ def _read_header(self, block, line, header_text, header_attr, header_filter=None):
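+ # e.g. _read_header(block, 'Content-Length: 9\n', 'Content-Length: ', '_content_length', int)
+ # sets block._content_length to 9 and returns True (a sketch of a call made by _process_stdout_line).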
+ if line.startswith(header_text) and getattr(block, header_attr) is None:
+ value = line.split()[1]
+ if header_filter:
+ value = header_filter(value)
+ setattr(block, header_attr, value)
+ return True
+ return False
+
+ def _process_stdout_line(self, block, line):
+ if (self._read_header(block, line, 'Content-Type: ', 'content_type')
+ or self._read_header(block, line, 'Content-Transfer-Encoding: ', 'encoding')
+ or self._read_header(block, line, 'Content-Length: ', '_content_length', int)
+ or self._read_header(block, line, 'ActualHash: ', 'content_hash')
+ or self._read_header(block, line, 'DumpMalloc: ', 'malloc')
+ or self._read_header(block, line, 'DumpJSHeap: ', 'js_heap')
+ or self._read_header(block, line, 'StdinPath', 'stdin_path')):
+ return
+ # Note, we're not reading ExpectedHash: here, but we could.
+ # If the line wasn't a header, we just append it to the content.
+ block.content += line
+
+ def _strip_eof(self, line):
+ if line and line.endswith("#EOF\n"):
+ return line[:-5], True
+ if line and line.endswith("#EOF\r\n"):
+ _log.error("Got a CRLF-terminated #EOF - this is a driver bug.")
+ return line[:-6], True
+ return line, False
+
+ def _read_block(self, deadline, wait_for_stderr_eof=False):
+ block = ContentBlock()
+ out_seen_eof = False
+
+ while not self.has_crashed():
+ if out_seen_eof and (self.err_seen_eof or not wait_for_stderr_eof):
+ break
+
+ if self.err_seen_eof:
+ out_line = self._server_process.read_stdout_line(deadline)
+ err_line = None
+ elif out_seen_eof:
+ out_line = None
+ err_line = self._server_process.read_stderr_line(deadline)
+ else:
+ out_line, err_line = self._server_process.read_either_stdout_or_stderr_line(deadline)
+
+ if self._server_process.timed_out or self.has_crashed():
+ break
+
+ if out_line:
+ assert not out_seen_eof
+ out_line, out_seen_eof = self._strip_eof(out_line)
+ if err_line:
+ assert not self.err_seen_eof
+ err_line, self.err_seen_eof = self._strip_eof(err_line)
+
+ if out_line:
+ if out_line[-1] != "\n":
+ _log.error("Last character read from DRT stdout line was not a newline! This indicates either a NRWT or DRT bug.")
+ content_length_before_header_check = block._content_length
+ self._process_stdout_line(block, out_line)
+ # FIXME: Unlike HTTP, DRT dumps the content right after printing a Content-Length header.
+ # Don't wait until we're done with headers, just read the binary blob right now.
+ if content_length_before_header_check != block._content_length:
+ if block._content_length > 0:
+ block.content = self._server_process.read_stdout(deadline, block._content_length)
+ else:
+ _log.error("Received content of type %s with Content-Length of 0! This indicates a bug in %s.",
+ block.content_type, self._server_process.name())
+
+ if err_line:
+ if self._check_for_driver_crash(err_line):
+ break
+ if self._check_for_leak(err_line):
+ break
+ self.error_from_test += err_line
+
+ block.decode_content()
+ return block
+
+
+class ContentBlock(object):
+ def __init__(self):
+ self.content_type = None
+ self.encoding = None
+ self.content_hash = None
+ self._content_length = None
+ # Content is treated as binary data even though the text output is usually UTF-8.
+ self.content = str() # FIXME: Should be bytearray() once we require Python 2.6.
+ self.decoded_content = None
+ self.malloc = None
+ self.js_heap = None
+ self.stdin_path = None
+
+ def decode_content(self):
+ if self.encoding == 'base64' and self.content is not None:
+ self.decoded_content = base64.b64decode(self.content)
+ else:
+ self.decoded_content = self.content
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
new file mode 100644
index 0000000..a5f7d79
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
@@ -0,0 +1,246 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+from webkitpy.layout_tests.port import Port, Driver, DriverOutput
+from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
+
+# FIXME: remove the dependency on TestWebKitPort
+from webkitpy.layout_tests.port.port_testcase import TestWebKitPort
+
+from webkitpy.tool.mocktool import MockOptions
+
+
+class DriverTest(unittest.TestCase):
+ def make_port(self):
+ port = Port(MockSystemHost(), 'test', MockOptions(configuration='Release'))
+ port._config.build_directory = lambda configuration: '/mock-checkout/out/' + configuration
+ return port
+
+ def _assert_wrapper(self, wrapper_string, expected_wrapper):
+ wrapper = Driver(self.make_port(), None, pixel_tests=False)._command_wrapper(wrapper_string)
+ self.assertEqual(wrapper, expected_wrapper)
+
+ def test_command_wrapper(self):
+ self._assert_wrapper(None, [])
+ self._assert_wrapper("valgrind", ["valgrind"])
+
+ # Validate that shlex works as expected.
+ command_with_spaces = "valgrind --smc-check=\"check with spaces!\" --foo"
+ expected_parse = ["valgrind", "--smc-check=check with spaces!", "--foo"]
+ self._assert_wrapper(command_with_spaces, expected_parse)
+
+ def test_test_to_uri(self):
+ port = self.make_port()
+ driver = Driver(port, None, pixel_tests=False)
+ self.assertEqual(driver.test_to_uri('foo/bar.html'), 'file://%s/foo/bar.html' % port.layout_tests_dir())
+ self.assertEqual(driver.test_to_uri('http/tests/foo.html'), 'http://127.0.0.1:8000/foo.html')
+ self.assertEqual(driver.test_to_uri('http/tests/https/bar.html'), 'https://127.0.0.1:8443/https/bar.html')
+ self.assertEqual(driver.test_to_uri('http/tests/bar.https.html'), 'https://127.0.0.1:8443/bar.https.html')
+ self.assertEqual(driver.test_to_uri('http/tests/barhttps.html'), 'http://127.0.0.1:8000/barhttps.html')
+
+ def test_uri_to_test(self):
+ port = self.make_port()
+ driver = Driver(port, None, pixel_tests=False)
+ self.assertEqual(driver.uri_to_test('file://%s/foo/bar.html' % port.layout_tests_dir()), 'foo/bar.html')
+ self.assertEqual(driver.uri_to_test('http://127.0.0.1:8000/foo.html'), 'http/tests/foo.html')
+ self.assertEqual(driver.uri_to_test('https://127.0.0.1:8443/https/bar.html'), 'http/tests/https/bar.html')
+ self.assertEqual(driver.uri_to_test('https://127.0.0.1:8443/bar.https.html'), 'http/tests/bar.https.html')
+
+ def test_read_block(self):
+ port = TestWebKitPort()
+ driver = Driver(port, 0, pixel_tests=False)
+ driver._server_process = MockServerProcess(lines=[
+ 'ActualHash: foobar',
+ 'Content-Type: my_type',
+ 'Content-Transfer-Encoding: none',
+ "#EOF",
+ ])
+ content_block = driver._read_block(0)
+ self.assertEqual(content_block.content, '')
+ self.assertEqual(content_block.content_type, 'my_type')
+ self.assertEqual(content_block.encoding, 'none')
+ self.assertEqual(content_block.content_hash, 'foobar')
+ driver._server_process = None
+
+ def test_read_binary_block(self):
+ port = TestWebKitPort()
+ driver = Driver(port, 0, pixel_tests=True)
+ driver._server_process = MockServerProcess(lines=[
+ 'ActualHash: actual',
+ 'ExpectedHash: expected',
+ 'Content-Type: image/png',
+ 'Content-Length: 9',
+ "12345678",
+ "#EOF",
+ ])
+ content_block = driver._read_block(0)
+ self.assertEqual(content_block.content_type, 'image/png')
+ self.assertEqual(content_block.content_hash, 'actual')
+ self.assertEqual(content_block.content, '12345678\n')
+ self.assertEqual(content_block.decoded_content, '12345678\n')
+ driver._server_process = None
+
+ def test_read_base64_block(self):
+ port = TestWebKitPort()
+ driver = Driver(port, 0, pixel_tests=True)
+ driver._server_process = MockServerProcess(lines=[
+ 'ActualHash: actual',
+ 'ExpectedHash: expected',
+ 'Content-Type: image/png',
+ 'Content-Transfer-Encoding: base64',
+ 'Content-Length: 12',
+ 'MTIzNDU2NzgK#EOF',
+ ])
+ content_block = driver._read_block(0)
+ self.assertEqual(content_block.content_type, 'image/png')
+ self.assertEqual(content_block.content_hash, 'actual')
+ self.assertEqual(content_block.encoding, 'base64')
+ self.assertEqual(content_block.content, 'MTIzNDU2NzgK')
+ self.assertEqual(content_block.decoded_content, '12345678\n')
+
+ def test_no_timeout(self):
+ port = TestWebKitPort()
+ port._config.build_directory = lambda configuration: '/mock-checkout/out/' + configuration
+ driver = Driver(port, 0, pixel_tests=True, no_timeout=True)
+ self.assertEqual(driver.cmd_line(True, []), ['/mock-checkout/out/Release/content_shell', '--no-timeout', '--dump-render-tree', '-'])
+
+ def test_check_for_driver_crash(self):
+ port = TestWebKitPort()
+ driver = Driver(port, 0, pixel_tests=True)
+
+ class FakeServerProcess(object):
+ def __init__(self, crashed):
+ self.crashed = crashed
+
+ def pid(self):
+ return 1234
+
+ def name(self):
+ return 'FakeServerProcess'
+
+ def has_crashed(self):
+ return self.crashed
+
+ def stop(self, timeout=0.0):
+ pass
+
+ def assert_crash(driver, error_line, crashed, name, pid, unresponsive=False, leaked=False):
+ self.assertEqual(driver._check_for_driver_crash(error_line), crashed)
+ self.assertEqual(driver._crashed_process_name, name)
+ self.assertEqual(driver._crashed_pid, pid)
+ self.assertEqual(driver._subprocess_was_unresponsive, unresponsive)
+ self.assertEqual(driver._check_for_leak(error_line), leaked)
+ driver.stop()
+
+ driver._server_process = FakeServerProcess(False)
+ assert_crash(driver, '', False, None, None)
+
+ driver._crashed_process_name = None
+ driver._crashed_pid = None
+ driver._server_process = FakeServerProcess(False)
+ driver._subprocess_was_unresponsive = False
+ driver._leaked = False
+ assert_crash(driver, '#CRASHED\n', True, 'FakeServerProcess', 1234)
+
+ driver._crashed_process_name = None
+ driver._crashed_pid = None
+ driver._server_process = FakeServerProcess(False)
+ driver._subprocess_was_unresponsive = False
+ driver._leaked = False
+ assert_crash(driver, '#CRASHED - WebProcess\n', True, 'WebProcess', None)
+
+ driver._crashed_process_name = None
+ driver._crashed_pid = None
+ driver._server_process = FakeServerProcess(False)
+ driver._subprocess_was_unresponsive = False
+ driver._leaked = False
+ assert_crash(driver, '#CRASHED - WebProcess (pid 8675)\n', True, 'WebProcess', 8675)
+
+ driver._crashed_process_name = None
+ driver._crashed_pid = None
+ driver._server_process = FakeServerProcess(False)
+ driver._subprocess_was_unresponsive = False
+ driver._leaked = False
+ assert_crash(driver, '#PROCESS UNRESPONSIVE - WebProcess (pid 8675)\n', True, 'WebProcess', 8675, True)
+
+ driver._crashed_process_name = None
+ driver._crashed_pid = None
+ driver._server_process = FakeServerProcess(False)
+ driver._subprocess_was_unresponsive = False
+ driver._leaked = False
+ assert_crash(driver, '#CRASHED - renderer (pid 8675)\n', True, 'renderer', 8675)
+
+ driver._crashed_process_name = None
+ driver._crashed_pid = None
+ driver._server_process = FakeServerProcess(False)
+ driver._subprocess_was_unresponsive = False
+ driver._leaked = False
+ assert_crash(driver, '#LEAK - renderer pid 8675 ({"numberOfLiveDocuments":[2,3]})\n', False, None, None, False, True)
+
+ driver._crashed_process_name = None
+ driver._crashed_pid = None
+ driver._server_process = FakeServerProcess(True)
+ driver._subprocess_was_unresponsive = False
+ driver._leaked = False
+ assert_crash(driver, '', True, 'FakeServerProcess', 1234)
+
+ def test_creating_a_port_does_not_write_to_the_filesystem(self):
+ port = TestWebKitPort()
+ driver = Driver(port, 0, pixel_tests=True)
+ self.assertEqual(port._filesystem.written_files, {})
+ self.assertEqual(port._filesystem.last_tmpdir, None)
+
+ def test_stop_cleans_up_properly(self):
+ port = TestWebKitPort()
+ port._server_process_constructor = MockServerProcess
+ driver = Driver(port, 0, pixel_tests=True)
+ driver.start(True, [], None)
+ last_tmpdir = port._filesystem.last_tmpdir
+ self.assertNotEquals(last_tmpdir, None)
+ driver.stop()
+ self.assertFalse(port._filesystem.isdir(last_tmpdir))
+
+ def test_two_starts_cleans_up_properly(self):
+ port = TestWebKitPort()
+ port._server_process_constructor = MockServerProcess
+ driver = Driver(port, 0, pixel_tests=True)
+ driver.start(True, [], None)
+ last_tmpdir = port._filesystem.last_tmpdir
+ driver._start(True, [])
+ self.assertFalse(port._filesystem.isdir(last_tmpdir))
+
+ def test_start_actually_starts(self):
+ port = TestWebKitPort()
+ port._server_process_constructor = MockServerProcess
+ driver = Driver(port, 0, pixel_tests=True)
+ driver.start(True, [], None)
+ self.assertTrue(driver._server_process.started)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/factory.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/factory.py
new file mode 100644
index 0000000..70769d4
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/factory.py
@@ -0,0 +1,139 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Factory method to retrieve the appropriate port implementation."""
+
+import fnmatch
+import optparse
+import re
+
+from webkitpy.layout_tests.port import builders
+
+
+def platform_options(use_globs=False):
+ return [
+ optparse.make_option('--android', action='store_const', dest='platform',
+ const=('android*' if use_globs else 'android'),
+ help=('Alias for --platform=android*' if use_globs else 'Alias for --platform=android')),
+
+ # FIXME: Update run_webkit_tests.sh, any other callers to no longer pass --chromium, then remove this flag.
+ optparse.make_option('--chromium', action='store_const', dest='platform',
+ const=('chromium*' if use_globs else 'chromium'),
+ help=('Alias for --platform=chromium*' if use_globs else 'Alias for --platform=chromium')),
+
+ optparse.make_option('--platform', action='store',
+ help=('Glob-style list of platform/ports to use (e.g., "mac*")' if use_globs else 'Platform to use (e.g., "mac-lion")')),
+ ]
+
+
+def configuration_options():
+ return [
+ optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
+ help='Set the configuration to Debug'),
+ optparse.make_option("-t", "--target", dest="configuration",
+ help="specify the target configuration to use (Debug/Release)"),
+ optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
+ help='Set the configuration to Release'),
+ ]
+
+
+def _builder_options(builder_name):
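+ # Guess the configuration from the builder name, e.g. (a sketch):
+ # 'WebKit Linux (dbg)' -> Debug
+ # 'WebKit Mac10.7' -> Release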
+ configuration = "Debug" if re.search(r"[d|D](ebu|b)g", builder_name) else "Release"
+ is_webkit2 = builder_name.find("WK2") != -1
+ builder_name = builder_name
+ return optparse.Values({'builder_name': builder_name, 'configuration': configuration})
+
+
+class PortFactory(object):
+ PORT_CLASSES = (
+ 'android.AndroidPort',
+ 'linux.LinuxPort',
+ 'mac.MacPort',
+ 'win.WinPort',
+ 'mock_drt.MockDRTPort',
+ 'test.TestPort',
+ )
+
+ def __init__(self, host):
+ self._host = host
+
+ def _default_port(self, options):
+ platform = self._host.platform
+ if platform.is_linux() or platform.is_freebsd():
+ return 'linux'
+ elif platform.is_mac():
+ return 'mac'
+ elif platform.is_win():
+ return 'win'
+ raise NotImplementedError('unknown platform: %s' % platform)
+
+ def get(self, port_name=None, options=None, **kwargs):
+ """Returns an object implementing the Port interface. If
+ port_name is None, this routine attempts to guess at the most
+ appropriate port on this platform."""
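+ # A minimal usage sketch (assuming a Linux host with a 64-bit build):
+ # PortFactory(host).get() -> LinuxPort named 'linux-x86_64'
+ # PortFactory(host).get('mac-lion') -> MacPort named 'mac-lion'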
+ port_name = port_name or self._default_port(options)
+
+ # FIXME(steveblock): There's no longer any need to pass '--platform
+ # chromium' on the command line so we can remove this logic.
+ if port_name == 'chromium':
+ port_name = self._host.platform.os_name
+
+ if 'browser_test' in port_name:
+ module_name, class_name = port_name.rsplit('.', 1)
+ module = __import__(module_name, globals(), locals(), [], -1)
+ port_class_name = module.get_port_class_name(class_name)
+ if port_class_name is not None:
+ cls = module.__dict__[port_class_name]
+ port_name = cls.determine_full_port_name(self._host, options, class_name)
+ return cls(self._host, port_name, options=options, **kwargs)
+ else:
+ for port_class in self.PORT_CLASSES:
+ module_name, class_name = port_class.rsplit('.', 1)
+ module = __import__(module_name, globals(), locals(), [], -1)
+ cls = module.__dict__[class_name]
+ if port_name.startswith(cls.port_name):
+ port_name = cls.determine_full_port_name(self._host, options, port_name)
+ return cls(self._host, port_name, options=options, **kwargs)
+ raise NotImplementedError('unsupported platform: "%s"' % port_name)
+
+ def all_port_names(self, platform=None):
+ """Return a list of all valid, fully-specified, "real" port names.
+
+ This is the list of directories that are used as actual baseline_paths()
+ by real ports. This does not include any "fake" names like "test"
+ or "mock-mac", and it does not include any directories that are not.
+
+ If platform is not specified, we will glob-match all ports."""
+ platform = platform or '*'
+ return fnmatch.filter(builders.all_port_names(), platform)
+
+ def get_from_builder_name(self, builder_name):
+ port_name = builders.port_name_for_builder_name(builder_name)
+ assert port_name, "unrecognized builder name '%s'" % builder_name
+ return self.get(port_name, _builder_options(builder_name))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py
new file mode 100644
index 0000000..5217448
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py
@@ -0,0 +1,86 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.tool.mocktool import MockOptions
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+from webkitpy.layout_tests.port import android
+from webkitpy.layout_tests.port import linux
+from webkitpy.layout_tests.port import mac
+from webkitpy.layout_tests.port import win
+from webkitpy.layout_tests.port import factory
+from webkitpy.layout_tests.port import test
+
+
+class FactoryTest(unittest.TestCase):
+ """Test that the factory creates the proper port object for given combination of port_name, host.platform, and options."""
+ # FIXME: The ports themselves should expose what options they require,
+ # instead of passing generic "options".
+
+ def setUp(self):
+ self.webkit_options = MockOptions(pixel_tests=False)
+
+ def assert_port(self, port_name=None, os_name=None, os_version=None, options=None, cls=None):
+ host = MockSystemHost(os_name=os_name, os_version=os_version)
+ port = factory.PortFactory(host).get(port_name, options=options)
+ self.assertIsInstance(port, cls)
+
+ def test_mac(self):
+ self.assert_port(port_name='mac', os_name='mac', os_version='snowleopard',
+ cls=mac.MacPort)
+ self.assert_port(port_name='chromium', os_name='mac', os_version='lion',
+ cls=mac.MacPort)
+
+ def test_linux(self):
+ self.assert_port(port_name='linux', cls=linux.LinuxPort)
+ self.assert_port(port_name='chromium', os_name='linux', os_version='lucid',
+ cls=linux.LinuxPort)
+
+ def test_android(self):
+ self.assert_port(port_name='android', cls=android.AndroidPort)
+ # NOTE: We can't check for port_name=chromium here, as this will append the host's
+ # operating system, whereas host!=target for Android.
+
+ def test_win(self):
+ self.assert_port(port_name='win-xp', cls=win.WinPort)
+ self.assert_port(port_name='win', os_name='win', os_version='xp',
+ cls=win.WinPort)
+ self.assert_port(port_name='chromium', os_name='win', os_version='xp',
+ cls=win.WinPort)
+
+ def test_unknown_specified(self):
+ self.assertRaises(NotImplementedError, factory.PortFactory(MockSystemHost()).get, port_name='unknown')
+
+ def test_unknown_default(self):
+ self.assertRaises(NotImplementedError, factory.PortFactory(MockSystemHost(os_name='vms')).get)
+
+ def test_get_from_builder_name(self):
+ self.assertEqual(factory.PortFactory(MockSystemHost()).get_from_builder_name('WebKit Mac10.7').name(),
+ 'mac-lion')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/linux.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/linux.py
new file mode 100644
index 0000000..9d53319
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/linux.py
@@ -0,0 +1,171 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import re
+
+from webkitpy.common.webkit_finder import WebKitFinder
+from webkitpy.layout_tests.breakpad.dump_reader_multipart import DumpReaderLinux
+from webkitpy.layout_tests.models import test_run_results
+from webkitpy.layout_tests.port import base
+from webkitpy.layout_tests.port import win
+from webkitpy.layout_tests.port import config
+
+
+_log = logging.getLogger(__name__)
+
+
+class LinuxPort(base.Port):
+ port_name = 'linux'
+
+ SUPPORTED_VERSIONS = ('x86', 'x86_64')
+
+ FALLBACK_PATHS = { 'x86_64': [ 'linux' ] + win.WinPort.latest_platform_fallback_path() }
+ FALLBACK_PATHS['x86'] = ['linux-x86'] + FALLBACK_PATHS['x86_64']
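+ # e.g. the 'x86' fallback chain resolves to ['linux-x86', 'linux'] followed by
+ # the newest win fallback path (see win.WinPort.latest_platform_fallback_path()).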
+
+ DEFAULT_BUILD_DIRECTORIES = ('out',)
+
+ BUILD_REQUIREMENTS_URL = 'https://code.google.com/p/chromium/wiki/LinuxBuildInstructions'
+
+ @classmethod
+ def _determine_driver_path_statically(cls, host, options):
+ config_object = config.Config(host.executive, host.filesystem)
+ build_directory = getattr(options, 'build_directory', None)
+ finder = WebKitFinder(host.filesystem)
+ webkit_base = finder.webkit_base()
+ chromium_base = finder.chromium_base()
+ driver_name = getattr(options, 'driver_name', None)
+ if driver_name is None:
+ driver_name = cls.CONTENT_SHELL_NAME
+ if hasattr(options, 'configuration') and options.configuration:
+ configuration = options.configuration
+ else:
+ configuration = config_object.default_configuration()
+ return cls._static_build_path(host.filesystem, build_directory, chromium_base, configuration, [driver_name])
+
+ @staticmethod
+ def _determine_architecture(filesystem, executive, driver_path):
+ file_output = ''
+ if filesystem.isfile(driver_path):
+ # The --dereference flag tells file to follow symlinks
+ file_output = executive.run_command(['file', '--brief', '--dereference', driver_path], return_stderr=True)
+
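+ # Map the 'file' output to an architecture, e.g. (a sketch):
+ # 'ELF 32-bit LSB executable, Intel 80386, ...' -> 'x86'
+ # 'ELF 64-bit LSB executable, x86-64, ...' -> 'x86_64'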
+ if re.match(r'ELF 32-bit LSB\s+executable', file_output):
+ return 'x86'
+ if re.match(r'ELF 64-bit LSB\s+executable', file_output):
+ return 'x86_64'
+ if file_output:
+ _log.warning('Could not determine architecture from "file" output: %s' % file_output)
+
+ # We don't know what the architecture is; default to 'x86_64' because
+ # maybe we're rebaselining and the binary doesn't actually exist,
+ # or something else weird is going on. It's okay to do this because
+ # if we actually try to use the binary, check_build() should fail.
+ return 'x86_64'
+
+ @classmethod
+ def determine_full_port_name(cls, host, options, port_name):
+ if port_name.endswith('linux'):
+ return port_name + '-' + cls._determine_architecture(host.filesystem, host.executive, cls._determine_driver_path_statically(host, options))
+ return port_name
+
+ def __init__(self, host, port_name, **kwargs):
+ super(LinuxPort, self).__init__(host, port_name, **kwargs)
+ (base, arch) = port_name.rsplit('-', 1)
+ assert base == 'linux'
+ assert arch in self.SUPPORTED_VERSIONS
+ assert port_name in ('linux', 'linux-x86', 'linux-x86_64')
+ self._version = 'lucid' # We only support lucid right now.
+ self._architecture = arch
+ if not self.get_option('disable_breakpad'):
+ self._dump_reader = DumpReaderLinux(host, self._build_path())
+
+ def additional_drt_flag(self):
+ flags = super(LinuxPort, self).additional_drt_flag()
+ if not self.get_option('disable_breakpad'):
+ flags += ['--enable-crash-reporter', '--crash-dumps-dir=%s' % self._dump_reader.crash_dumps_directory()]
+ return flags
+
+ def default_baseline_search_path(self):
+ port_names = self.FALLBACK_PATHS[self._architecture]
+ return map(self._webkit_baseline_path, port_names)
+
+ def _modules_to_search_for_symbols(self):
+ return [self._build_path('libffmpegsumo.so')]
+
+ def check_build(self, needs_http, printer):
+ result = super(LinuxPort, self).check_build(needs_http, printer)
+
+ if result:
+ _log.error('For complete Linux build requirements, please see:')
+ _log.error('')
+ _log.error(' http://code.google.com/p/chromium/wiki/LinuxBuildInstructions')
+ return result
+
+ def look_for_new_crash_logs(self, crashed_processes, start_time):
+ if self.get_option('disable_breakpad'):
+ return None
+ return self._dump_reader.look_for_new_crash_logs(crashed_processes, start_time)
+
+ def clobber_old_port_specific_results(self):
+ if not self.get_option('disable_breakpad'):
+ self._dump_reader.clobber_old_results()
+
+ def operating_system(self):
+ return 'linux'
+
+ #
+ # PROTECTED METHODS
+ #
+
+ def _check_apache_install(self):
+ result = self._check_file_exists(self.path_to_apache(), "apache2")
+ result = self._check_file_exists(self.path_to_apache_config_file(), "apache2 config file") and result
+ if not result:
+ _log.error(' Please install using: "sudo apt-get install apache2 libapache2-mod-php5"')
+ _log.error('')
+ return result
+
+ def _wdiff_missing_message(self):
+ return 'wdiff is not installed; please install using "sudo apt-get install wdiff"'
+
+ def path_to_apache(self):
+ # The Apache binary path can vary depending on OS and distribution
+ # See http://wiki.apache.org/httpd/DistrosDefaultLayout
+ for path in ["/usr/sbin/httpd", "/usr/sbin/apache2"]:
+ if self._filesystem.exists(path):
+ return path
+ _log.error("Could not find apache. Not installed or unknown path.")
+ return None
+
+ def _path_to_driver(self, configuration=None):
+ binary_name = self.driver_name()
+ return self._build_path_with_configuration(configuration, binary_name)
+
+ def _path_to_helper(self):
+ return None
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/linux_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/linux_unittest.py
new file mode 100644
index 0000000..358425f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/linux_unittest.py
@@ -0,0 +1,108 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system import executive_mock
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.tool.mocktool import MockOptions
+
+from webkitpy.layout_tests.port import linux
+from webkitpy.layout_tests.port import port_testcase
+
+
+class LinuxPortTest(port_testcase.PortTestCase):
+ port_name = 'linux'
+ full_port_name = 'linux-x86'
+ port_maker = linux.LinuxPort
+
+ def assert_architecture(self, port_name=None, file_output=None, expected_architecture=None):
+ host = MockSystemHost()
+ host.filesystem.isfile = lambda x: 'content_shell' in x
+ if file_output:
+ host.executive = executive_mock.MockExecutive2(file_output)
+
+ port = self.make_port(host, port_name=port_name)
+ self.assertEqual(port.architecture(), expected_architecture)
+ if expected_architecture == 'x86':
+ self.assertTrue(port.baseline_path().endswith('linux-x86'))
+ self.assertTrue(port.baseline_search_path()[0].endswith('linux-x86'))
+ self.assertTrue(port.baseline_search_path()[1].endswith('linux'))
+ else:
+ self.assertTrue(port.baseline_path().endswith('linux'))
+ self.assertTrue(port.baseline_search_path()[0].endswith('linux'))
+
+ def test_architectures(self):
+ self.assert_architecture(port_name='linux-x86',
+ expected_architecture='x86')
+ self.assert_architecture(port_name='linux-x86_64',
+ expected_architecture='x86_64')
+ self.assert_architecture(file_output='ELF 32-bit LSB executable',
+ expected_architecture='x86')
+ self.assert_architecture(file_output='ELF 64-bit LSB executable',
+ expected_architecture='x86_64')
+
+ def test_check_illegal_port_names(self):
+ # FIXME: Check that, for now, these are illegal port names.
+ # Eventually we should be able to do the right thing here.
+ self.assertRaises(AssertionError, linux.LinuxPort, MockSystemHost(), port_name='x86-linux')
+
+ def test_determine_architecture_fails(self):
+ # Test that we default to 'x86_64' if the driver doesn't exist.
+ port = self.make_port()
+ self.assertEqual(port.architecture(), 'x86_64')
+
+ # Test that we default to 'x86_64' on an unknown architecture.
+ host = MockSystemHost()
+ host.filesystem.exists = lambda x: True
+ host.executive = executive_mock.MockExecutive2('win32')
+ port = self.make_port(host=host)
+ self.assertEqual(port.architecture(), 'x86_64')
+
+ # Test that we raise errors if something weird happens.
+ host.executive = executive_mock.MockExecutive2(exception=AssertionError)
+ self.assertRaises(AssertionError, linux.LinuxPort, host, '%s-foo' % self.port_name)
+
+ def test_operating_system(self):
+ self.assertEqual('linux', self.make_port().operating_system())
+
+ def test_build_path(self):
+ # Test that optional paths are used regardless of whether they exist.
+ options = MockOptions(configuration='Release', build_directory='/foo')
+ self.assert_build_path(options, ['/mock-checkout/out/Release'], '/foo/Release')
+
+ # Test that optional relative paths are returned unmodified.
+ options = MockOptions(configuration='Release', build_directory='foo')
+ self.assert_build_path(options, ['/mock-checkout/out/Release'], 'foo/Release')
+
+ def test_driver_name_option(self):
+ self.assertTrue(self.make_port()._path_to_driver().endswith('content_shell'))
+ self.assertTrue(self.make_port(options=MockOptions(driver_name='OtherDriver'))._path_to_driver().endswith('OtherDriver'))
+
+ def test_path_to_image_diff(self):
+ self.assertEqual(self.make_port()._path_to_image_diff(), '/mock-checkout/out/Release/image_diff')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/mac.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/mac.py
new file mode 100644
index 0000000..d637c95
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/mac.py
@@ -0,0 +1,118 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Chromium Mac implementation of the Port interface."""
+
+import logging
+import signal
+
+from webkitpy.layout_tests.port import base
+
+
+_log = logging.getLogger(__name__)
+
+
+class MacPort(base.Port):
+ SUPPORTED_VERSIONS = ('snowleopard', 'lion', 'retina', 'mountainlion', 'mavericks')
+ port_name = 'mac'
+
+ # FIXME: We treat Retina (High-DPI) devices as if they are running
+ # a different operating system version. This is lame and should be fixed.
+ # Note that the retina versions fallback to the non-retina versions and so no
+ # baselines are shared between retina versions; this keeps the fallback graph as a tree
+ # and maximizes the number of baselines we can share that way.
+ # We also currently only support Retina on 10.8; we need to either upgrade to 10.9 or support both.
+
+ FALLBACK_PATHS = {}
+ FALLBACK_PATHS['mavericks'] = ['mac']
+ FALLBACK_PATHS['mountainlion'] = ['mac-mountainlion'] + FALLBACK_PATHS['mavericks']
+ FALLBACK_PATHS['retina'] = ['mac-retina'] + FALLBACK_PATHS['mountainlion']
+ FALLBACK_PATHS['lion'] = ['mac-lion'] + FALLBACK_PATHS['mountainlion']
+ FALLBACK_PATHS['snowleopard'] = ['mac-snowleopard'] + FALLBACK_PATHS['lion']
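+ # e.g. the 'snowleopard' fallback chain resolves to
+ # ['mac-snowleopard', 'mac-lion', 'mac-mountainlion', 'mac'].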
+
+ DEFAULT_BUILD_DIRECTORIES = ('xcodebuild', 'out')
+
+ CONTENT_SHELL_NAME = 'Content Shell'
+
+ BUILD_REQUIREMENTS_URL = 'https://code.google.com/p/chromium/wiki/MacBuildInstructions'
+
+ @classmethod
+ def determine_full_port_name(cls, host, options, port_name):
+ if port_name.endswith('mac'):
+ if host.platform.os_version in ('future',):
+ version = 'mavericks'
+ else:
+ version = host.platform.os_version
+ if host.platform.is_highdpi():
+ version = 'retina'
+ return port_name + '-' + version
+ return port_name
+
+ def __init__(self, host, port_name, **kwargs):
+ super(MacPort, self).__init__(host, port_name, **kwargs)
+ self._version = port_name[port_name.index('mac-') + len('mac-'):]
+ assert self._version in self.SUPPORTED_VERSIONS
+
+ def _modules_to_search_for_symbols(self):
+ return [self._build_path('ffmpegsumo.so')]
+
+ def check_build(self, needs_http, printer):
+ result = super(MacPort, self).check_build(needs_http, printer)
+ if result:
+ _log.error('For complete Mac build requirements, please see:')
+ _log.error('')
+ _log.error(' http://code.google.com/p/chromium/wiki/MacBuildInstructions')
+
+ return result
+
+ def operating_system(self):
+ return 'mac'
+
+ #
+ # PROTECTED METHODS
+ #
+
+ def _wdiff_missing_message(self):
+ return 'wdiff is not installed; please install from MacPorts or elsewhere'
+
+ def path_to_apache(self):
+ return '/usr/sbin/httpd'
+
+ def path_to_apache_config_file(self):
+ return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', 'apache2-httpd.conf')
+
+ def _path_to_driver(self, configuration=None):
+ # FIXME: make |configuration| happy with case-sensitive file systems.
+ return self._build_path_with_configuration(configuration, self.driver_name() + '.app', 'Contents', 'MacOS', self.driver_name())
+
+ def _path_to_helper(self):
+ binary_name = 'layout_test_helper'
+ return self._build_path(binary_name)
+
+ def _path_to_wdiff(self):
+ return 'wdiff'
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
new file mode 100644
index 0000000..aa5653b
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.layout_tests.port import mac
+from webkitpy.layout_tests.port import port_testcase
+from webkitpy.tool.mocktool import MockOptions
+
+
+class MacPortTest(port_testcase.PortTestCase):
+ os_name = 'mac'
+ os_version = 'snowleopard'
+ port_name = 'mac'
+ full_port_name = 'mac-snowleopard'
+ port_maker = mac.MacPort
+
+ def assert_name(self, port_name, os_version_string, expected):
+ port = self.make_port(os_version=os_version_string, port_name=port_name)
+ self.assertEqual(expected, port.name())
+
+ def test_versions(self):
+ self.assertTrue(self.make_port().name() in ('mac-snowleopard', 'mac-lion', 'mac-mountainlion', 'mac-mavericks'))
+
+ self.assert_name(None, 'snowleopard', 'mac-snowleopard')
+ self.assert_name('mac', 'snowleopard', 'mac-snowleopard')
+ self.assert_name('mac-snowleopard', 'leopard', 'mac-snowleopard')
+ self.assert_name('mac-snowleopard', 'snowleopard', 'mac-snowleopard')
+
+ self.assert_name(None, 'lion', 'mac-lion')
+ self.assert_name(None, 'mountainlion', 'mac-mountainlion')
+ self.assert_name(None, 'mavericks', 'mac-mavericks')
+ self.assert_name(None, 'future', 'mac-mavericks')
+
+ self.assert_name('mac', 'lion', 'mac-lion')
+ self.assertRaises(AssertionError, self.assert_name, None, 'tiger', 'should-raise-assertion-so-this-value-does-not-matter')
+
+ def test_baseline_path(self):
+ port = self.make_port(port_name='mac-snowleopard')
+ self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac-snowleopard'))
+
+ port = self.make_port(port_name='mac-lion')
+ self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac-lion'))
+
+ port = self.make_port(port_name='mac-mountainlion')
+ self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac-mountainlion'))
+
+ port = self.make_port(port_name='mac-mavericks')
+ self.assertEqual(port.baseline_path(), port._webkit_baseline_path('mac'))
+
+ def test_operating_system(self):
+ self.assertEqual('mac', self.make_port().operating_system())
+
+ def test_build_path(self):
+ # Test that optional paths are used regardless of whether they exist.
+ options = MockOptions(configuration='Release', build_directory='/foo')
+ self.assert_build_path(options, ['/mock-checkout/out/Release'], '/foo/Release')
+
+ # Test that optional relative paths are returned unmodified.
+ options = MockOptions(configuration='Release', build_directory='foo')
+ self.assert_build_path(options, ['/mock-checkout/out/Release'], 'foo/Release')
+
+ # Test that we prefer the legacy dir over the new dir.
+ options = MockOptions(configuration='Release', build_directory=None)
+ self.assert_build_path(options, ['/mock-checkout/xcodebuild/Release', '/mock-checkout/out/Release'], '/mock-checkout/xcodebuild/Release')
+
+ def test_build_path_timestamps(self):
+ options = MockOptions(configuration='Release', build_directory=None)
+ port = self.make_port(options=options)
+ port.host.filesystem.maybe_make_directory('/mock-checkout/out/Release')
+ port.host.filesystem.maybe_make_directory('/mock-checkout/xcodebuild/Release')
+ # Check with 'out' being newer.
+ port.host.filesystem.mtime = lambda f: 5 if '/out/' in f else 4
+ self.assertEqual(port._build_path(), '/mock-checkout/out/Release')
+ # Check with 'xcodebuild' being newer.
+ port.host.filesystem.mtime = lambda f: 5 if '/xcodebuild/' in f else 4
+ self.assertEqual(port._build_path(), '/mock-checkout/xcodebuild/Release')
+
+ def test_driver_name_option(self):
+ self.assertTrue(self.make_port()._path_to_driver().endswith('Content Shell'))
+ self.assertTrue(self.make_port(options=MockOptions(driver_name='OtherDriver'))._path_to_driver().endswith('OtherDriver'))
+
+ def test_path_to_image_diff(self):
+ self.assertEqual(self.make_port()._path_to_image_diff(), '/mock-checkout/out/Release/image_diff')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py
new file mode 100644
index 0000000..d6c1bc0
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py
@@ -0,0 +1,286 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+This is an implementation of the Port interface that overrides other
+ports and changes the Driver binary to "MockDRT".
+
+The MockDRT objects emulate what a real DRT would do. In particular, they
+return the output a real DRT would return for a given test, assuming that
+test actually passes (except for reftests, which currently cause the
+MockDRT to crash).
+"""
+
+import base64
+import logging
+import optparse
+import os
+import sys
+import types
+
+# Since we execute this script directly as part of the unit tests, we need to ensure
+# that Tools/Scripts is in sys.path for the next imports to work correctly.
+script_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
+if script_dir not in sys.path:
+ sys.path.append(script_dir)
+
+from webkitpy.common import read_checksum_from_png
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
+from webkitpy.layout_tests.port.factory import PortFactory
+
+_log = logging.getLogger(__name__)
+
+
+class MockDRTPort(object):
+ port_name = 'mock'
+
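+ # MockDRTPort wraps a real port (e.g. 'mock-mac' wraps the mac port) and
+ # swaps that port's driver for this very file, run under the python
+ # interpreter, so tests exercise the driver plumbing without a real DRT.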
+ @classmethod
+ def determine_full_port_name(cls, host, options, port_name):
+ return port_name
+
+ def __init__(self, host, port_name, **kwargs):
+ self.__delegate = PortFactory(host).get(port_name.replace('mock-', ''), **kwargs)
+ self.__delegate_driver_class = self.__delegate._driver_class
+ self.__delegate._driver_class = types.MethodType(self._driver_class, self.__delegate)
+
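+ # Anything not explicitly overridden below is transparently delegated to
+ # the wrapped real port object via __getattr__.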
+ def __getattr__(self, name):
+ return getattr(self.__delegate, name)
+
+ def check_build(self, needs_http, printer):
+ return True
+
+ def check_sys_deps(self, needs_http):
+ return True
+
+ def _driver_class(self, delegate):
+ return self._mocked_driver_maker
+
+ def _mocked_driver_maker(self, port, worker_number, pixel_tests, no_timeout=False):
+ path_to_this_file = self.host.filesystem.abspath(__file__.replace('.pyc', '.py'))
+ driver = self.__delegate_driver_class()(self, worker_number, pixel_tests, no_timeout)
+ driver.cmd_line = self._overriding_cmd_line(driver.cmd_line,
+ self.__delegate._path_to_driver(),
+ sys.executable,
+ path_to_this_file,
+ self.__delegate.name())
+ return driver
+
+ @staticmethod
+ def _overriding_cmd_line(original_cmd_line, driver_path, python_exe, this_file, port_name):
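+ # Sketch of the rewrite performed by the returned closure: the single
+ # driver-path element in the delegate's command line, e.g.
+ # [<path-to-driver>, <other args...>], is replaced in place by
+ # [sys.executable, <this file>, '--platform', <port name>, <other args...>].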
+ def new_cmd_line(pixel_tests, per_test_args):
+ cmd_line = original_cmd_line(pixel_tests, per_test_args)
+ index = cmd_line.index(driver_path)
+ cmd_line[index:index + 1] = [python_exe, this_file, '--platform', port_name]
+ return cmd_line
+
+ return new_cmd_line
+
+ def start_helper(self):
+ pass
+
+ def start_http_server(self, additional_dirs, number_of_servers):
+ pass
+
+ def start_websocket_server(self):
+ pass
+
+ def acquire_http_lock(self):
+ pass
+
+ def stop_helper(self):
+ pass
+
+ def stop_http_server(self):
+ pass
+
+ def stop_websocket_server(self):
+ pass
+
+ def release_http_lock(self):
+ pass
+
+ def _make_wdiff_available(self):
+ self.__delegate._wdiff_available = True
+
+ def setup_environ_for_server(self, server_name):
+ env = self.__delegate.setup_environ_for_server()
+ # We need to propagate PATH down so the python code can find the checkout.
+ env['PATH'] = os.environ['PATH']
+ return env
+
+ def lookup_virtual_test_args(self, test_name):
+ suite = self.__delegate.lookup_virtual_suite(test_name)
+ return suite.args + ['--virtual-test-suite-name', suite.name, '--virtual-test-suite-base', suite.base]
+
+def main(argv, host, stdin, stdout, stderr):
+ """Run the tests."""
+
+ options, args = parse_options(argv)
+ drt = MockDRT(options, args, host, stdin, stdout, stderr)
+ return drt.run()
+
+
+def parse_options(argv):
+ # We do custom arg parsing instead of using the optparse module
+ # because we don't want to have to list every command line flag DRT
+ # accepts, and optparse complains about unrecognized flags.
+
+ def get_arg(arg_name):
+ if arg_name in argv:
+ index = argv.index(arg_name)
+ return argv[index + 1]
+ return None
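+ # For example, get_arg('--platform') returns 'mock-mac' when argv contains
+ # ['--platform', 'mock-mac']; flags that are absent yield None.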
+
+ options = optparse.Values({
+ 'actual_directory': get_arg('--actual-directory'),
+ 'platform': get_arg('--platform'),
+ 'virtual_test_suite_base': get_arg('--virtual-test-suite-base'),
+ 'virtual_test_suite_name': get_arg('--virtual-test-suite-name'),
+ })
+ return (options, argv)
+
+
+class MockDRT(object):
+ def __init__(self, options, args, host, stdin, stdout, stderr):
+ self._options = options
+ self._args = args
+ self._host = host
+ self._stdout = stdout
+ self._stdin = stdin
+ self._stderr = stderr
+
+ port_name = None
+ if options.platform:
+ port_name = options.platform
+ self._port = PortFactory(host).get(port_name=port_name, options=options)
+ self._driver = self._port.create_driver(0)
+
+ def run(self):
+ while True:
+ line = self._stdin.readline()
+ if not line:
+ return 0
+ driver_input = self.input_from_line(line)
+ dirname, basename = self._port.split_test(driver_input.test_name)
+ is_reftest = (self._port.reference_files(driver_input.test_name) or
+ self._port.is_reference_html_file(self._port._filesystem, dirname, basename))
+ output = self.output_for_test(driver_input, is_reftest)
+ self.write_test_output(driver_input, output, is_reftest)
+
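+ # Each stdin line follows the DRT protocol: a test URI, optionally followed
+ # by '--pixel-test and then an expected checksum, with a single quote as
+ # the field separator. An illustrative pixel-test line:
+ # /tmp/LayoutTests/passes/text.html'--pixel-test'0123abcd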
+ def input_from_line(self, line):
+ vals = line.strip().split("'")
+ uri = vals[0]
+ checksum = None
+ should_run_pixel_tests = False
+ if len(vals) == 2 and vals[1] == '--pixel-test':
+ should_run_pixel_tests = True
+ elif len(vals) == 3 and vals[1] == '--pixel-test':
+ should_run_pixel_tests = True
+ checksum = vals[2]
+ elif len(vals) != 1:
+ raise NotImplementedError
+
+ if uri.startswith('http://') or uri.startswith('https://'):
+ test_name = self._driver.uri_to_test(uri)
+ else:
+ test_name = self._port.relative_test_filename(uri)
+
+ return DriverInput(test_name, 0, checksum, should_run_pixel_tests, args=[])
+
+ def output_for_test(self, test_input, is_reftest):
+ port = self._port
+ if self._options.virtual_test_suite_name:
+ test_input.test_name = test_input.test_name.replace(self._options.virtual_test_suite_base, self._options.virtual_test_suite_name)
+ actual_text = port.expected_text(test_input.test_name)
+ actual_audio = port.expected_audio(test_input.test_name)
+ actual_image = None
+ actual_checksum = None
+ if is_reftest:
+ # Make up some output for reftests.
+ actual_text = 'reference text\n'
+ actual_checksum = 'mock-checksum'
+ actual_image = 'blank'
+ if test_input.test_name.endswith('-mismatch.html'):
+ actual_text = 'not reference text\n'
+ actual_checksum = 'not-mock-checksum'
+ actual_image = 'not blank'
+ elif test_input.should_run_pixel_test and test_input.image_hash:
+ actual_checksum = port.expected_checksum(test_input.test_name)
+ actual_image = port.expected_image(test_input.test_name)
+
+ if self._options.actual_directory:
+ actual_path = port._filesystem.join(self._options.actual_directory, test_input.test_name)
+ root, _ = port._filesystem.splitext(actual_path)
+ text_path = root + '-actual.txt'
+ if port._filesystem.exists(text_path):
+ actual_text = port._filesystem.read_binary_file(text_path)
+ audio_path = root + '-actual.wav'
+ if port._filesystem.exists(audio_path):
+ actual_audio = port._filesystem.read_binary_file(audio_path)
+ image_path = root + '-actual.png'
+ if port._filesystem.exists(image_path):
+ actual_image = port._filesystem.read_binary_file(image_path)
+ with port._filesystem.open_binary_file_for_reading(image_path) as filehandle:
+ actual_checksum = read_checksum_from_png.read_checksum(filehandle)
+
+ return DriverOutput(actual_text, actual_image, actual_checksum, actual_audio)
+
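+ # The stdout protocol mirrors a real DRT: a Content-Type header and body,
+ # then '#EOF'; when pixel tests run, a hash block (and the PNG itself on a
+ # mismatch) follows, terminated by another '#EOF'. stderr gets its own
+ # '#EOF' marker so the caller knows both streams are drained.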
+ def write_test_output(self, test_input, output, is_reftest):
+ if output.audio:
+ self._stdout.write('Content-Type: audio/wav\n')
+ self._stdout.write('Content-Transfer-Encoding: base64\n')
+ self._stdout.write(base64.b64encode(output.audio))
+ self._stdout.write('\n')
+ else:
+ self._stdout.write('Content-Type: text/plain\n')
+ # FIXME: Note that we don't ensure there is a trailing newline!
+ # This mirrors actual (Mac) DRT behavior but is a bug.
+ if output.text:
+ self._stdout.write(output.text)
+
+ self._stdout.write('#EOF\n')
+
+ if test_input.should_run_pixel_test and output.image_hash:
+ self._stdout.write('\n')
+ self._stdout.write('ActualHash: %s\n' % output.image_hash)
+ self._stdout.write('ExpectedHash: %s\n' % test_input.image_hash)
+ if output.image_hash != test_input.image_hash:
+ self._stdout.write('Content-Type: image/png\n')
+ self._stdout.write('Content-Length: %s\n' % len(output.image))
+ self._stdout.write(output.image)
+ self._stdout.write('#EOF\n')
+ self._stdout.flush()
+ self._stderr.write('#EOF\n')
+ self._stderr.flush()
+
+
+if __name__ == '__main__':
+ # Note that the Mock in MockDRT refers to the fact that it is emulating a
+ # real DRT, and as such, it needs access to a real SystemHost, not a MockSystemHost.
+ sys.exit(main(sys.argv[1:], SystemHost(), sys.stdin, sys.stdout, sys.stderr))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
new file mode 100644
index 0000000..72e37cf
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
@@ -0,0 +1,202 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for MockDRT."""
+
+import io
+import sys
+import unittest
+
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.layout_tests.port import mock_drt
+from webkitpy.layout_tests.port import port_testcase
+from webkitpy.layout_tests.port import test
+from webkitpy.layout_tests.port.factory import PortFactory
+from webkitpy.tool import mocktool
+
+
+mock_options = mocktool.MockOptions(configuration='Release')
+
+
+class MockDRTPortTest(port_testcase.PortTestCase):
+
+ def make_port(self, host=None, options=mock_options):
+ host = host or MockSystemHost()
+ test.add_unit_tests_to_mock_filesystem(host.filesystem)
+ return mock_drt.MockDRTPort(host, port_name='mock-mac', options=options)
+
+ def make_wdiff_available(self, port):
+ port._make_wdiff_available()
+
+ def test_port_name_in_constructor(self):
+ self.assertTrue(mock_drt.MockDRTPort(MockSystemHost(), port_name='mock-test'))
+
+ def test_check_sys_deps(self):
+ pass
+
+ def test_default_max_locked_shards(self):
+ pass
+
+ def test_diff_image(self):
+ pass
+
+ def test_diff_image_crashed(self):
+ pass
+
+ def test_uses_apache(self):
+ pass
+
+ def test_get_crash_log(self):
+ pass
+
+ def test_check_build(self):
+ pass
+
+ def test_virtual_test_suites(self):
+ pass
+
+
+class MockDRTTest(unittest.TestCase):
+ def input_line(self, port, test_name, pixel_tests, checksum=None):
+ url = port.create_driver(0).test_to_uri(test_name)
+ if url.startswith('file://'):
+ url = url[len('file://'):]
+ if pixel_tests:
+ url += "'--pixel-test"
+ if checksum:
+ url += "'" + checksum
+ return url + '\n'
+
+ def make_drt(self, options, args, host, stdin, stdout, stderr):
+ return mock_drt.MockDRT(options, args, host, stdin, stdout, stderr)
+
+ def make_input_output(self, port, test_name, pixel_tests,
+ expected_checksum, drt_output, drt_input=None, expected_text=None):
+ if pixel_tests:
+ if not expected_checksum:
+ expected_checksum = port.expected_checksum(test_name)
+ if not drt_input:
+ drt_input = self.input_line(port, test_name, pixel_tests, expected_checksum)
+ text_output = expected_text or port.expected_text(test_name) or ''
+
+ if not drt_output:
+ drt_output = self.expected_output(port, test_name, pixel_tests,
+ text_output, expected_checksum)
+ return (drt_input, drt_output)
+
+ def expected_output(self, port, test_name, pixel_tests, text_output, expected_checksum):
+ output = ['Content-Type: text/plain\n']
+ if text_output:
+ output.append(text_output)
+ output.append('#EOF\n')
+ if pixel_tests and expected_checksum:
+ output.extend(['\n',
+ 'ActualHash: %s\n' % expected_checksum,
+ 'ExpectedHash: %s\n' % expected_checksum])
+ output.append('#EOF\n')
+ return output
+
+ def assertTest(self, test_name, pixel_tests, expected_checksum=None, drt_output=None, host=None, expected_text=None):
+ port_name = 'test'
+ host = host or MockSystemHost()
+ test.add_unit_tests_to_mock_filesystem(host.filesystem)
+ port = PortFactory(host).get(port_name)
+ drt_input, drt_output = self.make_input_output(port, test_name,
+ pixel_tests, expected_checksum, drt_output, drt_input=None, expected_text=expected_text)
+
+ args = ['--dump-render-tree', '--platform', port_name, '-']
+ stdin = io.BytesIO(drt_input)
+ stdout = io.BytesIO()
+ stderr = io.BytesIO()
+ options, args = mock_drt.parse_options(args)
+
+ drt = self.make_drt(options, args, host, stdin, stdout, stderr)
+ res = drt.run()
+
+ self.assertEqual(res, 0)
+
+ self.assertEqual(stdout.getvalue(), ''.join(drt_output))
+ self.assertEqual(stderr.getvalue(), '#EOF\n')
+
+ def test_main(self):
+ host = MockSystemHost()
+ test.add_unit_tests_to_mock_filesystem(host.filesystem)
+ stdin = io.BytesIO()
+ stdout = io.BytesIO()
+ stderr = io.BytesIO()
+ res = mock_drt.main(['--dump-render-tree', '--platform', 'test', '-'],
+ host, stdin, stdout, stderr)
+ self.assertEqual(res, 0)
+ self.assertEqual(stdout.getvalue(), '')
+ self.assertEqual(stderr.getvalue(), '')
+ self.assertEqual(host.filesystem.written_files, {})
+
+ def test_pixeltest_passes(self):
+ # This also tests that we handle HTTP: test URLs properly.
+ self.assertTest('http/tests/passes/text.html', True)
+
+ def test_pixeltest__fails(self):
+ self.assertTest('failures/expected/image_checksum.html', pixel_tests=True,
+ expected_checksum='image_checksum-checksum',
+ drt_output=['Content-Type: text/plain\n',
+ 'image_checksum-txt',
+ '#EOF\n',
+ '\n',
+ 'ActualHash: image_checksum-checksum\n',
+ 'ExpectedHash: image_checksum-checksum\n',
+ '#EOF\n'])
+
+ def test_textonly(self):
+ self.assertTest('passes/image.html', False)
+
+ def test_checksum_in_png(self):
+ self.assertTest('passes/checksum_in_image.html', True)
+
+ def test_missing_image(self):
+ self.assertTest('failures/expected/missing_image.html', True)
+
+ def test_missing_text(self):
+ self.assertTest('failures/expected/missing_text.html', True)
+
+ def test_reftest_match(self):
+ self.assertTest('passes/reftest.html', True, expected_checksum='mock-checksum', expected_text='reference text\n')
+
+ def test_reftest_mismatch(self):
+ self.assertTest('passes/mismatch.html', True, expected_checksum='mock-checksum', expected_text='reference text\n')
+
+ def test_audio(self):
+ self.assertTest('passes/audio.html', pixel_tests=True,
+ drt_output=['Content-Type: audio/wav\n',
+ 'Content-Transfer-Encoding: base64\n',
+ 'YXVkaW8td2F2',
+ '\n',
+ '#EOF\n',
+ '#EOF\n'])
+
+ def test_virtual(self):
+ self.assertTest('virtual/passes/text.html', True)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
new file mode 100644
index 0000000..94d7b2c
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
@@ -0,0 +1,486 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit testing base class for Port implementations."""
+
+import collections
+import errno
+import logging
+import os
+import socket
+import sys
+import time
+import unittest
+
+from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.layout_tests.models import test_run_results
+from webkitpy.layout_tests.port.base import Port, TestConfiguration
+from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
+from webkitpy.tool.mocktool import MockOptions
+
+
+# FIXME: get rid of this fixture
+class TestWebKitPort(Port):
+ port_name = "testwebkitport"
+
+ def __init__(self, port_name=None, symbols_string=None,
+ expectations_file=None, skips_file=None, host=None, config=None,
+ **kwargs):
+ port_name = port_name or TestWebKitPort.port_name
+ self.symbols_string = symbols_string # Passing "" disables all statically-detectable features.
+ host = host or MockSystemHost()
+ super(TestWebKitPort, self).__init__(host, port_name=port_name, **kwargs)
+
+ def all_test_configurations(self):
+ return [self.test_configuration()]
+
+ def _symbols_string(self):
+ return self.symbols_string
+
+ def _tests_for_disabled_features(self):
+ return ["accessibility", ]
+
+
+class FakePrinter(object):
+ def write_update(self, msg):
+ pass
+
+ def write_throttled_update(self, msg):
+ pass
+
+
+class PortTestCase(unittest.TestCase):
+ """Tests that all Port implementations must pass."""
+ HTTP_PORTS = (8000, 8080, 8443)
+ WEBSOCKET_PORTS = (8880,)
+
+ # Subclasses override this to point to their Port subclass.
+ os_name = None
+ os_version = None
+ port_maker = TestWebKitPort
+ port_name = None
+ full_port_name = None
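+ # An illustrative subclass (the names here are hypothetical):
+ #
+ # class MacPortTest(PortTestCase):
+ # os_name = 'mac'
+ # os_version = 'mavericks'
+ # port_maker = mac.MacPort
+ # port_name = 'mac'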
+
+ def make_port(self, host=None, port_name=None, options=None, os_name=None, os_version=None, **kwargs):
+ host = host or MockSystemHost(os_name=(os_name or self.os_name), os_version=(os_version or self.os_version))
+ options = options or MockOptions(configuration='Release')
+ port_name = port_name or self.port_name
+ port_name = self.port_maker.determine_full_port_name(host, options, port_name)
+ port = self.port_maker(host, port_name, options=options, **kwargs)
+ port._config.build_directory = lambda configuration: '/mock-build'
+ return port
+
+ def make_wdiff_available(self, port):
+ port._wdiff_available = True
+
+ def test_check_build(self):
+ port = self.make_port()
+ port._check_file_exists = lambda path, desc: True
+ if port._dump_reader:
+ port._dump_reader.check_is_functional = lambda: True
+ port._options.build = True
+ port._check_driver_build_up_to_date = lambda config: True
+ port.check_httpd = lambda: True
+ oc = OutputCapture()
+ try:
+ oc.capture_output()
+ self.assertEqual(port.check_build(needs_http=True, printer=FakePrinter()),
+ test_run_results.OK_EXIT_STATUS)
+ finally:
+ out, err, logs = oc.restore_output()
+ self.assertIn('pretty patches', logs) # We should get a warning about PrettyPatch being missing,
+ self.assertNotIn('build requirements', logs) # but not the driver itself.
+
+ port._check_file_exists = lambda path, desc: False
+ port._check_driver_build_up_to_date = lambda config: False
+ try:
+ oc.capture_output()
+ self.assertEqual(port.check_build(needs_http=True, printer=FakePrinter()),
+ test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
+ finally:
+ out, err, logs = oc.restore_output()
+ self.assertIn('pretty patches', logs) # And here we should get warnings about both.
+ self.assertIn('build requirements', logs)
+
+ def test_default_child_processes(self):
+ port = self.make_port()
+ num_workers = port.default_child_processes()
+ self.assertGreaterEqual(num_workers, 1)
+
+ # Test that we reduce the number of workers for sanitizer builds.
+ port._options.enable_sanitizer = True
+ port.host.executive.cpu_count = lambda: 8
+ num_sanitized_workers = port.default_child_processes()
+ self.assertLess(num_sanitized_workers, 8)
+
+ def test_default_max_locked_shards(self):
+ port = self.make_port()
+ port.default_child_processes = lambda: 16
+ self.assertEqual(port.default_max_locked_shards(), 4)
+ port.default_child_processes = lambda: 2
+ self.assertEqual(port.default_max_locked_shards(), 1)
+
+ def test_default_timeout_ms(self):
+ self.assertEqual(self.make_port(options=MockOptions(configuration='Release')).default_timeout_ms(), 6000)
+ self.assertEqual(self.make_port(options=MockOptions(configuration='Debug')).default_timeout_ms(), 18000)
+
+ def test_default_pixel_tests(self):
+ self.assertEqual(self.make_port().default_pixel_tests(), True)
+
+ def test_driver_cmd_line(self):
+ port = self.make_port()
+ self.assertTrue(len(port.driver_cmd_line()))
+
+ options = MockOptions(additional_drt_flag=['--foo=bar', '--foo=baz'])
+ port = self.make_port(options=options)
+ cmd_line = port.driver_cmd_line()
+ self.assertTrue('--foo=bar' in cmd_line)
+ self.assertTrue('--foo=baz' in cmd_line)
+
+ def assert_servers_are_down(self, host, ports):
+ for port in ports:
+ try:
+ test_socket = socket.socket()
+ test_socket.connect((host, port))
+ self.fail()
+ except IOError, e:
+ self.assertTrue(e.errno in (errno.ECONNREFUSED, errno.ECONNRESET))
+ finally:
+ test_socket.close()
+
+ def assert_servers_are_up(self, host, ports):
+ for port in ports:
+ try:
+ test_socket = socket.socket()
+ test_socket.connect((host, port))
+ except IOError, e:
+ self.fail('failed to connect to %s:%d' % (host, port))
+ finally:
+ test_socket.close()
+
+ def test_diff_image__missing_both(self):
+ port = self.make_port()
+ self.assertEqual(port.diff_image(None, None), (None, None))
+ self.assertEqual(port.diff_image(None, ''), (None, None))
+ self.assertEqual(port.diff_image('', None), (None, None))
+
+ self.assertEqual(port.diff_image('', ''), (None, None))
+
+ def test_diff_image__missing_actual(self):
+ port = self.make_port()
+ self.assertEqual(port.diff_image(None, 'foo'), ('foo', None))
+ self.assertEqual(port.diff_image('', 'foo'), ('foo', None))
+
+ def test_diff_image__missing_expected(self):
+ port = self.make_port()
+ self.assertEqual(port.diff_image('foo', None), ('foo', None))
+ self.assertEqual(port.diff_image('foo', ''), ('foo', None))
+
+ def test_diff_image(self):
+ def _path_to_image_diff():
+ return "/path/to/image_diff"
+
+ port = self.make_port()
+ port._path_to_image_diff = _path_to_image_diff
+
+ mock_image_diff = "MOCK Image Diff"
+
+ def mock_run_command(args):
+ port._filesystem.write_binary_file(args[4], mock_image_diff)
+ return 1
+
+ # Images are different.
+ port._executive = MockExecutive2(run_command_fn=mock_run_command)
+ self.assertEqual(mock_image_diff, port.diff_image("EXPECTED", "ACTUAL")[0])
+
+ # Images are the same.
+ port._executive = MockExecutive2(exit_code=0)
+ self.assertEqual(None, port.diff_image("EXPECTED", "ACTUAL")[0])
+
+ # There was some error running image_diff.
+ port._executive = MockExecutive2(exit_code=2)
+ exception_raised = False
+ try:
+ port.diff_image("EXPECTED", "ACTUAL")
+ except ValueError, e:
+ exception_raised = True
+ self.assertFalse(exception_raised)
+
+ def test_diff_image_crashed(self):
+ port = self.make_port()
+ port._executive = MockExecutive2(exit_code=2)
+ self.assertEqual(port.diff_image("EXPECTED", "ACTUAL"), (None, 'Image diff returned an exit code of 2. See http://crbug.com/278596'))
+
+ def test_check_wdiff(self):
+ port = self.make_port()
+ port.check_wdiff()
+
+ def test_wdiff_text_fails(self):
+ host = MockSystemHost(os_name=self.os_name, os_version=self.os_version)
+ host.executive = MockExecutive(should_throw=True)
+ port = self.make_port(host=host)
+ port._executive = host.executive # AndroidPortTest.make_port sets its own executive, so reset that as well.
+
+ # This should raise a ScriptError that gets caught and turned into the
+ # error text, and also mark wdiff as not available.
+ self.make_wdiff_available(port)
+ self.assertTrue(port.wdiff_available())
+ diff_txt = port.wdiff_text("/tmp/foo.html", "/tmp/bar.html")
+ self.assertEqual(diff_txt, port._wdiff_error_html)
+ self.assertFalse(port.wdiff_available())
+
+ def test_missing_symbol_to_skipped_tests(self):
+ # Test that we get the chromium skips and not the webkit default skips
+ port = self.make_port()
+ skip_dict = port._missing_symbol_to_skipped_tests()
+ if port.PORT_HAS_AUDIO_CODECS_BUILT_IN:
+ self.assertEqual(skip_dict, {})
+ else:
+ self.assertTrue('ff_mp3_decoder' in skip_dict)
+ self.assertFalse('WebGLShader' in skip_dict)
+
+ def test_test_configuration(self):
+ port = self.make_port()
+ self.assertTrue(port.test_configuration())
+
+ def test_all_test_configurations(self):
+ """Validate the complete set of configurations this port knows about."""
+ port = self.make_port()
+ self.assertEqual(set(port.all_test_configurations()), set([
+ TestConfiguration('snowleopard', 'x86', 'debug'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ TestConfiguration('lion', 'x86', 'debug'),
+ TestConfiguration('lion', 'x86', 'release'),
+ TestConfiguration('retina', 'x86', 'debug'),
+ TestConfiguration('retina', 'x86', 'release'),
+ TestConfiguration('mountainlion', 'x86', 'debug'),
+ TestConfiguration('mountainlion', 'x86', 'release'),
+ TestConfiguration('mavericks', 'x86', 'debug'),
+ TestConfiguration('mavericks', 'x86', 'release'),
+ TestConfiguration('xp', 'x86', 'debug'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'debug'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86', 'debug'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'debug'),
+ TestConfiguration('lucid', 'x86_64', 'release'),
+ TestConfiguration('icecreamsandwich', 'x86', 'debug'),
+ TestConfiguration('icecreamsandwich', 'x86', 'release'),
+ ]))
+
+ def test_get_crash_log(self):
+ port = self.make_port()
+ self.assertEqual(port._get_crash_log(None, None, None, None, newer_than=None),
+ (None,
+ 'crash log for <unknown process name> (pid <unknown>):\n'
+ 'STDOUT: <empty>\n'
+ 'STDERR: <empty>\n'))
+
+ self.assertEqual(port._get_crash_log('foo', 1234, 'out bar\nout baz', 'err bar\nerr baz\n', newer_than=None),
+ ('err bar\nerr baz\n',
+ 'crash log for foo (pid 1234):\n'
+ 'STDOUT: out bar\n'
+ 'STDOUT: out baz\n'
+ 'STDERR: err bar\n'
+ 'STDERR: err baz\n'))
+
+ self.assertEqual(port._get_crash_log('foo', 1234, 'foo\xa6bar', 'foo\xa6bar', newer_than=None),
+ ('foo\xa6bar',
+ u'crash log for foo (pid 1234):\n'
+ u'STDOUT: foo\ufffdbar\n'
+ u'STDERR: foo\ufffdbar\n'))
+
+ self.assertEqual(port._get_crash_log('foo', 1234, 'foo\xa6bar', 'foo\xa6bar', newer_than=1.0),
+ ('foo\xa6bar',
+ u'crash log for foo (pid 1234):\n'
+ u'STDOUT: foo\ufffdbar\n'
+ u'STDERR: foo\ufffdbar\n'))
+
+ def assert_build_path(self, options, dirs, expected_path):
+ port = self.make_port(options=options)
+ for directory in dirs:
+ port.host.filesystem.maybe_make_directory(directory)
+ self.assertEqual(port._build_path(), expected_path)
+
+ def test_expectations_files(self):
+ port = self.make_port()
+
+ generic_path = port.path_to_generic_test_expectations_file()
+ never_fix_tests_path = port._filesystem.join(port.layout_tests_dir(), 'NeverFixTests')
+ stale_tests_path = port._filesystem.join(port.layout_tests_dir(), 'StaleTestExpectations')
+ slow_tests_path = port._filesystem.join(port.layout_tests_dir(), 'SlowTests')
+ flaky_tests_path = port._filesystem.join(port.layout_tests_dir(), 'FlakyTests')
+ skia_overrides_path = port.path_from_chromium_base(
+ 'skia', 'skia_test_expectations.txt')
+
+ port._filesystem.write_text_file(skia_overrides_path, 'dummy text')
+
+ port._options.builder_name = 'DUMMY_BUILDER_NAME'
+ self.assertEqual(port.expectations_files(),
+ [generic_path, skia_overrides_path,
+ never_fix_tests_path, stale_tests_path, slow_tests_path,
+ flaky_tests_path])
+
+ port._options.builder_name = 'builder (deps)'
+ self.assertEqual(port.expectations_files(),
+ [generic_path, skia_overrides_path,
+ never_fix_tests_path, stale_tests_path, slow_tests_path,
+ flaky_tests_path])
+
+ # A builder which does NOT observe the Chromium test_expectations,
+ # but still observes the Skia test_expectations...
+ port._options.builder_name = 'builder'
+ self.assertEqual(port.expectations_files(),
+ [generic_path, skia_overrides_path,
+ never_fix_tests_path, stale_tests_path, slow_tests_path,
+ flaky_tests_path])
+
+ def test_check_sys_deps(self):
+ port = self.make_port()
+ port._executive = MockExecutive2(exit_code=0)
+ self.assertEqual(port.check_sys_deps(needs_http=False), test_run_results.OK_EXIT_STATUS)
+ port._executive = MockExecutive2(exit_code=1, output='testing output failure')
+ self.assertEqual(port.check_sys_deps(needs_http=False), test_run_results.SYS_DEPS_EXIT_STATUS)
+
+ def test_expectations_ordering(self):
+ port = self.make_port()
+ for path in port.expectations_files():
+ port._filesystem.write_text_file(path, '')
+ ordered_dict = port.expectations_dict()
+ self.assertEqual(port.path_to_generic_test_expectations_file(), ordered_dict.keys()[0])
+
+ options = MockOptions(additional_expectations=['/tmp/foo', '/tmp/bar'])
+ port = self.make_port(options=options)
+ for path in port.expectations_files():
+ port._filesystem.write_text_file(path, '')
+ port._filesystem.write_text_file('/tmp/foo', 'foo')
+ port._filesystem.write_text_file('/tmp/bar', 'bar')
+ ordered_dict = port.expectations_dict()
+ self.assertEqual(ordered_dict.keys()[-2:], options.additional_expectations) # pylint: disable=E1101
+ self.assertEqual(ordered_dict.values()[-2:], ['foo', 'bar'])
+
+ def test_skipped_directories_for_symbols(self):
+ # This first test confirms that the commonly found symbols result in the expected skipped directories.
+ symbols_string = " ".join(["fooSymbol"])
+ expected_directories = set([
+ "webaudio/codec-tests/mp3",
+ "webaudio/codec-tests/aac",
+ ])
+
+ result_directories = set(TestWebKitPort(symbols_string=symbols_string)._skipped_tests_for_unsupported_features(test_list=['webaudio/codec-tests/mp3/foo.html']))
+ self.assertEqual(result_directories, expected_directories)
+
+ # Test that the nm string parsing actually works:
+ symbols_string = """
+000000000124f498 s __ZZN7WebCore13ff_mp3_decoder12replaceChildEPS0_S1_E19__PRETTY_FUNCTION__
+000000000124f500 s __ZZN7WebCore13ff_mp3_decoder13addChildAboveEPS0_S1_E19__PRETTY_FUNCTION__
+000000000124f670 s __ZZN7WebCore13ff_mp3_decoder13addChildBelowEPS0_S1_E19__PRETTY_FUNCTION__
+"""
+ # Note 'webaudio/codec-tests/mp3' is not in the list of skipped directories (hence the parsing of the ff_mp3_decoder symbol worked):
+ expected_directories = set([
+ "webaudio/codec-tests/aac",
+ ])
+ result_directories = set(TestWebKitPort(symbols_string=symbols_string)._skipped_tests_for_unsupported_features(test_list=['webaudio/codec-tests/mp3/foo.html']))
+ self.assertEqual(result_directories, expected_directories)
+
+ def _assert_config_file_for_platform(self, port, platform, config_file):
+ self.assertEqual(port._apache_config_file_name_for_platform(platform), config_file)
+
+ def test_linux_distro_detection(self):
+ port = TestWebKitPort()
+ self.assertFalse(port._is_redhat_based())
+ self.assertFalse(port._is_debian_based())
+
+ port._filesystem = MockFileSystem({'/etc/redhat-release': ''})
+ self.assertTrue(port._is_redhat_based())
+ self.assertFalse(port._is_debian_based())
+
+ port._filesystem = MockFileSystem({'/etc/debian_version': ''})
+ self.assertFalse(port._is_redhat_based())
+ self.assertTrue(port._is_debian_based())
+
+ def test_apache_config_file_name_for_platform(self):
+ port = TestWebKitPort()
+ self._assert_config_file_for_platform(port, 'cygwin', 'cygwin-httpd.conf')
+
+ self._assert_config_file_for_platform(port, 'linux2', 'apache2-httpd.conf')
+ self._assert_config_file_for_platform(port, 'linux3', 'apache2-httpd.conf')
+
+ port._is_redhat_based = lambda: True
+ port._apache_version = lambda: '2.2'
+ self._assert_config_file_for_platform(port, 'linux2', 'fedora-httpd-2.2.conf')
+
+ port = TestWebKitPort()
+ port._is_debian_based = lambda: True
+ port._apache_version = lambda: '2.2'
+ self._assert_config_file_for_platform(port, 'linux2', 'debian-httpd-2.2.conf')
+
+ self._assert_config_file_for_platform(port, 'mac', 'apache2-httpd.conf')
+ self._assert_config_file_for_platform(port, 'win32', 'apache2-httpd.conf') # win32 isn't a supported sys.platform. AppleWin/WinCairo/WinCE ports all use cygwin.
+ self._assert_config_file_for_platform(port, 'barf', 'apache2-httpd.conf')
+
+ def test_path_to_apache_config_file(self):
+ port = TestWebKitPort()
+
+ saved_environ = os.environ.copy()
+ try:
+ os.environ['WEBKIT_HTTP_SERVER_CONF_PATH'] = '/path/to/httpd.conf'
+ self.assertRaises(IOError, port.path_to_apache_config_file)
+ port._filesystem.write_text_file('/existing/httpd.conf', 'Hello, world!')
+ os.environ['WEBKIT_HTTP_SERVER_CONF_PATH'] = '/existing/httpd.conf'
+ self.assertEqual(port.path_to_apache_config_file(), '/existing/httpd.conf')
+ finally:
+ os.environ = saved_environ.copy()
+
+ # Mock out _apache_config_file_name_for_platform to ignore the passed sys.platform value.
+ port._apache_config_file_name_for_platform = lambda platform: 'httpd.conf'
+ self.assertEqual(port.path_to_apache_config_file(), '/mock-checkout/third_party/WebKit/LayoutTests/http/conf/httpd.conf')
+
+ # Check that even if we mock out _apache_config_file_name, the environment variable takes precedence.
+ saved_environ = os.environ.copy()
+ try:
+ os.environ['WEBKIT_HTTP_SERVER_CONF_PATH'] = '/existing/httpd.conf'
+ self.assertEqual(port.path_to_apache_config_file(), '/existing/httpd.conf')
+ finally:
+ os.environ = saved_environ.copy()
+
+ def test_additional_platform_directory(self):
+ port = self.make_port(options=MockOptions(additional_platform_directory=['/tmp/foo']))
+ self.assertEqual(port.baseline_search_path()[0], '/tmp/foo')
+
+ def test_virtual_test_suites(self):
+ # We test that we can load the real LayoutTests/VirtualTestSuites file properly, so we
+ # use a real SystemHost(). We don't care what virtual_test_suites() returns as long
+ # as it is iterable.
+ port = self.make_port(host=SystemHost(), port_name=self.full_port_name)
+ self.assertTrue(isinstance(port.virtual_test_suites(), collections.Iterable))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/server_process.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
new file mode 100644
index 0000000..f9ec1a7
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
@@ -0,0 +1,414 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Package that implements the ServerProcess wrapper class"""
+
+import errno
+import logging
+import re
+import signal
+import sys
+import time
+
+# Note that although win32 python does provide an implementation of
+# the win32 select API, it only works on sockets, and not on the named pipes
+# used by subprocess, so we have to use the native APIs directly.
+_quote_cmd = None
+
+if sys.platform == 'win32':
+ import msvcrt
+ import win32pipe
+ import win32file
+ import subprocess
+ _quote_cmd = subprocess.list2cmdline
+else:
+ import fcntl
+ import os
+ import pipes
+ import select
+ _quote_cmd = lambda cmdline: ' '.join(pipes.quote(arg) for arg in cmdline)
+
+from webkitpy.common.system.executive import ScriptError
+
+
+_log = logging.getLogger(__name__)
+
+
+_trailing_spaces_re = re.compile('(.*[^ ])?( +)$')
+
+
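+# quote_data() renders raw protocol data readably for logging: repr()-style
+# escaping, one list element per line, with trailing spaces rewritten to the
+# visible '\x20' escape. Illustrative results, assuming the escaping below:
+# quote_data('ok \nbye') -> ['ok \\n', 'bye']
+# quote_data('hi  ') -> ['hi\\x20\\x20']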
+def quote_data(data):
+ txt = repr(data).replace('\\n', '\\n\n')[1:-1]
+ lines = []
+ for l in txt.splitlines():
+ m = _trailing_spaces_re.match(l)
+ if m:
+ l = m.group(1) + m.group(2).replace(' ', '\\x20') # Escape trailing spaces so they are visible in logs.
+ lines.append(l)
+ return lines
+
+class ServerProcess(object):
+ """This class provides a wrapper around a subprocess that
+ implements a simple request/response usage model. The primary benefit
+ is that reading responses takes a deadline, so that we don't ever block
+ indefinitely. The class also handles transparently restarting processes
+ as necessary to keep issuing commands."""
+
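+ # Typical request/response usage (a sketch; 'port' is any Port object and
+ # the deadline is an absolute time.time() value):
+ #
+ # proc = ServerProcess(port, 'content_shell', cmd)
+ # proc.write('passes/text.html\n')
+ # line = proc.read_stdout_line(deadline=time.time() + 6)
+ # proc.stop(0.0)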
+ def __init__(self, port_obj, name, cmd, env=None, universal_newlines=False, treat_no_data_as_crash=False,
+ logging=False):
+ self._port = port_obj
+ self._name = name # Should be the command name (e.g. content_shell, image_diff)
+ self._cmd = cmd
+ self._env = env
+ # Set if the process outputs non-standard newlines like '\r\n' or '\r'.
+ # Don't set if there will be binary data or the data must be ASCII encoded.
+ self._universal_newlines = universal_newlines
+ self._treat_no_data_as_crash = treat_no_data_as_crash
+ self._logging = logging
+ self._host = self._port.host
+ self._pid = None
+ self._reset()
+
+ # See comment in imports for why we need the win32 APIs and can't just use select.
+ # FIXME: there should be a way to get win32 vs. cygwin from platforminfo.
+ self._use_win32_apis = sys.platform == 'win32'
+
+ def name(self):
+ return self._name
+
+ def pid(self):
+ return self._pid
+
+ def _reset(self):
+ if getattr(self, '_proc', None):
+ if self._proc.stdin:
+ self._proc.stdin.close()
+ self._proc.stdin = None
+ if self._proc.stdout:
+ self._proc.stdout.close()
+ self._proc.stdout = None
+ if self._proc.stderr:
+ self._proc.stderr.close()
+ self._proc.stderr = None
+
+ self._proc = None
+ self._output = str() # bytearray() once we require Python 2.6
+ self._error = str() # bytearray() once we require Python 2.6
+ self._crashed = False
+ self.timed_out = False
+
+ def process_name(self):
+ return self._name
+
+ def _start(self):
+ if self._proc:
+ raise ValueError("%s already running" % self._name)
+ self._reset()
+ # close_fds is a workaround for http://bugs.python.org/issue2320
+ close_fds = not self._host.platform.is_win()
+ if self._logging:
+ env_str = ''
+ if self._env:
+ env_str += '\n'.join("%s=%s" % (k, v) for k, v in self._env.items()) + '\n'
+ _log.info('CMD: \n%s%s\n', env_str, _quote_cmd(self._cmd))
+ self._proc = self._host.executive.popen(self._cmd, stdin=self._host.executive.PIPE,
+ stdout=self._host.executive.PIPE,
+ stderr=self._host.executive.PIPE,
+ close_fds=close_fds,
+ env=self._env,
+ universal_newlines=self._universal_newlines)
+ self._pid = self._proc.pid
+ fd = self._proc.stdout.fileno()
+ if not self._use_win32_apis:
+ fl = fcntl.fcntl(fd, fcntl.F_GETFL)
+ fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
+ fd = self._proc.stderr.fileno()
+ fl = fcntl.fcntl(fd, fcntl.F_GETFL)
+ fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
+
+ def _handle_possible_interrupt(self):
+ """This routine checks to see if the process crashed or exited
+ because of a keyboard interrupt and raises KeyboardInterrupt
+ accordingly."""
+ # FIXME: Linux and Mac set the returncode to -signal.SIGINT if a
+ # subprocess is killed with a ctrl^C. Previous comments in this
+ # routine said that supposedly Windows returns 0xc000001d, but that's not what
+ # -1073741510 evaluates to. Figure out what the right value is
+ # for win32 and cygwin here ...
+ if self._proc.returncode in (-1073741510, -signal.SIGINT):
+ raise KeyboardInterrupt
+
+ def poll(self):
+ """Check to see if the underlying process is running; returns None
+ if it still is (wrapper around subprocess.poll)."""
+ if self._proc:
+ return self._proc.poll()
+ return None
+
+ def write(self, bytes):
+ """Write a request to the subprocess. The subprocess is (re-)start()'ed
+ if is not already running."""
+ if not self._proc:
+ self._start()
+ try:
+ self._log_data(' IN', bytes)
+ self._proc.stdin.write(bytes)
+ except IOError, e:
+ self.stop(0.0)
+ # stop() calls _reset(), so we have to set crashed to True after calling stop().
+ self._crashed = True
+
+ def _pop_stdout_line_if_ready(self):
+ index_after_newline = self._output.find('\n') + 1
+ if index_after_newline > 0:
+ return self._pop_output_bytes(index_after_newline)
+ return None
+
+ def _pop_stderr_line_if_ready(self):
+ index_after_newline = self._error.find('\n') + 1
+ if index_after_newline > 0:
+ return self._pop_error_bytes(index_after_newline)
+ return None
+
+ def pop_all_buffered_stderr(self):
+ return self._pop_error_bytes(len(self._error))
+
+ def read_stdout_line(self, deadline):
+ return self._read(deadline, self._pop_stdout_line_if_ready)
+
+ def read_stderr_line(self, deadline):
+ return self._read(deadline, self._pop_stderr_line_if_ready)
+
+ def read_either_stdout_or_stderr_line(self, deadline):
+ def retrieve_bytes_from_buffers():
+ stdout_line = self._pop_stdout_line_if_ready()
+ if stdout_line:
+ return stdout_line, None
+ stderr_line = self._pop_stderr_line_if_ready()
+ if stderr_line:
+ return None, stderr_line
+ return None # Instructs the caller to keep waiting.
+
+ return_value = self._read(deadline, retrieve_bytes_from_buffers)
+ # FIXME: This is a bit of a hack around the fact that _read normally only returns one value, but this caller wants it to return two.
+ if return_value is None:
+ return None, None
+ return return_value
+
+ def read_stdout(self, deadline, size):
+ if size <= 0:
+ raise ValueError('ServerProcess.read() called with a non-positive size: %d ' % size)
+
+ def retrieve_bytes_from_stdout_buffer():
+ if len(self._output) >= size:
+ return self._pop_output_bytes(size)
+ return None
+
+ return self._read(deadline, retrieve_bytes_from_stdout_buffer)
+
+ def _log(self, message):
+ # This is a bit of a hack, but we first log a blank line to avoid
+ # messing up the master process's output.
+ _log.info('')
+ _log.info(message)
+
+ def _log_data(self, prefix, data):
+ if self._logging and data and len(data):
+ for line in quote_data(data):
+ _log.info('%s: %s', prefix, line)
+
+ def _handle_timeout(self):
+ self.timed_out = True
+ self._port.sample_process(self._name, self._proc.pid)
+
+ def _split_string_after_index(self, string, index):
+ return string[:index], string[index:]
+
+ def _pop_output_bytes(self, bytes_count):
+ output, self._output = self._split_string_after_index(self._output, bytes_count)
+ return output
+
+ def _pop_error_bytes(self, bytes_count):
+ output, self._error = self._split_string_after_index(self._error, bytes_count)
+ return output
+
+ def _wait_for_data_and_update_buffers_using_select(self, deadline, stopping=False):
+ if self._proc.stdout.closed or self._proc.stderr.closed:
+ # If the process crashed and is using FIFOs, like Chromium Android, the
+ # stdout and stderr pipes will be closed.
+ return
+
+ out_fd = self._proc.stdout.fileno()
+ err_fd = self._proc.stderr.fileno()
+ select_fds = (out_fd, err_fd)
+ try:
+ read_fds, _, _ = select.select(select_fds, [], select_fds, max(deadline - time.time(), 0))
+ except select.error, e:
+ # We can ignore EINVAL since it's likely the process just crashed and we'll
+ # figure that out the next time through the loop in _read().
+ if e.args[0] == errno.EINVAL:
+ return
+ raise
+
+ try:
+ # Note that we may get no data during read() even though
+ # select says we got something; see the select() man page
+ # on linux. I don't know if this happens on Mac OS and
+ # other Unixen as well, but we don't bother special-casing
+ # Linux because it's relatively harmless either way.
+ if out_fd in read_fds:
+ data = self._proc.stdout.read()
+ if not data and not stopping and (self._treat_no_data_as_crash or self._proc.poll()):
+ self._crashed = True
+ self._log_data('OUT', data)
+ self._output += data
+
+ if err_fd in read_fds:
+ data = self._proc.stderr.read()
+ if not data and not stopping and (self._treat_no_data_as_crash or self._proc.poll()):
+ self._crashed = True
+ self._log_data('ERR', data)
+ self._error += data
+ except IOError, e:
+ # We can ignore the IOErrors because we will detect if the subprocess
+ # crashed the next time through the loop in _read().
+ pass
+
+ def _wait_for_data_and_update_buffers_using_win32_apis(self, deadline):
+ # See http://code.activestate.com/recipes/440554-module-to-allow-asynchronous-subprocess-use-on-win/
+ # and http://docs.activestate.com/activepython/2.6/pywin32/modules.html
+ # for documentation on all of these win32-specific modules.
+ now = time.time()
+ out_fh = msvcrt.get_osfhandle(self._proc.stdout.fileno())
+ err_fh = msvcrt.get_osfhandle(self._proc.stderr.fileno())
+ while (self._proc.poll() is None) and (now < deadline):
+ output = self._non_blocking_read_win32(out_fh)
+ self._log_data('OUT', output)
+ error = self._non_blocking_read_win32(err_fh)
+ self._log_data('ERR', error)
+ if output or error:
+ if output:
+ self._output += output
+ if error:
+ self._error += error
+ return
+ time.sleep(0.01)
+ now = time.time()
+ return
+
+ def _non_blocking_read_win32(self, handle):
+ try:
+ _, avail, _ = win32pipe.PeekNamedPipe(handle, 0)
+ if avail > 0:
+ _, buf = win32file.ReadFile(handle, avail, None)
+ return buf
+ except Exception, e:
+ if e[0] not in (109, errno.ESHUTDOWN): # 109 == win32 ERROR_BROKEN_PIPE
+ raise
+ return None
+
+ def has_crashed(self):
+ if not self._crashed and self.poll():
+ self._crashed = True
+ self._handle_possible_interrupt()
+ return self._crashed
+
+ # This read function is a bit oddly-designed, as it polls both stdout and stderr, yet
+ # only reads/returns from one of them (buffering both in local self._output/self._error).
+ # It might be cleaner to pass in the file descriptor to poll instead.
+ def _read(self, deadline, fetch_bytes_from_buffers_callback):
+ while True:
+ if self.has_crashed():
+ return None
+
+ if time.time() > deadline:
+ self._handle_timeout()
+ return None
+
+ bytes = fetch_bytes_from_buffers_callback()
+ if bytes is not None:
+ return bytes
+
+ if self._use_win32_apis:
+ self._wait_for_data_and_update_buffers_using_win32_apis(deadline)
+ else:
+ self._wait_for_data_and_update_buffers_using_select(deadline)
+
+ def start(self):
+ if not self._proc:
+ self._start()
+
+ def stop(self, timeout_secs=0.0):
+ if not self._proc:
+ return (None, None)
+
+ now = time.time()
+ if self._proc.stdin:
+ if self._logging:
+ _log.info(' IN: ^D')
+ self._proc.stdin.close()
+ self._proc.stdin = None
+ killed = False
+ if timeout_secs:
+ deadline = now + timeout_secs
+ while self._proc.poll() is None and time.time() < deadline:
+ time.sleep(0.01)
+ if self._proc.poll() is None:
+ _log.warning('stopping %s(pid %d) timed out, killing it' % (self._name, self._proc.pid))
+
+ if self._proc.poll() is None:
+ self._kill()
+ killed = True
+ _log.debug('killed pid %d' % self._proc.pid)
+
+ # read any remaining data on the pipes and return it.
+ if not killed:
+ if self._use_win32_apis:
+ self._wait_for_data_and_update_buffers_using_win32_apis(now)
+ else:
+ self._wait_for_data_and_update_buffers_using_select(now, stopping=True)
+ out, err = self._output, self._error
+ self._reset()
+ return (out, err)
+
+ def kill(self):
+ self.stop(0.0)
+
+ def _kill(self):
+ self._host.executive.kill_process(self._proc.pid)
+ if self._proc.poll() is not None:
+ self._proc.wait()
+
+ def replace_outputs(self, stdout, stderr):
+ assert self._proc
+ if stdout:
+ self._proc.stdout.close()
+ self._proc.stdout = stdout
+ if stderr:
+ self._proc.stderr.close()
+ self._proc.stderr = stderr
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py
new file mode 100644
index 0000000..607bc51
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py
@@ -0,0 +1,79 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class MockServerProcess(object):
+ def __init__(self, port_obj=None, name=None, cmd=None, env=None, universal_newlines=False, treat_no_data_as_crash=False, logging=False, lines=None, crashed=False):
+ self.timed_out = False
+ self.lines = lines or ['#READY']
+ self.crashed = crashed
+ self.writes = []
+ self.cmd = cmd
+ self.env = env
+ self.treat_no_data_as_crash = treat_no_data_as_crash
+ self.logging = logging
+ self.started = False
+ self.stopped = False
+
+ def write(self, bytes):
+ self.writes.append(bytes)
+
+ def has_crashed(self):
+ return self.crashed
+
+ def read_stdout_line(self, deadline):
+ return self.lines.pop(0) + "\n"
+
+ def read_stdout(self, deadline, size):
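+ # Serve 'size' bytes from the canned lines: whole lines are consumed
+ # along with the '\n' that read_stdout_line() would append (hence the
+ # '- 1' below); a partial request splits the current line.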
+ first_line = self.lines[0]
+ if size > len(first_line):
+ self.lines.pop(0)
+ remaining_size = size - len(first_line) - 1
+ if not remaining_size:
+ return first_line + "\n"
+ return first_line + "\n" + self.read_stdout(deadline, remaining_size)
+ result = self.lines[0][:size]
+ self.lines[0] = self.lines[0][size:]
+ return result
+
+ def pop_all_buffered_stderr(self):
+ return ''
+
+ def read_either_stdout_or_stderr_line(self, deadline):
+ # FIXME: We should have tests which intermix stderr and stdout lines.
+ return self.read_stdout_line(deadline), None
+
+ def start(self):
+ self.started = True
+
+ def stop(self, timeout_sec=0.0):
+ self.stopped = True
+ return
+
+ def kill(self):
+ return
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py
new file mode 100644
index 0000000..c5f9ba6
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py
@@ -0,0 +1,169 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import time
+import unittest
+
+from webkitpy.layout_tests.port.factory import PortFactory
+from webkitpy.layout_tests.port import server_process
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.common.system.outputcapture import OutputCapture
+
+
+class TrivialMockPort(object):
+ def __init__(self):
+ self.host = MockSystemHost()
+ self.host.executive.kill_process = lambda x: None
+
+ def results_directory(self):
+ return "/mock-results"
+
+ def process_kill_time(self):
+ return 1
+
+
+class MockFile(object):
+ def __init__(self, server_process):
+ self._server_process = server_process
+ self.closed = False
+
+ def fileno(self):
+ return 1
+
+ def write(self, line):
+ self._server_process.broken_pipes.append(self)
+ raise IOError
+
+ def close(self):
+ self.closed = True
+
+
+class MockProc(object):
+ def __init__(self, server_process):
+ self.stdin = MockFile(server_process)
+ self.stdout = MockFile(server_process)
+ self.stderr = MockFile(server_process)
+ self.pid = 1
+
+ def poll(self):
+ return 1
+
+ def wait(self):
+ return 0
+
+
+class FakeServerProcess(server_process.ServerProcess):
+ def _start(self):
+ self._proc = MockProc(self)
+ self.stdin = self._proc.stdin
+ self.stdout = self._proc.stdout
+ self.stderr = self._proc.stderr
+ self._pid = self._proc.pid
+ self.broken_pipes = []
+
+
+class TestServerProcess(unittest.TestCase):
+ def test_basic(self):
+ cmd = [sys.executable, '-c', 'import sys; import time; time.sleep(0.02); print "stdout"; sys.stdout.flush(); print >>sys.stderr, "stderr"']
+ host = SystemHost()
+ factory = PortFactory(host)
+ port = factory.get()
+ now = time.time()
+ proc = server_process.ServerProcess(port, 'python', cmd)
+ proc.write('')
+
+ self.assertEqual(proc.poll(), None)
+ self.assertFalse(proc.has_crashed())
+
+ # check that doing a read after an expired deadline returns
+ # nothing immediately.
+ line = proc.read_stdout_line(now - 1)
+ self.assertEqual(line, None)
+
+ # FIXME: This part appears to be flaky. line should always be non-None.
+ # FIXME: https://bugs.webkit.org/show_bug.cgi?id=88280
+ line = proc.read_stdout_line(now + 1.0)
+ if line:
+ self.assertEqual(line.strip(), "stdout")
+
+ line = proc.read_stderr_line(now + 1.0)
+ if line:
+ self.assertEqual(line.strip(), "stderr")
+
+ proc.stop(0)
+
+ def test_cleanup(self):
+ port_obj = TrivialMockPort()
+ server_process = FakeServerProcess(port_obj=port_obj, name="test", cmd=["test"])
+ server_process._start()
+ server_process.stop()
+ self.assertTrue(server_process.stdin.closed)
+ self.assertTrue(server_process.stdout.closed)
+ self.assertTrue(server_process.stderr.closed)
+
+ def test_broken_pipe(self):
+ port_obj = TrivialMockPort()
+
+ port_obj.host.platform.os_name = 'win'
+ server_process = FakeServerProcess(port_obj=port_obj, name="test", cmd=["test"])
+ server_process.write("should break")
+ self.assertTrue(server_process.has_crashed())
+ self.assertIsNotNone(server_process.pid())
+ self.assertIsNone(server_process._proc)
+ self.assertEqual(server_process.broken_pipes, [server_process.stdin])
+
+ port_obj.host.platform.os_name = 'mac'
+ server_process = FakeServerProcess(port_obj=port_obj, name="test", cmd=["test"])
+ server_process.write("should break")
+ self.assertTrue(server_process.has_crashed())
+ self.assertIsNone(server_process._proc)
+ self.assertEqual(server_process.broken_pipes, [server_process.stdin])
+
+
+class TestQuoteData(unittest.TestCase):
+ def test_plain(self):
+ qd = server_process.quote_data
+ self.assertEqual(qd("foo"), ["foo"])
+
+ def test_trailing_spaces(self):
+ qd = server_process.quote_data
+ self.assertEqual(qd("foo "),
+ ["foo\x20\x20"])
+
+ def test_newlines(self):
+ qd = server_process.quote_data
+ self.assertEqual(qd("foo \nbar\n"),
+ ["foo\x20\\n", "bar\\n"])
+
+ def test_binary_data(self):
+ qd = server_process.quote_data
+ self.assertEqual(qd("\x00\x01ab"),
+ ["\\x00\\x01ab"])
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/test.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/test.py
new file mode 100644
index 0000000..87136c1
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/test.py
@@ -0,0 +1,646 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import base64
+import copy
+import sys
+import time
+
+from webkitpy.layout_tests.port import DeviceFailure, Driver, DriverOutput, Port
+from webkitpy.layout_tests.port.base import VirtualTestSuite
+from webkitpy.layout_tests.models.test_configuration import TestConfiguration
+from webkitpy.layout_tests.models import test_run_results
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.crashlogs import CrashLogs
+
+
+# This sets basic expectations for a test. Each individual expectation
+# can be overridden by a keyword argument in TestList.add().
+class TestInstance(object):
+ def __init__(self, name):
+ self.name = name
+ self.base = name[(name.rfind("/") + 1):name.rfind(".")]
+ self.crash = False
+ self.web_process_crash = False
+ self.exception = False
+ self.keyboard = False
+ self.error = ''
+ self.timeout = False
+ self.is_reftest = False
+ self.device_failure = False
+ self.leak = False
+
+ # The values of each field are treated as raw byte strings. They
+ # will be converted to unicode strings where appropriate using
+ # FileSystem.read_text_file().
+ self.actual_text = self.base + '-txt'
+ self.actual_checksum = self.base + '-checksum'
+
+ # We add the '\x8a' to the image file to prevent the value from
+ # being treated as UTF-8 (the byte is not valid UTF-8).
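+ # A real port recovers such an embedded checksum by scanning the
+ # image bytes for the 'tEXtchecksum\x00' marker (webkitpy's
+ # read_checksum_from_png does this for actual PNGs), so checksum
+ # matching can be exercised without genuine image data.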
+ self.actual_image = self.base + '\x8a' + '-png' + 'tEXtchecksum\x00' + self.actual_checksum
+
+ self.expected_text = self.actual_text
+ self.expected_image = self.actual_image
+
+ self.actual_audio = None
+ self.expected_audio = None
+
+
+# This is an in-memory list of tests, what we want them to produce, and
+# what we want to claim are the expected results.
+class TestList(object):
+ def __init__(self):
+ self.tests = {}
+
+ def add(self, name, **kwargs):
+ test = TestInstance(name)
+ for key, value in kwargs.items():
+ test.__dict__[key] = value
+ self.tests[name] = test
+
+ def add_reftest(self, name, reference_name, same_image, crash=False):
+ self.add(name, actual_checksum='xxx', actual_image='XXX', is_reftest=True, crash=crash)
+ if same_image:
+ self.add(reference_name, actual_checksum='xxx', actual_image='XXX', is_reftest=True)
+ else:
+ self.add(reference_name, actual_checksum='yyy', actual_image='YYY', is_reftest=True)
+
+ def keys(self):
+ return self.tests.keys()
+
+ def __contains__(self, item):
+ return item in self.tests
+
+ def __getitem__(self, item):
+ return self.tests[item]
+
+#
+# These numbers may need to be updated whenever we add or delete tests. This includes virtual tests.
+#
+TOTAL_TESTS = 113
+TOTAL_SKIPS = 29
+
+UNEXPECTED_PASSES = 1
+UNEXPECTED_FAILURES = 26
+
+def unit_test_list():
+ tests = TestList()
+ tests.add('failures/expected/crash.html', crash=True)
+ tests.add('failures/expected/exception.html', exception=True)
+ tests.add('failures/expected/device_failure.html', device_failure=True)
+ tests.add('failures/expected/timeout.html', timeout=True)
+ tests.add('failures/expected/leak.html', leak=True)
+ tests.add('failures/expected/missing_text.html', expected_text=None)
+ tests.add('failures/expected/needsrebaseline.html', actual_text='needsrebaseline text')
+ tests.add('failures/expected/needsmanualrebaseline.html', actual_text='needsmanualrebaseline text')
+ tests.add('failures/expected/image.html',
+ actual_image='image_fail-pngtEXtchecksum\x00checksum_fail',
+ expected_image='image-pngtEXtchecksum\x00checksum-png')
+ tests.add('failures/expected/image_checksum.html',
+ actual_checksum='image_checksum_fail-checksum',
+ actual_image='image_checksum_fail-png')
+ tests.add('failures/expected/audio.html',
+ actual_audio=base64.b64encode('audio_fail-wav'), expected_audio='audio-wav',
+ actual_text=None, expected_text=None,
+ actual_image=None, expected_image=None,
+ actual_checksum=None)
+ tests.add('failures/expected/keyboard.html', keyboard=True)
+ tests.add('failures/expected/missing_check.html',
+ expected_image='missing_check-png')
+ tests.add('failures/expected/missing_image.html', expected_image=None)
+ tests.add('failures/expected/missing_audio.html', expected_audio=None,
+ actual_text=None, expected_text=None,
+ actual_image=None, expected_image=None,
+ actual_checksum=None)
+ tests.add('failures/expected/missing_text.html', expected_text=None)
+ tests.add('failures/expected/newlines_leading.html',
+ expected_text="\nfoo\n", actual_text="foo\n")
+ tests.add('failures/expected/newlines_trailing.html',
+ expected_text="foo\n\n", actual_text="foo\n")
+ tests.add('failures/expected/newlines_with_excess_CR.html',
+ expected_text="foo\r\r\r\n", actual_text="foo\n")
+ tests.add('failures/expected/text.html', actual_text='text_fail-png')
+ tests.add('failures/expected/crash_then_text.html')
+ tests.add('failures/expected/skip_text.html', actual_text='text diff')
+ tests.add('failures/flaky/text.html')
+ tests.add('failures/unexpected/missing_text.html', expected_text=None)
+ tests.add('failures/unexpected/missing_check.html', expected_image='missing-check-png')
+ tests.add('failures/unexpected/missing_image.html', expected_image=None)
+ tests.add('failures/unexpected/missing_render_tree_dump.html', actual_text="""layer at (0,0) size 800x600
+ RenderView at (0,0) size 800x600
+layer at (0,0) size 800x34
+ RenderBlock {HTML} at (0,0) size 800x34
+ RenderBody {BODY} at (8,8) size 784x18
+ RenderText {#text} at (0,0) size 133x18
+ text run at (0,0) width 133: "This is an image test!"
+""", expected_text=None)
+ tests.add('failures/unexpected/crash.html', crash=True)
+ tests.add('failures/unexpected/crash-with-stderr.html', crash=True,
+ error="mock-std-error-output")
+ tests.add('failures/unexpected/web-process-crash-with-stderr.html', web_process_crash=True,
+ error="mock-std-error-output")
+ tests.add('failures/unexpected/pass.html')
+ tests.add('failures/unexpected/text-checksum.html',
+ actual_text='text-checksum_fail-txt',
+ actual_checksum='text-checksum_fail-checksum')
+ tests.add('failures/unexpected/text-image-checksum.html',
+ actual_text='text-image-checksum_fail-txt',
+ actual_image='text-image-checksum_fail-pngtEXtchecksum\x00checksum_fail',
+ actual_checksum='text-image-checksum_fail-checksum')
+ tests.add('failures/unexpected/checksum-with-matching-image.html',
+ actual_checksum='text-image-checksum_fail-checksum')
+ tests.add('failures/unexpected/skip_pass.html')
+ tests.add('failures/unexpected/text.html', actual_text='text_fail-txt')
+ tests.add('failures/unexpected/text_then_crash.html')
+ tests.add('failures/unexpected/timeout.html', timeout=True)
+ tests.add('failures/unexpected/leak.html', leak=True)
+ tests.add('http/tests/passes/text.html')
+ tests.add('http/tests/passes/image.html')
+ tests.add('http/tests/ssl/text.html')
+ tests.add('passes/args.html')
+ tests.add('passes/error.html', error='stuff going to stderr')
+ tests.add('passes/image.html')
+ tests.add('passes/audio.html',
+ actual_audio=base64.b64encode('audio-wav'), expected_audio='audio-wav',
+ actual_text=None, expected_text=None,
+ actual_image=None, expected_image=None,
+ actual_checksum=None)
+ tests.add('passes/platform_image.html')
+ tests.add('passes/checksum_in_image.html',
+ expected_image='tEXtchecksum\x00checksum_in_image-checksum')
+ tests.add('passes/skipped/skip.html')
+
+ # Note that here the checksums don't match but the images do, so this test passes "unexpectedly".
+ # See https://bugs.webkit.org/show_bug.cgi?id=69444 .
+ tests.add('failures/unexpected/checksum.html', actual_checksum='checksum_fail-checksum')
+
+ # Text output files contain "\r\n" on Windows. This may be
+ # helpfully filtered to "\r\r\n" by our Python/Cygwin tooling.
+ tests.add('passes/text.html',
+ expected_text='\nfoo\n\n', actual_text='\nfoo\r\n\r\r\n')
+
+ # For reftests.
+ tests.add_reftest('passes/reftest.html', 'passes/reftest-expected.html', same_image=True)
+
+ # This adds a different virtual reference to ensure that case also works.
+ tests.add('virtual/virtual_passes/passes/reftest-expected.html', actual_checksum='xxx', actual_image='XXX', is_reftest=True)
+
+ tests.add_reftest('passes/mismatch.html', 'passes/mismatch-expected-mismatch.html', same_image=False)
+ tests.add_reftest('passes/svgreftest.svg', 'passes/svgreftest-expected.svg', same_image=True)
+ tests.add_reftest('passes/xhtreftest.xht', 'passes/xhtreftest-expected.html', same_image=True)
+ tests.add_reftest('passes/phpreftest.php', 'passes/phpreftest-expected-mismatch.svg', same_image=False)
+ tests.add_reftest('failures/expected/reftest.html', 'failures/expected/reftest-expected.html', same_image=False)
+ tests.add_reftest('failures/expected/mismatch.html', 'failures/expected/mismatch-expected-mismatch.html', same_image=True)
+ tests.add_reftest('failures/unexpected/crash-reftest.html', 'failures/unexpected/crash-reftest-expected.html', same_image=True, crash=True)
+ tests.add_reftest('failures/unexpected/reftest.html', 'failures/unexpected/reftest-expected.html', same_image=False)
+ tests.add_reftest('failures/unexpected/mismatch.html', 'failures/unexpected/mismatch-expected-mismatch.html', same_image=True)
+ tests.add('failures/unexpected/reftest-nopixel.html', actual_checksum=None, actual_image=None, is_reftest=True)
+ tests.add('failures/unexpected/reftest-nopixel-expected.html', actual_checksum=None, actual_image=None, is_reftest=True)
+ tests.add('reftests/foo/test.html')
+ tests.add('reftests/foo/test-ref.html')
+
+ tests.add('reftests/foo/multiple-match-success.html', actual_checksum='abc', actual_image='abc')
+ tests.add('reftests/foo/multiple-match-failure.html', actual_checksum='abc', actual_image='abc')
+ tests.add('reftests/foo/multiple-mismatch-success.html', actual_checksum='abc', actual_image='abc')
+ tests.add('reftests/foo/multiple-mismatch-failure.html', actual_checksum='abc', actual_image='abc')
+ tests.add('reftests/foo/multiple-both-success.html', actual_checksum='abc', actual_image='abc')
+ tests.add('reftests/foo/multiple-both-failure.html', actual_checksum='abc', actual_image='abc')
+
+ tests.add('reftests/foo/matching-ref.html', actual_checksum='abc', actual_image='abc')
+ tests.add('reftests/foo/mismatching-ref.html', actual_checksum='def', actual_image='def')
+ tests.add('reftests/foo/second-mismatching-ref.html', actual_checksum='ghi', actual_image='ghi')
+
+ # The following files shouldn't be treated as reftests
+ tests.add_reftest('reftests/foo/unlistedtest.html', 'reftests/foo/unlistedtest-expected.html', same_image=True)
+ tests.add('reftests/foo/reference/bar/common.html')
+ tests.add('reftests/foo/reftest/bar/shared.html')
+
+ tests.add('websocket/tests/passes/text.html')
+
+ # For testing that we don't run tests under platform/. Note that these don't contribute to TOTAL_TESTS.
+ tests.add('platform/test-mac-leopard/http/test.html')
+ tests.add('platform/test-win-win7/http/test.html')
+
+ # For testing if perf tests are running in a locked shard.
+ tests.add('perf/foo/test.html')
+ tests.add('perf/foo/test-ref.html')
+
+ # For testing --pixel-test-directories.
+ tests.add('failures/unexpected/pixeldir/image_in_pixeldir.html',
+ actual_image='image_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
+ expected_image='image_in_pixeldir-pngtEXtchecksum\x00checksum-png')
+ tests.add('failures/unexpected/image_not_in_pixeldir.html',
+ actual_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
+ expected_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum-png')
+
+ # For testing that virtual test suites don't expand names containing themselves
+ # See webkit.org/b/97925 and base_unittest.PortTest.test_tests().
+ tests.add('passes/test-virtual-passes.html')
+ tests.add('passes/virtual_passes/test-virtual-passes.html')
+
+ return tests
+
+
+# Here we use a non-standard location for the layout tests, to ensure that
+# this works. The path contains a '.' in the name because we've seen bugs
+# related to this before.
+
+LAYOUT_TEST_DIR = '/test.checkout/LayoutTests'
+PERF_TEST_DIR = '/test.checkout/PerformanceTests'
+
+
+# Here we synthesize an in-memory filesystem from the test list
+# in order to fully control the test output and to demonstrate that
+# we don't need a real filesystem to run the tests.
+def add_unit_tests_to_mock_filesystem(filesystem):
+ # Add the test_expectations file.
+ filesystem.maybe_make_directory('/mock-checkout/LayoutTests')
+ if not filesystem.exists('/mock-checkout/LayoutTests/TestExpectations'):
+ filesystem.write_text_file('/mock-checkout/LayoutTests/TestExpectations', """
+Bug(test) failures/expected/crash.html [ Crash ]
+Bug(test) failures/expected/crash_then_text.html [ Failure ]
+Bug(test) failures/expected/image.html [ ImageOnlyFailure ]
+Bug(test) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
+Bug(test) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]
+Bug(test) failures/expected/audio.html [ Failure ]
+Bug(test) failures/expected/image_checksum.html [ ImageOnlyFailure ]
+Bug(test) failures/expected/mismatch.html [ ImageOnlyFailure ]
+Bug(test) failures/expected/missing_check.html [ Missing Pass ]
+Bug(test) failures/expected/missing_image.html [ Missing Pass ]
+Bug(test) failures/expected/missing_audio.html [ Missing Pass ]
+Bug(test) failures/expected/missing_text.html [ Missing Pass ]
+Bug(test) failures/expected/newlines_leading.html [ Failure ]
+Bug(test) failures/expected/newlines_trailing.html [ Failure ]
+Bug(test) failures/expected/newlines_with_excess_CR.html [ Failure ]
+Bug(test) failures/expected/reftest.html [ ImageOnlyFailure ]
+Bug(test) failures/expected/text.html [ Failure ]
+Bug(test) failures/expected/timeout.html [ Timeout ]
+Bug(test) failures/expected/keyboard.html [ WontFix ]
+Bug(test) failures/expected/exception.html [ WontFix ]
+Bug(test) failures/expected/device_failure.html [ WontFix ]
+Bug(test) failures/expected/leak.html [ Leak ]
+Bug(test) failures/unexpected/pass.html [ Failure ]
+Bug(test) passes/skipped/skip.html [ Skip ]
+Bug(test) passes/text.html [ Pass ]
+""")
+
+ filesystem.maybe_make_directory(LAYOUT_TEST_DIR + '/reftests/foo')
+ filesystem.write_text_file(LAYOUT_TEST_DIR + '/reftests/foo/reftest.list', """
+== test.html test-ref.html
+
+== multiple-match-success.html mismatching-ref.html
+== multiple-match-success.html matching-ref.html
+== multiple-match-failure.html mismatching-ref.html
+== multiple-match-failure.html second-mismatching-ref.html
+!= multiple-mismatch-success.html mismatching-ref.html
+!= multiple-mismatch-success.html second-mismatching-ref.html
+!= multiple-mismatch-failure.html mismatching-ref.html
+!= multiple-mismatch-failure.html matching-ref.html
+== multiple-both-success.html matching-ref.html
+== multiple-both-success.html mismatching-ref.html
+!= multiple-both-success.html second-mismatching-ref.html
+== multiple-both-failure.html matching-ref.html
+!= multiple-both-failure.html second-mismatching-ref.html
+!= multiple-both-failure.html matching-ref.html
+""")
+
+ # FIXME: This test was only being ignored because of a missing leading '/'.
+ # Fixing the typo causes several tests to assert, so the test is disabled entirely.
+ # Add in a file that should be ignored by port.find_test_files().
+ #files[LAYOUT_TEST_DIR + '/userscripts/resources/iframe.html'] = 'iframe'
+
+ def add_file(test, suffix, contents):
+ dirname = filesystem.join(LAYOUT_TEST_DIR, test.name[0:test.name.rfind('/')])
+ base = test.base
+ filesystem.maybe_make_directory(dirname)
+ filesystem.write_binary_file(filesystem.join(dirname, base + suffix), contents)
+
+ # Add each test and the expected output, if any.
+ test_list = unit_test_list()
+ for test in test_list.tests.values():
+ add_file(test, test.name[test.name.rfind('.'):], '')
+ if test.is_reftest:
+ continue
+ if test.actual_audio:
+ add_file(test, '-expected.wav', test.expected_audio)
+ continue
+ add_file(test, '-expected.txt', test.expected_text)
+ add_file(test, '-expected.png', test.expected_image)
+
+ filesystem.write_text_file(filesystem.join(LAYOUT_TEST_DIR, 'virtual', 'virtual_passes', 'passes', 'args-expected.txt'), 'args-txt --virtual-arg')
+ # Clear the list of written files so that we can watch what happens during testing.
+ filesystem.clear_written_files()
+
+
+class TestPort(Port):
+ """Test implementation of the Port interface."""
+
+ port_name = 'test'
+ default_port_name = 'test-mac-leopard'
+
+ ALL_BASELINE_VARIANTS = (
+ 'test-linux-x86_64',
+ 'test-mac-snowleopard', 'test-mac-leopard',
+ 'test-win-win7', 'test-win-xp',
+ )
+
+ FALLBACK_PATHS = {
+ 'xp': ['test-win-win7', 'test-win-xp'],
+ 'win7': ['test-win-win7'],
+ 'leopard': ['test-mac-leopard', 'test-mac-snowleopard'],
+ 'snowleopard': ['test-mac-snowleopard'],
+ 'lucid': ['test-linux-x86_64', 'test-win-win7'],
+ }
+
+ @classmethod
+ def determine_full_port_name(cls, host, options, port_name):
+ if port_name == 'test':
+ return TestPort.default_port_name
+ return port_name
+
+ def __init__(self, host, port_name=None, **kwargs):
+ Port.__init__(self, host, port_name or TestPort.default_port_name, **kwargs)
+ self._tests = unit_test_list()
+ self._flakes = set()
+
+ # FIXME: crbug.com/279494. This needs to be in the "real layout tests
+ # dir" in a mock filesystem, rather than outside of the checkout, so
+ # that tests that want to write to a TestExpectations file can share
+ # this between "test" ports and "real" ports. This is the result of
+ # rebaseline_unittest.py having tests that refer to "real" port names
+ # and real builders instead of fake builders that point back to the
+ # test ports. rebaseline_unittest.py needs to not mix both "real" ports
+ # and "test" ports
+
+ self._generic_expectations_path = '/mock-checkout/LayoutTests/TestExpectations'
+ self._results_directory = None
+
+ self._operating_system = 'mac'
+ if self._name.startswith('test-win'):
+ self._operating_system = 'win'
+ elif self._name.startswith('test-linux'):
+ self._operating_system = 'linux'
+
+ version_map = {
+ 'test-win-xp': 'xp',
+ 'test-win-win7': 'win7',
+ 'test-mac-leopard': 'leopard',
+ 'test-mac-snowleopard': 'snowleopard',
+ 'test-linux-x86_64': 'lucid',
+ }
+ self._version = version_map[self._name]
+
+ def repository_paths(self):
+ """Returns a list of (repository_name, repository_path) tuples of its depending code base."""
+ # FIXME: We override this just to keep the perf tests happy.
+ return [('blink', self.layout_tests_dir())]
+
+ def buildbot_archives_baselines(self):
+ return self._name != 'test-win-xp'
+
+ def default_pixel_tests(self):
+ return True
+
+ def _path_to_driver(self):
+ # This routine shouldn't normally be called, but it is called by
+ # the mock_drt Driver. We return something, but make sure it's useless.
+ return 'MOCK _path_to_driver'
+
+ def default_child_processes(self):
+ return 1
+
+ def check_build(self, needs_http, printer):
+ return test_run_results.OK_EXIT_STATUS
+
+ def check_sys_deps(self, needs_http):
+ return test_run_results.OK_EXIT_STATUS
+
+ def default_configuration(self):
+ return 'Release'
+
+ def diff_image(self, expected_contents, actual_contents):
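+ # Mimics Port.diff_image()'s (diff, error) contract: a None diff
+ # means the contents match. The real port returns image-diff bytes;
+ # this mock returns a textual diff instead.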
+ diffed = actual_contents != expected_contents
+ if not actual_contents and not expected_contents:
+ return (None, None)
+ if not actual_contents or not expected_contents:
+ return (True, None)
+ if diffed:
+ return ("< %s\n---\n> %s\n" % (expected_contents, actual_contents), None)
+ return (None, None)
+
+ def layout_tests_dir(self):
+ return LAYOUT_TEST_DIR
+
+ def perf_tests_dir(self):
+ return PERF_TEST_DIR
+
+ def webkit_base(self):
+ return '/test.checkout'
+
+ def _skipped_tests_for_unsupported_features(self, test_list):
+ return set(['failures/expected/skip_text.html',
+ 'failures/unexpected/skip_pass.html',
+ 'virtual/skipped/failures/expected'])
+
+ def name(self):
+ return self._name
+
+ def operating_system(self):
+ return self._operating_system
+
+ def _path_to_wdiff(self):
+ return None
+
+ def default_results_directory(self):
+ return '/tmp/layout-test-results'
+
+ def setup_test_run(self):
+ pass
+
+ def _driver_class(self):
+ return TestDriver
+
+ def start_http_server(self, additional_dirs, number_of_drivers):
+ pass
+
+ def start_websocket_server(self):
+ pass
+
+ def acquire_http_lock(self):
+ pass
+
+ def stop_http_server(self):
+ pass
+
+ def stop_websocket_server(self):
+ pass
+
+ def release_http_lock(self):
+ pass
+
+ def path_to_apache(self):
+ return "/usr/sbin/httpd"
+
+ def path_to_apache_config_file(self):
+ return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', 'httpd.conf')
+
+ def path_to_generic_test_expectations_file(self):
+ return self._generic_expectations_path
+
+ def _port_specific_expectations_files(self):
+ return [self._filesystem.join(self._webkit_baseline_path(d), 'TestExpectations') for d in ['test', 'test-win-xp']]
+
+ def all_test_configurations(self):
+ """Returns a sequence of the TestConfigurations the port supports."""
+ # By default, we assume we want to test every graphics type in
+ # every configuration on every system.
+ test_configurations = []
+ for version, architecture in self._all_systems():
+ for build_type in self._all_build_types():
+ test_configurations.append(TestConfiguration(
+ version=version,
+ architecture=architecture,
+ build_type=build_type))
+ return test_configurations
+
+ def _all_systems(self):
+ return (('leopard', 'x86'),
+ ('snowleopard', 'x86'),
+ ('xp', 'x86'),
+ ('win7', 'x86'),
+ ('lucid', 'x86'),
+ ('lucid', 'x86_64'))
+
+ def _all_build_types(self):
+ return ('debug', 'release')
+
+ def configuration_specifier_macros(self):
+ """To avoid surprises when introducing new macros, these are intentionally fixed in time."""
+ return {'mac': ['leopard', 'snowleopard'], 'win': ['xp', 'win7'], 'linux': ['lucid']}
+
+ def all_baseline_variants(self):
+ return self.ALL_BASELINE_VARIANTS
+
+ def virtual_test_suites(self):
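+ # Each suite makes virtual/<prefix>/<base>/... aliases of the tests
+ # under <base>/, run with the extra args appended to the driver
+ # command line (see TestDriver.cmd_line() below).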
+ return [
+ VirtualTestSuite(prefix='virtual_passes', base='passes', args=['--virtual-arg']),
+ VirtualTestSuite(prefix='skipped', base='failures/expected', args=['--virtual-arg2']),
+ ]
+
+
+class TestDriver(Driver):
+ """Test/Dummy implementation of the driver interface."""
+ next_pid = 1
+
+ def __init__(self, *args, **kwargs):
+ super(TestDriver, self).__init__(*args, **kwargs)
+ self.started = False
+ self.pid = 0
+
+ def cmd_line(self, pixel_tests, per_test_args):
+ pixel_tests_flag = '-p' if pixel_tests else ''
+ return [self._port._path_to_driver()] + [pixel_tests_flag] + self._port.get_option('additional_drt_flag', []) + per_test_args
+
+ def run_test(self, driver_input, stop_when_done):
+ if not self.started:
+ self.started = True
+ self.pid = TestDriver.next_pid
+ TestDriver.next_pid += 1
+
+ start_time = time.time()
+ test_name = driver_input.test_name
+ test_args = driver_input.args or []
+ test = self._port._tests[test_name]
+ if test.keyboard:
+ raise KeyboardInterrupt
+ if test.exception:
+ raise ValueError('exception from ' + test_name)
+ if test.device_failure:
+ raise DeviceFailure('device failure in ' + test_name)
+
+ audio = None
+ actual_text = test.actual_text
+ crash = test.crash
+ web_process_crash = test.web_process_crash
+
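+ # Simulate flaky behavior: the first run of each of these tests
+ # produces one outcome and, because the test name is remembered in
+ # self._port._flakes, any retry produces the other.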
+ if 'flaky/text.html' in test_name and not test_name in self._port._flakes:
+ self._port._flakes.add(test_name)
+ actual_text = 'flaky text failure'
+
+ if 'crash_then_text.html' in test_name:
+ if test_name in self._port._flakes:
+ actual_text = 'text failure'
+ else:
+ self._port._flakes.add(test_name)
+ crashed_process_name = self._port.driver_name()
+ crashed_pid = 1
+ crash = True
+
+ if 'text_then_crash.html' in test_name:
+ if test_name in self._port._flakes:
+ crashed_process_name = self._port.driver_name()
+ crashed_pid = 1
+ crash = True
+ else:
+ self._port._flakes.add(test_name)
+ actual_text = 'text failure'
+
+ if actual_text and test_args and test_name == 'passes/args.html':
+ actual_text = actual_text + ' ' + ' '.join(test_args)
+
+ if test.actual_audio:
+ audio = base64.b64decode(test.actual_audio)
+ crashed_process_name = None
+ crashed_pid = None
+ if crash:
+ crashed_process_name = self._port.driver_name()
+ crashed_pid = 1
+ elif web_process_crash:
+ crashed_process_name = 'WebProcess'
+ crashed_pid = 2
+
+ crash_log = ''
+ if crashed_process_name:
+ crash_logs = CrashLogs(self._port.host)
+ crash_log = crash_logs.find_newest_log(crashed_process_name, None) or ''
+
+ if stop_when_done:
+ self.stop()
+
+ if test.actual_checksum == driver_input.image_hash:
+ image = None
+ else:
+ image = test.actual_image
+ return DriverOutput(actual_text, image, test.actual_checksum, audio,
+ crash=(crash or web_process_crash), crashed_process_name=crashed_process_name,
+ crashed_pid=crashed_pid, crash_log=crash_log,
+ test_time=time.time() - start_time, timeout=test.timeout, error=test.error, pid=self.pid,
+ leak=test.leak)
+
+ def stop(self):
+ self.started = False
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/win.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/win.py
new file mode 100644
index 0000000..f206497
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/win.py
@@ -0,0 +1,246 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Windows implementation of the Port interface."""
+
+import errno
+import os
+import logging
+
+try:
+ import _winreg
+except ImportError as e:
+ _winreg = None
+ WindowsError = Exception # this shuts up pylint.
+
+from webkitpy.layout_tests.breakpad.dump_reader_win import DumpReaderWin
+from webkitpy.layout_tests.models import test_run_results
+from webkitpy.layout_tests.port import base
+from webkitpy.layout_tests.servers import crash_service
+
+
+_log = logging.getLogger(__name__)
+
+
+class WinPort(base.Port):
+ port_name = 'win'
+
+ # FIXME: Figure out how to unify this with base.TestConfiguration.all_systems()?
+ SUPPORTED_VERSIONS = ('xp', 'win7')
+
+ FALLBACK_PATHS = {'win7': ['win']}
+ FALLBACK_PATHS['xp'] = ['win-xp'] + FALLBACK_PATHS['win7']
+
+ DEFAULT_BUILD_DIRECTORIES = ('build', 'out')
+
+ BUILD_REQUIREMENTS_URL = 'http://www.chromium.org/developers/how-tos/build-instructions-windows'
+
+ @classmethod
+ def determine_full_port_name(cls, host, options, port_name):
+ if port_name.endswith('win'):
+ assert host.platform.is_win()
+ # We don't maintain separate baselines for vista, so we pretend it is win7.
+ if host.platform.os_version in ('vista', '7sp0', '7sp1', 'future'):
+ version = 'win7'
+ else:
+ version = host.platform.os_version
+ port_name = port_name + '-' + version
+ return port_name
+
+ def __init__(self, host, port_name, **kwargs):
+ super(WinPort, self).__init__(host, port_name, **kwargs)
+ self._version = port_name[port_name.index('win-') + len('win-'):]
+ assert self._version in self.SUPPORTED_VERSIONS, "%s is not in %s" % (self._version, self.SUPPORTED_VERSIONS)
+ if not self.get_option('disable_breakpad'):
+ self._dump_reader = DumpReaderWin(host, self._build_path())
+ self._crash_service = None
+ self._crash_service_available = None
+
+ def additional_drt_flag(self):
+ flags = super(WinPort, self).additional_drt_flag()
+ flags += ['--enable-direct-write']
+ if not self.get_option('disable_breakpad'):
+ flags += ['--enable-crash-reporter', '--crash-dumps-dir=%s' % self._dump_reader.crash_dumps_directory()]
+ return flags
+
+ def check_httpd(self):
+ res = super(WinPort, self).check_httpd()
+ if self.uses_apache():
+ # In order to run CGI scripts on Win32 that use unix shebang lines, we need to
+ # create entries in the registry that remap the extensions (.pl and .cgi) to the
+ # appropriate Win32 paths. The command line arguments must match the command
+ # line arguments in the shebang line exactly.
+ if _winreg:
+ res = self._check_reg(r'.cgi\Shell\ExecCGI\Command') and res
+ res = self._check_reg(r'.pl\Shell\ExecCGI\Command') and res
+ else:
+ _log.warning("Could not check the registry; http may not work correctly.")
+
+ return res
+
+ def _check_reg(self, sub_key):
+ # See the comments in check_httpd(), above, for why this routine exists and what it does.
+ try:
+ # Note that HKCR is a union of HKLM and HKCU (with the latter
+ # overriding the former), so reading from HKCR ensures that we get
+ # the value if it is set in either place. See also comments below.
+ hkey = _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, sub_key)
+ args = _winreg.QueryValue(hkey, '').split()
+ _winreg.CloseKey(hkey)
+
+ # In order to keep multiple checkouts from stepping on each other, we simply check that an
+ # existing entry points to a valid path and has the right command line.
+ if len(args) == 2 and self._filesystem.exists(args[0]) and args[0].endswith('perl.exe') and args[1] == '-wT':
+ return True
+ except WindowsError, e:
+ if e.errno != errno.ENOENT:
+ raise e
+ # The key probably just doesn't exist.
+ pass
+
+ # Note that we write to HKCU so that we don't need privileged access
+ # to the registry, and that will get reflected in HKCR when it is read, above.
+ cmdline = self.path_from_chromium_base('third_party', 'perl', 'perl', 'bin', 'perl.exe') + ' -wT'
+ hkey = _winreg.CreateKeyEx(_winreg.HKEY_CURRENT_USER, 'Software\\Classes\\' + sub_key, 0, _winreg.KEY_WRITE)
+ _winreg.SetValue(hkey, '', _winreg.REG_SZ, cmdline)
+ _winreg.CloseKey(hkey)
+ return True
+
+ def setup_test_run(self):
+ super(WinPort, self).setup_test_run()
+
+ if not self.get_option('disable_breakpad'):
+ assert not self._crash_service, 'Already running a crash service'
+ if self._crash_service_available is None:
+ self._crash_service_available = self._check_crash_service_available()
+ if not self._crash_service_available:
+ return
+ service = crash_service.CrashService(self, self._dump_reader.crash_dumps_directory())
+ service.start()
+ self._crash_service = service
+
+ def clean_up_test_run(self):
+ super(WinPort, self).clean_up_test_run()
+
+ if self._crash_service:
+ self._crash_service.stop()
+ self._crash_service = None
+
+ def setup_environ_for_server(self, server_name=None):
+ env = super(WinPort, self).setup_environ_for_server(server_name)
+
+ # FIXME: This is a temporary hack to get the cr-win bot online until
+ # someone from the cr-win port can take a look.
+ apache_envvars = ['SYSTEMDRIVE', 'SYSTEMROOT', 'TEMP', 'TMP']
+ for key, value in os.environ.items():
+ if key not in env and key in apache_envvars:
+ env[key] = value
+
+ # Put the cygwin directory first in the path to find cygwin1.dll.
+ env["PATH"] = "%s;%s" % (self.path_from_chromium_base("third_party", "cygwin", "bin"), env["PATH"])
+ # Configure the cygwin directory so that pywebsocket finds the
+ # proper Python executable to run CGI programs.
+ env["CYGWIN_PATH"] = self.path_from_chromium_base("third_party", "cygwin", "bin")
+ if self.get_option('register_cygwin'):
+ setup_mount = self.path_from_chromium_base("third_party", "cygwin", "setup_mount.bat")
+ self._executive.run_command([setup_mount]) # Paths are all absolute, so this does not require a cwd.
+ return env
+
+ def _modules_to_search_for_symbols(self):
+ # FIXME: we should return the path to the ffmpeg equivalents to detect if we have the mp3 and aac codecs installed.
+ # See https://bugs.webkit.org/show_bug.cgi?id=89706.
+ return []
+
+ def check_build(self, needs_http, printer):
+ result = super(WinPort, self).check_build(needs_http, printer)
+
+ self._crash_service_available = self._check_crash_service_available()
+ if not self._crash_service_available:
+ result = test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
+
+ if result:
+ _log.error('For complete Windows build requirements, please see:')
+ _log.error('')
+ _log.error(' http://dev.chromium.org/developers/how-tos/build-instructions-windows')
+ return result
+
+ def operating_system(self):
+ return 'win'
+
+ def relative_test_filename(self, filename):
+ path = filename[len(self.layout_tests_dir()) + 1:]
+ return path.replace('\\', '/')
+
+ def uses_apache(self):
+ val = self.get_option('use_apache')
+ if val is None:
+ return True
+ return val
+
+ def path_to_apache(self):
+ return self.path_from_chromium_base('third_party', 'apache-win32', 'bin', 'httpd.exe')
+
+ def path_to_apache_config_file(self):
+ return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', 'win-httpd.conf')
+
+ #
+ # PROTECTED ROUTINES
+ #
+
+ def _path_to_driver(self, configuration=None):
+ binary_name = '%s.exe' % self.driver_name()
+ return self._build_path_with_configuration(configuration, binary_name)
+
+ def _path_to_crash_service(self):
+ binary_name = 'content_shell_crash_service.exe'
+ return self._build_path(binary_name)
+
+ def _path_to_image_diff(self):
+ binary_name = 'image_diff.exe'
+ return self._build_path(binary_name)
+
+ def _path_to_wdiff(self):
+ return self.path_from_chromium_base('third_party', 'cygwin', 'bin', 'wdiff.exe')
+
+ def _check_crash_service_available(self):
+ """Checks whether the crash service binary is present."""
+ result = self._check_file_exists(self._path_to_crash_service(), "content_shell_crash_service.exe")
+ if not result:
+ _log.error(" Could not find crash service, unexpected crashes won't be symbolized.")
+ _log.error(' Did you build the target blink_tests?')
+ _log.error('')
+ return result
+
+ def look_for_new_crash_logs(self, crashed_processes, start_time):
+ if self.get_option('disable_breakpad'):
+ return None
+ return self._dump_reader.look_for_new_crash_logs(crashed_processes, start_time)
+
+ def clobber_old_port_specific_results(self):
+ if not self.get_option('disable_breakpad'):
+ self._dump_reader.clobber_old_results()
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/win_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/win_unittest.py
new file mode 100644
index 0000000..583540c
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/port/win_unittest.py
@@ -0,0 +1,131 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import unittest
+
+from webkitpy.common.system import outputcapture
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.layout_tests.port import port_testcase
+from webkitpy.layout_tests.port import win
+from webkitpy.tool.mocktool import MockOptions
+
+
+class WinPortTest(port_testcase.PortTestCase):
+ port_name = 'win'
+ full_port_name = 'win-xp'
+ port_maker = win.WinPort
+ os_name = 'win'
+ os_version = 'xp'
+
+ def test_setup_environ_for_server(self):
+ port = self.make_port()
+ port._executive = MockExecutive(should_log=True)
+ output = outputcapture.OutputCapture()
+ # FIXME: This test should not use the real os.environ
+ orig_environ = os.environ.copy()
+ env = output.assert_outputs(self, port.setup_environ_for_server)
+ self.assertEqual(orig_environ["PATH"], os.environ["PATH"])
+ self.assertNotEqual(env["PATH"], os.environ["PATH"])
+
+ def test_setup_environ_for_server_cygpath(self):
+ port = self.make_port()
+ env = port.setup_environ_for_server(port.driver_name())
+ self.assertEqual(env['CYGWIN_PATH'], '/mock-checkout/third_party/cygwin/bin')
+
+ def test_setup_environ_for_server_register_cygwin(self):
+ port = self.make_port(options=MockOptions(register_cygwin=True, results_directory='/'))
+ port._executive = MockExecutive(should_log=True)
+ expected_logs = "MOCK run_command: ['/mock-checkout/third_party/cygwin/setup_mount.bat'], cwd=None\n"
+ output = outputcapture.OutputCapture()
+ output.assert_outputs(self, port.setup_environ_for_server, expected_logs=expected_logs)
+
+ def assert_name(self, port_name, os_version_string, expected):
+ port = self.make_port(port_name=port_name, os_version=os_version_string)
+ self.assertEqual(expected, port.name())
+
+ def test_versions(self):
+ port = self.make_port()
+ self.assertIn(port.name(), ('win-xp', 'win-win7'))
+
+ self.assert_name(None, 'xp', 'win-xp')
+ self.assert_name('win', 'xp', 'win-xp')
+ self.assert_name('win-xp', 'xp', 'win-xp')
+ self.assert_name('win-xp', '7sp0', 'win-xp')
+
+ self.assert_name(None, '7sp0', 'win-win7')
+ self.assert_name(None, 'vista', 'win-win7')
+ self.assert_name('win', '7sp0', 'win-win7')
+ self.assert_name('win-win7', 'xp', 'win-win7')
+ self.assert_name('win-win7', '7sp0', 'win-win7')
+ self.assert_name('win-win7', 'vista', 'win-win7')
+
+ self.assertRaises(AssertionError, self.assert_name, None, 'w2k', 'win-xp')
+
+ def test_baseline_path(self):
+ port = self.make_port(port_name='win-xp')
+ self.assertEqual(port.baseline_path(), port._webkit_baseline_path('win-xp'))
+
+ port = self.make_port(port_name='win-win7')
+ self.assertEqual(port.baseline_path(), port._webkit_baseline_path('win'))
+
+ def test_build_path(self):
+ # Test that optional paths are used regardless of whether they exist.
+ options = MockOptions(configuration='Release', build_directory='/foo')
+ self.assert_build_path(options, ['/mock-checkout/out/Release'], '/foo/Release')
+
+ # Test that optional relative paths are returned unmodified.
+ options = MockOptions(configuration='Release', build_directory='foo')
+ self.assert_build_path(options, ['/mock-checkout/out/Release'], 'foo/Release')
+
+ # Test that we prefer the legacy dir over the new dir.
+ options = MockOptions(configuration='Release', build_directory=None)
+ self.assert_build_path(options, ['/mock-checkout/build/Release', '/mock-checkout/out'], '/mock-checkout/build/Release')
+
+ def test_build_path_timestamps(self):
+ options = MockOptions(configuration='Release', build_directory=None)
+ port = self.make_port(options=options)
+ port.host.filesystem.maybe_make_directory('/mock-checkout/out/Release')
+ port.host.filesystem.maybe_make_directory('/mock-checkout/build/Release')
+ # Check with 'out' being newer.
+ port.host.filesystem.mtime = lambda f: 5 if '/out/' in f else 4
+ self.assertEqual(port._build_path(), '/mock-checkout/out/Release')
+ # Check with 'build' being newer.
+ port.host.filesystem.mtime = lambda f: 5 if '/build/' in f else 4
+ self.assertEqual(port._build_path(), '/mock-checkout/build/Release')
+
+ def test_operating_system(self):
+ self.assertEqual('win', self.make_port().operating_system())
+
+ def test_driver_name_option(self):
+ self.assertTrue(self.make_port()._path_to_driver().endswith('content_shell.exe'))
+ self.assertTrue(self.make_port(options=MockOptions(driver_name='OtherDriver'))._path_to_driver().endswith('OtherDriver.exe'))
+
+ def test_path_to_image_diff(self):
+ self.assertEqual(self.make_port()._path_to_image_diff(), '/mock-checkout/out/Release/image_diff.exe')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/print_layout_test_times.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/print_layout_test_times.py
new file mode 100755
index 0000000..15bc1f0
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/print_layout_test_times.py
@@ -0,0 +1,150 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import optparse
+
+from webkitpy.layout_tests.port import Port
+
+
+def main(host, argv):
+ parser = optparse.OptionParser(usage='%prog [times_ms.json]')
+ parser.add_option('-f', '--forward', action='store', type='int',
+ help='group times by first N directories of test')
+ parser.add_option('-b', '--backward', action='store', type='int',
+ help='group times by last N directories of test')
+ parser.add_option('--fastest', action='store', type='float',
+ help='print a list of tests that will take N % of the time')
+
+ epilog = """
+ You can print out aggregate times per directory using the -f and -b
+ flags. The value passed to each flag indicates the "depth" of the flag,
+ similar to positive and negative arguments to python arrays.
+
+ For example, given fast/forms/week/week-input-type.html, -f 1
+ truncates to 'fast', -f 2 and -b 2 truncates to 'fast/forms', and -b 1
+ truncates to fast/forms/week . -f 0 truncates to '', which can be used
+ to produce a single total time for the run."""
+ parser.epilog = '\n'.join(s.lstrip() for s in epilog.splitlines())
+
+ options, args = parser.parse_args(argv)
+
+ port = host.port_factory.get()
+ if args and args[0]:
+ times_ms_path = args[0]
+ else:
+ times_ms_path = host.filesystem.join(port.results_directory(), 'times_ms.json')
+
+ times_trie = json.loads(host.filesystem.read_text_file(times_ms_path))
+
+ times = convert_trie_to_flat_paths(times_trie)
+
+ if options.fastest:
+ if options.forward is None and options.backward is None:
+ options.forward = 0
+ print_fastest(host, port, options, times)
+ else:
+ print_times(host, options, times)
+
+
+def print_times(host, options, times):
+ by_key = times_by_key(times, options.forward, options.backward)
+ for key in sorted(by_key):
+ if key:
+ host.print_("%s %d" % (key, by_key[key]))
+ else:
+ host.print_("%d" % by_key[key])
+
+
+def print_fastest(host, port, options, times):
+ total = times_by_key(times, 0, None)['']
+ by_key = times_by_key(times, options.forward, options.backward)
+ keys_by_time = sorted(by_key, key=lambda k: (by_key[k], k))
+
+ tests_by_key = {}
+ for test_name in sorted(times):
+ key = key_for(test_name, options.forward, options.backward)
+ if key in tests_by_key:
+ tests_by_key[key].append(test_name)
+ else:
+ tests_by_key[key] = [test_name]
+
+ fast_tests_by_key = {}
+ total_so_far = 0
+ per_key = total * options.fastest / (len(keys_by_time) * 100.0)
+ budget = 0
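+ # Each key gets an equal share (per_key) of the requested percentage
+ # of the total time; keys are visited fastest-first and any unused
+ # budget rolls over to the next key.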
+ while keys_by_time:
+ budget += per_key
+ key = keys_by_time.pop(0)
+ tests_by_time = sorted(tests_by_key[key], key=lambda t: (times[t], t))
+ fast_tests_by_key[key] = []
+ while tests_by_time and total_so_far <= budget:
+ test = tests_by_time.pop(0)
+ test_time = times[test]
+ # Make sure test time > 0 so we don't include tests that are skipped.
+ if test_time and total_so_far + test_time <= budget:
+ fast_tests_by_key[key].append(test)
+ total_so_far += test_time
+
+ for k in sorted(fast_tests_by_key):
+ for t in fast_tests_by_key[k]:
+ host.print_("%s %d" % (t, times[t]))
+ return
+
+
+def key_for(path, forward, backward):
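+ # A sketch of the slicing: for 'fast/forms/week/w.html', forward=2
+ # gives 'fast/forms', backward=1 gives 'fast/forms/week', and when
+ # neither is set the full path is returned unchanged.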
+ sep = Port.TEST_PATH_SEPARATOR
+ if forward is not None:
+ return sep.join(path.split(sep)[:-1][:forward])
+ if backward is not None:
+ return sep.join(path.split(sep)[:-backward])
+ return path
+
+
+def times_by_key(times, forward, backward):
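+ # E.g. {'a/b/t1.html': 10, 'a/c/t2.html': 20} with forward=1
+ # aggregates to {'a': 30}, while backward=1 keeps the directories
+ # separate: {'a/b': 10, 'a/c': 20}.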
+ by_key = {}
+ for test_name in times:
+ key = key_for(test_name, forward, backward)
+ if key in by_key:
+ by_key[key] += times[test_name]
+ else:
+ by_key[key] = times[test_name]
+ return by_key
+
+
+def convert_trie_to_flat_paths(trie, prefix=None):
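+ # E.g. {"foo": {"bar.html": 1, "baz": {"q.html": 2}}} flattens to
+ # {"foo/bar.html": 1, "foo/baz/q.html": 2}.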
+ result = {}
+ for name, data in trie.iteritems():
+ if prefix:
+ name = prefix + "/" + name
+ if isinstance(data, int):
+ result[name] = data
+ else:
+ result.update(convert_trie_to_flat_paths(data, name))
+
+ return result
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/print_layout_test_times_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/print_layout_test_times_unittest.py
new file mode 100755
index 0000000..97bdf08
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/print_layout_test_times_unittest.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.print_layout_test_times import main
+
+
+class PrintLayoutTestTimesTest(unittest.TestCase):
+
+ def check(self, args, expected_output, files=None):
+ host = MockHost()
+ fs = host.filesystem
+ results_directory = host.port_factory.get().results_directory()
+ if files:
+ fs.files = files
+ else:
+ fs.write_text_file(fs.join(results_directory, 'times_ms.json'), """
+ {"foo": {"foo1": {"fast1.html": 10,
+ "fast2.html": 10,
+ "slow1.html": 80},
+ "foo2": {"fast3.html": 10,
+ "fast4.html": 10,
+ "slow2.html": 80}},
+ "bar": {"bar1": {"fast5.html": 10,
+ "fast6.html": 10,
+ "slow3.html": 80}}}
+ """)
+ main(host, args)
+ self.assertEqual(host.stdout.getvalue(), expected_output)
+
+ def test_fastest_overall(self):
+ # This is the fastest 10% of the tests overall (ignoring dir structure, equivalent to -f 0).
+ self.check(['--fastest', '10'],
+ "bar/bar1/fast5.html 10\n"
+ "bar/bar1/fast6.html 10\n"
+ "foo/foo1/fast1.html 10\n")
+
+ def test_fastest_forward_1(self):
+ # Note that we don't get anything from foo/foo2, as foo/foo1 used up the budget for foo.
+ self.check(['-f', '1', '--fastest', '10'],
+ "bar/bar1/fast5.html 10\n"
+ "foo/foo1/fast1.html 10\n"
+ "foo/foo1/fast2.html 10\n")
+
+ def test_fastest_back_1(self):
+ # Here we get one test from each dir, showing that we are going properly breadth-first.
+ self.check(['-b', '1', '--fastest', '10'],
+ "bar/bar1/fast5.html 10\n"
+ "foo/foo1/fast1.html 10\n"
+ "foo/foo2/fast3.html 10\n")
+
+ def test_no_args(self):
+ # This should be every test, sorted lexicographically.
+ self.check([],
+ "bar/bar1/fast5.html 10\n"
+ "bar/bar1/fast6.html 10\n"
+ "bar/bar1/slow3.html 80\n"
+ "foo/foo1/fast1.html 10\n"
+ "foo/foo1/fast2.html 10\n"
+ "foo/foo1/slow1.html 80\n"
+ "foo/foo2/fast3.html 10\n"
+ "foo/foo2/fast4.html 10\n"
+ "foo/foo2/slow2.html 80\n")
+
+ def test_total(self):
+ self.check(['-f', '0'], "300\n")
+
+ def test_forward_one(self):
+ self.check(['-f', '1'],
+ "bar 100\n"
+ "foo 200\n")
+
+ def test_backward_one(self):
+ self.check(['-b', '1'],
+ "bar/bar1 100\n"
+ "foo/foo1 100\n"
+ "foo/foo2 100\n")
+
+ def test_path_to_file(self):
+ # Tests that we can use a custom file rather than the port's default.
+ self.check(['/tmp/times_ms.json'], "foo/bar.html 1\n",
+ files={'/tmp/times_ms.json': '{"foo":{"bar.html": 1}}'})
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/print_layout_test_types.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/print_layout_test_types.py
new file mode 100644
index 0000000..af80c74
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/print_layout_test_types.py
@@ -0,0 +1,54 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import optparse
+
+from webkitpy.layout_tests.controllers import layout_test_finder
+
+
+def main(host, argv):
+ port = host.port_factory.get()
+
+ parser = optparse.OptionParser()
+ parser.add_option('--test-list', action='append')
+ parser.add_option('--type', action='append',
+ help='limit to tests of type X (valid values %s)' % port.ALL_TEST_TYPES)
+
+ options, args = parser.parse_args(argv)
+ finder = layout_test_finder.LayoutTestFinder(port, options)
+ _, tests = finder.find_tests(options, args)
+
+ for test_name in tests:
+ test_type = port.test_type(test_name)
+ if options.type:
+ if test_type in options.type:
+ host.print_(test_name)
+ else:
+ host.print_(test_name, test_type)
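+
+
+# Example invocation (a sketch, assuming a real webkitpy Host object; the
+# unit test below exercises the same path with a MockHost):
+#
+#   from webkitpy.common.host import Host
+#   main(Host(), ['--type', 'audio', 'passes'])
+#   # prints only the audio-type tests under passes/, one name per line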
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/print_layout_test_types_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/print_layout_test_types_unittest.py
new file mode 100755
index 0000000..c0ac638
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/print_layout_test_types_unittest.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.print_layout_test_types import main
+
+
+class PrintLayoutTestTypesTest(unittest.TestCase):
+
+ def check(self, args, expected_output, files=None):
+ host = MockHost()
+ files = files or {}
+ for path, contents in files.items():
+ host.filesystem.write_binary_file(path, contents)
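+        # Force the factory to always return the 'test' port, regardless of
+        # the name or options requested.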
+ orig_get = host.port_factory.get
+ host.port_factory.get = lambda *args, **kwargs: orig_get('test')
+ main(host, args)
+ self.assertEqual(host.stdout.getvalue(), expected_output)
+
+ def test_test_list(self):
+ files = {'/tmp/test_list': 'passes/image.html'}
+ self.check(['--test-list', '/tmp/test_list'], 'passes/image.html pixel\n', files=files)
+
+ def test_type(self):
+ self.check(['--type', 'audio', 'passes'], 'passes/audio.html\n')
+
+ def test_basic(self):
+ self.check(['failures/unexpected/missing_image.html', 'passes/image.html', 'passes/audio.html', 'passes/reftest.html'],
+ 'failures/unexpected/missing_image.html text\n'
+ 'passes/image.html pixel\n'
+ 'passes/audio.html audio\n'
+ 'passes/reftest.html ref\n')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/process_json_data_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/process_json_data_unittest.py
new file mode 100644
index 0000000..8f1e2c2
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/process_json_data_unittest.py
@@ -0,0 +1,60 @@
+# Copyright (C) 2014 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import unittest
+
+from webkitpy.layout_tests.generate_results_dashboard import ProcessJsonData
+
+
+class ProcessJsonDataTester(unittest.TestCase):
+
+ def test_check_failing_results(self):
+ valid_json_data = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'expected': u'PASS', u'actual': u'TEXT', u'is_unexpected': True}}}}}}
+ valid_json_data_1 = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'expected': u'TEXT', u'actual': u'TEXT', u'is_unexpected': True}}}}}}
+ valid_json_data_2 = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name_2': {u'test_2.html': {u'expected': u'PASS', u'actual': u'TEXT', u'is_unexpected': True}}, u'test_name': {u'test.html': {u'expected': u'TEXT', u'actual': u'TEXT', u'is_unexpected': True}}}}}}
+ expected_result = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'archived_results': [u'TEXT', u'PASS']}}}}}}
+ process_json_data = ProcessJsonData(valid_json_data, [valid_json_data_1], [valid_json_data_2])
+ actual_result = process_json_data.generate_archived_result()
+ self.assertEqual(expected_result, actual_result)
+
+ def test_check_full_results(self):
+ valid_json_data = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name_2': {u'test_2.html': {u'expected': u'PASS', u'actual': u'TEXT', u'is_unexpected': True}}}}}}
+ valid_json_data_1 = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'expected': u'TEXT', u'actual': u'TEXT', u'is_unexpected': True}}}}}}
+ valid_json_data_2 = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name_2': {u'test_2.html': {u'expected': u'PASS', u'actual': u'TEXT', u'is_unexpected': True}}, u'test_name': {u'test.html': {u'expected': u'TEXT', u'actual': u'TEXT', u'is_unexpected': True}}}}}}
+ expected_result = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name_2': {u'test_2.html': {u'archived_results': [u'TEXT', u'TEXT']}}}}}}
+ process_json_data = ProcessJsonData(valid_json_data, [valid_json_data_1], [valid_json_data_2])
+ actual_result = process_json_data.generate_archived_result()
+ self.assertEqual(expected_result, actual_result)
+
+ def test_null_check(self):
+ valid_json_data = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'expected': u'PASS', u'actual': u'TEXT', u'is_unexpected': True}}}}}}
+ expected_result = {u'tests': {u'test_category': {u'test_sub_category': {u'test_name': {u'test.html': {u'archived_results': [u'TEXT']}}}}}}
+ process_json_data = ProcessJsonData(valid_json_data, [], [])
+ actual_result = process_json_data.generate_archived_result()
+ self.assertEqual(expected_result, actual_result)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/reftests/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/reftests/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/reftests/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link.py
new file mode 100644
index 0000000..e21d73d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link.py
@@ -0,0 +1,63 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Utility module for reftests."""
+
+
+from HTMLParser import HTMLParser
+
+
+class ExtractReferenceLinkParser(HTMLParser):
+
+ def __init__(self):
+ HTMLParser.__init__(self)
+ self.matches = []
+ self.mismatches = []
+
+ def handle_starttag(self, tag, attrs):
+ if tag != "link":
+ return
+ attrs = dict(attrs)
+ if not "rel" in attrs:
+ return
+ if not "href" in attrs:
+ return
+ if attrs["rel"] == "match":
+ self.matches.append(attrs["href"])
+ if attrs["rel"] == "mismatch":
+ self.mismatches.append(attrs["href"])
+
+
+def get_reference_link(html_string):
+ """Returns reference links in the given html_string.
+
+ Returns:
+ a tuple of two URL lists, (matches, mismatches).
+ """
+ parser = ExtractReferenceLinkParser()
+ parser.feed(html_string)
+ parser.close()
+
+ return parser.matches, parser.mismatches
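+
+
+# Example usage (a sketch of the API above):
+#
+#   matches, mismatches = get_reference_link(
+#       '<link rel="match" href="a-ref.html"/>'
+#       '<link rel="mismatch" href="b-notref.html"/>')
+#   # matches == ['a-ref.html'], mismatches == ['b-notref.html']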
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link_unittest.py
new file mode 100644
index 0000000..c6c0043
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link_unittest.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.layout_tests.reftests import extract_reference_link
+
+
+class ExtractLinkMatchTest(unittest.TestCase):
+
+ def test_getExtractMatch(self):
+ html_1 = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<title>CSS Test: DESCRIPTION OF TEST</title>
+<link rel="author" title="NAME_OF_AUTHOR"
+href="mailto:EMAIL OR http://CONTACT_PAGE"/>
+<link rel="help" href="RELEVANT_SPEC_SECTION"/>
+<link rel="match" href="green-box-ref.xht" />
+<link rel="match" href="blue-box-ref.xht" />
+<link rel="mismatch" href="red-box-notref.xht" />
+<link rel="mismatch" href="red-box-notref.xht" />
+<meta name="flags" content="TOKENS" />
+<meta name="assert" content="TEST ASSERTION"/>
+<style type="text/css"><![CDATA[
+CSS FOR TEST
+]]></style>
+</head>
+<body>
+CONTENT OF TEST
+</body>
+</html>
+"""
+ matches, mismatches = extract_reference_link.get_reference_link(html_1)
+ self.assertItemsEqual(matches,
+ ["green-box-ref.xht", "blue-box-ref.xht"])
+ self.assertItemsEqual(mismatches,
+ ["red-box-notref.xht", "red-box-notref.xht"])
+
+ html_2 = ""
+ empty_tuple_1 = extract_reference_link.get_reference_link(html_2)
+ self.assertEqual(empty_tuple_1, ([], []))
+
+ # Link does not have a "ref" attribute.
+ html_3 = """<link href="RELEVANT_SPEC_SECTION"/>"""
+ empty_tuple_2 = extract_reference_link.get_reference_link(html_3)
+ self.assertEqual(empty_tuple_2, ([], []))
+
+ # Link does not have a "href" attribute.
+ html_4 = """<link rel="match"/>"""
+ empty_tuple_3 = extract_reference_link.get_reference_link(html_4)
+ self.assertEqual(empty_tuple_3, ([], []))
+
+ # Link does not have a "/" at the end.
+ html_5 = """<link rel="help" href="RELEVANT_SPEC_SECTION">"""
+ empty_tuple_4 = extract_reference_link.get_reference_link(html_5)
+ self.assertEqual(empty_tuple_4, ([], []))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
new file mode 100644
index 0000000..aafc43f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -0,0 +1,385 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import optparse
+import os
+import sys
+import traceback
+
+from webkitpy.common.host import Host
+from webkitpy.layout_tests.controllers.manager import Manager
+from webkitpy.layout_tests.models import test_run_results
+from webkitpy.layout_tests.port import configuration_options, platform_options
+from webkitpy.layout_tests.views import buildbot_results
+from webkitpy.layout_tests.views import printing
+from webkitpy.layout_tests.generate_results_dashboard import DashBoardGenerator
+
+_log = logging.getLogger(__name__)
+
+
+def main(argv, stdout, stderr):
+ options, args = parse_args(argv)
+
+    if options.platform and 'test' in options.platform and 'browser_test' not in options.platform:
+ # It's a bit lame to import mocks into real code, but this allows the user
+ # to run tests against the test platform interactively, which is useful for
+ # debugging test failures.
+ from webkitpy.common.host_mock import MockHost
+ host = MockHost()
+ else:
+ host = Host()
+
+ if options.lint_test_files:
+ from webkitpy.layout_tests.lint_test_expectations import run_checks
+ return run_checks(host, options, stderr)
+
+ try:
+ port = host.port_factory.get(options.platform, options)
+    except NotImplementedError as e:
+ # FIXME: is this the best way to handle unsupported port names?
+ print >> stderr, str(e)
+ return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
+
+ try:
+ run_details = run(port, options, args, stderr)
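+        # Print the results unless the run hit a hard error (EARLY_EXIT_STATUS
+        # still has printable results) or was interrupted from the keyboard.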
+ if ((run_details.exit_code not in test_run_results.ERROR_CODES or
+ run_details.exit_code == test_run_results.EARLY_EXIT_STATUS) and
+ not run_details.initial_results.keyboard_interrupted):
+ bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging)
+ bot_printer.print_results(run_details)
+ gen_dash_board = DashBoardGenerator(port)
+ gen_dash_board.generate()
+
+ return run_details.exit_code
+
+    # We still need to handle KeyboardInterrupt, at least for the webkitpy unit tests.
+ except KeyboardInterrupt:
+ return test_run_results.INTERRUPTED_EXIT_STATUS
+ except test_run_results.TestRunException as e:
+ print >> stderr, e.msg
+ return e.code
+ except BaseException as e:
+ if isinstance(e, Exception):
+ print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
+ traceback.print_exc(file=stderr)
+ return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
+
+
+def parse_args(args):
+ option_group_definitions = []
+
+ option_group_definitions.append(("Platform options", platform_options()))
+ option_group_definitions.append(("Configuration options", configuration_options()))
+ option_group_definitions.append(("Printing Options", printing.print_options()))
+
+ option_group_definitions.append(("Android-specific Options", [
+ optparse.make_option("--adb-device",
+ action="append", default=[],
+ help="Run Android layout tests on these devices."),
+
+ # FIXME: Flip this to be off by default once we can log the device setup more cleanly.
+ optparse.make_option("--no-android-logging",
+ action="store_false", dest='android_logging', default=True,
+ help="Do not log android-specific debug messages (default is to log as part of --debug-rwt-logging"),
+ ]))
+
+ option_group_definitions.append(("Results Options", [
+ optparse.make_option("--add-platform-exceptions", action="store_true", default=False,
+ help="Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"),
+ optparse.make_option("--additional-drt-flag", action="append",
+ default=[], help="Additional command line flag to pass to the driver "
+ "Specify multiple times to add multiple flags."),
+ optparse.make_option("--additional-expectations", action="append", default=[],
+ help="Path to a test_expectations file that will override previous expectations. "
+ "Specify multiple times for multiple sets of overrides."),
+ optparse.make_option("--additional-platform-directory", action="append",
+ default=[], help="Additional directory where to look for test "
+ "baselines (will take precendence over platform baselines). "
+ "Specify multiple times to add multiple search path entries."),
+ optparse.make_option("--build-directory",
+ help="Path to the directory under which build files are kept (should not include configuration)"),
+ optparse.make_option("--clobber-old-results", action="store_true",
+ default=False, help="Clobbers test results from previous runs."),
+ optparse.make_option("--compare-port", action="store", default=None,
+ help="Use the specified port's baselines first"),
+ optparse.make_option("--driver-name", type="string",
+ help="Alternative driver binary to use"),
+ optparse.make_option("--full-results-html", action="store_true",
+ default=False,
+ help="Show all failures in results.html, rather than only regressions"),
+ optparse.make_option("--new-baseline", action="store_true",
+ default=False, help="Save generated results as new baselines "
+ "into the *most-specific-platform* directory, overwriting whatever's "
+ "already there. Equivalent to --reset-results --add-platform-exceptions"),
+ optparse.make_option("--no-new-test-results", action="store_false",
+ dest="new_test_results", default=True,
+ help="Don't create new baselines when no expected results exist"),
+ optparse.make_option("--no-show-results", action="store_false",
+ default=True, dest="show_results",
+ help="Don't launch a browser with results after the tests "
+ "are done"),
+ optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_true",
+ dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
+ optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_false",
+ dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
+
+        # FIXME: we should support a comma-separated list with --pixel-test-directory as well.
+ optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories",
+ help="A directory where it is allowed to execute tests as pixel tests. "
+ "Specify multiple times to add multiple directories. "
+ "This option implies --pixel-tests. If specified, only those tests "
+ "will be executed as pixel tests that are located in one of the "
+ "directories enumerated with the option. Some ports may ignore this "
+ "option while others can have a default value that can be overridden here."),
+
+ optparse.make_option("--reset-results", action="store_true",
+ default=False, help="Reset expectations to the "
+ "generated results in their existing location."),
+ optparse.make_option("--results-directory", help="Location of test results"),
+ optparse.make_option("--skip-failing-tests", action="store_true",
+ default=False, help="Skip tests that are expected to fail. "
+ "Note: When using this option, you might miss new crashes "
+ "in these tests."),
+ optparse.make_option("--smoke", action="store_true",
+ help="Run just the SmokeTests"),
+ optparse.make_option("--no-smoke", dest="smoke", action="store_false",
+ help="Do not run just the SmokeTests"),
+ ]))
+
+ option_group_definitions.append(("Testing Options", [
+ optparse.make_option("--additional-env-var", type="string", action="append", default=[],
+ help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"),
+ optparse.make_option("--batch-size",
+ help=("Run a the tests in batches (n), after every n tests, "
+ "the driver is relaunched."), type="int", default=None),
+ optparse.make_option("--build", dest="build",
+ action="store_true", default=True,
+ help="Check to ensure the build is up-to-date (default)."),
+ optparse.make_option("--no-build", dest="build",
+ action="store_false", help="Don't check to see if the build is up-to-date."),
+ optparse.make_option("--child-processes",
+ help="Number of drivers to run in parallel."),
+ optparse.make_option("--disable-breakpad", action="store_true",
+ help="Don't use breakpad to symbolize unexpected crashes."),
+ optparse.make_option("--driver-logging", action="store_true",
+ help="Print detailed logging of the driver/content_shell"),
+ optparse.make_option("--enable-leak-detection", action="store_true",
+ help="Enable the leak detection of DOM objects."),
+ optparse.make_option("--enable-sanitizer", action="store_true",
+ help="Only alert on sanitizer-related errors and crashes"),
+ optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
+ default=None, help="Exit after the first N crashes instead of "
+ "running all tests"),
+ optparse.make_option("--exit-after-n-failures", type="int", default=None,
+ help="Exit after the first N failures instead of running all "
+ "tests"),
+ optparse.make_option("--ignore-builder-category", action="store",
+ help=("The category of builders to use with the --ignore-flaky-tests "
+ "option ('layout' or 'deps').")),
+ optparse.make_option("--ignore-flaky-tests", action="store",
+ help=("Control whether tests that are flaky on the bots get ignored."
+ "'very-flaky' == Ignore any tests that flaked more than once on the bot."
+ "'maybe-flaky' == Ignore any tests that flaked once on the bot."
+ "'unexpected' == Ignore any tests that had unexpected results on the bot.")),
+ optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
+ optparse.make_option("--max-locked-shards", type="int", default=0,
+ help="Set the maximum number of locked shards"),
+ optparse.make_option("--no-retry-failures", action="store_false",
+ dest="retry_failures",
+ help="Don't re-try any tests that produce unexpected results."),
+ optparse.make_option("--nocheck-sys-deps", action="store_true",
+ default=False,
+ help="Don't check the system dependencies (themes)"),
+ optparse.make_option("--order", action="store", default="natural",
+ help=("determine the order in which the test cases will be run. "
+ "'none' == use the order in which the tests were listed either in arguments or test list, "
+ "'natural' == use the natural order (default), "
+ "'random-seeded' == randomize the test order using a fixed seed, "
+ "'random' == randomize the test order.")),
+ optparse.make_option("--profile", action="store_true",
+ help="Output per-test profile information."),
+ optparse.make_option("--profiler", action="store",
+ help="Output per-test profile information, using the specified profiler."),
+ optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
+ optparse.make_option("--retry-failures", action="store_true",
+ help="Re-try any tests that produce unexpected results. Default is to not retry if an explicit list of tests is passed to run-webkit-tests."),
+ optparse.make_option("--run-chunk",
+ help=("Run a specified chunk (n:l), the nth of len l, "
+ "of the layout tests")),
+ optparse.make_option("--run-part", help=("Run a specified part (n:m), "
+ "the nth of m parts, of the layout tests")),
+ optparse.make_option("--run-singly", action="store_true",
+ default=False, help="DEPRECATED, same as --batch-size=1 --verbose"),
+ optparse.make_option("--skipped", action="store", default=None,
+ help=("control how tests marked SKIP are run. "
+ "'default' == Skip tests unless explicitly listed on the command line, "
+ "'ignore' == Run them anyway, "
+ "'only' == only run the SKIP tests, "
+ "'always' == always skip, even if listed on the command line.")),
+ optparse.make_option("--test-list", action="append",
+ help="read list of tests to run from file", metavar="FILE"),
+ optparse.make_option("--time-out-ms",
+ help="Set the timeout for each test"),
+ optparse.make_option("--wrapper",
+ help="wrapper command to insert before invocations of "
+ "the driver; option is split on whitespace before "
+ "running. (Example: --wrapper='valgrind --smc-check=all')"),
+ # FIXME: Display default number of child processes that will run.
+ optparse.make_option("-f", "--fully-parallel", action="store_true",
+ help="run all tests in parallel"),
+ optparse.make_option("-i", "--ignore-tests", action="append", default=[],
+ help="directories or test to ignore (may specify multiple times)"),
+ optparse.make_option("-n", "--dry-run", action="store_true",
+ default=False,
+ help="Do everything but actually run the tests or upload results."),
+ ]))
+
+ option_group_definitions.append(("Miscellaneous Options", [
+ optparse.make_option("--lint-test-files", action="store_true",
+ default=False, help=("Makes sure the test files parse for all "
+ "configurations. Does not run any tests.")),
+ ]))
+
+ # FIXME: Move these into json_results_generator.py
+ option_group_definitions.append(("Result JSON Options", [
+ optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
+ help=("The name of the builder used in its path, e.g. "
+ "webkit-rel.")),
+ optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
+ help=("The build number of the builder running this script.")),
+ optparse.make_option("--builder-name", default="",
+ help=("The name of the builder shown on the waterfall running "
+ "this script e.g. WebKit.")),
+ optparse.make_option("--master-name", help="The name of the buildbot master."),
+ optparse.make_option("--test-results-server", default="",
+ help=("If specified, upload results json files to this appengine "
+ "server.")),
+ optparse.make_option("--write-full-results-to",
+ help=("If specified, copy full_results.json from the results dir "
+ "to the specified path.")),
+ ]))
+
+ option_parser = optparse.OptionParser()
+
+ for group_name, group_options in option_group_definitions:
+ option_group = optparse.OptionGroup(option_parser, group_name)
+ option_group.add_options(group_options)
+ option_parser.add_option_group(option_group)
+
+ return option_parser.parse_args(args)
+
+
+def _set_up_derived_options(port, options, args):
+ """Sets the options values that depend on other options values."""
+ if not options.child_processes:
+ options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES",
+ str(port.default_child_processes()))
+ if not options.max_locked_shards:
+ options.max_locked_shards = int(os.environ.get("WEBKIT_TEST_MAX_LOCKED_SHARDS",
+ str(port.default_max_locked_shards())))
+
+ if not options.configuration:
+ options.configuration = port.default_configuration()
+
+ if options.pixel_tests is None:
+ options.pixel_tests = port.default_pixel_tests()
+
+ if not options.time_out_ms:
+ options.time_out_ms = str(port.default_timeout_ms())
+
+ options.slow_time_out_ms = str(5 * int(options.time_out_ms))
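+    # Slow tests get five times the regular per-test budget; a 6000 ms
+    # timeout, for example, yields slow_time_out_ms == '30000'.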
+
+ if options.additional_platform_directory:
+ additional_platform_directories = []
+ for path in options.additional_platform_directory:
+ additional_platform_directories.append(port.host.filesystem.abspath(path))
+ options.additional_platform_directory = additional_platform_directories
+
+ if options.new_baseline:
+ options.reset_results = True
+ options.add_platform_exceptions = True
+
+ if options.pixel_test_directories:
+ options.pixel_tests = True
+        verified_dirs = set()
+ pixel_test_directories = options.pixel_test_directories
+ for directory in pixel_test_directories:
+ # FIXME: we should support specifying the directories all the ways we support it for additional
+ # arguments specifying which tests and directories to run. We should also move the logic for that
+ # to Port.
+ filesystem = port.host.filesystem
+ if not filesystem.isdir(filesystem.join(port.layout_tests_dir(), directory)):
+ _log.warning("'%s' was passed to --pixel-test-directories, which doesn't seem to be a directory" % str(directory))
+ else:
+                verified_dirs.add(directory)
+
+        options.pixel_test_directories = list(verified_dirs)
+
+ if options.run_singly:
+ options.batch_size = 1
+ options.verbose = True
+
+ if not args and not options.test_list and options.smoke is None:
+ options.smoke = port.default_smoke_test_only()
+ if options.smoke:
+ if not args and not options.test_list and options.retry_failures is None:
+ # Retry failures by default if we're doing just a smoke test (no additional tests).
+ options.retry_failures = True
+
+ if not options.test_list:
+ options.test_list = []
+ options.test_list.append(port.host.filesystem.join(port.layout_tests_dir(), 'SmokeTests'))
+ if not options.skipped:
+ options.skipped = 'always'
+
+ if not options.skipped:
+ options.skipped = 'default'
+
+
+def run(port, options, args, logging_stream):
+ logger = logging.getLogger()
+ logger.setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO)
+
+ try:
+ printer = printing.Printer(port, options, logging_stream, logger=logger)
+
+ _set_up_derived_options(port, options, args)
+ manager = Manager(port, options, printer)
+ printer.print_config(port.results_directory())
+
+ run_details = manager.run(args)
+ _log.debug("Testing completed, Exit status: %d" % run_details.exit_code)
+ return run_details
+ finally:
+ printer.cleanup()
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
new file mode 100644
index 0000000..7ac88d3
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
@@ -0,0 +1,1060 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import Queue
+import StringIO
+import codecs
+import json
+import logging
+import os
+import platform
+import re
+import sys
+import thread
+import time
+import threading
+import unittest
+
+from webkitpy.common.system import outputcapture, path
+from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.host import Host
+from webkitpy.common.host_mock import MockHost
+
+from webkitpy.layout_tests import port
+from webkitpy.layout_tests import run_webkit_tests
+from webkitpy.layout_tests.models import test_run_results
+from webkitpy.layout_tests.port import Port
+from webkitpy.layout_tests.port import test
+from webkitpy.tool import grammar
+from webkitpy.tool.mocktool import MockOptions
+
+
+def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
+ extra_args = extra_args or []
+ args = []
+    if '--platform' not in extra_args:
+ args.extend(['--platform', 'test'])
+ if not new_results:
+ args.append('--no-new-test-results')
+
+    if '--child-processes' not in extra_args:
+        args.extend(['--child-processes', '1'])
+ args.extend(extra_args)
+ if not tests_included:
+ # We use the glob to test that globbing works.
+ args.extend(['passes',
+ 'http/tests',
+ 'websocket/tests',
+ 'failures/expected/*'])
+ return run_webkit_tests.parse_args(args)
+
+
+def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
+ options, parsed_args = parse_args(extra_args, tests_included)
+ if not port_obj:
+ host = host or MockHost()
+ port_obj = host.port_factory.get(port_name=options.platform, options=options)
+
+ if shared_port:
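+        # Hand every caller the same port object so the workers share state.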
+ port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
+
+ logging_stream = StringIO.StringIO()
+ run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
+ return run_details.exit_code == 0
+
+
+def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
+ options, parsed_args = parse_args(extra_args=extra_args,
+ tests_included=tests_included,
+ print_nothing=False, new_results=new_results)
+ host = host or MockHost()
+ if not port_obj:
+ port_obj = host.port_factory.get(port_name=options.platform, options=options)
+
+ run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
+ return (run_details, output, host.user)
+
+
+def run_and_capture(port_obj, options, parsed_args, shared_port=True):
+ if shared_port:
+ port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
+ oc = outputcapture.OutputCapture()
+ try:
+ oc.capture_output()
+ logging_stream = StringIO.StringIO()
+ run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
+ finally:
+ oc.restore_output()
+ return (run_details, logging_stream)
+
+
+def get_tests_run(args, host=None, port_obj=None):
+ results = get_test_results(args, host=host, port_obj=port_obj)
+ return [result.test_name for result in results]
+
+
+def get_test_batches(args, host=None):
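+    # Group consecutive results by worker pid; a new pid means the driver was
+    # relaunched, which is what options like --batch-size control.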
+ results = get_test_results(args, host)
+ batches = []
+ batch = []
+ current_pid = None
+ for result in results:
+ if batch and result.pid != current_pid:
+ batches.append(batch)
+ batch = []
+ batch.append(result.test_name)
+ if batch:
+ batches.append(batch)
+ return batches
+
+
+def get_test_results(args, host=None, port_obj=None):
+ options, parsed_args = parse_args(args, tests_included=True)
+
+ host = host or MockHost()
+ port_obj = port_obj or host.port_factory.get(port_name=options.platform, options=options)
+
+ oc = outputcapture.OutputCapture()
+ oc.capture_output()
+ logging_stream = StringIO.StringIO()
+ try:
+ run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
+ finally:
+ oc.restore_output()
+
+ all_results = []
+ if run_details.initial_results:
+ all_results.extend(run_details.initial_results.all_results)
+
+ if run_details.retry_results:
+ all_results.extend(run_details.retry_results.all_results)
+ return all_results
+
+
+def parse_full_results(full_results_text):
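+    # The results file is JSONP of the form 'ADD_RESULTS(<json>);' (as written
+    # to failing_results.json); strip the wrapper before parsing.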
+ json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
+ compressed_results = json.loads(json_to_eval)
+ return compressed_results
+
+
+class StreamTestingMixin(object):
+ def assertContains(self, stream, string):
+ self.assertTrue(string in stream.getvalue())
+
+ def assertEmpty(self, stream):
+ self.assertFalse(stream.getvalue())
+
+ def assertNotEmpty(self, stream):
+ self.assertTrue(stream.getvalue())
+
+
+class RunTest(unittest.TestCase, StreamTestingMixin):
+ def setUp(self):
+ # A real PlatformInfo object is used here instead of a
+ # MockPlatformInfo because we need to actually check for
+ # Windows and Mac to skip some tests.
+ self._platform = SystemHost().platform
+
+ # FIXME: Remove this when we fix test-webkitpy to work
+ # properly on cygwin (bug 63846).
+ self.should_test_processes = not self._platform.is_win()
+
+ def test_basic(self):
+ options, args = parse_args(tests_included=True)
+ logging_stream = StringIO.StringIO()
+ host = MockHost()
+ port_obj = host.port_factory.get(options.platform, options)
+ details = run_webkit_tests.run(port_obj, options, args, logging_stream)
+
+ # These numbers will need to be updated whenever we add new tests.
+ self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
+ self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
+ self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
+ self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
+ self.assertEqual(details.retry_results.total, test.UNEXPECTED_FAILURES)
+
+ expected_tests = details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name)
+ expected_summary_str = ''
+ if details.initial_results.expected_failures > 0:
+ expected_summary_str = " (%d passed, %d didn't)" % (expected_tests - details.initial_results.expected_failures, details.initial_results.expected_failures)
+ one_line_summary = "%d tests ran as expected%s, %d didn't:\n" % (
+ expected_tests,
+ expected_summary_str,
+ len(details.initial_results.unexpected_results_by_name))
+ self.assertTrue(one_line_summary in logging_stream.buflist)
+
+ # Ensure the results were summarized properly.
+ self.assertEqual(details.summarized_failing_results['num_regressions'], details.exit_code)
+
+ # Ensure the results were written out and displayed.
+ failing_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
+ json_to_eval = failing_results_text.replace("ADD_RESULTS(", "").replace(");", "")
+ self.assertEqual(json.loads(json_to_eval), details.summarized_failing_results)
+
+ full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
+ self.assertEqual(json.loads(full_results_text), details.summarized_full_results)
+
+ self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
+
+ def test_batch_size(self):
+ batch_tests_run = get_test_batches(['--batch-size', '2'])
+ for batch in batch_tests_run:
+ self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))
+
+ def test_max_locked_shards(self):
+ # Tests for the default of using one locked shard even in the case of more than one child process.
+ if not self.should_test_processes:
+ return
+ save_env_webkit_test_max_locked_shards = None
+ if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ:
+ save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
+ del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
+ _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
+ try:
+ self.assertTrue(any(['1 locked' in line for line in regular_output.buflist]))
+ finally:
+            if save_env_webkit_test_max_locked_shards is not None:
+ os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_test_max_locked_shards
+
+ def test_child_processes_2(self):
+ if self.should_test_processes:
+ _, regular_output, _ = logging_run(
+ ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
+ self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))
+
+ def test_child_processes_min(self):
+ if self.should_test_processes:
+ _, regular_output, _ = logging_run(
+ ['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/virtual_passes', 'passes'],
+ tests_included=True, shared_port=False)
+ self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))
+
+ def test_dryrun(self):
+ tests_run = get_tests_run(['--dry-run'])
+ self.assertEqual(tests_run, [])
+
+ tests_run = get_tests_run(['-n'])
+ self.assertEqual(tests_run, [])
+
+ def test_enable_sanitizer(self):
+ self.assertTrue(passing_run(['--enable-sanitizer', 'failures/expected/text.html']))
+
+ def test_exception_raised(self):
+ # Exceptions raised by a worker are treated differently depending on
+        # whether they are raised in-process or out. In-process exceptions
+        # propagate as normal, which allows us to get the full stack trace and
+        # traceback from the worker. The downside to this is that it could be
+        # any error, but this is actually useful in testing.
+        #
+        # Exceptions raised in a separate process are re-packaged into
+        # WorkerExceptions (a subclass of BaseException), which carry a string
+        # capture of the stack that can be printed but doesn't display
+        # properly in the unit test exception handlers.
+ self.assertRaises(BaseException, logging_run,
+ ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)
+
+ if self.should_test_processes:
+ self.assertRaises(BaseException, logging_run,
+ ['--child-processes', '2', '--skipped=ignore', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)
+
+ def test_device_failure(self):
+ # Test that we handle a device going offline during a test properly.
+ details, regular_output, _ = logging_run(['failures/expected/device_failure.html'], tests_included=True)
+ self.assertEqual(details.exit_code, 0)
+ self.assertTrue('worker/0 has failed' in regular_output.getvalue())
+
+ def test_full_results_html(self):
+ host = MockHost()
+ details, _, _ = logging_run(['--full-results-html'], host=host)
+ self.assertEqual(details.exit_code, 0)
+ self.assertEqual(len(host.user.opened_urls), 1)
+
+ def test_keyboard_interrupt(self):
+ # Note that this also tests running a test marked as SKIP if
+ # you specify it explicitly.
+ details, _, _ = logging_run(['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)
+ self.assertEqual(details.exit_code, test_run_results.INTERRUPTED_EXIT_STATUS)
+
+ if self.should_test_processes:
+ _, regular_output, _ = logging_run(['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--skipped=ignore'], tests_included=True, shared_port=False)
+ self.assertTrue(any(['Interrupted, exiting' in line for line in regular_output.buflist]))
+
+ def test_no_tests_found(self):
+ details, err, _ = logging_run(['resources'], tests_included=True)
+ self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
+ self.assertContains(err, 'No tests to run.\n')
+
+ def test_no_tests_found_2(self):
+ details, err, _ = logging_run(['foo'], tests_included=True)
+ self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
+ self.assertContains(err, 'No tests to run.\n')
+
+ def test_no_tests_found_3(self):
+ details, err, _ = logging_run(['--run-chunk', '5:400', 'foo/bar.html'], tests_included=True)
+ self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
+ self.assertContains(err, 'No tests to run.\n')
+
+ def test_natural_order(self):
+ tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
+ tests_run = get_tests_run(['--order=natural'] + tests_to_run)
+ self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)
+
+ def test_natural_order_test_specified_multiple_times(self):
+ tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
+ tests_run = get_tests_run(['--order=natural'] + tests_to_run)
+ self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)
+
+ def test_random_order(self):
+ tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
+ tests_run = get_tests_run(['--order=random'] + tests_to_run)
+ self.assertEqual(sorted(tests_to_run), sorted(tests_run))
+
+ def test_random_daily_seed_order(self):
+ tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
+ tests_run = get_tests_run(['--order=random-seeded'] + tests_to_run)
+ self.assertEqual(sorted(tests_to_run), sorted(tests_run))
+
+ def test_random_order_test_specified_multiple_times(self):
+ tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
+ tests_run = get_tests_run(['--order=random'] + tests_to_run)
+ self.assertEqual(tests_run.count('passes/audio.html'), 2)
+ self.assertEqual(tests_run.count('passes/args.html'), 2)
+
+ def test_no_order(self):
+ tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
+ tests_run = get_tests_run(['--order=none'] + tests_to_run)
+ self.assertEqual(tests_to_run, tests_run)
+
+ def test_no_order_test_specified_multiple_times(self):
+ tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
+ tests_run = get_tests_run(['--order=none'] + tests_to_run)
+ self.assertEqual(tests_to_run, tests_run)
+
+ def test_no_order_with_directory_entries_in_natural_order(self):
+ tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
+ tests_run = get_tests_run(['--order=none'] + tests_to_run)
+ self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])
+
+ def test_repeat_each(self):
+ tests_to_run = ['passes/image.html', 'passes/text.html']
+ tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
+ self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])
+
+ def test_ignore_flag(self):
+ # Note that passes/image.html is expected to be run since we specified it directly.
+ tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
+ self.assertFalse('passes/text.html' in tests_run)
+ self.assertTrue('passes/image.html' in tests_run)
+
+ def test_skipped_flag(self):
+ tests_run = get_tests_run(['passes'])
+ self.assertFalse('passes/skipped/skip.html' in tests_run)
+ num_tests_run_by_default = len(tests_run)
+
+ # Check that nothing changes when we specify skipped=default.
+ self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
+ num_tests_run_by_default)
+
+ # Now check that we run one more test (the skipped one).
+ tests_run = get_tests_run(['--skipped=ignore', 'passes'])
+ self.assertTrue('passes/skipped/skip.html' in tests_run)
+ self.assertEqual(len(tests_run), num_tests_run_by_default + 1)
+
+ # Now check that we only run the skipped test.
+ self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])
+
+ # Now check that we don't run anything.
+ self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])
+
+ def test_iterations(self):
+ tests_to_run = ['passes/image.html', 'passes/text.html']
+ tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
+ self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])
+
+ def test_repeat_each_iterations_num_tests(self):
+ # The total number of tests should be: number_of_tests *
+ # repeat_each * iterations
+ host = MockHost()
+ _, err, _ = logging_run(
+ ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
+ tests_included=True, host=host)
+ self.assertContains(err, "All 16 tests ran as expected (8 passed, 8 didn't).\n")
+
+ def test_run_chunk(self):
+ # Test that we actually select the right chunk
+ all_tests_run = get_tests_run(['passes', 'failures'])
+ chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
+ self.assertEqual(all_tests_run[4:8], chunk_tests_run)
+
+ # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
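+        # (With 4 tests in natural order [error, image, platform_image, text],
+        # chunk 1:3 covers indices 3..5, which wrap to [text, error, image].)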
+ tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
+ chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
+ self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)
+
+ def test_run_part(self):
+ # Test that we actually select the right part
+ tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
+ tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
+ self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)
+
+ # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
+ # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
+ # last part repeats the first two tests).
+ chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
+ self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)
+
+ def test_run_singly(self):
+ batch_tests_run = get_test_batches(['--run-singly'])
+ for batch in batch_tests_run:
+ self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))
+
+ def test_skip_failing_tests(self):
+ # This tests that we skip both known failing and known flaky tests. Because there are
+ # no known flaky tests in the default test_expectations, we add additional expectations.
+ host = MockHost()
+ host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')
+
+ batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
+ has_passes_text = False
+ for batch in batches:
+ self.assertFalse('failures/expected/text.html' in batch)
+ self.assertFalse('passes/image.html' in batch)
+ has_passes_text = has_passes_text or ('passes/text.html' in batch)
+ self.assertTrue(has_passes_text)
+
+ def test_single_file(self):
+ tests_run = get_tests_run(['passes/text.html'])
+ self.assertEqual(tests_run, ['passes/text.html'])
+
+ def test_single_file_with_prefix(self):
+ tests_run = get_tests_run(['LayoutTests/passes/text.html'])
+ self.assertEqual(['passes/text.html'], tests_run)
+
+ def test_single_skipped_file(self):
+ tests_run = get_tests_run(['failures/expected/keybaord.html'])
+ self.assertEqual([], tests_run)
+
+ def test_stderr_is_saved(self):
+ host = MockHost()
+ self.assertTrue(passing_run(host=host))
+ self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
+ 'stuff going to stderr')
+
+ def test_test_list(self):
+ host = MockHost()
+ filename = '/tmp/foo.txt'
+ host.filesystem.write_text_file(filename, 'passes/text.html')
+ tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
+ self.assertEqual(['passes/text.html'], tests_run)
+ host.filesystem.remove(filename)
+ details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
+ self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
+ self.assertNotEmpty(err)
+
+ def test_test_list_with_prefix(self):
+ host = MockHost()
+ filename = '/tmp/foo.txt'
+ host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
+ tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
+ self.assertEqual(['passes/text.html'], tests_run)
+
+ def test_smoke_test(self):
+ host = MockHost()
+ smoke_test_filename = test.LAYOUT_TEST_DIR + '/SmokeTests'
+ host.filesystem.write_text_file(smoke_test_filename, 'passes/text.html\n')
+
+ # Test the default smoke testing.
+ tests_run = get_tests_run(['--smoke'], host=host)
+ self.assertEqual(['passes/text.html'], tests_run)
+
+ # Test running the smoke tests plus some manually-specified tests.
+ tests_run = get_tests_run(['--smoke', 'passes/image.html'], host=host)
+ self.assertEqual(['passes/image.html', 'passes/text.html'], tests_run)
+
+ # Test that --no-smoke runs only the manually-specified tests.
+ tests_run = get_tests_run(['--no-smoke', 'passes/image.html'], host=host)
+ self.assertEqual(['passes/image.html'], tests_run)
+
+ # Test that we don't run just the smoke tests by default on a normal test port.
+ tests_run = get_tests_run([], host=host)
+ self.assertNotEqual(['passes/text.html'], tests_run)
+
+ # Create a port that does run only the smoke tests by default, and verify that works as expected.
+ port_obj = host.port_factory.get('test')
+ port_obj.default_smoke_test_only = lambda: True
+ tests_run = get_tests_run([], host=host, port_obj=port_obj)
+ self.assertEqual(['passes/text.html'], tests_run)
+
+ # Verify that --no-smoke continues to work on a smoke-by-default port.
+ tests_run = get_tests_run(['--no-smoke'], host=host, port_obj=port_obj)
+ self.assertNotEqual(['passes/text.html'], tests_run)
+
+ def test_missing_and_unexpected_results(self):
+ # Test that missing and unexpected results are reported correctly
+ # in full_results.json.
+ host = MockHost()
+ details, err, _ = logging_run(['--no-show-results', '--retry-failures',
+ 'failures/expected/missing_image.html',
+ 'failures/unexpected/missing_text.html',
+ 'failures/unexpected/text-image-checksum.html'],
+ tests_included=True, host=host)
+ file_list = host.filesystem.written_files.keys()
+ self.assertEqual(details.exit_code, 2)
+ json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
+ self.assertTrue(json_string.find('"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","is_unexpected":true') != -1)
+ self.assertTrue(json_string.find('"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_unexpected":true') != -1)
+ self.assertTrue(json_string.find('"num_regressions":2') != -1)
+ self.assertTrue(json_string.find('"num_flaky":0') != -1)
+
+ def test_different_failure_on_retry(self):
+ # This tests that if a test fails two different ways -- both unexpected
+ # -- we treat it as a failure rather than a flaky result. We use the
+ # initial failure for simplicity and consistency with the flakiness
+ # dashboard, even if the second failure is worse.
+
+ details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/text_then_crash.html'], tests_included=True)
+ self.assertEqual(details.exit_code, 1)
+ self.assertEqual(details.summarized_failing_results['tests']['failures']['unexpected']['text_then_crash.html']['actual'],
+ 'TEXT CRASH')
+
+ # If we get a test that fails two different ways -- but the second one is expected --
+ # we should treat it as a flaky result and report the initial unexpected failure type
+ # to the dashboard. However, the test should be considered passing.
+ details, err, _ = logging_run(['--retry-failures', 'failures/expected/crash_then_text.html'], tests_included=True)
+ self.assertEqual(details.exit_code, 0)
+ self.assertEqual(details.summarized_failing_results['tests']['failures']['expected']['crash_then_text.html']['actual'],
+ 'CRASH FAIL')
+
+ def test_pixel_test_directories(self):
+ host = MockHost()
+
+ """Both tests have failing checksum. We include only the first in pixel tests so only that should fail."""
+ args = ['--pixel-tests', '--retry-failures', '--pixel-test-directory', 'failures/unexpected/pixeldir',
+ 'failures/unexpected/pixeldir/image_in_pixeldir.html',
+ 'failures/unexpected/image_not_in_pixeldir.html']
+ details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)
+
+ self.assertEqual(details.exit_code, 1)
+ expected_token = '"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE","is_unexpected":true'
+ json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
+ self.assertTrue(json_string.find(expected_token) != -1)
+
+ def test_crash_with_stderr(self):
+ host = MockHost()
+ _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
+ self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true,"is_unexpected":true') != -1)
+
+ def test_no_image_failure_with_image_diff(self):
+ host = MockHost()
+ _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
+ self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)
+
+ def test_exit_after_n_failures_upload(self):
+ host = MockHost()
+ details, regular_output, user = logging_run(
+ ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
+ tests_included=True, host=host)
+
+ # The incremental results should have been generated and then deleted, so the file must no longer exist.
+ self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))
+
+ self.assertEqual(details.exit_code, test_run_results.EARLY_EXIT_STATUS)
+
+ # This checks that passes/text.html is considered SKIPped.
+ self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
+
+ # This checks that we told the user we bailed out.
+ self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())
+
+ # This checks that neither test ran as expected.
+ # FIXME: This log message is confusing; tests that were skipped should be called out separately.
+ self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())
+
+ def test_exit_after_n_failures(self):
+ # Unexpected failures should result in tests stopping.
+ tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
+ self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)
+
+ # But we'll keep going for expected ones.
+ tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
+ self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)
+
+ def test_exit_after_n_crashes(self):
+ # Unexpected crashes should result in tests stopping.
+ tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
+ self.assertEqual(['failures/unexpected/crash.html'], tests_run)
+
+ # Same with timeouts.
+ tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
+ self.assertEqual(['failures/unexpected/timeout.html'], tests_run)
+
+ # But we'll keep going for expected ones.
+ tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
+ self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)
+
+ def test_results_directory_absolute(self):
+ # We run a configuration that should fail so that output is generated,
+ # then check which results URL was opened.
+
+ host = MockHost()
+ with host.filesystem.mkdtemp() as tmpdir:
+ _, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
+ self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])
+
+ def test_results_directory_default(self):
+ # We run a configuration that should fail so that output is generated,
+ # then check which results URL was opened.
+
+ # This is the default location.
+ _, _, user = logging_run(tests_included=True)
+ self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
+
+ def test_results_directory_relative(self):
+ # We run a configuration that should fail so that output is generated,
+ # then check which results URL was opened.
+ host = MockHost()
+ host.filesystem.maybe_make_directory('/tmp/cwd')
+ host.filesystem.chdir('/tmp/cwd')
+ _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
+ self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])
+
+ def test_retrying_default_value(self):
+ host = MockHost()
+ details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
+ self.assertEqual(details.exit_code, 1)
+ self.assertFalse('Retrying' in err.getvalue())
+
+ host = MockHost()
+ details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected'], tests_included=True, host=host)
+ self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 7) # FIXME: This should be a constant in test.py.
+ self.assertTrue('Retrying' in err.getvalue())
+
+ def test_retrying_default_value_test_list(self):
+ host = MockHost()
+ filename = '/tmp/foo.txt'
+ host.filesystem.write_text_file(filename, 'failures/unexpected/text-image-checksum.html\nfailures/unexpected/crash.html')
+ details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
+ self.assertEqual(details.exit_code, 2)
+ self.assertFalse('Retrying' in err.getvalue())
+
+ host = MockHost()
+ filename = '/tmp/foo.txt'
+ host.filesystem.write_text_file(filename, 'failures')
+ details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
+ self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 7)
+ self.assertTrue('Retrying' in err.getvalue())
+
+ def test_retrying_and_flaky_tests(self):
+ host = MockHost()
+ details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/flaky'], tests_included=True, host=host)
+ self.assertEqual(details.exit_code, 0)
+ self.assertTrue('Retrying' in err.getvalue())
+ self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
+ self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))
+ self.assertEqual(len(host.user.opened_urls), 0)
+
+ # Now we test that --clobber-old-results does remove the old entries and the old retries,
+ # and that we don't retry again.
+ host = MockHost()
+ details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
+ self.assertEqual(details.exit_code, 1)
+ self.assertTrue('Clobbering old results' in err.getvalue())
+ self.assertTrue('flaky/text.html' in err.getvalue())
+ self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
+ self.assertFalse(host.filesystem.exists('retries'))
+ self.assertEqual(len(host.user.opened_urls), 1)
+
+ def test_retrying_crashed_tests(self):
+ host = MockHost()
+ details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/crash.html'], tests_included=True, host=host)
+ self.assertEqual(details.exit_code, 1)
+ self.assertTrue('Retrying' in err.getvalue())
+
+ def test_retrying_leak_tests(self):
+ host = MockHost()
+ details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/leak.html'], tests_included=True, host=host)
+ self.assertEqual(details.exit_code, 1)
+ self.assertTrue('Retrying' in err.getvalue())
+
+ def test_retrying_force_pixel_tests(self):
+ host = MockHost()
+ details, err, _ = logging_run(['--no-pixel-tests', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
+ self.assertEqual(details.exit_code, 1)
+ self.assertTrue('Retrying' in err.getvalue())
+ self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
+ self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
+ self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
+ self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
+ json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
+ json = parse_full_results(json_string)
+ self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
+ {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "is_unexpected": True})
+ self.assertFalse(json["pixel_tests_enabled"])
+ self.assertEqual(details.enabled_pixel_tests_in_retry, True)
+
+ def test_retrying_uses_retries_directory(self):
+ host = MockHost()
+ details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
+ self.assertEqual(details.exit_code, 1)
+ self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
+ self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
+
+ def test_run_order__inline(self):
+ # These tests verify that we run tests in ascending alphabetical
+ # order within each directory. HTTP tests are sharded separately from
+ # other tests, so we have to check both.
+ tests_run = get_tests_run(['-i', 'passes/virtual_passes', 'passes'])
+ self.assertEqual(tests_run, sorted(tests_run))
+
+ tests_run = get_tests_run(['http/tests/passes'])
+ self.assertEqual(tests_run, sorted(tests_run))
+
+ def test_virtual(self):
+ self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
+ 'virtual/passes/text.html', 'virtual/passes/args.html']))
+
+ def test_reftest_run(self):
+ tests_run = get_tests_run(['passes/reftest.html'])
+ self.assertEqual(['passes/reftest.html'], tests_run)
+
+ def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
+ tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
+ self.assertEqual(['passes/reftest.html'], tests_run)
+
+ def test_reftest_expected_html_should_be_ignored(self):
+ tests_run = get_tests_run(['passes/reftest-expected.html'])
+ self.assertEqual([], tests_run)
+
+ def test_reftest_driver_should_run_expected_html(self):
+ tests_run = get_test_results(['passes/reftest.html'])
+ self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])
+
+ def test_reftest_driver_should_run_expected_mismatch_html(self):
+ tests_run = get_test_results(['passes/mismatch.html'])
+ self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])
+
+ def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
+ host = MockHost()
+ _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
+ results = parse_full_results(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
+
+ self.assertEqual(results["tests"]["reftests"]["foo"]["unlistedtest.html"]["actual"], "MISSING"),
+ self.assertEqual(results["num_regressions"], 5)
+ self.assertEqual(results["num_flaky"], 0)
+
+ def test_reftest_crash(self):
+ test_results = get_test_results(['failures/unexpected/crash-reftest.html'])
+ # The list of references should be empty since the test crashed and we didn't run any references.
+ self.assertEqual(test_results[0].references, [])
+
+ def test_reftest_with_virtual_reference(self):
+ _, err, _ = logging_run(['--details', 'virtual/virtual_passes/passes/reftest.html'], tests_included=True)
+ self.assertTrue('ref: virtual/virtual_passes/passes/reftest-expected.html' in err.getvalue())
+
+ def test_additional_platform_directory(self):
+ self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
+ self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
+ self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
+ self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))
+
+ def test_additional_expectations(self):
+ host = MockHost()
+ host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
+ self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
+ tests_included=True, host=host))
+
+ @staticmethod
+ def has_test_of_type(tests, type):
+ return [test for test in tests if type in test]
+
+ def test_platform_directories_ignored_when_searching_for_tests(self):
+ tests_run = get_tests_run(['--platform', 'test-mac-leopard'])
+ self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
+ self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)
+
+ def test_platform_directories_not_searched_for_additional_tests(self):
+ tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
+ self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
+ self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)
+
+ def test_output_diffs(self):
+ # Test to ensure that we don't generate -wdiff.html or -pretty-diff.html files
+ # if wdiff and PrettyPatch aren't available.
+ host = MockHost()
+ _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
+ written_files = host.filesystem.written_files
+ self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
+ self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files.keys()))
+ self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))
+
+ full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
+ full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
+ self.assertEqual(full_results['has_wdiff'], False)
+ self.assertEqual(full_results['has_pretty_patch'], False)
+
+ def test_unsupported_platform(self):
+ stdout = StringIO.StringIO()
+ stderr = StringIO.StringIO()
+ res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)
+
+ self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
+ self.assertEqual(stdout.getvalue(), '')
+ self.assertTrue('unsupported platform' in stderr.getvalue())
+
+ def test_build_check(self):
+ # By using a port_name for a different platform than the one we're running on, the build check should always fail.
+ if sys.platform == 'darwin':
+ port_name = 'linux-x86'
+ else:
+ port_name = 'mac-lion'
+ out = StringIO.StringIO()
+ err = StringIO.StringIO()
+ self.assertEqual(run_webkit_tests.main(['--platform', port_name, 'fast/harness/results.html'], out, err), test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
+
+ def test_verbose_in_child_processes(self):
+ # When we actually run multiple processes, we may have to reconfigure logging in the
+ # child process (e.g., on win32), and we need to make sure that works and that we still
+ # see the verbose log output. However, we can't use logging_run() because using
+ # outputcapture to capture stdout and stderr later results in a nonpicklable host.
+
+ # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
+ if not self.should_test_processes:
+ return
+
+ options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
+ host = MockHost()
+ port_obj = host.port_factory.get(port_name=options.platform, options=options)
+ logging_stream = StringIO.StringIO()
+ run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
+ self.assertTrue('text.html passed' in logging_stream.getvalue())
+ self.assertTrue('image.html passed' in logging_stream.getvalue())
+
+ def disabled_test_driver_logging(self):
+ # FIXME: Figure out how to either use a mock-test port to
+ # get output or make mock ports work again.
+ host = Host()
+ _, err, _ = logging_run(['--platform', 'mock-win', '--driver-logging', 'fast/harness/results.html'],
+ tests_included=True, host=host)
+ self.assertTrue('OUT:' in err.getvalue())
+
+ def test_write_full_results_to(self):
+ host = MockHost()
+ details, _, _ = logging_run(['--write-full-results-to', '/tmp/full_results.json'], host=host)
+ self.assertEqual(details.exit_code, 0)
+ self.assertTrue(host.filesystem.exists('/tmp/full_results.json'))
+
+
+class EndToEndTest(unittest.TestCase):
+ def test_reftest_with_two_notrefs(self):
+ # Test that reftests with multiple match/mismatch references are
+ # summarized correctly in failing_results.json.
+ host = MockHost()
+ _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
+ file_list = host.filesystem.written_files.keys()
+
+ json_string = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
+ json = parse_full_results(json_string)
+ self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"])
+ self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
+ self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])
+
+ self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
+ {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "is_unexpected": True})
+ self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
+ {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "is_unexpected": True})
+ self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
+ {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "is_unexpected": True})
+
+
+class RebaselineTest(unittest.TestCase, StreamTestingMixin):
+ def assertBaselines(self, file_list, file, extensions, err):
+ "assert that the file_list contains the baselines."""
+ for ext in extensions:
+ baseline = file + "-expected" + ext
+ baseline_msg = 'Writing new expected result "%s"\n' % baseline
+ self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
+ self.assertContains(err, baseline_msg)
+
+ # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
+ # supposed to be.
+
+ def test_reset_results(self):
+ # Test that --reset-results regenerates the baselines in place.
+ host = MockHost()
+ details, err, _ = logging_run(
+ ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
+ tests_included=True, host=host, new_results=True)
+ file_list = host.filesystem.written_files.keys()
+ self.assertEqual(details.exit_code, 0)
+ self.assertEqual(len(file_list), 8)
+ self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
+ self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)
+
+ def test_missing_results(self):
+ # Test that new baselines for missing results are written to the
+ # generic or platform-specific locations as appropriate.
+ host = MockHost()
+ details, err, _ = logging_run(['--no-show-results',
+ 'failures/unexpected/missing_text.html',
+ 'failures/unexpected/missing_image.html',
+ 'failures/unexpected/missing_render_tree_dump.html'],
+ tests_included=True, host=host, new_results=True)
+ file_list = host.filesystem.written_files.keys()
+ self.assertEqual(details.exit_code, 3)
+ self.assertEqual(len(file_list), 10)
+ self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
+ self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
+ self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)
+
+ def test_missing_results_not_added_if_expected_missing(self):
+ # Test that new baselines are not written for tests that the
+ # expectations already mark as Missing or needing a rebaseline.
+ host = MockHost()
+ options, parsed_args = run_webkit_tests.parse_args([])
+
+ port = test.TestPort(host, options=options)
+ host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
+Bug(foo) failures/unexpected/missing_text.html [ Missing ]
+Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
+Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
+Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
+""")
+ details, err, _ = logging_run(['--no-show-results',
+ 'failures/unexpected/missing_text.html',
+ 'failures/unexpected/missing_image.html',
+ 'failures/unexpected/missing_audio.html',
+ 'failures/unexpected/missing_render_tree_dump.html'],
+ tests_included=True, host=host, new_results=True, port_obj=port)
+ file_list = host.filesystem.written_files.keys()
+ self.assertEqual(details.exit_code, 0)
+ self.assertEqual(len(file_list), 7)
+ self.assertFalse(any('failures/unexpected/missing_text-expected' in file for file in file_list))
+ self.assertFalse(any('failures/unexpected/missing_image-expected' in file for file in file_list))
+ self.assertFalse(any('failures/unexpected/missing_render_tree_dump-expected' in file for file in file_list))
+
+ def test_missing_results_not_added_if_expected_missing_and_reset_results(self):
+ # Test that --reset-results writes new baselines even for tests that
+ # the expectations mark as Missing or needing a rebaseline.
+ host = MockHost()
+ options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--reset-results'])
+
+ port = test.TestPort(host, options=options)
+ host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
+Bug(foo) failures/unexpected/missing_text.html [ Missing ]
+Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
+Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
+Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
+""")
+ details, err, _ = logging_run(['--pixel-tests', '--reset-results',
+ 'failures/unexpected/missing_text.html',
+ 'failures/unexpected/missing_image.html',
+ 'failures/unexpected/missing_audio.html',
+ 'failures/unexpected/missing_render_tree_dump.html'],
+ tests_included=True, host=host, new_results=True, port_obj=port)
+ file_list = host.filesystem.written_files.keys()
+ self.assertEqual(details.exit_code, 0)
+ self.assertEqual(len(file_list), 11)
+ self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
+ self.assertBaselines(file_list, "failures/unexpected/missing_image", [".png"], err)
+ self.assertBaselines(file_list, "failures/unexpected/missing_render_tree_dump", [".txt"], err)
+
+ def test_new_baseline(self):
+ # Test that we update the platform expectations in the version-specific directories
+ # for both existing and new baselines.
+ host = MockHost()
+ details, err, _ = logging_run(
+ ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
+ tests_included=True, host=host, new_results=True)
+ file_list = host.filesystem.written_files.keys()
+ self.assertEqual(details.exit_code, 0)
+ self.assertEqual(len(file_list), 8)
+ self.assertBaselines(file_list,
+ "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
+ self.assertBaselines(file_list,
+ "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)
+
+
+class PortTest(unittest.TestCase):
+ def assert_mock_port_works(self, port_name, args=[]):
+ self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))
+
+ def disabled_test_mac_lion(self):
+ self.assert_mock_port_works('mac-lion')
+
+
+class MainTest(unittest.TestCase):
+ def test_exception_handling(self):
+ orig_run_fn = run_webkit_tests.run
+
+ # unused args pylint: disable=W0613
+ def interrupting_run(port, options, args, stderr):
+ raise KeyboardInterrupt
+
+ def successful_run(port, options, args, stderr):
+
+ class FakeRunDetails(object):
+ exit_code = test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
+
+ return FakeRunDetails()
+
+ def exception_raising_run(port, options, args, stderr):
+ assert False
+
+ stdout = StringIO.StringIO()
+ stderr = StringIO.StringIO()
+ try:
+ run_webkit_tests.run = interrupting_run
+ res = run_webkit_tests.main([], stdout, stderr)
+ self.assertEqual(res, test_run_results.INTERRUPTED_EXIT_STATUS)
+
+ run_webkit_tests.run = successful_run
+ res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
+ self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
+
+ run_webkit_tests.run = exception_raising_run
+ res = run_webkit_tests.main([], stdout, stderr)
+ self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
+ finally:
+ run_webkit_tests.run = orig_run_fn
+
+ def test_buildbot_results_are_printed_on_early_exit(self):
+ # unused args pylint: disable=W0613
+ stdout = StringIO.StringIO()
+ stderr = StringIO.StringIO()
+ res = run_webkit_tests.main(['--platform', 'test', '--exit-after-n-failures', '1',
+ 'failures/unexpected/missing_text.html',
+ 'failures/unexpected/missing_image.html'],
+ stdout, stderr)
+ self.assertEqual(res, test_run_results.EARLY_EXIT_STATUS)
+ self.assertEqual(stdout.getvalue(),
+ ('\n'
+ 'Regressions: Unexpected missing results (1)\n'
+ ' failures/unexpected/missing_image.html [ Missing ]\n\n'))
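The assertions above repeatedly inspect full_results.json, which is written as a JSONP payload rather than bare JSON (test_output_diffs strips an ADD_RESULTS(...) wrapper by hand). A minimal sketch of the parse_full_results helper these tests call, assuming the same wrapper format shown there; the real definition lives elsewhere in this file:

    import json

    def parse_full_results(json_string):
        # full_results.json is JSONP of the form ADD_RESULTS(<json>);
        # strip the wrapper so the payload can be parsed as plain JSON.
        json_string = json_string.replace("ADD_RESULTS(", "").replace(");", "")
        return json.loads(json_string)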
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/apache_http.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/apache_http.py
new file mode 100644
index 0000000..c28f4d5
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/apache_http.py
@@ -0,0 +1,172 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Start and stop the Apache HTTP server as it is used by the layout tests."""
+
+import logging
+import os
+import socket
+
+from webkitpy.layout_tests.servers import server_base
+
+
+_log = logging.getLogger(__name__)
+
+
+class ApacheHTTP(server_base.ServerBase):
+ def __init__(self, port_obj, output_dir, additional_dirs, number_of_servers):
+ super(ApacheHTTP, self).__init__(port_obj, output_dir)
+ # We use the name "httpd" instead of "apache" to make our paths (e.g. the pid file: /tmp/WebKit/httpd.pid)
+ # match old-run-webkit-tests: https://bugs.webkit.org/show_bug.cgi?id=63956
+ self._name = 'httpd'
+ self._log_prefixes = ('access_log', 'error_log')
+ self._mappings = [{'port': 8000},
+ {'port': 8080},
+ {'port': 8443, 'sslcert': True}]
+ self._number_of_servers = number_of_servers
+
+ self._pid_file = self._filesystem.join(self._runtime_path, '%s.pid' % self._name)
+
+ executable = self._port_obj.path_to_apache()
+ server_root = self._filesystem.dirname(self._filesystem.dirname(executable))
+
+ test_dir = self._port_obj.layout_tests_dir()
+ document_root = self._filesystem.join(test_dir, "http", "tests")
+ js_test_resources_dir = self._filesystem.join(test_dir, "resources")
+ media_resources_dir = self._filesystem.join(test_dir, "media")
+ mime_types_path = self._filesystem.join(test_dir, "http", "conf", "mime.types")
+ cert_file = self._filesystem.join(test_dir, "http", "conf", "webkit-httpd.pem")
+
+ self._access_log_path = self._filesystem.join(output_dir, "access_log.txt")
+ self._error_log_path = self._filesystem.join(output_dir, "error_log.txt")
+
+ self._is_win = self._port_obj.host.platform.is_win()
+
+ start_cmd = [executable,
+ '-f', '%s' % self._port_obj.path_to_apache_config_file(),
+ '-C', 'ServerRoot "%s"' % server_root,
+ '-C', 'DocumentRoot "%s"' % document_root,
+ '-c', 'Alias /js-test-resources "%s"' % js_test_resources_dir,
+ '-c', 'Alias /media-resources "%s"' % media_resources_dir,
+ '-c', 'TypesConfig "%s"' % mime_types_path,
+ '-c', 'CustomLog "%s" common' % self._access_log_path,
+ '-c', 'ErrorLog "%s"' % self._error_log_path,
+ '-c', 'PidFile %s' % self._pid_file,
+ '-c', 'SSLCertificateFile "%s"' % cert_file,
+ ]
+
+ if self._is_win:
+ start_cmd += ['-c', "ThreadsPerChild %d" % (self._number_of_servers * 2)]
+ else:
+ start_cmd += ['-c', "StartServers %d" % self._number_of_servers,
+ '-c', "MinSpareServers %d" % self._number_of_servers,
+ '-c', "MaxSpareServers %d" % self._number_of_servers,
+ '-C', 'User "%s"' % os.environ.get('USERNAME', os.environ.get('USER', '')),
+ '-k', 'start']
+
+ enable_ipv6 = self._port_obj.http_server_supports_ipv6()
+ # Perform part of the checks Apache's APR does when trying to listen to
+ # a specific host/port. This allows us to avoid trying to listen to
+ # IPV6 addresses when it fails on Apache. APR itself tries to call
+ # getaddrinfo() again without AI_ADDRCONFIG if the first call fails
+ # with EBADFLAGS, but that is not how it normally fails in our use
+ # cases, so ignore that for now.
+ # See https://bugs.webkit.org/show_bug.cgi?id=98602#c7
+ try:
+ socket.getaddrinfo('::1', 0, 0, 0, 0, socket.AI_ADDRCONFIG)
+ except Exception:
+ enable_ipv6 = False
+
+ for mapping in self._mappings:
+ port = mapping['port']
+
+ start_cmd += ['-C', "Listen 127.0.0.1:%d" % port]
+
+ # We listen on both the IPv4 and IPv6 loop-back addresses, but ignore
+ # requests to port 8000 from random users on the network.
+ # See https://bugs.webkit.org/show_bug.cgi?id=37104
+ if enable_ipv6:
+ start_cmd += ['-C', "Listen [::1]:%d" % port]
+
+ if additional_dirs:
+ for alias, path in additional_dirs.iteritems():
+ start_cmd += ['-c', 'Alias %s "%s"' % (alias, path),
+ # Disable CGI handler for additional dirs.
+ '-c', '<Location %s>' % alias,
+ '-c', 'RemoveHandler .cgi .pl',
+ '-c', '</Location>']
+
+ self._start_cmd = start_cmd
+
+ def _spawn_process(self):
+ _log.debug('Starting %s server, cmd="%s"' % (self._name, str(self._start_cmd)))
+ self._process = self._executive.popen(self._start_cmd, stderr=self._executive.PIPE)
+ if self._process.returncode is not None:
+ retval = self._process.returncode
+ err = self._process.stderr.read()
+ if retval or len(err):
+ raise server_base.ServerError('Failed to start %s: %s' % (self._name, err))
+
+ # For some reason apache isn't guaranteed to have created the pid file before
+ # the process exits, so we wait a little while longer.
+ if not self._wait_for_action(lambda: self._filesystem.exists(self._pid_file)):
+ self._log_errors_from_subprocess()
+ raise server_base.ServerError('Failed to start %s: no pid file found' % self._name)
+
+ return int(self._filesystem.read_text_file(self._pid_file))
+
+ def stop(self):
+ self._stop_running_server()
+
+ def _stop_running_server(self):
+ # If apache was forcefully killed, the pid file will not have been deleted, so check
+ # that the process specified by the pid_file no longer exists before deleting the file.
+ if self._pid and not self._executive.check_running_pid(self._pid):
+ self._filesystem.remove(self._pid_file)
+ return
+
+ if self._is_win:
+ self._executive.kill_process(self._pid)
+ return
+
+ proc = self._executive.popen([self._port_obj.path_to_apache(),
+ '-f', self._port_obj.path_to_apache_config_file(),
+ '-c', 'PidFile "%s"' % self._pid_file,
+ '-k', 'stop'], stderr=self._executive.PIPE)
+ proc.wait()
+ retval = proc.returncode
+ err = proc.stderr.read()
+ if retval or len(err):
+ raise server_base.ServerError('Failed to stop %s: %s' % (self._name, err))
+
+ # For some reason apache isn't guaranteed to have actually stopped after
+ # the stop command returns, so we wait a little while longer for the
+ # pid file to be removed.
+ if not self._wait_for_action(lambda: not self._filesystem.exists(self._pid_file)):
+ raise server_base.ServerError('Failed to stop %s: pid file still exists' % self._name)
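Both _spawn_process and _stop_running_server poll for the pid file via _wait_for_action, which ServerBase provides (see server_base.py below). A rough sketch of that polling pattern, with the retry count and sleep interval as assumed values rather than the real defaults:

    import time

    def wait_for_action(action, sleep_secs=1.0, max_tries=20):
        # Poll until the condition holds or we give up; report whether it held.
        for _ in range(max_tries):
            if action():
                return True
            time.sleep(sleep_secs)
        return False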
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_unittest.py
new file mode 100644
index 0000000..f4d8601
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_unittest.py
@@ -0,0 +1,69 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+import sys
+import unittest
+
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.port import test
+from webkitpy.layout_tests.servers.apache_http import ApacheHTTP
+from webkitpy.layout_tests.servers.server_base import ServerError
+
+
+class TestApacheHTTP(unittest.TestCase):
+ def test_start_cmd(self):
+ # Fails on win - see https://bugs.webkit.org/show_bug.cgi?id=84726
+ if sys.platform in ('cygwin', 'win32'):
+ return
+
+ def fake_pid(_):
+ host.filesystem.write_text_file('/tmp/WebKit/httpd.pid', '42')
+ return True
+
+ host = MockHost()
+ host.executive = MockExecutive(should_log=True)
+ test_port = test.TestPort(host)
+ host.filesystem.write_text_file(test_port.path_to_apache_config_file(), '')
+
+ server = ApacheHTTP(test_port, "/mock/output_dir", additional_dirs={}, number_of_servers=4)
+ server._check_that_all_ports_are_available = lambda: True
+ server._is_server_running_on_all_ports = lambda: True
+ server._wait_for_action = fake_pid
+ oc = OutputCapture()
+ try:
+ oc.capture_output()
+ server.start()
+ server.stop()
+ finally:
+ _, _, logs = oc.restore_output()
+ self.assertIn("StartServers 4", logs)
+ self.assertIn("MinSpareServers 4", logs)
+ self.assertIn("MaxSpareServers 4", logs)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/cli_wrapper.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/cli_wrapper.py
new file mode 100644
index 0000000..49b9800
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/cli_wrapper.py
@@ -0,0 +1,67 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A utility script for starting and stopping servers as they are used in the layout tests."""
+
+import logging
+import optparse
+
+from webkitpy.common.host import Host
+
+_log = logging.getLogger(__name__)
+
+
+def main(server_constructor, input_fn=None, argv=None, **kwargs):
+ input_fn = input_fn or raw_input
+
+ option_parser = optparse.OptionParser()
+ option_parser.add_option('--output-dir', dest='output_dir',
+ default=None, help='output directory.')
+ option_parser.add_option('-v', '--verbose', action='store_true')
+ options, args = option_parser.parse_args(argv)
+
+ logging.basicConfig()
+ logger = logging.getLogger()
+ logger.setLevel(logging.DEBUG if options.verbose else logging.INFO)
+
+ host = Host()
+ port_obj = host.port_factory.get()
+ if not options.output_dir:
+ options.output_dir = port_obj.default_results_directory()
+
+ # Create the output directory if it doesn't already exist.
+ port_obj.host.filesystem.maybe_make_directory(options.output_dir)
+
+ server = server_constructor(port_obj, options.output_dir, **kwargs)
+ server.start()
+ try:
+ _ = input_fn('Hit any key to stop the server and exit.')
+ except (KeyboardInterrupt, EOFError):
+ pass
+
+ server.stop()
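A server module can expose itself on the command line by handing its constructor to main(). A hypothetical entry point for the PyWebSocket class defined later in this change might look like this (the entry-point file itself is an assumption, not part of this import):

    import sys

    from webkitpy.layout_tests.servers import cli_wrapper
    from webkitpy.layout_tests.servers.pywebsocket import PyWebSocket

    if __name__ == '__main__':
        # Starts the server, blocks until the user presses a key, then stops it.
        cli_wrapper.main(PyWebSocket, argv=sys.argv[1:])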
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/cli_wrapper_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/cli_wrapper_unittest.py
new file mode 100644
index 0000000..5464739
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/cli_wrapper_unittest.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+from webkitpy.layout_tests.servers import server_base
+from webkitpy.layout_tests.servers import cli_wrapper
+
+
+class MockServer(object):
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+ self.start_called = False
+ self.stop_called = False
+
+ def start(self):
+ self.start_called = True
+
+ def stop(self):
+ self.stop_called = True
+
+
+class TestCliWrapper(unittest.TestCase):
+
+ def test_main(self):
+ def mock_server_constructor(*args, **kwargs):
+ self.server = MockServer(*args, **kwargs)
+ return self.server
+
+ cli_wrapper.main(mock_server_constructor, input_fn=lambda msg: True, argv=[])
+ self.assertTrue(self.server.start_called)
+ self.assertTrue(self.server.stop_called)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/crash_service.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/crash_service.py
new file mode 100644
index 0000000..c193d0b
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/crash_service.py
@@ -0,0 +1,49 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Start and stop the crash service as it is used by the layout tests."""
+
+from webkitpy.layout_tests.servers import server_base
+
+
+class CrashService(server_base.ServerBase):
+
+ def __init__(self, port_obj, crash_dumps_dir):
+ """Args:
+ crash_dumps_dir: the absolute path to the directory where to store crash dumps
+ """
+ # Webkit tests
+ super(CrashService, self).__init__(port_obj, port_obj.default_results_directory())
+ self._name = 'CrashService'
+ self._crash_dumps_dir = crash_dumps_dir
+ self._env = None
+
+ self._pid_file = self._filesystem.join(self._runtime_path, '%s.pid' % self._name)
+ self._start_cmd = [self._port_obj._path_to_crash_service(),
+ '--dumps-dir=%s' % self._crash_dumps_dir,
+ '--no-window']
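CrashService illustrates the minimal ServerBase contract: a subclass sets _name, _pid_file, and _start_cmd (and optionally _env, _mappings, and _log_prefixes) and inherits the start/stop lifecycle. A schematic sketch of such a subclass, with the EchoService name and /mock/echo_server binary invented for illustration:

    from webkitpy.layout_tests.servers import server_base

    class EchoService(server_base.ServerBase):
        # Illustrative only; not part of this change.
        def __init__(self, port_obj, output_dir):
            super(EchoService, self).__init__(port_obj, output_dir)
            self._name = 'EchoService'
            self._pid_file = self._filesystem.join(self._runtime_path, '%s.pid' % self._name)
            self._start_cmd = ['/mock/echo_server', '--port=9999']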
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/crash_service_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/crash_service_unittest.py
new file mode 100644
index 0000000..95bf15a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/crash_service_unittest.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+import sys
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.port import test
+from webkitpy.layout_tests.servers.crash_service import CrashService
+from webkitpy.layout_tests.servers.server_base import ServerError
+
+
+class TestCrashService(unittest.TestCase):
+ def test_start_cmd(self):
+ # Fails on win - see https://bugs.webkit.org/show_bug.cgi?id=84726
+ if sys.platform in ('cygwin', 'win32'):
+ return
+
+ host = MockHost()
+ test_port = test.TestPort(host)
+ test_port._path_to_crash_service = lambda: "/mock/crash_service"
+
+ server = CrashService(test_port, "/mock/crash_dumps_dir")
+ self.assertRaises(ServerError, server.start)
+
+ def test_win32_start_and_stop(self):
+ host = MockHost()
+ test_port = test.TestPort(host)
+ test_port._path_to_crash_service = lambda: "/mock/crash_service"
+
+ host.platform.is_win = lambda: True
+ host.platform.is_cygwin = lambda: False
+
+ server = CrashService(test_port, "/mock/crash_dumps_dir")
+ server._check_that_all_ports_are_available = lambda: True
+ server._is_server_running_on_all_ports = lambda: True
+
+ server.start()
+ self.assertNotEquals(host.executive.calls, [])
+
+ def wait_for_action(action):
+ if action():
+ return True
+ return action()
+
+ def mock_returns(return_values):
+ def return_value_thunk(*args, **kwargs):
+ return return_values.pop(0)
+ return return_value_thunk
+
+ host.executive.check_running_pid = mock_returns([True, False])
+ server._wait_for_action = wait_for_action
+
+ server.stop()
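The mock_returns helper in the test above scripts a callable with a queue of canned return values, so check_running_pid can report the process alive on the first poll and gone on the second. The same shape works for any callable a test needs to script; a small usage sketch:

    def mock_returns(return_values):
        # Each call pops and returns the next scripted value.
        def return_value_thunk(*args, **kwargs):
            return return_values.pop(0)
        return return_value_thunk

    is_running = mock_returns([True, False])
    assert is_running(1234) is True    # first poll: still alive
    assert is_running(1234) is False   # second poll: gone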
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/pywebsocket.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/pywebsocket.py
new file mode 100644
index 0000000..ef51fb2
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/pywebsocket.py
@@ -0,0 +1,76 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A class to help start/stop the PyWebSocket server as used by the layout tests."""
+
+import logging
+import os
+import sys
+import time
+
+from webkitpy.layout_tests.servers import server_base
+from webkitpy.thirdparty import mod_pywebsocket
+
+_log = logging.getLogger(__name__)
+
+
+_WS_LOG_PREFIX = 'pywebsocket.ws.log-'
+
+_DEFAULT_WS_PORT = 8880
+
+
+class PyWebSocket(server_base.ServerBase):
+
+ def __init__(self, port_obj, output_dir):
+ super(PyWebSocket, self).__init__(port_obj, output_dir)
+ self._name = 'pywebsocket'
+ self._log_prefixes = (_WS_LOG_PREFIX,)
+ self._mappings = [{'port': _DEFAULT_WS_PORT}]
+ self._pid_file = self._filesystem.join(self._runtime_path, '%s.pid' % self._name)
+
+ self._port = _DEFAULT_WS_PORT
+ self._layout_tests = self._port_obj.layout_tests_dir()
+ self._web_socket_tests = self._filesystem.join(self._layout_tests, 'http', 'tests', 'websocket')
+ time_str = time.strftime('%d%b%Y-%H%M%S')
+ log_file_name = _WS_LOG_PREFIX + time_str
+ self._error_log = self._filesystem.join(self._output_dir, log_file_name + "-err.txt")
+ pywebsocket_base = self._port_obj.path_from_webkit_base('Tools', 'Scripts', 'webkitpy', 'thirdparty')
+ pywebsocket_script = self._filesystem.join(pywebsocket_base, 'mod_pywebsocket', 'standalone.py')
+
+ self._start_cmd = [
+ sys.executable, '-u', pywebsocket_script,
+ '--server-host', 'localhost',
+ '--port', str(self._port),
+ '--document-root', self._web_socket_tests,
+ '--scan-dir', self._web_socket_tests,
+ '--cgi-paths', '/',
+ '--log-file', self._error_log,
+ '--websock-handlers-map-file', self._filesystem.join(self._web_socket_tests, 'handler_map.txt'),
+ ]
+ self._env = self._port_obj.setup_environ_for_server()
+ self._env['PYTHONPATH'] = (pywebsocket_base + os.pathsep + self._env.get('PYTHONPATH', ''))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/server_base.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/server_base.py
new file mode 100644
index 0000000..1d6a3e1
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/server_base.py
@@ -0,0 +1,278 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Base class used to start servers used by the layout tests."""
+
+import errno
+import logging
+import socket
+import tempfile
+import time
+
+
+_log = logging.getLogger(__name__)
+
+
+class ServerError(Exception):
+ pass
+
+
+class ServerBase(object):
+ """A skeleton class for starting and stopping servers used by the layout tests."""
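+
+    # A minimal subclass sketch (hypothetical names; the PyWebSocket server is
+    # a real example):
+    #
+    #   class EchoServer(ServerBase):
+    #       def __init__(self, port_obj, output_dir):
+    #           super(EchoServer, self).__init__(port_obj, output_dir)
+    #           self._name = 'echo'
+    #           self._mappings = [{'port': 9000}]
+    #           self._pid_file = self._filesystem.join(self._runtime_path, 'echo.pid')
+    #           self._start_cmd = ['echo-server', '--port', '9000']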
+
+ def __init__(self, port_obj, output_dir):
+ self._port_obj = port_obj
+ self._executive = port_obj._executive
+ self._filesystem = port_obj._filesystem
+ self._platform = port_obj.host.platform
+ self._output_dir = output_dir
+
+ # We need a non-checkout-dependent place to put lock files, etc. We
+ # don't use the Python default on the Mac because it defaults to a
+ # randomly-generated directory under /var/folders and no one would ever
+ # look there.
+ tmpdir = tempfile.gettempdir()
+ if self._platform.is_mac():
+ tmpdir = '/tmp'
+
+ self._runtime_path = self._filesystem.join(tmpdir, "WebKit")
+ self._filesystem.maybe_make_directory(self._runtime_path)
+
+ # Subclasses must override these fields.
+ self._name = '<virtual>'
+ self._log_prefixes = tuple()
+        self._mappings = []
+ self._pid_file = None
+ self._start_cmd = None
+
+ # Subclasses may override these fields.
+ self._env = None
+ self._stdout = self._executive.PIPE
+ self._stderr = self._executive.PIPE
+ self._process = None
+ self._pid = None
+ self._error_log_path = None
+
+ def start(self):
+ """Starts the server. It is an error to start an already started server.
+
+ This method also stops any stale servers started by a previous instance."""
+ assert not self._pid, '%s server is already running' % self._name
+
+ # Stop any stale servers left over from previous instances.
+ if self._filesystem.exists(self._pid_file):
+ try:
+ self._pid = int(self._filesystem.read_text_file(self._pid_file))
+ _log.debug('stale %s pid file, pid %d' % (self._name, self._pid))
+ self._stop_running_server()
+ except (ValueError, UnicodeDecodeError):
+ # These could be raised if the pid file is corrupt.
+ self._remove_pid_file()
+ self._pid = None
+
+ self._remove_stale_logs()
+ self._prepare_config()
+ self._check_that_all_ports_are_available()
+
+ self._pid = self._spawn_process()
+
+ if self._wait_for_action(self._is_server_running_on_all_ports):
+ _log.debug("%s successfully started (pid = %d)" % (self._name, self._pid))
+ else:
+ self._log_errors_from_subprocess()
+ self._stop_running_server()
+ raise ServerError('Failed to start %s server' % self._name)
+
+ def stop(self):
+ """Stops the server. Stopping a server that isn't started is harmless."""
+ actual_pid = None
+ try:
+ if self._filesystem.exists(self._pid_file):
+ try:
+ actual_pid = int(self._filesystem.read_text_file(self._pid_file))
+ except (ValueError, UnicodeDecodeError):
+ # These could be raised if the pid file is corrupt.
+ pass
+ if not self._pid:
+ self._pid = actual_pid
+
+ if not self._pid:
+ return
+
+ if not actual_pid:
+ _log.warning('Failed to stop %s: pid file is missing' % self._name)
+ return
+ if self._pid != actual_pid:
+ _log.warning('Failed to stop %s: pid file contains %d, not %d' %
+ (self._name, actual_pid, self._pid))
+ # Try to kill the existing pid, anyway, in case it got orphaned.
+ self._executive.kill_process(self._pid)
+ self._pid = None
+ return
+
+ _log.debug("Attempting to shut down %s server at pid %d" % (self._name, self._pid))
+ self._stop_running_server()
+ _log.debug("%s server at pid %d stopped" % (self._name, self._pid))
+ self._pid = None
+ finally:
+ # Make sure we delete the pid file no matter what happens.
+ self._remove_pid_file()
+
+ def _prepare_config(self):
+ """This routine can be overridden by subclasses to do any sort
+ of initialization required prior to starting the server that may fail."""
+ pass
+
+ def _remove_stale_logs(self):
+        """This routine can be overridden by subclasses to try to remove logs
+        left over from a prior run. It should log warnings if the files
+        cannot be deleted, but it should not fail unless failure to delete
+        the logs will actually cause start() to fail."""
+        # The logs may still be held open by another process, but they should be released eventually.
+ for log_prefix in self._log_prefixes:
+ try:
+ self._remove_log_files(self._output_dir, log_prefix)
+            except OSError, e:
+                _log.warning('Failed to remove old %s %s files: %s' % (self._name, log_prefix, e))
+
+ def _spawn_process(self):
+ _log.debug('Starting %s server, cmd="%s"' % (self._name, self._start_cmd))
+ process = self._executive.popen(self._start_cmd, env=self._env, stdout=self._stdout, stderr=self._stderr)
+ pid = process.pid
+ self._filesystem.write_text_file(self._pid_file, str(pid))
+ return pid
+
+ def _stop_running_server(self):
+ self._wait_for_action(self._check_and_kill)
+ if self._filesystem.exists(self._pid_file):
+ self._filesystem.remove(self._pid_file)
+
+ def _check_and_kill(self):
+ if self._executive.check_running_pid(self._pid):
+ _log.debug('pid %d is running, killing it' % self._pid)
+ self._executive.kill_process(self._pid)
+ return False
+ else:
+ _log.debug('pid %d is not running' % self._pid)
+
+ return True
+
+ def _remove_pid_file(self):
+ if self._filesystem.exists(self._pid_file):
+ self._filesystem.remove(self._pid_file)
+
+ def _remove_log_files(self, folder, starts_with):
+ files = self._filesystem.listdir(folder)
+        for file_name in files:
+            if file_name.startswith(starts_with):
+                full_path = self._filesystem.join(folder, file_name)
+                self._filesystem.remove(full_path)
+
+ def _log_errors_from_subprocess(self):
+ _log.error('logging %s errors, if any' % self._name)
+ if self._process:
+ _log.error('%s returncode %s' % (self._name, str(self._process.returncode)))
+ if self._process.stderr:
+ stderr_text = self._process.stderr.read()
+ if stderr_text:
+ _log.error('%s stderr:' % self._name)
+ for line in stderr_text.splitlines():
+ _log.error(' %s' % line)
+ else:
+ _log.error('%s no stderr' % self._name)
+ else:
+ _log.error('%s no stderr handle' % self._name)
+ else:
+ _log.error('%s no process' % self._name)
+ if self._error_log_path and self._filesystem.exists(self._error_log_path):
+ error_log_text = self._filesystem.read_text_file(self._error_log_path)
+ if error_log_text:
+ _log.error('%s error log (%s) contents:' % (self._name, self._error_log_path))
+ for line in error_log_text.splitlines():
+ _log.error(' %s' % line)
+ else:
+ _log.error('%s error log empty' % self._name)
+ _log.error('')
+ else:
+ _log.error('%s no error log' % self._name)
+
+ def _wait_for_action(self, action, wait_secs=20.0, sleep_secs=1.0):
+        """Repeats the action for up to wait_secs seconds or until it succeeds,
+        sleeping for sleep_secs in between attempts. Returns whether it succeeded."""
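+        # For example, _stop_running_server() polls with
+        # self._wait_for_action(self._check_and_kill), retrying for up to 20
+        # seconds while the child process shuts down.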
+ start_time = time.time()
+ while time.time() - start_time < wait_secs:
+ if action():
+ return True
+ _log.debug("Waiting for action: %s" % action)
+ time.sleep(sleep_secs)
+
+ return False
+
+ def _is_server_running_on_all_ports(self):
+ """Returns whether the server is running on all the desired ports."""
+
+ # TODO(dpranke): crbug/378444 maybe pid is unreliable on win?
+ if not self._platform.is_win() and not self._executive.check_running_pid(self._pid):
+ _log.debug("Server isn't running at all")
+ self._log_errors_from_subprocess()
+ raise ServerError("Server exited")
+
+ for mapping in self._mappings:
+ s = socket.socket()
+ port = mapping['port']
+ try:
+ s.connect(('localhost', port))
+ _log.debug("Server running on %d" % port)
+ except IOError, e:
+ if e.errno not in (errno.ECONNREFUSED, errno.ECONNRESET):
+ raise
+ _log.debug("Server NOT running on %d: %s" % (port, e))
+ return False
+ finally:
+ s.close()
+ return True
+
+ def _check_that_all_ports_are_available(self):
+ for mapping in self._mappings:
+ s = socket.socket()
+ if not self._platform.is_win():
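+                # SO_REUSEADDR lets the probe bind even if an old server
+                # socket is lingering in TIME_WAIT; it is skipped on Windows,
+                # where the flag would also mask a port in active use.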
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ port = mapping['port']
+ try:
+ s.bind(('localhost', port))
+ except IOError, e:
+ if e.errno in (errno.EALREADY, errno.EADDRINUSE):
+ raise ServerError('Port %d is already in use.' % port)
+ elif self._platform.is_win() and e.errno in (errno.WSAEACCES,): # pylint: disable=E1101
+ raise ServerError('Port %d is already in use.' % port)
+ else:
+ raise
+ finally:
+ s.close()
+ _log.debug('all ports are available')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/server_base_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/server_base_unittest.py
new file mode 100644
index 0000000..c15a051
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/servers/server_base_unittest.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.port import test
+from webkitpy.layout_tests.servers.server_base import ServerBase
+
+
+class TestServerBase(unittest.TestCase):
+ def test_corrupt_pid_file(self):
+ # This tests that if the pid file is corrupt or invalid,
+ # both start() and stop() deal with it correctly and delete the file.
+ host = MockHost()
+ test_port = test.TestPort(host)
+
+ server = ServerBase(test_port, test_port.default_results_directory())
+ server._pid_file = '/tmp/pidfile'
+ server._spawn_process = lambda: 4
+ server._is_server_running_on_all_ports = lambda: True
+
+ host.filesystem.write_text_file(server._pid_file, 'foo')
+ server.stop()
+ self.assertEqual(host.filesystem.files[server._pid_file], None)
+
+ host.filesystem.write_text_file(server._pid_file, 'foo')
+ server.start()
+ self.assertEqual(server._pid, 4)
+
+ # Note that the pid file would not be None if _spawn_process()
+ # was actually a real implementation.
+ self.assertEqual(host.filesystem.files[server._pid_file], None)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
new file mode 100644
index 0000000..28b35d6
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
@@ -0,0 +1,166 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from webkitpy.layout_tests.models import test_expectations
+
+from webkitpy.common.net import layouttestresults
+
+
+TestExpectations = test_expectations.TestExpectations
+TestExpectationParser = test_expectations.TestExpectationParser
+
+
+class BuildBotPrinter(object):
+ # This output is parsed by buildbots and must only be changed in coordination with buildbot scripts (see webkit.org's
+ # Tools/BuildSlaveSupport/build.webkit.org-config/master.cfg: RunWebKitTests._parseNewRunWebKitTestsOutput
+ # and chromium.org's buildbot/master.chromium/scripts/master/log_parser/webkit_test_command.py).
+
+ def __init__(self, stream, debug_logging):
+ self.stream = stream
+ self.debug_logging = debug_logging
+
+ def print_results(self, run_details):
+ if self.debug_logging:
+ self.print_run_results(run_details.initial_results)
+ self.print_unexpected_results(run_details.summarized_full_results, run_details.enabled_pixel_tests_in_retry)
+
+ def _print(self, msg):
+ self.stream.write(msg + '\n')
+
+ def print_run_results(self, run_results):
+ failed = run_results.total_failures
+ total = run_results.total
+ passed = total - failed - run_results.remaining
+ percent_passed = 0.0
+ if total > 0:
+ percent_passed = float(passed) * 100 / total
+
+ self._print("=> Results: %d/%d tests passed (%.1f%%)" % (passed, total, percent_passed))
+ self._print("")
+ self._print_run_results_entry(run_results, test_expectations.NOW, "Tests to be fixed")
+
+ self._print("")
+ # FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats.
+ self._print_run_results_entry(run_results, test_expectations.WONTFIX,
+ "Tests that will only be fixed if they crash (WONTFIX)")
+ self._print("")
+
+ def _print_run_results_entry(self, run_results, timeline, heading):
+ total = len(run_results.tests_by_timeline[timeline])
+ not_passing = (total -
+ len(run_results.tests_by_expectation[test_expectations.PASS] &
+ run_results.tests_by_timeline[timeline]))
+ self._print("=> %s (%d):" % (heading, not_passing))
+
+ for result in TestExpectations.EXPECTATION_DESCRIPTIONS.keys():
+ if result in (test_expectations.PASS, test_expectations.SKIP):
+ continue
+ results = (run_results.tests_by_expectation[result] & run_results.tests_by_timeline[timeline])
+ desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
+ if not_passing and len(results):
+ pct = len(results) * 100.0 / not_passing
+ self._print(" %5d %-24s (%4.1f%%)" % (len(results), desc, pct))
+
+ def print_unexpected_results(self, summarized_results, enabled_pixel_tests_in_retry=False):
+ passes = {}
+ flaky = {}
+ regressions = {}
+
+        def add_to_dict_of_lists(d, key, value):
+            d.setdefault(key, []).append(value)
+
+ def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
+ actual = results['actual'].split(" ")
+ expected = results['expected'].split(" ")
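+            # 'actual' and 'expected' are space-separated result tokens; for
+            # example, actual == ['TEXT', 'PASS'] means the test failed with a
+            # text diff and then passed on the retry.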
+
+ if 'is_unexpected' not in results or not results['is_unexpected']:
+ # Don't print anything for tests that ran as expected.
+ return
+
+ if actual == ['PASS']:
+ if 'CRASH' in expected:
+ add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
+ elif 'TIMEOUT' in expected:
+ add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
+ else:
+ add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
+ elif enabled_pixel_tests_in_retry and actual == ['TEXT', 'IMAGE+TEXT']:
+ add_to_dict_of_lists(regressions, actual[0], test)
+ elif len(actual) > 1 and actual[-1] in expected:
+ # We group flaky tests by the first actual result we got.
+ add_to_dict_of_lists(flaky, actual[0], test)
+ else:
+ add_to_dict_of_lists(regressions, actual[0], test)
+
+ layouttestresults.for_each_test(summarized_results['tests'], add_result)
+
+ if len(passes) or len(flaky) or len(regressions):
+ self._print("")
+ if len(passes):
+ for key, tests in passes.iteritems():
+ self._print("%s: (%d)" % (key, len(tests)))
+ tests.sort()
+ for test in tests:
+ self._print(" %s" % test)
+ self._print("")
+ self._print("")
+
+ if len(flaky):
+ descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
+ for key, tests in flaky.iteritems():
+ result_type = TestExpectations.EXPECTATIONS[key.lower()]
+ self._print("Unexpected flakiness: %s (%d)" % (descriptions[result_type], len(tests)))
+ tests.sort()
+
+ for test in tests:
+ result = layouttestresults.result_for_test(summarized_results['tests'], test)
+ actual = result['actual'].split(" ")
+ expected = result['expected'].split(" ")
+ # FIXME: clean this up once the old syntax is gone
+ new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp] for exp in list(set(actual) | set(expected))]
+ self._print(" %s [ %s ]" % (test, " ".join(new_expectations_list)))
+ self._print("")
+ self._print("")
+
+ if len(regressions):
+ descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
+ for key, tests in regressions.iteritems():
+ result_type = TestExpectations.EXPECTATIONS[key.lower()]
+ self._print("Regressions: Unexpected %s (%d)" % (descriptions[result_type], len(tests)))
+ tests.sort()
+ for test in tests:
+ result = layouttestresults.result_for_test(summarized_results['tests'], test)
+ actual = result['actual'].split(" ")
+ expected = result['expected'].split(" ")
+ new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp] for exp in actual]
+ self._print(" %s [ %s ]" % (test, " ".join(new_expectations_list)))
+ self._print("")
+
+ if len(summarized_results['tests']) and self.debug_logging:
+ self._print("%s" % ("-" * 78))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py
new file mode 100644
index 0000000..f7aa46a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py
@@ -0,0 +1,109 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models import test_run_results
+from webkitpy.layout_tests.models import test_run_results_unittest
+from webkitpy.layout_tests.views import buildbot_results
+
+
+class BuildBotPrinterTests(unittest.TestCase):
+ def assertEmpty(self, stream):
+ self.assertFalse(stream.getvalue())
+
+ def assertNotEmpty(self, stream):
+ self.assertTrue(stream.getvalue())
+
+ def get_printer(self):
+ stream = StringIO.StringIO()
+ printer = buildbot_results.BuildBotPrinter(stream, debug_logging=True)
+ return printer, stream
+
+ def test_print_unexpected_results(self):
+ port = MockHost().port_factory.get('test')
+ printer, out = self.get_printer()
+
+ # test everything running as expected
+ DASHED_LINE = "-" * 78 + "\n"
+ summary = test_run_results_unittest.summarized_results(port, expected=True, passing=False, flaky=False)
+ printer.print_unexpected_results(summary)
+ self.assertEqual(out.getvalue(), DASHED_LINE)
+
+ # test failures
+ printer, out = self.get_printer()
+ summary = test_run_results_unittest.summarized_results(port, expected=False, passing=False, flaky=False)
+ printer.print_unexpected_results(summary)
+ self.assertNotEmpty(out)
+
+ # test unexpected flaky
+ printer, out = self.get_printer()
+ summary = test_run_results_unittest.summarized_results(port, expected=False, passing=False, flaky=True)
+ printer.print_unexpected_results(summary)
+ self.assertNotEmpty(out)
+
+ printer, out = self.get_printer()
+ summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False)
+ printer.print_unexpected_results(summary)
+ output = out.getvalue()
+        self.assertTrue(output)
+        self.assertNotIn('Skip', output)
+
+ def test_print_unexpected_results_fail_on_retry_also(self):
+ port = MockHost().port_factory.get('test')
+ printer, out = self.get_printer()
+ summary = test_run_results_unittest.summarized_results(port, expected=False, passing=False, flaky=True, fail_on_retry=True)
+ printer.print_unexpected_results(summary)
+ output = out.getvalue()
+ self.assertIn('Regressions: Unexpected crashes (1)\n failures/expected/timeout.html [ Crash Failure ]', output)
+
+ def test_print_results(self):
+ port = MockHost().port_factory.get('test')
+ printer, out = self.get_printer()
+ initial_results = test_run_results_unittest.run_results(port)
+ full_summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False)
+ failing_summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False, only_include_failing=True)
+ details = test_run_results.RunDetails(failing_summary['num_regressions'], full_summary, failing_summary, initial_results, None)
+ printer.print_results(details)
+        self.assertIn('but passed', out.getvalue())
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py
new file mode 100644
index 0000000..fd04ad8
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py
@@ -0,0 +1,135 @@
+# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import os
+import sys
+import time
+
+LOG_HANDLER_NAME = 'MeteredStreamLogHandler'
+
+
+class MeteredStream(object):
+ """
+ This class implements a stream wrapper that has 'meters' as well as
+ regular output. A 'meter' is a single line of text that can be erased
+ and rewritten repeatedly, without producing multiple lines of output. It
+ can be used to produce effects like progress bars.
+ """
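+
+    # Sketch of the erasing behavior on a tty: writing 'foo' and then updating
+    # to 'bar' emits 'foo', then '\b\b\b   \b\b\b' (backspace, overwrite with
+    # spaces, backspace again), then 'bar', so only one line is ever visible.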
+
+ @staticmethod
+ def _erasure(txt):
+ num_chars = len(txt)
+ return '\b' * num_chars + ' ' * num_chars + '\b' * num_chars
+
+ @staticmethod
+ def _ensure_newline(txt):
+ return txt if txt.endswith('\n') else txt + '\n'
+
+ def __init__(self, stream=None, verbose=False, logger=None, time_fn=None, pid=None, number_of_columns=None):
+ self._stream = stream or sys.stderr
+ self._verbose = verbose
+ self._time_fn = time_fn or time.time
+ self._pid = pid or os.getpid()
+ self._isatty = self._stream.isatty()
+ self._erasing = self._isatty and not verbose
+ self._last_partial_line = ''
+ self._last_write_time = 0.0
+ self._throttle_delay_in_secs = 0.066 if self._erasing else 10.0
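+        # When erasing (an interactive tty, not verbose), throttled updates
+        # may be written at roughly 15 per second; otherwise they are limited
+        # to one every 10 seconds.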
+ self._number_of_columns = sys.maxint
+ if self._isatty and number_of_columns:
+ self._number_of_columns = number_of_columns
+
+ self._logger = logger
+ self._log_handler = None
+ if self._logger:
+ log_level = logging.DEBUG if verbose else logging.INFO
+ self._log_handler = _LogHandler(self)
+ self._log_handler.setLevel(log_level)
+ self._logger.addHandler(self._log_handler)
+
+ def __del__(self):
+ self.cleanup()
+
+ def cleanup(self):
+ if self._logger:
+ self._logger.removeHandler(self._log_handler)
+ self._log_handler = None
+
+ def write_throttled_update(self, txt):
+ now = self._time_fn()
+ if now - self._last_write_time >= self._throttle_delay_in_secs:
+ self.write_update(txt, now)
+
+ def write_update(self, txt, now=None):
+ self.write(txt, now)
+ if self._erasing:
+ self._last_partial_line = txt[txt.rfind('\n') + 1:]
+
+ def write(self, txt, now=None, pid=None):
+ now = now or self._time_fn()
+ pid = pid or self._pid
+ self._last_write_time = now
+ if self._last_partial_line:
+ self._erase_last_partial_line()
+ if self._verbose:
+ now_tuple = time.localtime(now)
+ msg = '%02d:%02d:%02d.%03d %d %s' % (now_tuple.tm_hour, now_tuple.tm_min, now_tuple.tm_sec, int((now * 1000) % 1000), pid, self._ensure_newline(txt))
+ elif self._isatty:
+ msg = txt
+ else:
+ msg = self._ensure_newline(txt)
+
+ self._stream.write(msg)
+
+ def writeln(self, txt, now=None, pid=None):
+ self.write(self._ensure_newline(txt), now, pid)
+
+ def _erase_last_partial_line(self):
+ self._stream.write(self._erasure(self._last_partial_line))
+ self._last_partial_line = ''
+
+ def flush(self):
+ if self._last_partial_line:
+ self._stream.write('\n')
+ self._last_partial_line = ''
+ self._stream.flush()
+
+ def number_of_columns(self):
+ return self._number_of_columns
+
+
+class _LogHandler(logging.Handler):
+ def __init__(self, meter):
+ logging.Handler.__init__(self)
+ self._meter = meter
+ self.name = LOG_HANDLER_NAME
+
+ def emit(self, record):
+ self._meter.writeln(record.getMessage(), record.created, record.process)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py
new file mode 100644
index 0000000..8cf31c4
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py
@@ -0,0 +1,153 @@
+# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import logging
+import re
+import unittest
+
+from webkitpy.layout_tests.views.metered_stream import MeteredStream
+
+
+class RegularTest(unittest.TestCase):
+ verbose = False
+ isatty = False
+
+ def setUp(self):
+ self.stream = StringIO.StringIO()
+ self.buflist = self.stream.buflist
+ self.stream.isatty = lambda: self.isatty
+
+        # Configure a logger to check that log calls normally get included.
+ self.logger = logging.getLogger(__name__)
+ self.logger.setLevel(logging.DEBUG)
+ self.logger.propagate = False
+
+        # Add a dummy time counter to provide a deterministic default behavior.
+ self.times = range(10)
+
+ self.meter = MeteredStream(self.stream, self.verbose, self.logger, self.time_fn, 8675)
+
+ def tearDown(self):
+ if self.meter:
+ self.meter.cleanup()
+ self.meter = None
+
+ def time_fn(self):
+ return self.times.pop(0)
+
+ def test_logging_not_included(self):
+ # This tests that if we don't hand a logger to the MeteredStream,
+ # nothing is logged.
+ logging_stream = StringIO.StringIO()
+ handler = logging.StreamHandler(logging_stream)
+ root_logger = logging.getLogger()
+ orig_level = root_logger.level
+ root_logger.addHandler(handler)
+ root_logger.setLevel(logging.DEBUG)
+ try:
+ self.meter = MeteredStream(self.stream, self.verbose, None, self.time_fn, 8675)
+ self.meter.write_throttled_update('foo')
+ self.meter.write_update('bar')
+ self.meter.write('baz')
+ self.assertEqual(logging_stream.buflist, [])
+ finally:
+ root_logger.removeHandler(handler)
+ root_logger.setLevel(orig_level)
+
+ def _basic(self, times):
+ self.times = times
+ self.meter.write_update('foo')
+ self.meter.write_update('bar')
+ self.meter.write_throttled_update('baz')
+ self.meter.write_throttled_update('baz 2')
+ self.meter.writeln('done')
+ self.assertEqual(self.times, [])
+ return self.buflist
+
+ def test_basic(self):
+ buflist = self._basic([0, 1, 2, 13, 14])
+ self.assertEqual(buflist, ['foo\n', 'bar\n', 'baz 2\n', 'done\n'])
+
+ def _log_after_update(self):
+ self.meter.write_update('foo')
+ self.logger.info('bar')
+ return self.buflist
+
+ def test_log_after_update(self):
+ buflist = self._log_after_update()
+ self.assertEqual(buflist, ['foo\n', 'bar\n'])
+
+ def test_log_args(self):
+ self.logger.info('foo %s %d', 'bar', 2)
+ self.assertEqual(self.buflist, ['foo bar 2\n'])
+
+
+class TtyTest(RegularTest):
+ verbose = False
+ isatty = True
+
+ def test_basic(self):
+ buflist = self._basic([0, 1, 1.05, 1.1, 2])
+ self.assertEqual(buflist, ['foo',
+ MeteredStream._erasure('foo'), 'bar',
+ MeteredStream._erasure('bar'), 'baz 2',
+ MeteredStream._erasure('baz 2'), 'done\n'])
+
+ def test_log_after_update(self):
+ buflist = self._log_after_update()
+ self.assertEqual(buflist, ['foo',
+ MeteredStream._erasure('foo'), 'bar\n'])
+
+
+class VerboseTest(RegularTest):
+ isatty = False
+ verbose = True
+
+ def test_basic(self):
+ buflist = self._basic([0, 1, 2.1, 13, 14.1234])
+ # We don't bother to match the hours and minutes of the timestamp since
+ # the local timezone can vary and we can't set that portably and easily.
+ self.assertTrue(re.match('\d\d:\d\d:00.000 8675 foo\n', buflist[0]))
+ self.assertTrue(re.match('\d\d:\d\d:01.000 8675 bar\n', buflist[1]))
+ self.assertTrue(re.match('\d\d:\d\d:13.000 8675 baz 2\n', buflist[2]))
+ self.assertTrue(re.match('\d\d:\d\d:14.123 8675 done\n', buflist[3]))
+ self.assertEqual(len(buflist), 4)
+
+ def test_log_after_update(self):
+ buflist = self._log_after_update()
+ self.assertTrue(re.match('\d\d:\d\d:00.000 8675 foo\n', buflist[0]))
+
+ # The second argument should have a real timestamp and pid, so we just check the format.
+ self.assertTrue(re.match('\d\d:\d\d:\d\d.\d\d\d \d+ bar\n', buflist[1]))
+
+ self.assertEqual(len(buflist), 2)
+
+ def test_log_args(self):
+ self.logger.info('foo %s %d', 'bar', 2)
+ self.assertEqual(len(self.buflist), 1)
+ self.assertTrue(self.buflist[0].endswith('foo bar 2\n'))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/printing.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/printing.py
new file mode 100644
index 0000000..57e78c1
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/printing.py
@@ -0,0 +1,434 @@
+# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Package that handles non-debug, non-file output for run-webkit-tests."""
+
+import math
+import optparse
+
+from webkitpy.tool import grammar
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models.test_expectations import TestExpectations, TestExpectationParser
+from webkitpy.layout_tests.views.metered_stream import MeteredStream
+
+
+NUM_SLOW_TESTS_TO_LOG = 10
+
+
+def print_options():
+ return [
+ optparse.make_option('--debug-rwt-logging', action='store_true', default=False,
+ help='print timestamps and debug information for run-webkit-tests itself'),
+ optparse.make_option('--details', action='store_true', default=False,
+ help='print detailed results for every test'),
+ optparse.make_option('-q', '--quiet', action='store_true', default=False,
+ help='run quietly (errors, warnings, and progress only)'),
+ optparse.make_option('--timing', action='store_true', default=False,
+ help='display test times (summary plus per-test w/ --verbose)'),
+ optparse.make_option('-v', '--verbose', action='store_true', default=False,
+ help='print a summarized result for every test (one line per test)'),
+ ]
+
+
+class Printer(object):
+ """Class handling all non-debug-logging printing done by run-webkit-tests."""
+
+ def __init__(self, port, options, regular_output, logger=None):
+ self.num_completed = 0
+ self.num_tests = 0
+ self._port = port
+ self._options = options
+ self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger,
+ number_of_columns=self._port.host.platform.terminal_width())
+ self._running_tests = []
+ self._completed_tests = []
+
+ def cleanup(self):
+ self._meter.cleanup()
+
+ def __del__(self):
+ self.cleanup()
+
+ def print_config(self, results_directory):
+ self._print_default("Using port '%s'" % self._port.name())
+ self._print_default("Test configuration: %s" % self._port.test_configuration())
+ self._print_default("View the test results at file://%s/results.html" % results_directory)
+ self._print_default("View the archived results dashboard at file://%s/dashboard.html" % results_directory)
+
+ # FIXME: should these options be in printing_options?
+ if self._options.new_baseline:
+ self._print_default("Placing new baselines in %s" % self._port.baseline_path())
+
+ fs = self._port.host.filesystem
+ fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()]
+ self._print_default("Baseline search path: %s -> generic" % " -> ".join(fallback_path))
+
+ self._print_default("Using %s build" % self._options.configuration)
+ if self._options.pixel_tests:
+ self._print_default("Pixel tests enabled")
+ else:
+ self._print_default("Pixel tests disabled")
+
+ self._print_default("Regular timeout: %s, slow test timeout: %s" %
+ (self._options.time_out_ms, self._options.slow_time_out_ms))
+
+ self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line()))
+ self._print_default('')
+
+ def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
+ found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_to_run)
+ if repeat_each * iterations > 1:
+ found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations)
+ found_str += ', skipping %d' % (num_all_test_files - num_to_run)
+ self._print_default(found_str + '.')
+
+ def print_expected(self, run_results, tests_with_result_type_callback):
+ self._print_expected_results_of_type(run_results, test_expectations.PASS, "passes", tests_with_result_type_callback)
+ self._print_expected_results_of_type(run_results, test_expectations.FAIL, "failures", tests_with_result_type_callback)
+ self._print_expected_results_of_type(run_results, test_expectations.FLAKY, "flaky", tests_with_result_type_callback)
+ self._print_debug('')
+
+ def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
+ driver_name = self._port.driver_name()
+ if num_workers == 1:
+ self._print_default("Running 1 %s." % driver_name)
+ self._print_debug("(%s)." % grammar.pluralize('shard', num_shards))
+ else:
+ self._print_default("Running %d %ss in parallel." % (num_workers, driver_name))
+ self._print_debug("(%d shards; %d locked)." % (num_shards, num_locked_shards))
+ self._print_default('')
+
+ def _print_expected_results_of_type(self, run_results, result_type, result_type_str, tests_with_result_type_callback):
+ tests = tests_with_result_type_callback(result_type)
+ now = run_results.tests_by_timeline[test_expectations.NOW]
+ wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX]
+
+ # We use a fancy format string in order to print the data out in a
+ # nicely-aligned table.
+ fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
+ % (self._num_digits(now), self._num_digits(wontfix)))
+ self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
+
+ def _num_digits(self, num):
+ ndigits = 1
+ if len(num):
+ ndigits = int(math.log10(len(num))) + 1
+ return ndigits
+
+ def print_results(self, run_time, run_results, summarized_results):
+ self._print_timing_statistics(run_time, run_results)
+ self._print_one_line_summary(run_time, run_results)
+
+ def _print_timing_statistics(self, total_time, run_results):
+ self._print_debug("Test timing:")
+ self._print_debug(" %6.2f total testing time" % total_time)
+ self._print_debug("")
+
+ self._print_worker_statistics(run_results, int(self._options.child_processes))
+ self._print_aggregate_test_statistics(run_results)
+ self._print_individual_test_times(run_results)
+ self._print_directory_timings(run_results)
+
+ def _print_worker_statistics(self, run_results, num_workers):
+ self._print_debug("Thread timing:")
+ stats = {}
+ cuml_time = 0
+ for result in run_results.results_by_name.values():
+ stats.setdefault(result.worker_name, {'num_tests': 0, 'total_time': 0})
+ stats[result.worker_name]['num_tests'] += 1
+ stats[result.worker_name]['total_time'] += result.total_run_time
+ cuml_time += result.total_run_time
+
+ for worker_name in stats:
+ self._print_debug(" %10s: %5d tests, %6.2f secs" % (worker_name, stats[worker_name]['num_tests'], stats[worker_name]['total_time']))
+ self._print_debug(" %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / num_workers))
+ self._print_debug("")
+
+ def _print_aggregate_test_statistics(self, run_results):
+ times_for_dump_render_tree = [result.test_run_time for result in run_results.results_by_name.values()]
+ self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)
+
+ def _print_individual_test_times(self, run_results):
+ # Reverse-sort by the time spent in the driver.
+
+ individual_test_timings = sorted(run_results.results_by_name.values(), key=lambda result: result.test_run_time, reverse=True)
+ num_printed = 0
+ slow_tests = []
+ timeout_or_crash_tests = []
+ unexpected_slow_tests = []
+ for test_tuple in individual_test_timings:
+ test_name = test_tuple.test_name
+ is_timeout_crash_or_slow = False
+ if test_name in run_results.slow_tests:
+ is_timeout_crash_or_slow = True
+ slow_tests.append(test_tuple)
+
+ if test_name in run_results.failures_by_name:
+ result = run_results.results_by_name[test_name].type
+ if (result == test_expectations.TIMEOUT or
+ result == test_expectations.CRASH):
+ is_timeout_crash_or_slow = True
+ timeout_or_crash_tests.append(test_tuple)
+
+ if (not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG):
+ num_printed = num_printed + 1
+ unexpected_slow_tests.append(test_tuple)
+
+ self._print_debug("")
+ if unexpected_slow_tests:
+ self._print_test_list_timing("%s slowest tests that are not marked as SLOW and did not timeout/crash:" %
+ NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
+ self._print_debug("")
+
+ if slow_tests:
+ self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
+ self._print_debug("")
+
+ if timeout_or_crash_tests:
+ self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests)
+ self._print_debug("")
+
+ def _print_test_list_timing(self, title, test_list):
+ self._print_debug(title)
+ for test_tuple in test_list:
+ test_run_time = round(test_tuple.test_run_time, 1)
+ self._print_debug(" %s took %s seconds" % (test_tuple.test_name, test_run_time))
+
+ def _print_directory_timings(self, run_results):
+ stats = {}
+ for result in run_results.results_by_name.values():
+ stats.setdefault(result.shard_name, {'num_tests': 0, 'total_time': 0})
+ stats[result.shard_name]['num_tests'] += 1
+ stats[result.shard_name]['total_time'] += result.total_run_time
+
+ min_seconds_to_print = 15
+
+ timings = []
+ for directory in stats:
+ rounded_time = round(stats[directory]['total_time'], 1)
+ if rounded_time > min_seconds_to_print:
+ timings.append((directory, rounded_time, stats[directory]['num_tests']))
+
+ if not timings:
+ return
+
+ timings.sort()
+
+ self._print_debug("Time to process slowest subdirectories:")
+ for timing in timings:
+ self._print_debug(" %s took %s seconds to run %s tests." % timing)
+ self._print_debug("")
+
+ def _print_statistics_for_test_timings(self, title, timings):
+ self._print_debug(title)
+ timings.sort()
+
+ num_tests = len(timings)
+ if not num_tests:
+ return
+ percentile90 = timings[int(.9 * num_tests)]
+ percentile99 = timings[int(.99 * num_tests)]
+
+        if num_tests % 2 == 1:
+            median = timings[(num_tests - 1) / 2]
+ else:
+ lower = timings[num_tests / 2 - 1]
+ upper = timings[num_tests / 2]
+ median = (float(lower + upper)) / 2
+
+ mean = sum(timings) / num_tests
+
+        sum_of_deviations = 0
+        for timing in timings:
+            sum_of_deviations += math.pow(timing - mean, 2)
+
+ std_deviation = math.sqrt(sum_of_deviations / num_tests)
+ self._print_debug(" Median: %6.3f" % median)
+ self._print_debug(" Mean: %6.3f" % mean)
+ self._print_debug(" 90th percentile: %6.3f" % percentile90)
+ self._print_debug(" 99th percentile: %6.3f" % percentile99)
+ self._print_debug(" Standard dev: %6.3f" % std_deviation)
+ self._print_debug("")
+
+ def _print_one_line_summary(self, total_time, run_results):
+ if self._options.timing:
+ parallel_time = sum(result.total_run_time for result in run_results.results_by_name.values())
+
+ # There is serial overhead in layout_test_runner.run() that we can't easily account for when
+ # really running in parallel, but taking the min() ensures that in the worst case
+ # (if parallel time is less than run_time) we do account for it.
+ serial_time = total_time - min(run_results.run_time, parallel_time)
+
+ speedup = (parallel_time + serial_time) / total_time
+ timing_summary = ' in %.2fs (%.2fs in rwt, %.2gx)' % (total_time, serial_time, speedup)
+ else:
+ timing_summary = ''
+
+ total = run_results.total - run_results.expected_skips
+ expected = run_results.expected - run_results.expected_skips
+ unexpected = run_results.unexpected
+ incomplete = total - expected - unexpected
+ incomplete_str = ''
+ if incomplete:
+ self._print_default("")
+ incomplete_str = " (%d didn't run)" % incomplete
+
+ if self._options.verbose or self._options.debug_rwt_logging or unexpected:
+ self.writeln("")
+
+ expected_summary_str = ''
+ if run_results.expected_failures > 0:
+ expected_summary_str = " (%d passed, %d didn't)" % (expected - run_results.expected_failures, run_results.expected_failures)
+
+ summary = ''
+ if unexpected == 0:
+ if expected == total:
+ if expected > 1:
+ summary = "All %d tests ran as expected%s%s." % (expected, expected_summary_str, timing_summary)
+ else:
+ summary = "The test ran as expected%s%s." % (expected_summary_str, timing_summary)
+ else:
+ summary = "%s ran as expected%s%s%s." % (grammar.pluralize('test', expected), expected_summary_str, incomplete_str, timing_summary)
+ else:
+ summary = "%s ran as expected%s, %d didn't%s%s:" % (grammar.pluralize('test', expected), expected_summary_str, unexpected, incomplete_str, timing_summary)
+
+ self._print_quiet(summary)
+ self._print_quiet("")
+
+ def _test_status_line(self, test_name, suffix):
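+        # Elision sketch (illustrative values): a status line wider than the
+        # terminal, e.g. '[10/100] very/long/path/to/test.html failed', is
+        # shortened by replacing the middle of the test name with '...'.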
+ format_string = '[%d/%d] %s%s'
+ status_line = format_string % (self.num_completed, self.num_tests, test_name, suffix)
+ if len(status_line) > self._meter.number_of_columns():
+ overflow_columns = len(status_line) - self._meter.number_of_columns()
+ ellipsis = '...'
+ if len(test_name) < overflow_columns + len(ellipsis) + 2:
+                # We don't have enough space even if we elide; just show the test filename.
+ fs = self._port.host.filesystem
+ test_name = fs.split(test_name)[1]
+ else:
+ new_length = len(test_name) - overflow_columns - len(ellipsis)
+ prefix = int(new_length / 2)
+ test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]
+ return format_string % (self.num_completed, self.num_tests, test_name, suffix)
+
+ def print_started_test(self, test_name):
+ self._running_tests.append(test_name)
+ if len(self._running_tests) > 1:
+ suffix = ' (+%d)' % (len(self._running_tests) - 1)
+ else:
+ suffix = ''
+ if self._options.verbose:
+ write = self._meter.write_update
+ else:
+ write = self._meter.write_throttled_update
+ write(self._test_status_line(test_name, suffix))
+
+ def print_finished_test(self, result, expected, exp_str, got_str):
+ self.num_completed += 1
+ test_name = result.test_name
+
+ result_message = self._result_message(result.type, result.failures, expected,
+ self._options.timing, result.test_run_time)
+
+ if self._options.details:
+ self._print_test_trace(result, exp_str, got_str)
+ elif self._options.verbose or not expected:
+ self.writeln(self._test_status_line(test_name, result_message))
+ elif self.num_completed == self.num_tests:
+ self._meter.write_update('')
+ else:
+ if test_name == self._running_tests[0]:
+ self._completed_tests.insert(0, [test_name, result_message])
+ else:
+ self._completed_tests.append([test_name, result_message])
+
+            for completed_name, completed_message in self._completed_tests:
+                self._meter.write_throttled_update(self._test_status_line(completed_name, completed_message))
+ self._completed_tests = []
+ self._running_tests.remove(test_name)
+
+ def _result_message(self, result_type, failures, expected, timing, test_run_time):
+ exp_string = ' unexpectedly' if not expected else ''
+ timing_string = ' %.4fs' % test_run_time if timing else ''
+ if result_type == test_expectations.PASS:
+ return ' passed%s%s' % (exp_string, timing_string)
+ else:
+ return ' failed%s (%s)%s' % (exp_string, ', '.join(failure.message() for failure in failures), timing_string)
+
+ def _print_test_trace(self, result, exp_str, got_str):
+ test_name = result.test_name
+ self._print_default(self._test_status_line(test_name, ''))
+
+ base = self._port.lookup_virtual_test_base(test_name)
+ if base:
+ args = ' '.join(self._port.lookup_virtual_test_args(test_name))
+ self._print_default(' base: %s' % base)
+ self._print_default(' args: %s' % args)
+
+ references = self._port.reference_files(test_name)
+ if references:
+ for _, filename in references:
+ self._print_default(' ref: %s' % self._port.relative_test_filename(filename))
+ else:
+ for extension in ('.txt', '.png', '.wav'):
+ self._print_baseline(test_name, extension)
+
+ self._print_default(' exp: %s' % exp_str)
+ self._print_default(' got: %s' % got_str)
+ self._print_default(' took: %-.3f' % result.test_run_time)
+ self._print_default('')
+
+ def _print_baseline(self, test_name, extension):
+ baseline = self._port.expected_filename(test_name, extension)
+ if self._port._filesystem.exists(baseline):
+ relpath = self._port.relative_test_filename(baseline)
+ else:
+ relpath = '<none>'
+ self._print_default(' %s: %s' % (extension[1:], relpath))
+
+ def _print_quiet(self, msg):
+ self.writeln(msg)
+
+ def _print_default(self, msg):
+ if not self._options.quiet:
+ self.writeln(msg)
+
+ def _print_debug(self, msg):
+ if self._options.debug_rwt_logging:
+ self.writeln(msg)
+
+ def write_throttled_update(self, msg):
+ self._meter.write_throttled_update(msg)
+
+ def write_update(self, msg):
+ self._meter.write_update(msg)
+
+ def writeln(self, msg):
+ self._meter.writeln(msg)
+
+ def flush(self):
+ self._meter.flush()
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py
new file mode 100644
index 0000000..90bc2c3
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py
@@ -0,0 +1,251 @@
+# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for printing.py."""
+
+import StringIO
+import optparse
+import sys
+import time
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+
+from webkitpy.common.system import logtesting
+from webkitpy.layout_tests import port
+from webkitpy.layout_tests.controllers import manager
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.views import printing
+
+
+def get_options(args):
+ print_options = printing.print_options()
+ option_parser = optparse.OptionParser(option_list=print_options)
+ return option_parser.parse_args(args)
+
+
+class TestUtilityFunctions(unittest.TestCase):
+ def test_print_options(self):
+ options, args = get_options([])
+ self.assertIsNotNone(options)
+
+
+class FakeRunResults(object):
+ def __init__(self, total=1, expected=1, unexpected=0, fake_results=None):
+ fake_results = fake_results or []
+ self.total = total
+ self.expected = expected
+ self.expected_failures = 0
+ self.unexpected = unexpected
+ self.expected_skips = 0
+ self.results_by_name = {}
+ total_run_time = 0
+ for result in fake_results:
+ self.results_by_name[result.shard_name] = result
+ total_run_time += result.total_run_time
+ self.run_time = total_run_time + 1
+
+
+class FakeShard(object):
+ def __init__(self, shard_name, total_run_time):
+ self.shard_name = shard_name
+ self.total_run_time = total_run_time
+
+
+class Testprinter(unittest.TestCase):
+ def assertEmpty(self, stream):
+ self.assertFalse(stream.getvalue())
+
+ def assertNotEmpty(self, stream):
+ self.assertTrue(stream.getvalue())
+
+ def assertWritten(self, stream, contents):
+ self.assertEqual(stream.buflist, contents)
+
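+ # Note: StringIO.StringIO (the pure-Python implementation) exposes written
+ # chunks as .buflist and the consolidated buffer as .buf; reset() clears
+ # both so assertions only see output produced since the last reset.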
+ def reset(self, stream):
+ stream.buflist = []
+ stream.buf = ''
+
+ def get_printer(self, args=None):
+ args = args or []
+ printing_options = printing.print_options()
+ option_parser = optparse.OptionParser(option_list=printing_options)
+ options, args = option_parser.parse_args(args)
+ host = MockHost()
+ self._port = host.port_factory.get('test', options)
+ nproc = 2
+
+ regular_output = StringIO.StringIO()
+ printer = printing.Printer(self._port, options, regular_output)
+ return printer, regular_output
+
+ def get_result(self, test_name, result_type=test_expectations.PASS, run_time=0):
+ failures = []
+ if result_type == test_expectations.TIMEOUT:
+ failures = [test_failures.FailureTimeout()]
+ elif result_type == test_expectations.CRASH:
+ failures = [test_failures.FailureCrash()]
+ return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
+
+ def test_configure_and_cleanup(self):
+ # This test verifies that calling cleanup repeatedly and deleting
+ # the object is safe.
+ printer, err = self.get_printer()
+ printer.cleanup()
+ printer.cleanup()
+ printer = None
+
+ def test_print_config(self):
+ printer, err = self.get_printer()
+ # FIXME: It's lame that these options have to be set directly.
+ printer._options.pixel_tests = True
+ printer._options.new_baseline = True
+ printer._options.time_out_ms = 6000
+ printer._options.slow_time_out_ms = 12000
+ printer.print_config('/tmp')
+ self.assertIn("Using port 'test-mac-leopard'", err.getvalue())
+ self.assertIn('Test configuration: <leopard, x86, release>', err.getvalue())
+ self.assertIn('View the test results at file:///tmp', err.getvalue())
+ self.assertIn('View the archived results dashboard at file:///tmp', err.getvalue())
+ self.assertIn('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic', err.getvalue())
+ self.assertIn('Using Release build', err.getvalue())
+ self.assertIn('Pixel tests enabled', err.getvalue())
+ self.assertIn('Command line:', err.getvalue())
+ self.assertIn('Regular timeout: ', err.getvalue())
+
+ self.reset(err)
+ printer._options.quiet = True
+ printer.print_config('/tmp')
+ self.assertNotIn('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic', err.getvalue())
+
+ def test_print_directory_timings(self):
+ printer, err = self.get_printer()
+ printer._options.debug_rwt_logging = True
+
+ run_results = FakeRunResults()
+ run_results.results_by_name = {
+ "slowShard": FakeShard("slowShard", 16),
+ "borderlineShard": FakeShard("borderlineShard", 15),
+ "fastShard": FakeShard("fastShard", 1),
+ }
+
+ printer._print_directory_timings(run_results)
+ self.assertWritten(err, ['Time to process slowest subdirectories:\n', ' slowShard took 16.0 seconds to run 1 tests.\n', '\n'])
+
+ printer, err = self.get_printer()
+ printer._options.debug_rwt_logging = True
+
+ run_results.results_by_name = {
+ "borderlineShard": FakeShard("borderlineShard", 15),
+ "fastShard": FakeShard("fastShard", 1),
+ }
+
+ printer._print_directory_timings(run_results)
+ self.assertWritten(err, [])
+
+ def test_print_one_line_summary(self):
+ def run_test(total, exp, unexp, shards, result):
+ printer, err = self.get_printer(['--timing'] if shards else None)
+ fake_results = FakeRunResults(total, exp, unexp, shards)
+ total_time = fake_results.run_time + 1
+ printer._print_one_line_summary(total_time, fake_results)
+ self.assertWritten(err, result)
+
+ # Without times:
+ run_test(1, 1, 0, [], ["The test ran as expected.\n", "\n"])
+ run_test(2, 1, 1, [], ["\n", "1 test ran as expected, 1 didn't:\n", "\n"])
+ run_test(3, 2, 1, [], ["\n", "2 tests ran as expected, 1 didn't:\n", "\n"])
+ run_test(3, 2, 0, [], ["\n", "2 tests ran as expected (1 didn't run).\n", "\n"])
+
+ # With times:
+ fake_shards = [FakeShard("foo", 1), FakeShard("bar", 2)]
+ run_test(1, 1, 0, fake_shards, ["The test ran as expected in 5.00s (2.00s in rwt, 1x).\n", "\n"])
+ run_test(2, 1, 1, fake_shards, ["\n", "1 test ran as expected, 1 didn't in 5.00s (2.00s in rwt, 1x):\n", "\n"])
+ run_test(3, 2, 1, fake_shards, ["\n", "2 tests ran as expected, 1 didn't in 5.00s (2.00s in rwt, 1x):\n", "\n"])
+ run_test(3, 2, 0, fake_shards, ["\n", "2 tests ran as expected (1 didn't run) in 5.00s (2.00s in rwt, 1x).\n", "\n"])
+
+ def test_test_status_line(self):
+ printer, _ = self.get_printer()
+ printer._meter.number_of_columns = lambda: 80
+ actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
+ self.assertEqual(80, len(actual))
+ self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associa...after-index-assertion-fail1.html passed')
+
+ printer._meter.number_of_columns = lambda: 89
+ actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
+ self.assertEqual(89, len(actual))
+ self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associated-...ents-after-index-assertion-fail1.html passed')
+
+ printer._meter.number_of_columns = lambda: sys.maxint
+ actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
+ self.assertEqual(90, len(actual))
+ self.assertEqual(actual, '[0/0] fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html passed')
+
+ printer._meter.number_of_columns = lambda: 18
+ actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
+ self.assertEqual(18, len(actual))
+ self.assertEqual(actual, '[0/0] f...l passed')
+
+ printer._meter.number_of_columns = lambda: 10
+ actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
+ self.assertEqual(actual, '[0/0] associated-elements-after-index-assertion-fail1.html passed')
+
+ def test_details(self):
+ printer, err = self.get_printer(['--details'])
+ result = self.get_result('passes/image.html')
+ printer.print_started_test('passes/image.html')
+ printer.print_finished_test(result, expected=False, exp_str='', got_str='')
+ self.assertNotEmpty(err)
+
+ def test_print_found(self):
+ printer, err = self.get_printer()
+
+ printer.print_found(100, 10, 1, 1)
+ self.assertWritten(err, ["Found 100 tests; running 10, skipping 90.\n"])
+
+ self.reset(err)
+ printer.print_found(100, 10, 2, 3)
+ self.assertWritten(err, ["Found 100 tests; running 10 (6 times each: --repeat-each=2 --iterations=3), skipping 90.\n"])
+
+ def test_debug_rwt_logging_is_throttled(self):
+ printer, err = self.get_printer(['--debug-rwt-logging'])
+
+ result = self.get_result('passes/image.html')
+ printer.print_started_test('passes/image.html')
+ printer.print_finished_test(result, expected=True, exp_str='', got_str='')
+
+ printer.print_started_test('passes/text.html')
+ result = self.get_result('passes/text.html')
+ printer.print_finished_test(result, expected=True, exp_str='', got_str='')
+
+ # Only the first test's start should be printed.
+ lines = err.buflist
+ self.assertEqual(len(lines), 1)
+ self.assertTrue(lines[0].endswith('passes/image.html\n'))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/performance_tests/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/performance_tests/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/performance_tests/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/performance_tests/perftest.py b/src/third_party/blink/Tools/Scripts/webkitpy/performance_tests/perftest.py
new file mode 100644
index 0000000..9459f29
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/performance_tests/perftest.py
@@ -0,0 +1,318 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+# Copyright (C) 2012 Zoltan Horvath, Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import errno
+import logging
+import math
+import re
+import os
+import signal
+import socket
+import subprocess
+import sys
+import time
+
+from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
+from webkitpy.layout_tests.port.driver import DriverInput
+from webkitpy.layout_tests.port.driver import DriverOutput
+
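+# Each test is run this many times by default; PerfTest.run() starts a fresh
+# driver for every invocation and aggregates the reported metric groups.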
+DEFAULT_TEST_RUNNER_COUNT = 4
+
+_log = logging.getLogger(__name__)
+
+
+class PerfTestMetric(object):
+ def __init__(self, metric, unit=None, iterations=None):
+ # FIXME: Fix runner.js to report correct metric names
+ self._iterations = iterations or []
+ self._unit = unit or self.metric_to_unit(metric)
+ self._metric = self.time_unit_to_metric(self._unit) if metric == 'Time' else metric
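+ # For example, PerfTestMetric('Time') defaults to the 'ms' unit and keeps
+ # the name 'Time', while PerfTestMetric('Time', 'fps') is reported as
+ # 'FrameRate' and PerfTestMetric('Time', 'runs/s') as 'Runs'.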
+
+ def name(self):
+ return self._metric
+
+ def has_values(self):
+ return bool(self._iterations)
+
+ def append_group(self, group_values):
+ assert isinstance(group_values, list)
+ self._iterations.append(group_values)
+
+ def grouped_iteration_values(self):
+ return self._iterations
+
+ def flattened_iteration_values(self):
+ return [value for group_values in self._iterations for value in group_values]
+
+ def unit(self):
+ return self._unit
+
+ @staticmethod
+ def metric_to_unit(metric):
+ assert metric in ('Time', 'Malloc', 'JSHeap')
+ return 'ms' if metric == 'Time' else 'bytes'
+
+ @staticmethod
+ def time_unit_to_metric(unit):
+ return {'fps': 'FrameRate', 'runs/s': 'Runs', 'ms': 'Time'}[unit]
+
+
+class PerfTest(object):
+
+ def __init__(self, port, test_name, test_path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT):
+ self._port = port
+ self._test_name = test_name
+ self._test_path = test_path
+ self._description = None
+ self._metrics = {}
+ self._ordered_metrics_name = []
+ self._test_runner_count = test_runner_count
+
+ def test_name(self):
+ return self._test_name
+
+ def test_name_without_file_extension(self):
+ return re.sub(r'\.\w+$', '', self.test_name())
+
+ def test_path(self):
+ return self._test_path
+
+ def description(self):
+ return self._description
+
+ def prepare(self, time_out_ms):
+ return True
+
+ def _create_driver(self):
+ return self._port.create_driver(worker_number=0, no_timeout=True)
+
+ def run(self, time_out_ms):
+ for _ in xrange(self._test_runner_count):
+ driver = self._create_driver()
+ try:
+ if not self._run_with_driver(driver, time_out_ms):
+ return None
+ finally:
+ driver.stop()
+
+ should_log = not self._port.get_option('profile')
+ if should_log and self._description:
+ _log.info('DESCRIPTION: %s' % self._description)
+
+ results = {}
+ for metric_name in self._ordered_metrics_name:
+ metric = self._metrics[metric_name]
+ results[metric.name()] = metric.grouped_iteration_values()
+ if should_log:
+ legacy_chromium_bot_compatible_name = self.test_name_without_file_extension().replace('/', ': ')
+ self.log_statistics(legacy_chromium_bot_compatible_name + ': ' + metric.name(),
+ metric.flattened_iteration_values(), metric.unit())
+
+ return results
+
+ @staticmethod
+ def log_statistics(test_name, values, unit):
+ sorted_values = sorted(values)
+
+ # Compute the mean and variance using Knuth's online algorithm (has good numerical stability).
+ square_sum = 0
+ mean = 0
+ for i, value in enumerate(sorted_values):
+ delta = value - mean
+ sweep = i + 1.0
+ mean += delta / sweep
+ square_sum += delta * (value - mean)
+
+ middle = int(len(sorted_values) / 2)
+ mean = sum(sorted_values) / len(values)
+ median = sorted_values[middle] if len(sorted_values) % 2 else (sorted_values[middle - 1] + sorted_values[middle]) / 2
+ stdev = math.sqrt(square_sum / (len(sorted_values) - 1)) if len(sorted_values) > 1 else 0
+
+ _log.info('RESULT %s= %s %s' % (test_name, mean, unit))
+ _log.info('median= %s %s, stdev= %s %s, min= %s %s, max= %s %s' %
+ (median, unit, stdev, unit, sorted_values[0], unit, sorted_values[-1], unit))
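+ # For example (hypothetical values), log_statistics('Parser: t', [3.0, 1.0,
+ # 5.0, 2.0, 4.0], 'ms') sorts the values and logs "RESULT Parser: t= 3.0 ms"
+ # followed by "median= 3.0 ms, stdev= 1.58... ms, min= 1.0 ms, max= 5.0 ms".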
+
+ _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
+ _metrics_regex = re.compile(r'^(?P<metric>Time|Malloc|JS Heap):')
+ _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
+ _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')
+ _console_regex = re.compile(r'^CONSOLE (MESSAGE|WARNING):')
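+ # These patterns parse runner.js-style output such as:
+ # Time:
+ # values 1080, 1120, 1095, 1101, 1104 ms
+ # avg 1100 ms
+ # Only the 'values' lines are recorded; the aggregate statistics are skipped.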
+
+ def _run_with_driver(self, driver, time_out_ms):
+ output = self.run_single(driver, self.test_path(), time_out_ms)
+ self._filter_output(output)
+ if self.run_failed(output):
+ return False
+
+ current_metric = None
+ for line in re.split('\n', output.text):
+ description_match = self._description_regex.match(line)
+ metric_match = self._metrics_regex.match(line)
+ score = self._score_regex.match(line)
+ console_match = self._console_regex.match(line)
+
+ if description_match:
+ self._description = description_match.group('description')
+ elif metric_match:
+ current_metric = metric_match.group('metric').replace(' ', '')
+ elif score:
+ if score.group('key') != 'values':
+ continue
+
+ metric = self._ensure_metrics(current_metric, score.group('unit'))
+ metric.append_group([float(value) for value in score.group('value').split(', ')])
+ elif console_match:
+ # Ignore console messages such as deprecation warnings.
+ continue
+ else:
+ _log.error('ERROR: ' + line)
+ return False
+
+ return True
+
+ def _ensure_metrics(self, metric_name, unit=None):
+ if metric_name not in self._metrics:
+ self._metrics[metric_name] = PerfTestMetric(metric_name, unit)
+ self._ordered_metrics_name.append(metric_name)
+ return self._metrics[metric_name]
+
+ def run_single(self, driver, test_path, time_out_ms, should_run_pixel_test=False):
+ return driver.run_test(DriverInput(test_path, time_out_ms, image_hash=None, should_run_pixel_test=should_run_pixel_test, args=[]), stop_when_done=False)
+
+ def run_failed(self, output):
+ if output.error:
+ _log.error('error: %s\n%s' % (self.test_name(), output.error))
+
+ if output.text is None:
+ pass
+ elif output.timeout:
+ _log.error('timeout: %s' % self.test_name())
+ elif output.crash:
+ _log.error('crash: %s' % self.test_name())
+ else:
+ return False
+
+ return True
+
+ @staticmethod
+ def _should_ignore_line(regexps, line):
+ if not line:
+ return True
+ for regexp in regexps:
+ if regexp.search(line):
+ return True
+ return False
+
+ _lines_to_ignore_in_stderr = [
+ re.compile(r'^Unknown option:'),
+ re.compile(r'^\[WARNING:proxy_service.cc'),
+ re.compile(r'^\[INFO:'),
+ # These stderr messages come from content_shell on Linux.
+ re.compile(r'INFO:SkFontHost_fontconfig.cpp'),
+ re.compile(r'Running without the SUID sandbox'),
+ # crbug.com/345229
+ re.compile(r'InitializeSandbox\(\) called with multiple threads in process gpu-process')]
+
+ _lines_to_ignore_in_parser_result = [
+ re.compile(r'^\s*Running \d+ times$'),
+ re.compile(r'^\s*Ignoring warm-up '),
+ re.compile(r'^\s*Info:'),
+ re.compile(r'^\s*\d+(.\d+)?(\s*(runs\/s|ms|fps))?$'),
+ # The following handle existing tests such as Dromaeo.
+ re.compile(re.escape("""main frame - has 1 onunload handler(s)""")),
+ re.compile(re.escape("""frame "<!--framePath //<!--frame0-->-->" - has 1 onunload handler(s)""")),
+ re.compile(re.escape("""frame "<!--framePath //<!--frame0-->/<!--frame0-->-->" - has 1 onunload handler(s)""")),
+ # The following is for html5.html.
+ re.compile(re.escape("""Blocked access to external URL http://www.whatwg.org/specs/web-apps/current-work/""")),
+ re.compile(r"CONSOLE MESSAGE: (line \d+: )?Blocked script execution in '[A-Za-z0-9\-\.:]+' because the document's frame is sandboxed and the 'allow-scripts' permission is not set."),
+ re.compile(r"CONSOLE MESSAGE: (line \d+: )?Not allowed to load local resource"),
+ # Dromaeo reports values for subtests. Ignore them for now.
+ re.compile(r'(?P<name>.+): \[(?P<values>(\d+(.\d+)?,\s+)*\d+(.\d+)?)\]'),
+ ]
+
+ def _filter_output(self, output):
+ if output.error:
+ output.error = '\n'.join([line for line in re.split('\n', output.error) if not self._should_ignore_line(self._lines_to_ignore_in_stderr, line)])
+ if output.text:
+ output.text = '\n'.join([line for line in re.split('\n', output.text) if not self._should_ignore_line(self._lines_to_ignore_in_parser_result, line)])
+
+
+class SingleProcessPerfTest(PerfTest):
+ def __init__(self, port, test_name, test_path, test_runner_count=1):
+ super(SingleProcessPerfTest, self).__init__(port, test_name, test_path, test_runner_count)
+
+
+class ChromiumStylePerfTest(PerfTest):
+ _chromium_style_result_regex = re.compile(r'^RESULT\s+(?P<name>[^=]+)\s*=\s+(?P<value>\d+(\.\d+)?)\s*(?P<unit>\w+)$')
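+ # Matches Chromium-style result lines such as "RESULT group_name: test_name= 42 ms".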
+
+ def __init__(self, port, test_name, test_path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT):
+ super(ChromiumStylePerfTest, self).__init__(port, test_name, test_path, test_runner_count)
+
+ def run(self, time_out_ms):
+ driver = self._create_driver()
+ try:
+ output = self.run_single(driver, self.test_path(), time_out_ms)
+ finally:
+ driver.stop()
+
+ self._filter_output(output)
+ if self.run_failed(output):
+ return None
+
+ return self.parse_and_log_output(output)
+
+ def parse_and_log_output(self, output):
+ test_failed = False
+ results = {}
+ for line in re.split('\n', output.text):
+ resultLine = ChromiumStylePerfTest._chromium_style_result_regex.match(line)
+ if resultLine:
+ # FIXME: Store the unit
+ results[resultLine.group('name').replace(' ', '')] = float(resultLine.group('value'))
+ _log.info(line)
+ elif not len(line) == 0:
+ test_failed = True
+ _log.error(line)
+ return results if results and not test_failed else None
+
+
+class PerfTestFactory(object):
+
+ _pattern_map = [
+ (re.compile(r'^Dromaeo/'), SingleProcessPerfTest),
+ (re.compile(r'^inspector/'), ChromiumStylePerfTest),
+ ]
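+
+ # For example, a (hypothetical) 'Dromaeo/dom.html' maps to SingleProcessPerfTest
+ # and 'inspector/timeline.html' to ChromiumStylePerfTest; any other path gets
+ # the generic PerfTest.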
+
+ @classmethod
+ def create_perf_test(cls, port, test_name, path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT):
+ for (pattern, test_class) in cls._pattern_map:
+ if pattern.match(test_name):
+ return test_class(port, test_name, path, test_runner_count)
+ return PerfTest(port, test_name, path, test_runner_count)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
new file mode 100644
index 0000000..7da2e1e
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
@@ -0,0 +1,241 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import json
+import math
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.port.driver import DriverOutput
+from webkitpy.layout_tests.port.test import TestDriver
+from webkitpy.layout_tests.port.test import TestPort
+from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
+from webkitpy.performance_tests.perftest import PerfTest
+from webkitpy.performance_tests.perftest import PerfTestMetric
+from webkitpy.performance_tests.perftest import PerfTestFactory
+from webkitpy.performance_tests.perftest import SingleProcessPerfTest
+
+
+class MockPort(TestPort):
+ def __init__(self, custom_run_test=None):
+ super(MockPort, self).__init__(host=MockHost(), custom_run_test=custom_run_test)
+
+
+class TestPerfTestMetric(unittest.TestCase):
+ def test_init_set_missing_unit(self):
+ self.assertEqual(PerfTestMetric('Time', iterations=[1, 2, 3, 4, 5]).unit(), 'ms')
+ self.assertEqual(PerfTestMetric('Malloc', iterations=[1, 2, 3, 4, 5]).unit(), 'bytes')
+ self.assertEqual(PerfTestMetric('JSHeap', iterations=[1, 2, 3, 4, 5]).unit(), 'bytes')
+
+ def test_init_set_time_metric(self):
+ self.assertEqual(PerfTestMetric('Time', 'ms').name(), 'Time')
+ self.assertEqual(PerfTestMetric('Time', 'fps').name(), 'FrameRate')
+ self.assertEqual(PerfTestMetric('Time', 'runs/s').name(), 'Runs')
+
+ def test_has_values(self):
+ self.assertFalse(PerfTestMetric('Time').has_values())
+ self.assertTrue(PerfTestMetric('Time', iterations=[1]).has_values())
+
+ def test_append(self):
+ metric = PerfTestMetric('Time')
+ metric2 = PerfTestMetric('Time')
+ self.assertFalse(metric.has_values())
+ self.assertFalse(metric2.has_values())
+
+ metric.append_group([1])
+ self.assertTrue(metric.has_values())
+ self.assertFalse(metric2.has_values())
+ self.assertEqual(metric.grouped_iteration_values(), [[1]])
+ self.assertEqual(metric.flattened_iteration_values(), [1])
+
+ metric.append_group([2])
+ self.assertEqual(metric.grouped_iteration_values(), [[1], [2]])
+ self.assertEqual(metric.flattened_iteration_values(), [1, 2])
+
+ metric2.append_group([3])
+ self.assertTrue(metric2.has_values())
+ self.assertEqual(metric.flattened_iteration_values(), [1, 2])
+ self.assertEqual(metric2.flattened_iteration_values(), [3])
+
+ metric.append_group([4, 5])
+ self.assertEqual(metric.grouped_iteration_values(), [[1], [2], [4, 5]])
+ self.assertEqual(metric.flattened_iteration_values(), [1, 2, 4, 5])
+
+
+class TestPerfTest(unittest.TestCase):
+ def _assert_results_are_correct(self, test, output):
+ test.run_single = lambda driver, path, time_out_ms: output
+ self.assertTrue(test._run_with_driver(None, None))
+ self.assertEqual(test._metrics.keys(), ['Time'])
+ self.assertEqual(test._metrics['Time'].flattened_iteration_values(), [1080, 1120, 1095, 1101, 1104])
+
+ def test_parse_output(self):
+ output = DriverOutput("""
+Running 20 times
+Ignoring warm-up run (1115)
+
+Time:
+values 1080, 1120, 1095, 1101, 1104 ms
+avg 1100 ms
+median 1101 ms
+stdev 14.50862 ms
+min 1080 ms
+max 1120 ms
+""", image=None, image_hash=None, audio=None)
+ output_capture = OutputCapture()
+ output_capture.capture_output()
+ try:
+ test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
+ self._assert_results_are_correct(test, output)
+ finally:
+ actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+ self.assertEqual(actual_stdout, '')
+ self.assertEqual(actual_stderr, '')
+ self.assertEqual(actual_logs, '')
+
+ def test_parse_output_with_failing_line(self):
+ output = DriverOutput("""
+Running 20 times
+Ignoring warm-up run (1115)
+
+some-unrecognizable-line
+
+Time:
+values 1080, 1120, 1095, 1101, 1104 ms
+avg 1100 ms
+median 1101 ms
+stdev 14.50862 ms
+min 1080 ms
+max 1120 ms
+""", image=None, image_hash=None, audio=None)
+ output_capture = OutputCapture()
+ output_capture.capture_output()
+ try:
+ test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
+ test.run_single = lambda driver, path, time_out_ms: output
+ self.assertFalse(test._run_with_driver(None, None))
+ finally:
+ actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+ self.assertEqual(actual_stdout, '')
+ self.assertEqual(actual_stderr, '')
+ self.assertEqual(actual_logs, 'ERROR: some-unrecognizable-line\n')
+
+ def test_parse_output_with_description(self):
+ output = DriverOutput("""
+Description: this is a test description.
+
+Running 20 times
+Ignoring warm-up run (1115)
+
+Time:
+values 1080, 1120, 1095, 1101, 1104 ms
+avg 1100 ms
+median 1101 ms
+stdev 14.50862 ms
+min 1080 ms
+max 1120 ms""", image=None, image_hash=None, audio=None)
+ test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
+ self._assert_results_are_correct(test, output)
+ self.assertEqual(test.description(), 'this is a test description.')
+
+ def test_ignored_stderr_lines(self):
+ test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
+ output_with_lines_to_ignore = DriverOutput('', image=None, image_hash=None, audio=None, error="""
+Unknown option: --foo-bar
+Should not be ignored
+[WARNING:proxy_service.cc] bad moon a-rising
+[WARNING:chrome.cc] Something went wrong
+[INFO:SkFontHost_android.cpp(1158)] Use Test Config File Main /data/local/tmp/drt/android_main_fonts.xml, Fallback /data/local/tmp/drt/android_fallback_fonts.xml, Font Dir /data/local/tmp/drt/fonts/
+[ERROR:main.cc] The sky has fallen""")
+ test._filter_output(output_with_lines_to_ignore)
+ self.assertEqual(output_with_lines_to_ignore.error,
+ "Should not be ignored\n"
+ "[WARNING:chrome.cc] Something went wrong\n"
+ "[ERROR:main.cc] The sky has fallen")
+
+ def test_parse_output_with_subtests(self):
+ output = DriverOutput("""
+Running 20 times
+some test: [1, 2, 3, 4, 5]
+other test = else: [6, 7, 8, 9, 10]
+Ignoring warm-up run (1115)
+
+Time:
+values 1080, 1120, 1095, 1101, 1104 ms
+avg 1100 ms
+median 1101 ms
+stdev 14.50862 ms
+min 1080 ms
+max 1120 ms
+""", image=None, image_hash=None, audio=None)
+ output_capture = OutputCapture()
+ output_capture.capture_output()
+ try:
+ test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
+ self._assert_results_are_correct(test, output)
+ finally:
+ actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+ self.assertEqual(actual_stdout, '')
+ self.assertEqual(actual_stderr, '')
+ self.assertEqual(actual_logs, '')
+
+
+class TestSingleProcessPerfTest(unittest.TestCase):
+ def test_use_only_one_process(self):
+ called = [0]
+
+ def run_single(driver, path, time_out_ms):
+ called[0] += 1
+ return DriverOutput("""
+Running 20 times
+Ignoring warm-up run (1115)
+
+Time:
+values 1080, 1120, 1095, 1101, 1104 ms
+avg 1100 ms
+median 1101 ms
+stdev 14.50862 ms
+min 1080 ms
+max 1120 ms""", image=None, image_hash=None, audio=None)
+
+ test = SingleProcessPerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
+ test.run_single = run_single
+ self.assertTrue(test.run(0))
+ self.assertEqual(called[0], 1)
+
+
+class TestPerfTestFactory(unittest.TestCase):
+ def test_regular_test(self):
+ test = PerfTestFactory.create_perf_test(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test')
+ self.assertEqual(test.__class__, PerfTest)
+
+ def test_inspector_test(self):
+ test = PerfTestFactory.create_perf_test(MockPort(), 'inspector/some-test', '/path/inspector/some-test')
+ self.assertEqual(test.__class__, ChromiumStylePerfTest)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py b/src/third_party/blink/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
new file mode 100644
index 0000000..2b2b2d0
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
@@ -0,0 +1,378 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Run Inspector's perf tests in perf mode."""
+
+import os
+import json
+import logging
+import optparse
+import time
+import datetime
+
+from webkitpy.common import find_files
+from webkitpy.common.checkout.scm.detection import SCMDetector
+from webkitpy.common.config.urls import view_source_url
+from webkitpy.common.host import Host
+from webkitpy.common.net.file_uploader import FileUploader
+from webkitpy.performance_tests.perftest import PerfTestFactory
+from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT
+
+
+_log = logging.getLogger(__name__)
+
+
+class PerfTestsRunner(object):
+ _default_branch = 'webkit-trunk'
+ EXIT_CODE_BAD_BUILD = -1
+ EXIT_CODE_BAD_SOURCE_JSON = -2
+ EXIT_CODE_BAD_MERGE = -3
+ EXIT_CODE_FAILED_UPLOADING = -4
+ EXIT_CODE_BAD_PREPARATION = -5
+
+ _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'
+
+ def __init__(self, args=None, port=None):
+ self._options, self._args = PerfTestsRunner._parse_args(args)
+ if port:
+ self._port = port
+ self._host = self._port.host
+ else:
+ self._host = Host()
+ self._port = self._host.port_factory.get(self._options.platform, self._options)
+ self._host.initialize_scm()
+ self._webkit_base_dir_len = len(self._port.webkit_base())
+ self._base_path = self._port.perf_tests_dir()
+ self._timestamp = time.time()
+ self._utc_timestamp = datetime.datetime.utcnow()
+
+ @staticmethod
+ def _parse_args(args=None):
+ def _expand_path(option, opt_str, value, parser):
+ path = os.path.expandvars(os.path.expanduser(value))
+ setattr(parser.values, option.dest, path)
+ perf_option_list = [
+ optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
+ help='Set the configuration to Debug'),
+ optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
+ help='Set the configuration to Release'),
+ optparse.make_option("--platform",
+ help="Specify port/platform being tested (e.g. mac)"),
+ optparse.make_option("--chromium",
+ action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
+ optparse.make_option("--android",
+ action="store_const", const='android', dest='platform', help='Alias for --platform=android'),
+ optparse.make_option("--builder-name",
+ help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
+ optparse.make_option("--build-number",
+ help=("The build number of the builder running this script.")),
+ optparse.make_option("--build", dest="build", action="store_true", default=True,
+ help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
+ optparse.make_option("--no-build", dest="build", action="store_false",
+ help="Don't check to see if the DumpRenderTree build is up-to-date."),
+ optparse.make_option("--build-directory",
+ help="Path to the directory under which build files are kept (should not include configuration)"),
+ optparse.make_option("--time-out-ms", default=600 * 1000,
+ help="Set the timeout for each test"),
+ optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
+ help="Do no generate results JSON and results page."),
+ optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
+ help="Path to generate a JSON file at; may contain previous results if it already exists."),
+ optparse.make_option("--reset-results", action="store_true",
+ help="Clears the content in the generated JSON file before adding the results."),
+ optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
+ help="Only used on bots. Path to a slave configuration file."),
+ optparse.make_option("--description",
+ help="Add a description to the output JSON file if one is generated"),
+ optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
+ help="Don't launch a browser with results after the tests are done"),
+ optparse.make_option("--test-results-server",
+ help="Upload the generated JSON file to the specified server when --output-json-path is present."),
+ optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
+ help="Run all tests, including the ones in the Skipped list."),
+ optparse.make_option("--profile", action="store_true",
+ help="Output per-test profile information."),
+ optparse.make_option("--profiler", action="store",
+ help="Output per-test profile information, using the specified profiler."),
+ optparse.make_option("--additional-drt-flag", action="append",
+ default=[], help="Additional command line flag to pass to DumpRenderTree. "
+ "Specify multiple times to add multiple flags."),
+ optparse.make_option("--driver-name", type="string",
+ help="Alternative DumpRenderTree binary to use"),
+ optparse.make_option("--content-shell", action="store_true",
+ help="Use Content Shell instead of DumpRenderTree"),
+ optparse.make_option("--repeat", default=1, type="int",
+ help="Specify number of times to run test set (default: 1)."),
+ optparse.make_option("--test-runner-count", default=DEFAULT_TEST_RUNNER_COUNT, type="int",
+ help="Specify number of times to invoke test runner for each performance test."),
+ ]
+ return optparse.OptionParser(option_list=perf_option_list).parse_args(args)
+
+ def _collect_tests(self):
+ test_extensions = ['.html', '.svg']
+
+ def _is_test_file(filesystem, dirname, filename):
+ return filesystem.splitext(filename)[1] in test_extensions
+
+ filesystem = self._host.filesystem
+
+ paths = []
+ for arg in self._args:
+ if filesystem.exists(filesystem.join(self._base_path, arg)):
+ paths.append(arg)
+ else:
+ relpath = filesystem.relpath(arg, self._base_path)
+ if filesystem.exists(filesystem.join(self._base_path, relpath)):
+ paths.append(filesystem.normpath(relpath))
+ else:
+ _log.warn('Path was not found: ' + arg)
+
+ skipped_directories = set(['.svn', 'resources'])
+ test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
+ tests = []
+ for path in test_files:
+ relative_path = filesystem.relpath(path, self._base_path).replace('\\', '/')
+ if self._options.use_skipped_list and self._port.skips_perf_test(relative_path) and filesystem.normpath(relative_path) not in paths:
+ continue
+ test = PerfTestFactory.create_perf_test(self._port, relative_path, path, test_runner_count=self._options.test_runner_count)
+ tests.append(test)
+
+ return tests
+
+ def _start_http_servers(self):
+ self._port.acquire_http_lock()
+ self._port.start_http_server(number_of_servers=2)
+
+ def _stop_http_servers(self):
+ self._port.stop_http_server()
+ self._port.release_http_lock()
+
+ def run(self):
+ needs_http = self._port.requires_http_server()
+
+ class FakePrinter(object):
+ def write_update(self, msg):
+ print msg
+
+ def write_throttled_update(self, msg):
+ pass
+
+ if self._port.check_build(needs_http=needs_http, printer=FakePrinter()):
+ _log.error("Build not up to date for %s" % self._port._path_to_driver())
+ return self.EXIT_CODE_BAD_BUILD
+
+ run_count = 0
+ repeat = self._options.repeat
+ while run_count < repeat:
+ run_count += 1
+
+ tests = self._collect_tests()
+ runs = ' (Run %d of %d)' % (run_count, repeat) if repeat > 1 else ''
+ _log.info("Running %d tests%s" % (len(tests), runs))
+
+ for test in tests:
+ if not test.prepare(self._options.time_out_ms):
+ return self.EXIT_CODE_BAD_PREPARATION
+
+ try:
+ if needs_http:
+ self._start_http_servers()
+ unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()))
+
+ finally:
+ if needs_http:
+ self._stop_http_servers()
+
+ if self._options.generate_results and not self._options.profile:
+ exit_code = self._generate_results()
+ if exit_code:
+ return exit_code
+
+ test_results_server = self._options.test_results_server
+ if test_results_server and not self._upload_json(test_results_server, self._output_json_path()):
+ return self.EXIT_CODE_FAILED_UPLOADING
+
+ if self._options.show_results:
+ self._port.show_results_html_file(self._results_page_path())
+
+ return unexpected
+
+ def _output_json_path(self):
+ output_json_path = self._options.output_json_path
+ if output_json_path:
+ return output_json_path
+ return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)
+
+ def _results_page_path(self):
+ return self._host.filesystem.splitext(self._output_json_path())[0] + '.html'
+
+ def _generate_results(self):
+ options = self._options
+ output_json_path = self._output_json_path()
+ output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)
+
+ if options.slave_config_json_path:
+ output = self._merge_slave_config_json(options.slave_config_json_path, output)
+ if not output:
+ return self.EXIT_CODE_BAD_SOURCE_JSON
+
+ output = self._merge_outputs_if_needed(output_json_path, output)
+ if not output:
+ return self.EXIT_CODE_BAD_MERGE
+
+ filesystem = self._host.filesystem
+ json_output = json.dumps(output)
+ filesystem.write_text_file(output_json_path, json_output)
+
+ template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
+ template = filesystem.read_text_file(template_path)
+
+ absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
+ results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
+ results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)
+
+ filesystem.write_text_file(self._results_page_path(), results_page)
+
+ def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
+ revisions = {}
+ for (name, path) in self._port.repository_paths():
+ scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
+ revision = scm.svn_revision(path)
+ revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}
+
+ meta_info = {
+ 'description': description,
+ 'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
+ 'platform': platform,
+ 'revisions': revisions,
+ 'builderName': builder_name,
+ 'buildNumber': int(build_number) if build_number else None}
+
+ contents = {'tests': {}}
+ for key, value in meta_info.items():
+ if value:
+ contents[key] = value
+
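+ # Nest each test under its path components: e.g. a test named
+ # 'Parser/some-parser' ends up at contents['tests']['Parser']['tests']['some-parser'],
+ # with its metrics attached to the leaf node.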
+ for test, metrics in self._results:
+ for metric_name, iteration_values in metrics.iteritems():
+ if not isinstance(iteration_values, list): # We can't report results without individual measurements.
+ continue
+
+ tests = contents['tests']
+ path = test.test_name_without_file_extension().split('/')
+ for i in range(0, len(path)):
+ is_last_token = i + 1 == len(path)
+ url = view_source_url('PerformanceTests/' + (test.test_name() if is_last_token else '/'.join(path[0:i + 1])))
+ tests.setdefault(path[i], {'url': url})
+ current_test = tests[path[i]]
+ if is_last_token:
+ current_test.setdefault('metrics', {})
+ assert metric_name not in current_test['metrics']
+ current_test['metrics'][metric_name] = {'current': iteration_values}
+ else:
+ current_test.setdefault('tests', {})
+ tests = current_test['tests']
+
+ return contents
+
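+ # For example, a (hypothetical) build time of datetime(2016, 7, 27, 12, 30, 5, 123456)
+ # is formatted below as '2016-07-27T12:30:05.123456'.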
+ @staticmethod
+ def _datetime_in_ES5_compatible_iso_format(datetime):
+ return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')
+
+ def _merge_slave_config_json(self, slave_config_json_path, contents):
+ if not self._host.filesystem.isfile(slave_config_json_path):
+ _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
+ return None
+
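+ # A slave configuration such as {"gpu": "nvidia"} (hypothetical) is folded
+ # into the output as contents['builderGpu'] = 'nvidia'.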
+ try:
+ slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
+ slave_config = json.load(slave_config_json)
+ for key in slave_config:
+ contents['builder' + key.capitalize()] = slave_config[key]
+ return contents
+ except Exception, error:
+ _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
+ return None
+
+ def _merge_outputs_if_needed(self, output_json_path, output):
+ if self._options.reset_results or not self._host.filesystem.isfile(output_json_path):
+ return [output]
+ try:
+ existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
+ return existing_outputs + [output]
+ except Exception, error:
+ _log.error("Failed to merge output JSON file %s: %s" % (output_json_path, error))
+ return None
+
+ def _upload_json(self, test_results_server, json_path, host_path="/api/report", file_uploader=FileUploader):
+ url = "https://%s%s" % (test_results_server, host_path)
+ uploader = file_uploader(url, 120)
+ try:
+ response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
+ except Exception, error:
+ _log.error("Failed to upload JSON file to %s in 120s: %s" % (url, error))
+ return False
+
+ response_body = [line.strip('\n') for line in response]
+ if response_body != ['OK']:
+ try:
+ parsed_response = json.loads('\n'.join(response_body))
+ except:
+ _log.error("Uploaded JSON to %s but got a bad response:" % url)
+ for line in response_body:
+ _log.error(line)
+ return False
+ if parsed_response.get('status') != 'OK':
+ _log.error("Uploaded JSON to %s but got an error:" % url)
+ _log.error(json.dumps(parsed_response, indent=4))
+ return False
+
+ _log.info("JSON file uploaded to %s." % url)
+ return True
+
+ def _run_tests_set(self, tests):
+ result_count = len(tests)
+ failures = 0
+ self._results = []
+
+ for i, test in enumerate(tests):
+ _log.info('Running %s (%d of %d)' % (test.test_name(), i + 1, len(tests)))
+ start_time = time.time()
+ metrics = test.run(self._options.time_out_ms)
+ if metrics:
+ self._results.append((test, metrics))
+ else:
+ failures += 1
+ _log.error('FAILED')
+
+ _log.info('Finished: %f s' % (time.time() - start_time))
+ _log.info('')
+
+ return failures
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
new file mode 100644
index 0000000..1f1db79
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
@@ -0,0 +1,763 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for run_perf_tests."""
+
+import StringIO
+import datetime
+import json
+import re
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.port.driver import DriverOutput
+from webkitpy.layout_tests.port.test import TestPort
+from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
+from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT
+from webkitpy.performance_tests.perftest import PerfTest
+from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
+
+
+class MainTest(unittest.TestCase):
+ def create_runner(self, args=None):
+ args = args or []
+ options, parsed_args = PerfTestsRunner._parse_args(args)
+ test_port = TestPort(host=MockHost(), options=options)
+ runner = PerfTestsRunner(args=args, port=test_port)
+ runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
+ runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
+ runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
+ return runner, test_port
+
+ def _add_file(self, runner, dirname, filename, content=True):
+ dirname = runner._host.filesystem.join(runner._base_path, dirname) if dirname else runner._base_path
+ runner._host.filesystem.maybe_make_directory(dirname)
+ runner._host.filesystem.files[runner._host.filesystem.join(dirname, filename)] = content
+
+ def test_collect_tests(self):
+ runner, port = self.create_runner()
+ self._add_file(runner, 'inspector', 'a_file.html', 'a content')
+ tests = runner._collect_tests()
+ self.assertEqual(len(tests), 1)
+
+ def _collect_tests_and_sort_test_name(self, runner):
+ return sorted([test.test_name() for test in runner._collect_tests()])
+
+ def test_collect_tests_with_multiple_files(self):
+ runner, port = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html'])
+
+ def add_file(filename):
+ port.host.filesystem.files[runner._host.filesystem.join(runner._base_path, filename)] = 'some content'
+
+ add_file('test1.html')
+ add_file('test2.html')
+ add_file('test3.html')
+ port.host.filesystem.chdir(runner._port.perf_tests_dir()[:runner._port.perf_tests_dir().rfind(runner._host.filesystem.sep)])
+ self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])
+
+ def test_collect_tests_with_skipped_list(self):
+ runner, port = self.create_runner()
+
+ self._add_file(runner, 'inspector', 'test1.html')
+ self._add_file(runner, 'inspector', 'unsupported_test1.html')
+ self._add_file(runner, 'inspector', 'test2.html')
+ self._add_file(runner, 'inspector/resources', 'resource_file.html')
+ self._add_file(runner, 'unsupported', 'unsupported_test2.html')
+ port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
+ self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])
+
+ def test_collect_tests_with_skipped_list_and_files(self):
+ runner, port = self.create_runner(args=['Suite/Test1.html', 'Suite/SkippedTest1.html', 'SkippedSuite/Test1.html'])
+
+ self._add_file(runner, 'SkippedSuite', 'Test1.html')
+ self._add_file(runner, 'SkippedSuite', 'Test2.html')
+ self._add_file(runner, 'Suite', 'Test1.html')
+ self._add_file(runner, 'Suite', 'Test2.html')
+ self._add_file(runner, 'Suite', 'SkippedTest1.html')
+ self._add_file(runner, 'Suite', 'SkippedTest2.html')
+ port.skipped_perf_tests = lambda: ['Suite/SkippedTest1.html', 'Suite/SkippedTest1.html', 'SkippedSuite']
+ self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner),
+ ['SkippedSuite/Test1.html', 'Suite/SkippedTest1.html', 'Suite/Test1.html'])
+
+ def test_collect_tests_with_ignored_skipped_list(self):
+ runner, port = self.create_runner(args=['--force'])
+
+ self._add_file(runner, 'inspector', 'test1.html')
+ self._add_file(runner, 'inspector', 'unsupported_test1.html')
+ self._add_file(runner, 'inspector', 'test2.html')
+ self._add_file(runner, 'inspector/resources', 'resource_file.html')
+ self._add_file(runner, 'unsupported', 'unsupported_test2.html')
+ port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
+ self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html', 'inspector/unsupported_test1.html', 'unsupported/unsupported_test2.html'])
+
+ def test_default_args(self):
+ runner, port = self.create_runner()
+ options, args = PerfTestsRunner._parse_args([])
+ self.assertTrue(options.build)
+ self.assertEqual(options.time_out_ms, 600 * 1000)
+ self.assertTrue(options.generate_results)
+ self.assertTrue(options.show_results)
+ self.assertTrue(options.use_skipped_list)
+ self.assertEqual(options.repeat, 1)
+ self.assertEqual(options.test_runner_count, DEFAULT_TEST_RUNNER_COUNT)
+
+ def test_parse_args(self):
+ runner, port = self.create_runner()
+ options, args = PerfTestsRunner._parse_args([
+ '--build-directory=folder42',
+ '--platform=platform42',
+ '--builder-name', 'webkit-mac-1',
+ '--build-number=56',
+ '--time-out-ms=42',
+ '--no-show-results',
+ '--reset-results',
+ '--output-json-path=a/output.json',
+ '--slave-config-json-path=a/source.json',
+ '--test-results-server=somehost',
+ '--additional-drt-flag=--enable-threaded-parser',
+ '--additional-drt-flag=--awesomesauce',
+ '--repeat=5',
+ '--test-runner-count=5',
+ '--debug'])
+ self.assertTrue(options.build)
+ self.assertEqual(options.build_directory, 'folder42')
+ self.assertEqual(options.platform, 'platform42')
+ self.assertEqual(options.builder_name, 'webkit-mac-1')
+ self.assertEqual(options.build_number, '56')
+ self.assertEqual(options.time_out_ms, '42')
+ self.assertEqual(options.configuration, 'Debug')
+ self.assertFalse(options.show_results)
+ self.assertTrue(options.reset_results)
+ self.assertEqual(options.output_json_path, 'a/output.json')
+ self.assertEqual(options.slave_config_json_path, 'a/source.json')
+ self.assertEqual(options.test_results_server, 'somehost')
+ self.assertEqual(options.additional_drt_flag, ['--enable-threaded-parser', '--awesomesauce'])
+ self.assertEqual(options.repeat, 5)
+ self.assertEqual(options.test_runner_count, 5)
+
+ def test_upload_json(self):
+ runner, port = self.create_runner()
+ port.host.filesystem.files['/mock-checkout/some.json'] = 'some content'
+
+ class MockFileUploader:
+ called = []
+ upload_single_text_file_throws = False
+ upload_single_text_file_return_value = None
+
+ @classmethod
+ def reset(cls):
+ cls.called = []
+ cls.upload_single_text_file_throws = False
+ cls.upload_single_text_file_return_value = None
+
+ def __init__(mock, url, timeout):
+ self.assertEqual(url, 'https://some.host/some/path')
+ self.assertTrue(isinstance(timeout, int) and timeout)
+ mock.called.append('FileUploader')
+
+ def upload_single_text_file(mock, filesystem, content_type, filename):
+ self.assertEqual(filesystem, port.host.filesystem)
+ self.assertEqual(content_type, 'application/json')
+ self.assertEqual(filename, 'some.json')
+ mock.called.append('upload_single_text_file')
+ if mock.upload_single_text_file_throws:
+ raise Exception
+ return mock.upload_single_text_file_return_value
+
+ MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('OK')
+ self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
+ self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])
+
+ MockFileUploader.reset()
+ MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('Some error')
+ output = OutputCapture()
+ output.capture_output()
+ self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
+ _, _, logs = output.restore_output()
+ self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got a bad response:\nSome error\n')
+
+ # An exception thrown by upload_single_text_file shouldn't blow up _upload_json.
+ MockFileUploader.reset()
+ MockFileUploader.upload_single_text_file_throws = True
+ self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
+ self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])
+
+ MockFileUploader.reset()
+ MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "OK"}')
+ self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
+ self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])
+
+ MockFileUploader.reset()
+ MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "SomethingHasFailed", "failureStored": false}')
+ output = OutputCapture()
+ output.capture_output()
+ self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
+ _, _, logs = output.restore_output()
+ serialized_json = json.dumps({'status': 'SomethingHasFailed', 'failureStored': False}, indent=4)
+ self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got an error:\n%s\n' % serialized_json)
+
+
+class InspectorPassTestData:
+ text = 'RESULT group_name: test_name= 42 ms'
+ output = """Running inspector/pass.html (2 of 2)
+RESULT group_name: test_name= 42 ms
+Finished: 0.1 s
+
+"""
+
+
+class EventTargetWrapperTestData:
+ text = """Running 20 times
+Ignoring warm-up run (1502)
+1504
+1505
+1510
+1504
+1507
+1509
+1510
+1487
+1488
+1472
+1472
+1488
+1473
+1472
+1475
+1487
+1486
+1486
+1475
+1471
+
+Time:
+values 1486, 1471, 1510, 1505, 1478, 1490 ms
+avg 1490 ms
+median 1488 ms
+stdev 15.13935 ms
+min 1471 ms
+max 1510 ms
+"""
+
+ output = """Running Bindings/event-target-wrapper.html (1 of 2)
+RESULT Bindings: event-target-wrapper: Time= 1490.0 ms
+median= 1488.0 ms, stdev= 14.11751 ms, min= 1471.0 ms, max= 1510.0 ms
+Finished: 0.1 s
+
+"""
+
+ results = {'url': 'https://src.chromium.org/viewvc/blink/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
+ 'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}}
+
+
+class SomeParserTestData:
+ text = """Running 20 times
+Ignoring warm-up run (1115)
+
+Time:
+values 1080, 1120, 1095, 1101, 1104 ms
+avg 1100 ms
+median 1101 ms
+stdev 14.50861 ms
+min 1080 ms
+max 1120 ms
+"""
+
+ output = """Running Parser/some-parser.html (2 of 2)
+RESULT Parser: some-parser: Time= 1100.0 ms
+median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
+Finished: 0.1 s
+
+"""
+
+
+class MemoryTestData:
+ text = """Running 20 times
+Ignoring warm-up run (1115)
+
+Time:
+values 1080, 1120, 1095, 1101, 1104 ms
+avg 1100 ms
+median 1101 ms
+stdev 14.50861 ms
+min 1080 ms
+max 1120 ms
+
+JS Heap:
+values 825000, 811000, 848000, 837000, 829000 bytes
+avg 830000 bytes
+median 829000 bytes
+stdev 13784.04875 bytes
+min 811000 bytes
+max 848000 bytes
+
+Malloc:
+values 529000, 511000, 548000, 536000, 521000 bytes
+avg 529000 bytes
+median 529000 bytes
+stdev 14124.44689 bytes
+min 511000 bytes
+max 548000 bytes
+"""
+
+ output = """Running 1 tests
+Running Parser/memory-test.html (1 of 1)
+RESULT Parser: memory-test: Time= 1100.0 ms
+median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
+RESULT Parser: memory-test: JSHeap= 830000.0 bytes
+median= 829000.0 bytes, stdev= 12649.11064 bytes, min= 811000.0 bytes, max= 848000.0 bytes
+RESULT Parser: memory-test: Malloc= 529000.0 bytes
+median= 529000.0 bytes, stdev= 12961.48139 bytes, min= 511000.0 bytes, max= 548000.0 bytes
+Finished: 0.1 s
+"""
+
+ results = {'current': [[1080, 1120, 1095, 1101, 1104]] * 4}
+ js_heap_results = {'current': [[825000, 811000, 848000, 837000, 829000]] * 4}
+ malloc_results = {'current': [[529000, 511000, 548000, 536000, 521000]] * 4}
+
+
+class TestDriver:
+ def run_test(self, driver_input, stop_when_done):
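+ # Fake driver: maps well-known test names onto canned outputs so the
+ # runner's parsing, timeout, and crash paths can be exercised without
+ # launching a real DumpRenderTree process.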
+ text = ''
+ timeout = False
+ crash = False
+ if driver_input.test_name.endswith('pass.html'):
+ text = InspectorPassTestData.text
+ elif driver_input.test_name.endswith('timeout.html'):
+ timeout = True
+ elif driver_input.test_name.endswith('failed.html'):
+ text = None
+ elif driver_input.test_name.endswith('tonguey.html'):
+ text = 'we are not expecting an output from perf tests but RESULT blablabla'
+ elif driver_input.test_name.endswith('crash.html'):
+ crash = True
+ elif driver_input.test_name.endswith('event-target-wrapper.html'):
+ text = EventTargetWrapperTestData.text
+ elif driver_input.test_name.endswith('some-parser.html'):
+ text = SomeParserTestData.text
+ elif driver_input.test_name.endswith('memory-test.html'):
+ text = MemoryTestData.text
+ return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)
+
+ def start(self):
+ """do nothing"""
+
+ def stop(self):
+ """do nothing"""
+
+
+class IntegrationTest(unittest.TestCase):
+ def _normalize_output(self, log):
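+ # Make logs comparable against the fixed expectations above: truncate
+ # stdev values to five decimal places (e.g. 'stdev= 14.1175139' ->
+ # 'stdev= 14.11751') and replace the variable run time with '0.1 s'.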
+ return re.sub(r'(stdev=\s+\d+\.\d{5})\d+', r'\1', re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log))
+
+ def _load_output_json(self, runner):
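+ # Read back the generated JSON, truncating "stdev" values the same
+ # way as _normalize_output so they match the expected results.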
+ json_content = runner._host.filesystem.read_text_file(runner._output_json_path())
+ return json.loads(re.sub(r'("stdev":\s*\d+\.\d{5})\d+', r'\1', json_content))
+
+ def create_runner(self, args=[], driver_class=TestDriver):
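+ # Build a PerfTestsRunner against a TestPort/MockHost pair; the port's
+ # create_driver is stubbed so no real driver process is ever launched.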
+ options, parsed_args = PerfTestsRunner._parse_args(args)
+ test_port = TestPort(host=MockHost(), options=options)
+ test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()
+
+ runner = PerfTestsRunner(args=args, port=test_port)
+ runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
+ runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
+ runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
+
+ return runner, test_port
+
+ def run_test(self, test_name):
+ runner, port = self.create_runner()
+ tests = [ChromiumStylePerfTest(port, test_name, runner._host.filesystem.join('some-dir', test_name))]
+ return runner._run_tests_set(tests) == 0
+
+ def test_run_passing_test(self):
+ self.assertTrue(self.run_test('pass.html'))
+
+ def test_run_silent_test(self):
+ self.assertFalse(self.run_test('silent.html'))
+
+ def test_run_failed_test(self):
+ self.assertFalse(self.run_test('failed.html'))
+
+ def test_run_tonguey_test(self):
+ self.assertFalse(self.run_test('tonguey.html'))
+
+ def test_run_timeout_test(self):
+ self.assertFalse(self.run_test('timeout.html'))
+
+ def test_run_crash_test(self):
+ self.assertFalse(self.run_test('crash.html'))
+
+ def _tests_for_runner(self, runner, test_names):
+ filesystem = runner._host.filesystem
+ tests = []
+ for test in test_names:
+ path = filesystem.join(runner._base_path, test)
+ if test.startswith('inspector/'):
+ tests.append(ChromiumStylePerfTest(runner._port, test, path))
+ else:
+ tests.append(PerfTest(runner._port, test, path))
+ return tests
+
+ def test_run_test_set(self):
+ runner, port = self.create_runner()
+ tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
+ 'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
+ output = OutputCapture()
+ output.capture_output()
+ try:
+ unexpected_result_count = runner._run_tests_set(tests)
+ finally:
+ stdout, stderr, log = output.restore_output()
+ self.assertEqual(unexpected_result_count, len(tests) - 1)
+ self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)
+
+ def test_run_test_set_kills_drt_per_run(self):
+
+ class TestDriverWithStopCount(TestDriver):
+ stop_count = 0
+
+ def stop(self):
+ TestDriverWithStopCount.stop_count += 1
+
+ runner, port = self.create_runner(driver_class=TestDriverWithStopCount)
+
+ tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
+ 'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
+ unexpected_result_count = runner._run_tests_set(tests)
+
+ self.assertEqual(TestDriverWithStopCount.stop_count, 6)
+
+ def test_run_test_set_for_parser_tests(self):
+ runner, port = self.create_runner()
+ tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
+ output = OutputCapture()
+ output.capture_output()
+ try:
+ unexpected_result_count = runner._run_tests_set(tests)
+ finally:
+ stdout, stderr, log = output.restore_output()
+ self.assertEqual(unexpected_result_count, 0)
+ self.assertEqual(self._normalize_output(log), EventTargetWrapperTestData.output + SomeParserTestData.output)
+
+ def test_run_memory_test(self):
+ runner, port = self.create_runner_and_setup_results_template()
+ runner._timestamp = 123456789
+ port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')
+
+ output = OutputCapture()
+ output.capture_output()
+ try:
+ unexpected_result_count = runner.run()
+ finally:
+ stdout, stderr, log = output.restore_output()
+ self.assertEqual(unexpected_result_count, 0)
+ self.assertEqual(self._normalize_output(log), MemoryTestData.output + '\nMOCK: user.open_url: file://...\n')
+ parser_tests = self._load_output_json(runner)[0]['tests']['Parser']['tests']
+ self.assertEqual(parser_tests['memory-test']['metrics']['Time'], MemoryTestData.results)
+ self.assertEqual(parser_tests['memory-test']['metrics']['JSHeap'], MemoryTestData.js_heap_results)
+ self.assertEqual(parser_tests['memory-test']['metrics']['Malloc'], MemoryTestData.malloc_results)
+
+ def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=False, results_shown=True, expected_exit_code=0, repeat=1, compare_logs=True):
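+ # Shared helper: runs the two canned tests with _upload_json stubbed
+ # out, checks the exit code and (optionally) the captured logs, and
+ # returns the logs for further assertions.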
+ filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
+ filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')
+
+ uploaded = [False]
+
+ def mock_upload_json(hostname, json_path, host_path=None):
+ # FIXME: Get rid of the hard-coded perf.webkit.org once we've completed the transition.
+ self.assertIn(hostname, ['some.host'])
+ self.assertIn(json_path, ['/mock-checkout/output.json'])
+ self.assertIn(host_path, [None, '/api/report'])
+ uploaded[0] = upload_succeeds
+ return upload_succeeds
+
+ runner._upload_json = mock_upload_json
+ runner._timestamp = 123456789
+ runner._utc_timestamp = datetime.datetime(2013, 2, 8, 15, 19, 37, 460000)
+ output_capture = OutputCapture()
+ output_capture.capture_output()
+ try:
+ self.assertEqual(runner.run(), expected_exit_code)
+ finally:
+ stdout, stderr, logs = output_capture.restore_output()
+
+ if not expected_exit_code and compare_logs:
+ expected_logs = ''
+ for i in xrange(repeat):
+ runs = ' (Run %d of %d)' % (i + 1, repeat) if repeat > 1 else ''
+ expected_logs += 'Running 2 tests%s\n' % runs + EventTargetWrapperTestData.output + InspectorPassTestData.output
+ if results_shown:
+ expected_logs += 'MOCK: user.open_url: file://...\n'
+ self.assertEqual(self._normalize_output(logs), expected_logs)
+
+ self.assertEqual(uploaded[0], upload_succeeds)
+
+ return logs
+
+ _event_target_wrapper_and_inspector_results = {
+ "Bindings":
+ {"url": "https://src.chromium.org/viewvc/blink/trunk/PerformanceTests/Bindings",
+ "tests": {"event-target-wrapper": EventTargetWrapperTestData.results}}}
+
+ def test_run_with_json_output(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server=some.host'])
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+ self.assertEqual(self._load_output_json(runner), [{
+ "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+
+ filesystem = port.host.filesystem
+ self.assertTrue(filesystem.isfile(runner._output_json_path()))
+ self.assertTrue(filesystem.isfile(filesystem.splitext(runner._output_json_path())[0] + '.html'))
+
+ def test_run_with_description(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server=some.host', '--description', 'some description'])
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+ self.assertEqual(self._load_output_json(runner), [{
+ "buildTime": "2013-02-08T15:19:37.460000", "description": "some description",
+ "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+
+ def create_runner_and_setup_results_template(self, args=[]):
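+ # Seed the mock filesystem with the results-template resources that
+ # runner.run() needs when it generates the results HTML page.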
+ runner, port = self.create_runner(args)
+ filesystem = port.host.filesystem
+ filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
+ 'BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>'
+ '<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><script>%PeformanceTestsResultsJSON%</script>END')
+ filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
+ return runner, port
+
+ def test_run_respects_no_results(self):
+ runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server=some.host', '--no-results'])
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, results_shown=False)
+ self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))
+
+ def test_run_generates_json_by_default(self):
+ runner, port = self.create_runner_and_setup_results_template()
+ filesystem = port.host.filesystem
+ output_json_path = runner._output_json_path()
+ results_page_path = filesystem.splitext(output_json_path)[0] + '.html'
+
+ self.assertFalse(filesystem.isfile(output_json_path))
+ self.assertFalse(filesystem.isfile(results_page_path))
+
+ self._test_run_with_json_output(runner, port.host.filesystem)
+
+ self.assertEqual(self._load_output_json(runner), [{
+ "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+
+ self.assertTrue(filesystem.isfile(output_json_path))
+ self.assertTrue(filesystem.isfile(results_page_path))
+
+ def test_run_merges_output_by_default(self):
+ runner, port = self.create_runner_and_setup_results_template()
+ filesystem = port.host.filesystem
+ output_json_path = runner._output_json_path()
+
+ filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
+
+ self._test_run_with_json_output(runner, port.host.filesystem)
+
+ self.assertEqual(self._load_output_json(runner), [{"previous": "results"}, {
+ "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+ self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
+
+ def test_run_respects_reset_results(self):
+ runner, port = self.create_runner_and_setup_results_template(args=["--reset-results"])
+ filesystem = port.host.filesystem
+ output_json_path = runner._output_json_path()
+
+ filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
+
+ self._test_run_with_json_output(runner, port.host.filesystem)
+
+ self.assertEqual(self._load_output_json(runner), [{
+ "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+ self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
+
+ def test_run_generates_and_show_results_page(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
+ page_shown = []
+ port.show_results_html_file = lambda path: page_shown.append(path)
+ filesystem = port.host.filesystem
+ self._test_run_with_json_output(runner, filesystem, results_shown=False)
+
+ expected_entry = {"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}
+
+ self.maxDiff = None
+ self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
+ self.assertEqual(self._load_output_json(runner), [expected_entry])
+ self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
+ 'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
+ '<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
+ self.assertEqual(page_shown[0], '/mock-checkout/output.html')
+
+ self._test_run_with_json_output(runner, filesystem, results_shown=False)
+ self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
+ self.assertEqual(self._load_output_json(runner), [expected_entry, expected_entry])
+ self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
+ 'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
+ '<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
+
+ def test_run_respects_no_show_results(self):
+ show_results_html_file = lambda path: page_shown.append(path)
+
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
+ page_shown = []
+ port.show_results_html_file = show_results_html_file
+ self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
+ self.assertEqual(page_shown[0], '/mock-checkout/output.html')
+
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--no-show-results'])
+ page_shown = []
+ port.show_results_html_file = show_results_html_file
+ self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
+ self.assertEqual(page_shown, [])
+
+ def test_run_with_bad_output_json(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
+ port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
+ self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
+ port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
+ self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
+
+ def test_run_with_slave_config_json(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
+ port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+ self.assertEqual(self._load_output_json(runner), [{
+ "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}, "builderKey": "value"}])
+
+ def test_run_with_bad_slave_config_json(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
+ logs = self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+ self.assertTrue('Missing slave configuration JSON file: /mock-checkout/slave-config.json' in logs)
+ port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', 'bad json')
+ self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+ port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '["another bad json"]')
+ self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+
+ def test_run_with_multiple_repositories(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server=some.host'])
+ port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+ self.assertEqual(self._load_output_json(runner), [{
+ "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"webkit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"},
+ "some": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+
+ def test_run_with_upload_json(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])
+
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+ generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
+ self.assertEqual(generated_json[0]['platform'], 'platform1')
+ self.assertEqual(generated_json[0]['builderName'], 'builder1')
+ self.assertEqual(generated_json[0]['buildNumber'], 123)
+
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)
+
+ def test_run_with_upload_json_should_generate_perf_webkit_json(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123',
+ '--slave-config-json-path=/mock-checkout/slave-config.json'])
+ port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value1"}')
+
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+ generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
+ self.assertTrue(isinstance(generated_json, list))
+ self.assertEqual(len(generated_json), 1)
+
+ output = generated_json[0]
+ self.maxDiff = None
+ self.assertEqual(output['platform'], 'platform1')
+ self.assertEqual(output['buildNumber'], 123)
+ self.assertEqual(output['buildTime'], '2013-02-08T15:19:37.460000')
+ self.assertEqual(output['builderName'], 'builder1')
+ self.assertEqual(output['builderKey'], 'value1')
+ self.assertEqual(output['revisions'], {'blink': {'revision': '5678', 'timestamp': '2013-02-01 08:48:05 +0000'}})
+ self.assertEqual(output['tests'].keys(), ['Bindings'])
+ self.assertEqual(sorted(output['tests']['Bindings'].keys()), ['tests', 'url'])
+ self.assertEqual(output['tests']['Bindings']['url'], 'https://src.chromium.org/viewvc/blink/trunk/PerformanceTests/Bindings')
+ self.assertEqual(output['tests']['Bindings']['tests'].keys(), ['event-target-wrapper'])
+ self.assertEqual(output['tests']['Bindings']['tests']['event-target-wrapper'], {
+ 'url': 'https://src.chromium.org/viewvc/blink/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
+ 'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}})
+
+ def test_run_with_repeat(self):
+ self.maxDiff = None
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server=some.host', '--repeat', '5'])
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True, repeat=5)
+ self.assertEqual(self._load_output_json(runner), [
+ {"buildTime": "2013-02-08T15:19:37.460000",
+ "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
+ {"buildTime": "2013-02-08T15:19:37.460000",
+ "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
+ {"buildTime": "2013-02-08T15:19:37.460000",
+ "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
+ {"buildTime": "2013-02-08T15:19:37.460000",
+ "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
+ {"buildTime": "2013-02-08T15:19:37.460000",
+ "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"blink": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+
+ def test_run_with_test_runner_count(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-runner-count=3'])
+ self._test_run_with_json_output(runner, port.host.filesystem, compare_logs=False)
+ generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
+ self.assertTrue(isinstance(generated_json, list))
+ self.assertEqual(len(generated_json), 1)
+
+ output = generated_json[0]['tests']['Bindings']['tests']['event-target-wrapper']['metrics']['Time']['current']
+ self.assertEqual(len(output), 3)
+ expectedMetrics = EventTargetWrapperTestData.results['metrics']['Time']['current'][0]
+ for metrics in output:
+ self.assertEqual(metrics, expectedMetrics)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/pylintrc b/src/third_party/blink/Tools/Scripts/webkitpy/pylintrc
new file mode 100644
index 0000000..b3df526
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/pylintrc
@@ -0,0 +1,314 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# FIXME: remove this whitespace diff.
+#
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# List of plugins (as comma separated values of python module names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+
+[MESSAGES CONTROL]
+
+# Enable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by commas (,) or put this
+# option multiple times.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by commas (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once).
+# CHANGED:
+# C0103: Invalid name ""
+# C0111: Missing docstring
+# C0301: Line too long
+# C0302: Too many lines in module (N)
+# I0010: Unable to consider inline option ''
+# I0011: Locally disabling WNNNN
+#
+# R0201: Method could be a function
+# R0801: Similar lines in N files
+# R0901: Too many ancestors (8/7)
+# R0902: Too many instance attributes (N/7)
+# R0903: Too few public methods (N/2)
+# R0904: Too many public methods (N/20)
+# R0911: Too many return statements (N/6)
+# R0912: Too many branches (N/12)
+# R0913: Too many arguments (N/5)
+# R0914: Too many local variables (N/15)
+# R0915: Too many statements (N/50)
+# R0921: Abstract class not referenced
+# R0922: Abstract class is only referenced 1 times
+# W0122: Use of the exec statement
+# W0141: Used builtin function ''
+# W0212: Access to a protected member X of a client class
+# W0142: Used * or ** magic
+# W0401: Wildcard import X
+# W0402: Uses of a deprecated module 'string'
+# W0404: 41: Reimport 'XX' (imported line NN)
+# W0511: TODO
+# W0603: Using the global statement
+# W0614: Unused import X from wildcard import
+# W0703: Catch "Exception"
+# W1201: Specify string format arguments as logging function parameters
+disable=C0103,C0111,C0301,C0302,I0010,I0011,R0201,R0801,R0901,R0902,R0903,R0904,R0911,R0912,R0913,R0914,R0915,R0921,R0922,W0122,W0141,W0142,W0212,W0401,W0402,W0404,W0511,W0603,W0614,W0703,W1201
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html
+output-format=text
+
+# Include message's id in output
+include-ids=yes
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written to a file named "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+# CHANGED:
+reports=no
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables error, warning, refactor, convention
+# and statement, which respectively contain the number of errors, warnings,
+# refactor and convention messages, and the total number of statements
+# analyzed. This is used by the global evaluation report (RP0004).
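+# For example, 2 errors and 3 warnings across 100 statements would score
+# 10.0 - ((5 * 2 + 3 + 0 + 0) / 100.0) * 10 = 8.7.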
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (RP0004).
+comment=no
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused imports in __init__ files.
+init-import=no
+
+# A regular expression matching the beginning of the name of dummy variables
+# (i.e. not used).
+dummy-variables-rgx=_|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in a mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of class names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject,twisted.internet.reactor,hashlib,google.appengine.api.memcache
+
+# When zope mode is activated, add a predefined set of Zope acquired attributes
+# to generated-members.
+zope=no
+
+# List of members which are set dynamically and missed by the pylint inference
+# system, and so shouldn't trigger E0201 when accessed. Python regular
+# expressions are accepted.
+generated-members=REQUEST,acl_users,aq_parent
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take into consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[SIMILARITIES]
+
+# Minimum number of lines for a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=200
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+# CHANGED:
+indent-string=' '
+
+
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# List of builtin function names that should not be used, separated by a comma
+bad-functions=map,filter,apply,input
+
+# Regular expression which should only match correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
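+# (e.g. "webkitpy" and "JSONChecker" both match; "fooBar" does not)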
+
+# Regular expression which should only match correct module level names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression which should only match correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression which should only match correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct instance attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct list comprehension /
+# generator expression variable names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Regular expression which should only match function or class names which do
+# not require a docstring
+no-docstring-rgx=__.*__
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Defaults to names
+# with a leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield statements for a function / method body
+max-returns=6
+
+# Maximum number of branches for a function / method body
+max-branchs=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used, for
+# instance, to not check methods defined in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,string,TERMIOS,Bastion,rexec
+
+# Create a graph of all (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checker.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checker.py
new file mode 100644
index 0000000..e7a2dec
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checker.py
@@ -0,0 +1,743 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+# Copyright (C) 2010 ProFUSION embedded systems
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Front end of some style-checker modules."""
+
+import logging
+import os.path
+import re
+import sys
+
+from checkers.common import categories as CommonCategories
+from checkers.common import CarriageReturnChecker
+from checkers.cpp import CppChecker
+from checkers.jsonchecker import JSONChecker
+from checkers.png import PNGChecker
+from checkers.python import PythonChecker
+from checkers.test_expectations import TestExpectationsChecker
+from checkers.text import TextChecker
+from checkers.xcodeproj import XcodeProjectFileChecker
+from checkers.xml import XMLChecker
+from error_handlers import DefaultStyleErrorHandler
+from filter import FilterConfiguration
+from optparser import ArgumentParser
+from optparser import DefaultCommandOptionValues
+from webkitpy.common.system.logutils import configure_logging as _configure_logging
+
+
+_log = logging.getLogger(__name__)
+
+
+# These are default option values for the command-line option parser.
+_DEFAULT_MIN_CONFIDENCE = 1
+_DEFAULT_OUTPUT_FORMAT = 'emacs'
+
+
+# FIXME: Remove the style categories we will never want to have.
+# For categories for which we want similar functionality,
+# modify the implementation and enable them.
+#
+# Throughout this module, we use "filter rule" rather than "filter"
+# for an individual boolean filter flag like "+foo". This allows us to
+# reserve "filter" for what one gets by collectively applying all of
+# the filter rules.
+#
+# The base filter rules are the filter rules that begin the list of
+# filter rules used to check style. For example, these rules precede
+# any user-specified filter rules. Since by default all categories are
+# checked, this list should normally include only rules that begin
+# with a "-" sign.
+_BASE_FILTER_RULES = [
+ '-build/endif_comment',
+ '-build/include_what_you_use', # <string> for std::string
+ '-build/storage_class', # const static
+ '-legal/copyright',
+ '-readability/multiline_comment',
+ '-readability/braces', # int foo() {};
+ '-readability/fn_size',
+ '-readability/casting',
+ '-readability/function',
+ '-runtime/arrays', # variable length array
+ '-runtime/casting',
+ '-runtime/sizeof',
+ '-runtime/explicit', # explicit
+ '-runtime/virtual', # virtual dtor
+ '-runtime/printf',
+ '-runtime/threadsafe_fn',
+ '-runtime/rtti',
+ '-whitespace/blank_line',
+ '-whitespace/end_of_line',
+ # List Python pep8 categories last.
+ #
+ # Because much of WebKit's Python code base does not abide by the
+ # PEP8 79 character limit, we ignore the 79-character-limit category
+ # pep8/E501 for now.
+ #
+ # FIXME: Consider bringing WebKit's Python code base into conformance
+ # with the 79 character limit, or some higher limit that is
+ # agreeable to the WebKit project.
+ '-pep8/E501',
+
+ # FIXME: Move the pylint rules from the pylintrc to here. This will
+ # also require us to re-work lint-webkitpy to produce the equivalent output.
+ ]
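+
+# Illustrative sketch (not part of the original module): filter rules apply
+# in order, so a user rule appended after these base rules can re-enable a
+# category they switch off, e.g.:
+#
+#   configuration = FilterConfiguration(base_rules=_BASE_FILTER_RULES,
+#                                       user_rules=["+build/endif_comment"])
+#   configuration.should_check("build/endif_comment", "foo.cpp")  # True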
+
+
+# The path-specific filter rules.
+#
+# This list is order sensitive. Only the first path substring match
+# is used. See the FilterConfiguration documentation in filter.py
+# for more information on this list.
+#
+# Each string appearing in this nested list should have at least
+# one associated unit test assertion. These assertions are located,
+# for example, in the test_path_rules_specifier() unit test method of
+# checker_unittest.py.
+_PATH_RULES_SPECIFIER = [
+ # Files in these directories are consumers of the WebKit
+ # API and therefore do not follow the same header including
+ # discipline as WebCore.
+
+ ([# There is no clean way to avoid "yy_*" names used by flex.
+ "Source/core/css/CSSParser-in.cpp"],
+ ["-readability/naming"]),
+
+ # For third-party Python code, keep only the following checks--
+ #
+ # No tabs: to avoid having to set the SVN allow-tabs property.
+ # No trailing white space: since this is easy to correct.
+ # No carriage-return line endings: since this is easy to correct.
+ #
+ (["webkitpy/thirdparty/"],
+ ["-",
+ "+pep8/W191", # Tabs
+ "+pep8/W291", # Trailing white space
+ "+whitespace/carriage_return"]),
+
+ ([# Jinja templates: files have .cpp or .h extensions, but contain
+ # template code, which can't be handled, so disable tests.
+ "Source/bindings/templates",
+ "Source/build/scripts/templates"],
+ ["-"]),
+
+ ([# IDL compiler reference output
+ # Conforming to style significantly increases the complexity of the code
+ # generator and decreases *its* readability, which is of more concern
+ # than style of the machine-generated code itself.
+ "Source/bindings/tests/results"],
+ ["-"]),
+]
+
+
+_CPP_FILE_EXTENSIONS = [
+ 'c',
+ 'cpp',
+ 'h',
+ ]
+
+_JSON_FILE_EXTENSION = 'json'
+
+_PYTHON_FILE_EXTENSION = 'py'
+
+_TEXT_FILE_EXTENSIONS = [
+ 'cc',
+ 'cgi',
+ 'css',
+ 'gyp',
+ 'gypi',
+ 'html',
+ 'idl',
+ 'in',
+ 'js',
+ 'mm',
+ 'php',
+ 'pl',
+ 'pm',
+ 'rb',
+ 'sh',
+ 'txt',
+ 'xhtml',
+ 'y',
+ ]
+
+_XCODEPROJ_FILE_EXTENSION = 'pbxproj'
+
+_XML_FILE_EXTENSIONS = [
+ 'vcproj',
+ 'vsprops',
+ ]
+
+_PNG_FILE_EXTENSION = 'png'
+
+# Files to skip that are less obvious.
+#
+# Some files should be skipped when checking style. For example,
+# WebKit maintains some files in Mozilla style on purpose to ease
+# future merges.
+_SKIPPED_FILES_WITH_WARNING = [
+ "Source/WebKit/gtk/tests/",
+ # All WebKit*.h files in Source/WebKit2/UIProcess/API/gtk,
+ # except those ending in ...Private.h are GTK+ API headers,
+ # which differ greatly from WebKit coding style.
+ re.compile(r'Source/WebKit2/UIProcess/API/gtk/WebKit(?!.*Private\.h).*\.h$'),
+ re.compile(r'Source/WebKit2/WebProcess/InjectedBundle/API/gtk/WebKit(?!.*Private\.h).*\.h$'),
+ 'Source/WebKit2/UIProcess/API/gtk/webkit2.h',
+ 'Source/WebKit2/WebProcess/InjectedBundle/API/gtk/webkit-web-extension.h']
+
+# Files to skip that are more common or obvious.
+#
+# This list should be in addition to files with FileType.NONE. Files
+# with FileType.NONE are automatically skipped without warning.
+_SKIPPED_FILES_WITHOUT_WARNING = [
+ "LayoutTests" + os.path.sep,
+ "Source/ThirdParty/leveldb" + os.path.sep,
+ # Prevents this from being recognized as a text file.
+ "Source/WebCore/GNUmakefile.features.am.in",
+ ]
+
+# Extensions of files which are allowed to contain carriage returns.
+_CARRIAGE_RETURN_ALLOWED_FILE_EXTENSIONS = [
+ 'png',
+ 'vcproj',
+ 'vsprops',
+ ]
+
+# The maximum number of errors to report per file, per category.
+# If a category is not a key, then it has no maximum.
+_MAX_REPORTS_PER_CATEGORY = {
+ "whitespace/carriage_return": 1
+}
+
+
+def _all_categories():
+ """Return the set of all categories used by check-webkit-style."""
+ # Take the union across all checkers.
+ categories = CommonCategories.union(CppChecker.categories)
+ categories = categories.union(JSONChecker.categories)
+ categories = categories.union(TestExpectationsChecker.categories)
+ categories = categories.union(PNGChecker.categories)
+
+ # FIXME: Consider adding all of the pep8 categories. Since they
+ # are not too meaningful for documentation purposes, for
+ # now we add only the categories needed for the unit tests
+ # (which validate the consistency of the configuration
+ # settings against the known categories, etc).
+ categories = categories.union(["pep8/W191", "pep8/W291", "pep8/E501"])
+
+ return categories
+
+
+def _check_webkit_style_defaults():
+ """Return the default command-line options for check-webkit-style."""
+ return DefaultCommandOptionValues(min_confidence=_DEFAULT_MIN_CONFIDENCE,
+ output_format=_DEFAULT_OUTPUT_FORMAT)
+
+
+# This function exists so that optparser does not have to import from checker.
+def check_webkit_style_parser():
+ all_categories = _all_categories()
+ default_options = _check_webkit_style_defaults()
+ return ArgumentParser(all_categories=all_categories,
+ base_filter_rules=_BASE_FILTER_RULES,
+ default_options=default_options)
+
+
+def check_webkit_style_configuration(options):
+ """Return a StyleProcessorConfiguration instance for check-webkit-style.
+
+ Args:
+ options: A CommandOptionValues instance.
+
+ """
+ filter_configuration = FilterConfiguration(
+ base_rules=_BASE_FILTER_RULES,
+ path_specific=_PATH_RULES_SPECIFIER,
+ user_rules=options.filter_rules)
+
+ return StyleProcessorConfiguration(filter_configuration=filter_configuration,
+ max_reports_per_category=_MAX_REPORTS_PER_CATEGORY,
+ min_confidence=options.min_confidence,
+ output_format=options.output_format,
+ stderr_write=sys.stderr.write)
+
+
+def _create_log_handlers(stream):
+ """Create and return a default list of logging.Handler instances.
+
+ Messages at the WARNING level and above are formatted to display the
+ logging level; messages strictly below WARNING are formatted without it.
+
+ Args:
+ stream: See the configure_logging() docstring.
+
+ """
+ # Handles logging.WARNING and above.
+ error_handler = logging.StreamHandler(stream)
+ error_handler.setLevel(logging.WARNING)
+ formatter = logging.Formatter("%(levelname)s: %(message)s")
+ error_handler.setFormatter(formatter)
+
+ # Create a logging.Filter instance that only accepts messages
+ # below WARNING (i.e. filters out anything WARNING or above).
+ non_error_filter = logging.Filter()
+ # The filter method accepts a logging.LogRecord instance.
+ non_error_filter.filter = lambda record: record.levelno < logging.WARNING
+
+ non_error_handler = logging.StreamHandler(stream)
+ non_error_handler.addFilter(non_error_filter)
+ formatter = logging.Formatter("%(message)s")
+ non_error_handler.setFormatter(formatter)
+
+ return [error_handler, non_error_handler]
+
+
+def _create_debug_log_handlers(stream):
+ """Create and return a list of logging.Handler instances for debugging.
+
+ Args:
+ stream: See the configure_logging() docstring.
+
+ """
+ handler = logging.StreamHandler(stream)
+ formatter = logging.Formatter("%(name)s: %(levelname)-8s %(message)s")
+ handler.setFormatter(formatter)
+
+ return [handler]
+
+
+def configure_logging(stream, logger=None, is_verbose=False):
+ """Configure logging, and return the list of handlers added.
+
+ Returns:
+ A list of references to the logging handlers added to the root
+ logger. This allows the caller to later remove the handlers
+ using logger.removeHandler. This is useful primarily during unit
+ testing where the caller may want to configure logging temporarily
+ and then undo the configuring.
+
+ Args:
+ stream: A file-like object to which to log. The stream must
+ define an "encoding" data attribute, or else logging
+ raises an error.
+ logger: A logging.Logger instance to configure. This parameter
+ should be used only in unit tests. Defaults to the
+ root logger.
+ is_verbose: A boolean value of whether logging should be verbose.
+
+ """
+ # If the stream does not define an "encoding" data attribute, the
+ # logging module can throw an error like the following:
+ #
+ # Traceback (most recent call last):
+ # File "/System/Library/Frameworks/Python.framework/Versions/2.6/...
+ # lib/python2.6/logging/__init__.py", line 761, in emit
+ # self.stream.write(fs % msg.encode(self.stream.encoding))
+ # LookupError: unknown encoding: unknown
+ if logger is None:
+ logger = logging.getLogger()
+
+ if is_verbose:
+ logging_level = logging.DEBUG
+ handlers = _create_debug_log_handlers(stream)
+ else:
+ logging_level = logging.INFO
+ handlers = _create_log_handlers(stream)
+
+ handlers = _configure_logging(logging_level=logging_level, logger=logger,
+ handlers=handlers)
+
+ return handlers
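+
+# Illustrative usage (a sketch; run_checks is a hypothetical caller):
+#
+#   handlers = configure_logging(sys.stderr)
+#   try:
+#       run_checks()
+#   finally:
+#       for handler in handlers:
+#           logging.getLogger().removeHandler(handler)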
+
+
+# Enum-like idiom
+class FileType:
+
+ NONE = 0 # FileType.NONE evaluates to False.
+ # Alphabetize remaining types
+ # CHANGELOG = 1
+ CPP = 2
+ JSON = 3
+ PNG = 4
+ PYTHON = 5
+ TEXT = 6
+ # WATCHLIST = 7
+ XML = 8
+ XCODEPROJ = 9
+
+
+class CheckerDispatcher(object):
+
+ """Supports determining whether and how to check style, based on path."""
+
+ def _file_extension(self, file_path):
+ """Return the file extension without the leading dot."""
+ return os.path.splitext(file_path)[1].lstrip(".")
+
+ def _should_skip_file_path(self, file_path, skip_array_entry):
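+ # Paths ending in "png" are never skipped, so PNG files always reach
+ # the PNG checker; other entries may be plain substrings or compiled
+ # regular expressions.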
+ match = re.search(r"\s*png$", file_path)
+ if match:
+ return False
+ if isinstance(skip_array_entry, str):
+ if file_path.find(skip_array_entry) >= 0:
+ return True
+ elif skip_array_entry.match(file_path):
+ return True
+ return False
+
+ def should_skip_with_warning(self, file_path):
+ """Return whether the given file should be skipped with a warning."""
+ for skipped_file in _SKIPPED_FILES_WITH_WARNING:
+ if self._should_skip_file_path(file_path, skipped_file):
+ return True
+ return False
+
+ def should_skip_without_warning(self, file_path):
+ """Return whether the given file should be skipped without a warning."""
+ if not self._file_type(file_path): # FileType.NONE.
+ return True
+ # Since "LayoutTests" is in _SKIPPED_FILES_WITHOUT_WARNING, make
+ # an exception to prevent files like 'TestExpectations' from being skipped.
+ #
+ # FIXME: Figure out a good way to avoid having to add special logic
+ # for this special case.
+ basename = os.path.basename(file_path)
+ if basename == 'TestExpectations':
+ return False
+ for skipped_file in _SKIPPED_FILES_WITHOUT_WARNING:
+ if self._should_skip_file_path(file_path, skipped_file):
+ return True
+ return False
+
+ def should_check_and_strip_carriage_returns(self, file_path):
+ return self._file_extension(file_path) not in _CARRIAGE_RETURN_ALLOWED_FILE_EXTENSIONS
+
+ def _file_type(self, file_path):
+ """Return the file type corresponding to the given file."""
+ file_extension = self._file_extension(file_path)
+
+ if (file_extension in _CPP_FILE_EXTENSIONS) or (file_path == '-'):
+ # FIXME: Do something about the comment below and the issue it
+ # raises since cpp_style already relies on the extension.
+ #
+ # Treat stdin as C++. Since the extension is unknown when
+ # reading from stdin, cpp_style tests should not rely on
+ # the extension.
+ return FileType.CPP
+ elif file_extension == _JSON_FILE_EXTENSION:
+ return FileType.JSON
+ elif file_extension == _PYTHON_FILE_EXTENSION:
+ return FileType.PYTHON
+ elif file_extension in _XML_FILE_EXTENSIONS:
+ return FileType.XML
+ elif file_extension == _XCODEPROJ_FILE_EXTENSION:
+ return FileType.XCODEPROJ
+ elif file_extension == _PNG_FILE_EXTENSION:
+ return FileType.PNG
+ elif ((not file_extension and os.path.join("Tools", "Scripts") in file_path) or
+ file_extension in _TEXT_FILE_EXTENSIONS or os.path.basename(file_path) == 'TestExpectations'):
+ return FileType.TEXT
+ else:
+ return FileType.NONE
+
+ def _create_checker(self, file_type, file_path, handle_style_error,
+ min_confidence):
+ """Instantiate and return a style checker based on file type."""
+ if file_type == FileType.NONE:
+ checker = None
+ elif file_type == FileType.CPP:
+ file_extension = self._file_extension(file_path)
+ checker = CppChecker(file_path, file_extension,
+ handle_style_error, min_confidence)
+ elif file_type == FileType.JSON:
+ checker = JSONChecker(file_path, handle_style_error)
+ elif file_type == FileType.PYTHON:
+ checker = PythonChecker(file_path, handle_style_error)
+ elif file_type == FileType.XML:
+ checker = XMLChecker(file_path, handle_style_error)
+ elif file_type == FileType.XCODEPROJ:
+ checker = XcodeProjectFileChecker(file_path, handle_style_error)
+ elif file_type == FileType.PNG:
+ checker = PNGChecker(file_path, handle_style_error)
+ elif file_type == FileType.TEXT:
+ basename = os.path.basename(file_path)
+ if basename == 'TestExpectations':
+ checker = TestExpectationsChecker(file_path, handle_style_error)
+ else:
+ checker = TextChecker(file_path, handle_style_error)
+ else:
+ raise ValueError('Invalid file type "%(file_type)s": valid file types '
+ "include %(NONE)s, %(CPP)s, and %(TEXT)s."
+ % {"file_type": file_type,
+ "NONE": FileType.NONE,
+ "CPP": FileType.CPP,
+ "TEXT": FileType.TEXT})
+
+ return checker
+
+ def dispatch(self, file_path, handle_style_error, min_confidence):
+ """Instantiate and return a style checker based on file path."""
+ file_type = self._file_type(file_path)
+
+ checker = self._create_checker(file_type,
+ file_path,
+ handle_style_error,
+ min_confidence)
+ return checker
+
+
+# FIXME: Remove the stderr_write attribute from this class and replace
+# its use with calls to a logging module logger.
+class StyleProcessorConfiguration(object):
+
+ """Stores configuration values for the StyleProcessor class.
+
+ Attributes:
+ min_confidence: An integer between 1 and 5 inclusive that is the
+ minimum confidence level of style errors to report.
+
+ max_reports_per_category: The maximum number of errors to report
+ per category, per file.
+
+ stderr_write: A function that takes a string as a parameter and
+ serves as stderr.write.
+
+ """
+
+ def __init__(self,
+ filter_configuration,
+ max_reports_per_category,
+ min_confidence,
+ output_format,
+ stderr_write):
+ """Create a StyleProcessorConfiguration instance.
+
+ Args:
+ filter_configuration: A FilterConfiguration instance. The default
+ is the "empty" filter configuration, which
+ means that all errors should be checked.
+
+ max_reports_per_category: The maximum number of errors to report
+ per category, per file.
+
+ min_confidence: An integer between 1 and 5 inclusive that is the
+ minimum confidence level of style errors to report.
+ The default is 1, which reports all style errors.
+
+ output_format: A string that is the output format. The supported
+ output formats are "emacs" which emacs can parse
+ and "vs7" which Microsoft Visual Studio 7 can parse.
+
+ stderr_write: A function that takes a string as a parameter and
+ serves as stderr.write.
+
+ """
+ self._filter_configuration = filter_configuration
+ self._output_format = output_format
+
+ self.max_reports_per_category = max_reports_per_category
+ self.min_confidence = min_confidence
+ self.stderr_write = stderr_write
+
+ def is_reportable(self, category, confidence_in_error, file_path):
+ """Return whether an error is reportable.
+
+ An error is reportable if both the confidence in the error is
+ at least the minimum confidence level and the current filter
+ says the category should be checked for the given path.
+
+ Args:
+ category: A string that is a style category.
+ confidence_in_error: An integer between 1 and 5 inclusive that is
+ the application's confidence in the error.
+ A higher number means greater confidence.
+ file_path: The path of the file being checked.
+
+ """
+ if confidence_in_error < self.min_confidence:
+ return False
+
+ return self._filter_configuration.should_check(category, file_path)
+
+ def write_style_error(self,
+ category,
+ confidence_in_error,
+ file_path,
+ line_number,
+ message):
+ """Write a style error to the configured stderr."""
+ if self._output_format == 'vs7':
+ format_string = "%s(%s): %s [%s] [%d]\n"
+ else:
+ format_string = "%s:%s: %s [%s] [%d]\n"
+
+ self.stderr_write(format_string % (file_path,
+ line_number,
+ message,
+ category,
+ confidence_in_error))
+
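+# Output format examples (as exercised by the unit tests in
+# checker_unittest.py) for a "whitespace/tab" error with message "message"
+# at foo.h line 100 and confidence 5:
+#   emacs: foo.h:100: message [whitespace/tab] [5]
+#   vs7:   foo.h(100): message [whitespace/tab] [5]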
+
+class ProcessorBase(object):
+
+ """The base class for processors of lists of lines."""
+
+ def should_process(self, file_path):
+ """Return whether the file at file_path should be processed.
+
+ The TextFileReader class calls this method prior to reading in
+ the lines of a file. Use this method, for example, to prevent
+ the style checker from reading binary files into memory.
+
+ """
+ raise NotImplementedError('Subclasses should implement.')
+
+ def process(self, lines, file_path, **kwargs):
+ """Process lines of text read from a file.
+
+ Args:
+ lines: A list of lines of text to process.
+ file_path: The path from which the lines were read.
+ **kwargs: This argument signifies that the process() method of
+ subclasses of ProcessorBase may support additional
+ keyword arguments.
+ For example, a style checker's check() method
+ may support a "reportable_lines" parameter that represents
+ the line numbers of the lines for which style errors
+ should be reported.
+
+ """
+ raise NotImplementedError('Subclasses should implement.')
+
+
+class StyleProcessor(ProcessorBase):
+
+ """A ProcessorBase for checking style.
+
+ Attributes:
+ error_count: An integer that is the total number of reported
+ errors for the lifetime of this instance.
+
+ """
+
+ def __init__(self, configuration, mock_dispatcher=None,
+ mock_increment_error_count=None,
+ mock_carriage_checker_class=None):
+ """Create an instance.
+
+ Args:
+ configuration: A StyleProcessorConfiguration instance.
+ mock_dispatcher: A mock CheckerDispatcher instance. This
+ parameter is for unit testing. Defaults to a
+ CheckerDispatcher instance.
+ mock_increment_error_count: A mock error-count incrementer.
+ mock_carriage_checker_class: A mock class for checking and
+ transforming carriage returns.
+ This parameter is for unit testing.
+ Defaults to CarriageReturnChecker.
+
+ """
+ if mock_dispatcher is None:
+ dispatcher = CheckerDispatcher()
+ else:
+ dispatcher = mock_dispatcher
+
+ if mock_increment_error_count is None:
+ # The following blank line is present to avoid flagging by pep8.py.
+
+ def increment_error_count():
+ """Increment the total count of reported errors."""
+ self.error_count += 1
+ else:
+ increment_error_count = mock_increment_error_count
+
+ if mock_carriage_checker_class is None:
+ # This needs to be a class rather than an instance since the
+ # process() method instantiates one using parameters.
+ carriage_checker_class = CarriageReturnChecker
+ else:
+ carriage_checker_class = mock_carriage_checker_class
+
+ self.error_count = 0
+
+ self._carriage_checker_class = carriage_checker_class
+ self._configuration = configuration
+ self._dispatcher = dispatcher
+ self._increment_error_count = increment_error_count
+
+ def should_process(self, file_path):
+ """Return whether the file should be checked for style."""
+ if self._dispatcher.should_skip_without_warning(file_path):
+ return False
+ if self._dispatcher.should_skip_with_warning(file_path):
+ _log.warn('File exempt from style guide. Skipping: "%s"'
+ % file_path)
+ return False
+ return True
+
+ def process(self, lines, file_path, line_numbers=None):
+ """Check the given lines for style.
+
+ Arguments:
+ lines: A list of all lines in the file to check.
+ file_path: The path of the file to process. If possible, the path
+ should be relative to the source root. Otherwise,
+ path-specific logic may not behave as expected.
+ line_numbers: A list of line numbers of the lines for which
+ style errors should be reported, or None if errors
+ for all lines should be reported. When not None, this
+ list normally contains the line numbers corresponding
+ to the modified lines of a patch.
+
+ """
+ _log.debug("Checking style: " + file_path)
+
+ style_error_handler = DefaultStyleErrorHandler(
+ configuration=self._configuration,
+ file_path=file_path,
+ increment_error_count=self._increment_error_count,
+ line_numbers=line_numbers)
+
+ carriage_checker = self._carriage_checker_class(style_error_handler)
+
+ # Check for and remove trailing carriage returns ("\r").
+ if self._dispatcher.should_check_and_strip_carriage_returns(file_path):
+ lines = carriage_checker.check(lines)
+
+ min_confidence = self._configuration.min_confidence
+ checker = self._dispatcher.dispatch(file_path,
+ style_error_handler,
+ min_confidence)
+
+ if checker is None:
+ raise AssertionError("File should not be checked: '%s'" % file_path)
+
+ _log.debug("Using class: " + checker.__class__.__name__)
+
+ checker.check(lines)
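+
+# Typical wiring, as exercised by StyleProcessor_EndToEndTest in
+# checker_unittest.py (a sketch; option parsing and file reading omitted):
+#
+#   configuration = StyleProcessorConfiguration(
+#       filter_configuration=FilterConfiguration(),
+#       max_reports_per_category={},
+#       min_confidence=3,
+#       output_format="vs7",
+#       stderr_write=sys.stderr.write)
+#   processor = StyleProcessor(configuration)
+#   if processor.should_process(file_path):
+#       processor.process(lines=lines, file_path=file_path)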
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checker_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checker_unittest.py
new file mode 100644
index 0000000..28a0106
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checker_unittest.py
@@ -0,0 +1,818 @@
+# -*- coding: utf-8; -*-
+#
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2009 Torch Mobile Inc.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for style.py."""
+
+import logging
+import os
+import unittest
+
+import checker as style
+from webkitpy.common.system.logtesting import LogTesting, TestLogStream
+from checker import _BASE_FILTER_RULES
+from checker import _MAX_REPORTS_PER_CATEGORY
+from checker import _PATH_RULES_SPECIFIER as PATH_RULES_SPECIFIER
+from checker import _all_categories
+from checker import check_webkit_style_configuration
+from checker import check_webkit_style_parser
+from checker import configure_logging
+from checker import CheckerDispatcher
+from checker import ProcessorBase
+from checker import StyleProcessor
+from checker import StyleProcessorConfiguration
+from checkers.cpp import CppChecker
+from checkers.jsonchecker import JSONChecker
+from checkers.python import PythonChecker
+from checkers.text import TextChecker
+from checkers.xml import XMLChecker
+from error_handlers import DefaultStyleErrorHandler
+from filter import validate_filter_rules
+from filter import FilterConfiguration
+from optparser import ArgumentParser
+from optparser import CommandOptionValues
+from webkitpy.common.system.logtesting import LoggingTestCase
+from webkitpy.style.filereader import TextFileReader
+
+
+class ConfigureLoggingTestBase(unittest.TestCase):
+
+ """Base class for testing configure_logging().
+
+ Sub-classes should implement:
+
+ is_verbose: The is_verbose value to pass to configure_logging().
+
+ """
+
+ def setUp(self):
+ is_verbose = self.is_verbose
+
+ log_stream = TestLogStream(self)
+
+ # Use a logger other than the root logger or one prefixed with
+ # webkit so as not to conflict with test-webkitpy logging.
+ logger = logging.getLogger("unittest")
+
+ # Configure the test logger not to pass messages along to the
+ # root logger. This prevents test messages from being
+ # propagated to loggers used by test-webkitpy logging (e.g.
+ # the root logger).
+ logger.propagate = False
+
+ self._handlers = configure_logging(stream=log_stream, logger=logger,
+ is_verbose=is_verbose)
+ self._log = logger
+ self._log_stream = log_stream
+
+ def tearDown(self):
+ """Reset logging to its original state.
+
+ This method ensures that the logging configuration set up
+ for a unit test does not affect logging in other unit tests.
+
+ """
+ logger = self._log
+ for handler in self._handlers:
+ logger.removeHandler(handler)
+
+ def assert_log_messages(self, messages):
+ """Assert that the logged messages equal the given messages."""
+ self._log_stream.assertMessages(messages)
+
+
+class ConfigureLoggingTest(ConfigureLoggingTestBase):
+
+ """Tests the configure_logging() function."""
+
+ is_verbose = False
+
+ def test_warning_message(self):
+ self._log.warn("test message")
+ self.assert_log_messages(["WARNING: test message\n"])
+
+ def test_below_warning_message(self):
+ # We test the boundary case of a logging level equal to 29.
+ # In practice, we will probably only be calling log.info(),
+ # which corresponds to a logging level of 20.
+ level = logging.WARNING - 1 # Equals 29.
+ self._log.log(level, "test message")
+ self.assert_log_messages(["test message\n"])
+
+ def test_debug_message(self):
+ self._log.debug("test message")
+ self.assert_log_messages([])
+
+ def test_two_messages(self):
+ self._log.info("message1")
+ self._log.info("message2")
+ self.assert_log_messages(["message1\n", "message2\n"])
+
+
+class ConfigureLoggingVerboseTest(ConfigureLoggingTestBase):
+
+ """Tests the configure_logging() function with is_verbose True."""
+
+ is_verbose = True
+
+ def test_debug_message(self):
+ self._log.debug("test message")
+ self.assert_log_messages(["unittest: DEBUG test message\n"])
+
+
+class GlobalVariablesTest(unittest.TestCase):
+
+ """Tests validity of the global variables."""
+
+ def _all_categories(self):
+ return _all_categories()
+
+ def defaults(self):
+ return style._check_webkit_style_defaults()
+
+ def test_webkit_base_filter_rules(self):
+ base_filter_rules = _BASE_FILTER_RULES
+ defaults = self.defaults()
+ already_seen = []
+ validate_filter_rules(base_filter_rules, self._all_categories())
+ # Also do some additional checks.
+ for rule in base_filter_rules:
+ # Check no leading or trailing white space.
+ self.assertEqual(rule, rule.strip())
+ # All categories are on by default, so defaults should
+ # begin with -.
+ self.assertTrue(rule.startswith('-'))
+ # Check no rule occurs twice.
+ self.assertNotIn(rule, already_seen)
+ already_seen.append(rule)
+
+ def test_defaults(self):
+ """Check that default arguments are valid."""
+ default_options = self.defaults()
+
+ # FIXME: We should not need to call parse() to determine
+ # whether the default arguments are valid.
+ parser = ArgumentParser(all_categories=self._all_categories(),
+ base_filter_rules=[],
+ default_options=default_options)
+ # No need to test the return value here since we test parse()
+ # on valid arguments elsewhere.
+ #
+ # The default options are valid: no error or SystemExit.
+ parser.parse(args=[])
+
+ def test_path_rules_specifier(self):
+ all_categories = self._all_categories()
+ for (sub_paths, path_rules) in PATH_RULES_SPECIFIER:
+            validate_filter_rules(path_rules, all_categories)
+
+ config = FilterConfiguration(path_specific=PATH_RULES_SPECIFIER)
+
+ def assertCheck(path, category):
+ """Assert that the given category should be checked."""
+ message = ('Should check category "%s" for path "%s".'
+ % (category, path))
+            self.assertTrue(config.should_check(category, path), message)
+
+ def assertNoCheck(path, category):
+ """Assert that the given category should not be checked."""
+ message = ('Should not check category "%s" for path "%s".'
+ % (category, path))
+ self.assertFalse(config.should_check(category, path), message)
+
+ assertCheck("random_path.cpp",
+ "build/include")
+ assertCheck("random_path.cpp",
+ "readability/naming")
+ assertNoCheck("Source/core/css/CSSParser-in.cpp",
+ "readability/naming")
+
+ # Third-party Python code: webkitpy/thirdparty
+ path = "Tools/Scripts/webkitpy/thirdparty/mock.py"
+ assertNoCheck(path, "build/include")
+ assertNoCheck(path, "pep8/E401") # A random pep8 category.
+ assertCheck(path, "pep8/W191")
+ assertCheck(path, "pep8/W291")
+ assertCheck(path, "whitespace/carriage_return")
+
+ def test_max_reports_per_category(self):
+ """Check that _MAX_REPORTS_PER_CATEGORY is valid."""
+ all_categories = self._all_categories()
+ for category in _MAX_REPORTS_PER_CATEGORY.iterkeys():
+ self.assertIn(category, all_categories,
+ 'Key "%s" is not a category' % category)
+
+
+class CheckWebKitStyleFunctionTest(unittest.TestCase):
+
+ """Tests the functions with names of the form check_webkit_style_*."""
+
+ def test_check_webkit_style_configuration(self):
+ # Exercise the code path to make sure the function does not error out.
+ option_values = CommandOptionValues()
+ configuration = check_webkit_style_configuration(option_values)
+
+ def test_check_webkit_style_parser(self):
+ # Exercise the code path to make sure the function does not error out.
+ parser = check_webkit_style_parser()
+
+
+class CheckerDispatcherSkipTest(unittest.TestCase):
+
+ """Tests the "should skip" methods of the CheckerDispatcher class."""
+
+ def setUp(self):
+ self._dispatcher = CheckerDispatcher()
+
+ def test_should_skip_with_warning(self):
+ """Test should_skip_with_warning()."""
+ # Check skipped files.
+ paths_to_skip = [
+ "Source/WebKit/gtk/tests/testatk.c",
+ "Source/WebKit2/UIProcess/API/gtk/webkit2.h",
+ "Source/WebKit2/UIProcess/API/gtk/WebKitWebView.h",
+ "Source/WebKit2/UIProcess/API/gtk/WebKitLoader.h",
+ ]
+
+ for path in paths_to_skip:
+ self.assertTrue(self._dispatcher.should_skip_with_warning(path),
+ "Checking: " + path)
+
+ # Verify that some files are not skipped.
+ paths_not_to_skip = [
+ "foo.txt",
+ "Source/WebKit2/UIProcess/API/gtk/HelperClass.cpp",
+ "Source/WebKit2/UIProcess/API/gtk/HelperClass.h",
+ "Source/WebKit2/UIProcess/API/gtk/WebKitWebView.cpp",
+ "Source/WebKit2/UIProcess/API/gtk/WebKitWebViewPrivate.h",
+ "Source/WebKit2/UIProcess/API/gtk/tests/WebViewTest.cpp",
+ "Source/WebKit2/UIProcess/API/gtk/tests/WebViewTest.h",
+ ]
+
+ for path in paths_not_to_skip:
+ self.assertFalse(self._dispatcher.should_skip_with_warning(path))
+
+ def _assert_should_skip_without_warning(self, path, is_checker_none,
+ expected):
+ # Check the file type before asserting the return value.
+ checker = self._dispatcher.dispatch(file_path=path,
+ handle_style_error=None,
+ min_confidence=3)
+ message = 'while checking: %s' % path
+ self.assertEqual(checker is None, is_checker_none, message)
+ self.assertEqual(self._dispatcher.should_skip_without_warning(path),
+ expected, message)
+
+ def test_should_skip_without_warning__true(self):
+ """Test should_skip_without_warning() for True return values."""
+ # Check a file with NONE file type.
+        path = 'foo.asdf'  # Nonsensical file extension.
+ self._assert_should_skip_without_warning(path,
+ is_checker_none=True,
+ expected=True)
+
+ # Check files with non-NONE file type. These examples must be
+ # drawn from the _SKIPPED_FILES_WITHOUT_WARNING configuration
+ # variable.
+ path = os.path.join('LayoutTests', 'foo.txt')
+ self._assert_should_skip_without_warning(path,
+ is_checker_none=False,
+ expected=True)
+
+ def test_should_skip_without_warning__false(self):
+ """Test should_skip_without_warning() for False return values."""
+ paths = ['foo.txt',
+ os.path.join('LayoutTests', 'TestExpectations'),
+ ]
+
+ for path in paths:
+ self._assert_should_skip_without_warning(path,
+ is_checker_none=False,
+ expected=False)
+
+
+class CheckerDispatcherCarriageReturnTest(unittest.TestCase):
+ def test_should_check_and_strip_carriage_returns(self):
+ files = {
+ 'foo.txt': True,
+ 'foo.cpp': True,
+ 'foo.vcproj': False,
+ 'foo.vsprops': False,
+ }
+
+ dispatcher = CheckerDispatcher()
+ for file_path, expected_result in files.items():
+ self.assertEqual(dispatcher.should_check_and_strip_carriage_returns(file_path), expected_result, 'Checking: %s' % file_path)
+
+
+class CheckerDispatcherDispatchTest(unittest.TestCase):
+
+ """Tests dispatch() method of CheckerDispatcher class."""
+
+ def dispatch(self, file_path):
+ """Call dispatch() with the given file path."""
+ dispatcher = CheckerDispatcher()
+ self.mock_handle_style_error = DefaultStyleErrorHandler('', None, None, [])
+ checker = dispatcher.dispatch(file_path,
+ self.mock_handle_style_error,
+ min_confidence=3)
+ return checker
+
+ def assert_checker_none(self, file_path):
+ """Assert that the dispatched checker is None."""
+ checker = self.dispatch(file_path)
+ self.assertIsNone(checker, 'Checking: "%s"' % file_path)
+
+ def assert_checker(self, file_path, expected_class):
+ """Assert the type of the dispatched checker."""
+ checker = self.dispatch(file_path)
+ got_class = checker.__class__
+ self.assertEqual(got_class, expected_class,
+ 'For path "%(file_path)s" got %(got_class)s when '
+ "expecting %(expected_class)s."
+ % {"file_path": file_path,
+ "got_class": got_class,
+ "expected_class": expected_class})
+
+ def assert_checker_cpp(self, file_path):
+ """Assert that the dispatched checker is a CppChecker."""
+ self.assert_checker(file_path, CppChecker)
+
+ def assert_checker_json(self, file_path):
+ """Assert that the dispatched checker is a JSONChecker."""
+ self.assert_checker(file_path, JSONChecker)
+
+ def assert_checker_python(self, file_path):
+ """Assert that the dispatched checker is a PythonChecker."""
+ self.assert_checker(file_path, PythonChecker)
+
+ def assert_checker_text(self, file_path):
+ """Assert that the dispatched checker is a TextChecker."""
+ self.assert_checker(file_path, TextChecker)
+
+ def assert_checker_xml(self, file_path):
+ """Assert that the dispatched checker is a XMLChecker."""
+ self.assert_checker(file_path, XMLChecker)
+
+ def test_cpp_paths(self):
+ """Test paths that should be checked as C++."""
+ paths = [
+ "-",
+ "foo.c",
+ "foo.cpp",
+ "foo.h",
+ ]
+
+ for path in paths:
+ self.assert_checker_cpp(path)
+
+ # Check checker attributes on a typical input.
+ file_base = "foo"
+ file_extension = "c"
+ file_path = file_base + "." + file_extension
+ self.assert_checker_cpp(file_path)
+ checker = self.dispatch(file_path)
+ self.assertEqual(checker.file_extension, file_extension)
+ self.assertEqual(checker.file_path, file_path)
+ self.assertEqual(checker.handle_style_error, self.mock_handle_style_error)
+ self.assertEqual(checker.min_confidence, 3)
+ # Check "-" for good measure.
+ file_base = "-"
+ file_extension = ""
+ file_path = file_base
+ self.assert_checker_cpp(file_path)
+ checker = self.dispatch(file_path)
+ self.assertEqual(checker.file_extension, file_extension)
+ self.assertEqual(checker.file_path, file_path)
+
+ def test_json_paths(self):
+ """Test paths that should be checked as JSON."""
+ paths = [
+ "Source/WebCore/inspector/Inspector.json",
+ "Tools/BuildSlaveSupport/build.webkit.org-config/config.json",
+ ]
+
+ for path in paths:
+ self.assert_checker_json(path)
+
+ # Check checker attributes on a typical input.
+ file_base = "foo"
+ file_extension = "json"
+ file_path = file_base + "." + file_extension
+ self.assert_checker_json(file_path)
+ checker = self.dispatch(file_path)
+ self.assertEqual(checker._handle_style_error,
+ self.mock_handle_style_error)
+
+ def test_python_paths(self):
+ """Test paths that should be checked as Python."""
+ paths = [
+ "foo.py",
+ "Tools/Scripts/modules/text_style.py",
+ ]
+
+ for path in paths:
+ self.assert_checker_python(path)
+
+ # Check checker attributes on a typical input.
+ file_base = "foo"
+ file_extension = "css"
+ file_path = file_base + "." + file_extension
+ self.assert_checker_text(file_path)
+ checker = self.dispatch(file_path)
+ self.assertEqual(checker.file_path, file_path)
+ self.assertEqual(checker.handle_style_error,
+ self.mock_handle_style_error)
+
+ def test_text_paths(self):
+ """Test paths that should be checked as text."""
+ paths = [
+ "foo.cc",
+ "foo.cgi",
+ "foo.css",
+ "foo.gyp",
+ "foo.gypi",
+ "foo.html",
+ "foo.idl",
+ "foo.in",
+ "foo.js",
+ "foo.mm",
+ "foo.php",
+ "foo.pl",
+ "foo.pm",
+ "foo.rb",
+ "foo.sh",
+ "foo.txt",
+ "foo.xhtml",
+ "foo.y",
+ os.path.join("Source", "WebCore", "inspector", "front-end", "Main.js"),
+ os.path.join("Tools", "Scripts", "check-webkit-style"),
+ ]
+
+ for path in paths:
+ self.assert_checker_text(path)
+
+ # Check checker attributes on a typical input.
+ file_base = "foo"
+ file_extension = "css"
+ file_path = file_base + "." + file_extension
+ self.assert_checker_text(file_path)
+ checker = self.dispatch(file_path)
+ self.assertEqual(checker.file_path, file_path)
+ self.assertEqual(checker.handle_style_error, self.mock_handle_style_error)
+
+ def test_xml_paths(self):
+ """Test paths that should be checked as XML."""
+ paths = [
+ "Source/WebCore/WebCore.vcproj/WebCore.vcproj",
+ "WebKitLibraries/win/tools/vsprops/common.vsprops",
+ ]
+
+ for path in paths:
+ self.assert_checker_xml(path)
+
+ # Check checker attributes on a typical input.
+ file_base = "foo"
+ file_extension = "vcproj"
+ file_path = file_base + "." + file_extension
+ self.assert_checker_xml(file_path)
+ checker = self.dispatch(file_path)
+ self.assertEqual(checker._handle_style_error,
+ self.mock_handle_style_error)
+
+ def test_none_paths(self):
+ """Test paths that have no file type.."""
+ paths = [
+ "Makefile",
+ "foo.asdf", # Non-sensical file extension.
+ "foo.exe",
+ ]
+
+ for path in paths:
+ self.assert_checker_none(path)
+
+
+class StyleProcessorConfigurationTest(unittest.TestCase):
+
+ """Tests the StyleProcessorConfiguration class."""
+
+ def setUp(self):
+        # The messages written to _mock_stderr_write() of this class.
+        self._error_messages = []
+
+ def _mock_stderr_write(self, message):
+ self._error_messages.append(message)
+
+ def _style_checker_configuration(self, output_format="vs7"):
+ """Return a StyleProcessorConfiguration instance for testing."""
+ base_rules = ["-whitespace", "+whitespace/tab"]
+ filter_configuration = FilterConfiguration(base_rules=base_rules)
+
+ return StyleProcessorConfiguration(
+ filter_configuration=filter_configuration,
+ max_reports_per_category={"whitespace/newline": 1},
+ min_confidence=3,
+ output_format=output_format,
+ stderr_write=self._mock_stderr_write)
+
+ def test_init(self):
+ """Test the __init__() method."""
+ configuration = self._style_checker_configuration()
+
+ # Check that __init__ sets the "public" data attributes correctly.
+ self.assertEqual(configuration.max_reports_per_category,
+ {"whitespace/newline": 1})
+ self.assertEqual(configuration.stderr_write, self._mock_stderr_write)
+ self.assertEqual(configuration.min_confidence, 3)
+
+ def test_is_reportable(self):
+ """Test the is_reportable() method."""
+ config = self._style_checker_configuration()
+
+ self.assertTrue(config.is_reportable("whitespace/tab", 3, "foo.txt"))
+
+ # Test the confidence check code path by varying the confidence.
+ self.assertFalse(config.is_reportable("whitespace/tab", 2, "foo.txt"))
+
+ # Test the category check code path by varying the category.
+ self.assertFalse(config.is_reportable("whitespace/line", 4, "foo.txt"))
+
+ def _call_write_style_error(self, output_format):
+ config = self._style_checker_configuration(output_format=output_format)
+ config.write_style_error(category="whitespace/tab",
+ confidence_in_error=5,
+ file_path="foo.h",
+ line_number=100,
+ message="message")
+
+ def test_write_style_error_emacs(self):
+ """Test the write_style_error() method."""
+ self._call_write_style_error("emacs")
+ self.assertEqual(self._error_messages,
+ ["foo.h:100: message [whitespace/tab] [5]\n"])
+
+ def test_write_style_error_vs7(self):
+ """Test the write_style_error() method."""
+ self._call_write_style_error("vs7")
+ self.assertEqual(self._error_messages,
+ ["foo.h(100): message [whitespace/tab] [5]\n"])
+
+
+class StyleProcessor_EndToEndTest(LoggingTestCase):
+
+ """Test the StyleProcessor class with an emphasis on end-to-end tests."""
+
+ def setUp(self):
+ LoggingTestCase.setUp(self)
+ self._messages = []
+
+ def _mock_stderr_write(self, message):
+ """Save a message so it can later be asserted."""
+ self._messages.append(message)
+
+ def test_init(self):
+ """Test __init__ constructor."""
+ configuration = StyleProcessorConfiguration(
+ filter_configuration=FilterConfiguration(),
+ max_reports_per_category={},
+ min_confidence=3,
+ output_format="vs7",
+ stderr_write=self._mock_stderr_write)
+ processor = StyleProcessor(configuration)
+
+ self.assertEqual(processor.error_count, 0)
+ self.assertEqual(self._messages, [])
+
+ def test_process(self):
+ configuration = StyleProcessorConfiguration(
+ filter_configuration=FilterConfiguration(),
+ max_reports_per_category={},
+ min_confidence=3,
+ output_format="vs7",
+ stderr_write=self._mock_stderr_write)
+ processor = StyleProcessor(configuration)
+
+ processor.process(lines=['line1', 'Line with tab:\t'],
+ file_path='foo.txt')
+ self.assertEqual(processor.error_count, 1)
+ expected_messages = ['foo.txt(2): Line contains tab character. '
+ '[whitespace/tab] [5]\n']
+ self.assertEqual(self._messages, expected_messages)
+
+
+class StyleProcessor_CodeCoverageTest(LoggingTestCase):
+
+ """Test the StyleProcessor class with an emphasis on code coverage.
+
+ This class makes heavy use of mock objects.
+
+ """
+
+ class MockDispatchedChecker(object):
+
+ """A mock checker dispatched by the MockDispatcher."""
+
+ def __init__(self, file_path, min_confidence, style_error_handler):
+ self.file_path = file_path
+ self.min_confidence = min_confidence
+ self.style_error_handler = style_error_handler
+
+ def check(self, lines):
+ self.lines = lines
+
+ class MockDispatcher(object):
+
+ """A mock CheckerDispatcher class."""
+
+ def __init__(self):
+ self.dispatched_checker = None
+
+ def should_skip_with_warning(self, file_path):
+ return file_path.endswith('skip_with_warning.txt')
+
+ def should_skip_without_warning(self, file_path):
+ return file_path.endswith('skip_without_warning.txt')
+
+ def should_check_and_strip_carriage_returns(self, file_path):
+ return not file_path.endswith('carriage_returns_allowed.txt')
+
+ def dispatch(self, file_path, style_error_handler, min_confidence):
+ if file_path.endswith('do_not_process.txt'):
+ return None
+
+ checker = StyleProcessor_CodeCoverageTest.MockDispatchedChecker(
+ file_path,
+ min_confidence,
+ style_error_handler)
+
+ # Save the dispatched checker so the current test case has a
+ # way to access and check it.
+ self.dispatched_checker = checker
+
+ return checker
+
+ def setUp(self):
+ LoggingTestCase.setUp(self)
+ # We can pass an error-message swallower here because error message
+ # output is tested instead in the end-to-end test case above.
+ configuration = StyleProcessorConfiguration(
+ filter_configuration=FilterConfiguration(),
+ max_reports_per_category={"whitespace/newline": 1},
+ min_confidence=3,
+ output_format="vs7",
+ stderr_write=self._swallow_stderr_message)
+
+ mock_carriage_checker_class = self._create_carriage_checker_class()
+ mock_dispatcher = self.MockDispatcher()
+ # We do not need to use a real incrementer here because error-count
+ # incrementing is tested instead in the end-to-end test case above.
+ mock_increment_error_count = self._do_nothing
+
+ processor = StyleProcessor(configuration=configuration,
+ mock_carriage_checker_class=mock_carriage_checker_class,
+ mock_dispatcher=mock_dispatcher,
+ mock_increment_error_count=mock_increment_error_count)
+
+ self._configuration = configuration
+ self._mock_dispatcher = mock_dispatcher
+ self._processor = processor
+
+ def _do_nothing(self):
+ # We provide this function so the caller can pass it to the
+ # StyleProcessor constructor. This lets us assert the equality of
+ # the DefaultStyleErrorHandler instance generated by the process()
+ # method with an expected instance.
+ pass
+
+ def _swallow_stderr_message(self, message):
+ """Swallow a message passed to stderr.write()."""
+ # This is a mock stderr.write() for passing to the constructor
+ # of the StyleProcessorConfiguration class.
+ pass
+
+ def _create_carriage_checker_class(self):
+
+ # Create a reference to self with a new name so its name does not
+ # conflict with the self introduced below.
+ test_case = self
+
+ class MockCarriageChecker(object):
+
+ """A mock carriage-return checker."""
+
+ def __init__(self, style_error_handler):
+ self.style_error_handler = style_error_handler
+
+ # This gives the current test case access to the
+ # instantiated carriage checker.
+ test_case.carriage_checker = self
+
+ def check(self, lines):
+ # Save the lines so the current test case has a way to access
+ # and check them.
+ self.lines = lines
+
+ return lines
+
+ return MockCarriageChecker
+
+ def test_should_process__skip_without_warning(self):
+ """Test should_process() for a skip-without-warning file."""
+ file_path = "foo/skip_without_warning.txt"
+
+ self.assertFalse(self._processor.should_process(file_path))
+
+ def test_should_process__skip_with_warning(self):
+ """Test should_process() for a skip-with-warning file."""
+ file_path = "foo/skip_with_warning.txt"
+
+ self.assertFalse(self._processor.should_process(file_path))
+
+ self.assertLog(['WARNING: File exempt from style guide. '
+ 'Skipping: "foo/skip_with_warning.txt"\n'])
+
+ def test_should_process__true_result(self):
+ """Test should_process() for a file that should be processed."""
+ file_path = "foo/skip_process.txt"
+
+ self.assertTrue(self._processor.should_process(file_path))
+
+ def test_process__checker_dispatched(self):
+ """Test the process() method for a path with a dispatched checker."""
+ file_path = 'foo.txt'
+ lines = ['line1', 'line2']
+ line_numbers = [100]
+
+ expected_error_handler = DefaultStyleErrorHandler(
+ configuration=self._configuration,
+ file_path=file_path,
+ increment_error_count=self._do_nothing,
+ line_numbers=line_numbers)
+
+ self._processor.process(lines=lines,
+ file_path=file_path,
+ line_numbers=line_numbers)
+
+ # Check that the carriage-return checker was instantiated correctly
+ # and was passed lines correctly.
+ carriage_checker = self.carriage_checker
+ self.assertEqual(carriage_checker.style_error_handler,
+ expected_error_handler)
+ self.assertEqual(carriage_checker.lines, ['line1', 'line2'])
+
+ # Check that the style checker was dispatched correctly and was
+ # passed lines correctly.
+ checker = self._mock_dispatcher.dispatched_checker
+ self.assertEqual(checker.file_path, 'foo.txt')
+ self.assertEqual(checker.min_confidence, 3)
+ self.assertEqual(checker.style_error_handler, expected_error_handler)
+
+ self.assertEqual(checker.lines, ['line1', 'line2'])
+
+ def test_process__no_checker_dispatched(self):
+ """Test the process() method for a path with no dispatched checker."""
+ path = os.path.join('foo', 'do_not_process.txt')
+ self.assertRaises(AssertionError, self._processor.process,
+ lines=['line1', 'line2'], file_path=path,
+ line_numbers=[100])
+
+ def test_process__carriage_returns_not_stripped(self):
+ """Test that carriage returns aren't stripped from files that are allowed to contain them."""
+ file_path = 'carriage_returns_allowed.txt'
+ lines = ['line1\r', 'line2\r']
+ line_numbers = [100]
+ self._processor.process(lines=lines,
+ file_path=file_path,
+ line_numbers=line_numbers)
+ # The carriage return checker should never have been invoked, and so
+ # should not have saved off any lines.
+ self.assertFalse(hasattr(self.carriage_checker, 'lines'))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/common.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/common.py
new file mode 100644
index 0000000..76aa956
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/common.py
@@ -0,0 +1,74 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Supports style checking not specific to any one file type."""
+
+
+# FIXME: Test this list in the same way that the list of CppChecker
+# categories is tested, for example by checking that all of its
+# elements appear in the unit tests. This should probably be done
+# after moving the relevant cpp_unittest.ErrorCollector code
+# into a shared location and refactoring appropriately.
+categories = set([
+ "whitespace/carriage_return",
+ "whitespace/tab"])
+
+
+class CarriageReturnChecker(object):
+
+ """Supports checking for and handling carriage returns."""
+
+ def __init__(self, handle_style_error):
+ self._handle_style_error = handle_style_error
+
+ def check(self, lines):
+ """Check for and strip trailing carriage returns from lines."""
+ for line_number in range(len(lines)):
+ if not lines[line_number].endswith("\r"):
+ continue
+
+ self._handle_style_error(line_number + 1, # Correct for offset.
+ "whitespace/carriage_return",
+ 1,
+ "One or more unexpected \\r (^M) found; "
+ "better to use only a \\n")
+
+ lines[line_number] = lines[line_number].rstrip("\r")
+
+ return lines
+
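+# For illustration, mirroring CarriageReturnCheckerTest in common_unittest.py:
+# check(["line1", "line2\r"]) reports one whitespace/carriage_return error on
+# line 2 and returns ["line1", "line2"].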
+
+class TabChecker(object):
+
+ """Supports checking for and handling tabs."""
+
+ def __init__(self, file_path, handle_style_error):
+ self.file_path = file_path
+ self.handle_style_error = handle_style_error
+
+ def check(self, lines):
+ # FIXME: share with cpp_style.
+ for line_number, line in enumerate(lines):
+ if "\t" in line:
+ self.handle_style_error(line_number + 1,
+ "whitespace/tab", 5,
+ "Line contains tab character.")
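+
+# For illustration, mirroring TabCheckerTest in common_unittest.py:
+# check(['line1', '\tline2', 'line3\t']) reports whitespace/tab errors on
+# lines 2 and 3.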
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/common_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/common_unittest.py
new file mode 100644
index 0000000..6f87dcd
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/common_unittest.py
@@ -0,0 +1,121 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for common.py."""
+
+import unittest
+
+from common import CarriageReturnChecker
+from common import TabChecker
+
+# FIXME: The unit tests for the cpp, text, and common checkers should
+# share supporting test code. This can include, for example, the
+# mock style error handling code and the code to check that all
+# of a checker's categories are covered by the unit tests.
+# Such shared code can be located in a shared test file, perhaps
+# even this file.
+class CarriageReturnCheckerTest(unittest.TestCase):
+
+ """Tests check_no_carriage_return()."""
+
+ _category = "whitespace/carriage_return"
+ _confidence = 1
+ _expected_message = ("One or more unexpected \\r (^M) found; "
+ "better to use only a \\n")
+
+ def setUp(self):
+ self._style_errors = [] # The list of accumulated style errors.
+
+ def _mock_style_error_handler(self, line_number, category, confidence,
+ message):
+ """Append the error information to the list of style errors."""
+ error = (line_number, category, confidence, message)
+ self._style_errors.append(error)
+
+ def assert_carriage_return(self, input_lines, expected_lines, error_lines):
+ """Process the given line and assert that the result is correct."""
+ handle_style_error = self._mock_style_error_handler
+
+ checker = CarriageReturnChecker(handle_style_error)
+ output_lines = checker.check(input_lines)
+
+ # Check both the return value and error messages.
+ self.assertEqual(output_lines, expected_lines)
+
+ expected_errors = [(line_number, self._category, self._confidence,
+ self._expected_message)
+ for line_number in error_lines]
+ self.assertEqual(self._style_errors, expected_errors)
+
+ def test_ends_with_carriage(self):
+ self.assert_carriage_return(["carriage return\r"],
+ ["carriage return"],
+ [1])
+
+ def test_ends_with_nothing(self):
+ self.assert_carriage_return(["no carriage return"],
+ ["no carriage return"],
+ [])
+
+ def test_ends_with_newline(self):
+ self.assert_carriage_return(["no carriage return\n"],
+ ["no carriage return\n"],
+ [])
+
+ def test_carriage_in_middle(self):
+ # The CarriageReturnChecker checks only the final character
+ # of each line.
+ self.assert_carriage_return(["carriage\r in a string"],
+ ["carriage\r in a string"],
+ [])
+
+ def test_multiple_errors(self):
+ self.assert_carriage_return(["line1", "line2\r", "line3\r"],
+ ["line1", "line2", "line3"],
+ [2, 3])
+
+
+class TabCheckerTest(unittest.TestCase):
+
+ """Tests for TabChecker."""
+
+ def assert_tab(self, input_lines, error_lines):
+ """Assert when the given lines contain tabs."""
+ self._error_lines = []
+
+ def style_error_handler(line_number, category, confidence, message):
+ self.assertEqual(category, 'whitespace/tab')
+ self.assertEqual(confidence, 5)
+ self.assertEqual(message, 'Line contains tab character.')
+ self._error_lines.append(line_number)
+
+ checker = TabChecker('', style_error_handler)
+ checker.check(input_lines)
+ self.assertEqual(self._error_lines, error_lines)
+
+ def test_notab(self):
+ self.assert_tab([''], [])
+ self.assert_tab(['foo', 'bar'], [])
+
+ def test_tab(self):
+ self.assert_tab(['\tfoo'], [1])
+ self.assert_tab(['line1', '\tline2', 'line3\t'], [2, 3])
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/cpp.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/cpp.py
new file mode 100644
index 0000000..fa27478
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/cpp.py
@@ -0,0 +1,4086 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2009, 2010, 2012 Google Inc. All rights reserved.
+# Copyright (C) 2009 Torch Mobile Inc.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This is the modified version of Google's cpplint. The original code is
+# http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py
+
+"""Support for check-webkit-style."""
+
+import math # for log
+import os
+import os.path
+import re
+import sre_compile
+import string
+import sys
+import unicodedata
+
+from webkitpy.common.memoized import memoized
+from webkitpy.common.system.filesystem import FileSystem
+
+# Headers that we consider STL headers.
+_STL_HEADERS = frozenset([
+ 'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception',
+ 'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set',
+ 'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'pair.h',
+ 'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack',
+ 'stl_alloc.h', 'stl_relops.h', 'type_traits.h',
+ 'utility', 'vector', 'vector.h',
+ ])
+
+
+# Non-STL C++ system headers.
+_CPP_HEADERS = frozenset([
+ 'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype',
+ 'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath',
+ 'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
+ 'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype',
+ 'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream',
+ 'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip',
+ 'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream.h',
+ 'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h',
+ 'numeric', 'ostream.h', 'parsestream.h', 'pfstream.h', 'PlotFile.h',
+ 'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h',
+ 'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
+ 'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string',
+ 'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray',
+ ])
+
+
+# Assertion macros. These are defined in base/logging.h and
+# testing/base/gunit.h. Note that the _M versions need to come first
+# for substring matching to work.
+_CHECK_MACROS = [
+ 'DCHECK', 'CHECK',
+ 'EXPECT_TRUE_M', 'EXPECT_TRUE',
+ 'ASSERT_TRUE_M', 'ASSERT_TRUE',
+ 'EXPECT_FALSE_M', 'EXPECT_FALSE',
+ 'ASSERT_FALSE_M', 'ASSERT_FALSE',
+ ]
+
+# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
+_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
+
+for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
+ ('>=', 'GE'), ('>', 'GT'),
+ ('<=', 'LE'), ('<', 'LT')]:
+ _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
+ _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
+ _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
+ _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
+ _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
+ _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
+
+for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
+ ('>=', 'LT'), ('>', 'LE'),
+ ('<=', 'GT'), ('<', 'GE')]:
+ _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
+ _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
+ _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
+ _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
+
+
+# These constants define types of headers for use with
+# _IncludeState.check_next_include_order().
+_CONFIG_HEADER = 0
+_PRIMARY_HEADER = 1
+_OTHER_HEADER = 2
+_MOC_HEADER = 3
+
+
+# The regexp compilation caching is inlined in all regexp functions for
+# performance reasons; factoring it out into a separate function turns out
+# to be noticeably expensive.
+_regexp_compile_cache = {}
+
+
+def match(pattern, s):
+ """Matches the string with the pattern, caching the compiled regexp."""
+    if pattern not in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].match(s)
+
+
+def search(pattern, s):
+ """Searches the string for the pattern, caching the compiled regexp."""
+    if pattern not in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].search(s)
+
+
+def sub(pattern, replacement, s):
+ """Substitutes occurrences of a pattern, caching the compiled regexp."""
+    if pattern not in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].sub(replacement, s)
+
+
+def subn(pattern, replacement, s):
+ """Substitutes occurrences of a pattern, caching the compiled regexp."""
+    if pattern not in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].subn(replacement, s)
+
+
+def iteratively_replace_matches_with_char(pattern, char_replacement, s):
+ """Returns the string with replacement done.
+
+ Every character in the match is replaced with char.
+ Due to the iterative nature, pattern should not match char or
+ there will be an infinite loop.
+
+ Example:
+      pattern = r'<[^<>]*>' # template parameters
+ char_replacement = '_'
+ s = 'A<B<C, D>>'
+ Returns 'A_________'
+
+ Args:
+ pattern: The regex to match.
+ char_replacement: The character to put in place of every
+ character of the match.
+ s: The string on which to do the replacements.
+
+ Returns:
+      The string with every character of each match replaced
+      by char_replacement.
+ """
+ while True:
+ matched = search(pattern, s)
+ if not matched:
+ return s
+ start_match_index = matched.start(0)
+ end_match_index = matched.end(0)
+ match_length = end_match_index - start_match_index
+ s = s[:start_match_index] + char_replacement * match_length + s[end_match_index:]
+
+
+def _find_in_lines(regex, lines, start_position, not_found_position):
+ """Does a find starting at start position and going forward until
+ a match is found.
+
+ Returns the position where the regex started.
+ """
+ current_row = start_position.row
+
+ # Start with the given row and trim off everything before what should be matched.
+ current_line = lines[start_position.row][start_position.column:]
+ starting_offset = start_position.column
+ while True:
+ found_match = search(regex, current_line)
+ if found_match:
+ return Position(current_row, starting_offset + found_match.start())
+
+ # A match was not found so continue forward.
+ current_row += 1
+ starting_offset = 0
+ if current_row >= len(lines):
+ return not_found_position
+ current_line = lines[current_row]
+
+
+def _rfind_in_lines(regex, lines, start_position, not_found_position):
+ """Does a reverse find starting at start position and going backwards until
+ a match is found.
+
+ Returns the position where the regex ended.
+ """
+    # Put the regex in a group and precede it with a greedy expression that
+ # matches anything to ensure that we get the last possible match in a line.
+ last_in_line_regex = r'.*(' + regex + ')'
+ current_row = start_position.row
+
+ # Start with the given row and trim off everything past what may be matched.
+ current_line = lines[start_position.row][:start_position.column]
+ while True:
+ found_match = match(last_in_line_regex, current_line)
+ if found_match:
+ return Position(current_row, found_match.end(1))
+
+ # A match was not found so continue backward.
+ current_row -= 1
+ if current_row < 0:
+ return not_found_position
+ current_line = lines[current_row]
+
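+# For illustration (a hypothetical call, using the Position class defined
+# later in this file): _find_in_lines(r'\)', ['foo(', 'bar)'],
+# Position(0, 0), None) returns Position(1, 3), the position where the
+# match starts.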
+
+def _convert_to_lower_with_underscores(text):
+ """Converts all text strings in camelCase or PascalCase to lowers with underscores."""
+
+ # First add underscores before any capital letter followed by a lower case letter
+ # as long as it is in a word.
+    # (This puts an underscore before Password but not before P and A in WPAPassword.)
+ text = sub(r'(?<=[A-Za-z0-9])([A-Z])(?=[a-z])', r'_\1', text)
+
+    # Next add underscores before capitals at the end of words if they are
+    # preceded by a lower case letter or number.
+ # (This puts an underscore before A in isA but not A in CBA).
+ text = sub(r'(?<=[a-z0-9])([A-Z])(?=\b)', r'_\1', text)
+
+    # Next add underscores when a capital letter is followed by a capital letter
+    # but is not preceded by one. (This puts an underscore before A in 'WordADay'.)
+ text = sub(r'(?<=[a-z0-9])([A-Z][A-Z_])', r'_\1', text)
+
+ return text.lower()
+
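+# For illustration, combining the cases described in the comments above:
+#   _convert_to_lower_with_underscores('WPAPassword') returns 'wpa_password'
+#   _convert_to_lower_with_underscores('isA') returns 'is_a'
+#   _convert_to_lower_with_underscores('WordADay') returns 'word_a_day'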
+
+
+def _create_acronym(text):
+ """Creates an acronym for the given text."""
+ # Removes all lower case letters except those starting words.
+ text = sub(r'(?<!\b)[a-z]', '', text)
+ return text.upper()
+
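+# For illustration (a hypothetical input): _create_acronym('fooBarBaz')
+# returns 'FBB', since only word-initial letters survive.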
+
+def up_to_unmatched_closing_paren(s):
+ """Splits a string into two parts up to first unmatched ')'.
+
+ Args:
+ s: a string which is a substring of line after '('
+ (e.g., "a == (b + c))").
+
+ Returns:
+ A pair of strings (prefix before first unmatched ')',
+ remainder of s after first unmatched ')'), e.g.,
+ up_to_unmatched_closing_paren("a == (b + c)) { ")
+ returns "a == (b + c)", " {".
+ Returns None, None if there is no unmatched ')'
+
+ """
+ i = 1
+ for pos, c in enumerate(s):
+ if c == '(':
+ i += 1
+ elif c == ')':
+ i -= 1
+ if i == 0:
+ return s[:pos], s[pos + 1:]
+ return None, None
+
+
+class _IncludeState(dict):
+ """Tracks line numbers for includes, and the order in which includes appear.
+
+ As a dict, an _IncludeState object serves as a mapping between include
+ filename and line number on which that file was included.
+
+ Call check_next_include_order() once for each header in the file, passing
+ in the type constants defined above. Calls in an illegal order will
+ raise an _IncludeError with an appropriate error message.
+
+ """
+ # self._section will move monotonically through this set. If it ever
+ # needs to move backwards, check_next_include_order will raise an error.
+ _INITIAL_SECTION = 0
+ _CONFIG_SECTION = 1
+ _PRIMARY_SECTION = 2
+ _OTHER_SECTION = 3
+
+ _TYPE_NAMES = {
+ _CONFIG_HEADER: 'WebCore config.h',
+ _PRIMARY_HEADER: 'header this file implements',
+ _OTHER_HEADER: 'other header',
+ _MOC_HEADER: 'moc file',
+ }
+ _SECTION_NAMES = {
+ _INITIAL_SECTION: "... nothing.",
+ _CONFIG_SECTION: "WebCore config.h.",
+ _PRIMARY_SECTION: 'a header this file implements.',
+ _OTHER_SECTION: 'other header.',
+ }
+
+ def __init__(self):
+ dict.__init__(self)
+ self._section = self._INITIAL_SECTION
+ self._visited_primary_section = False
+        self.header_types = {}
+
+ def visited_primary_section(self):
+ return self._visited_primary_section
+
+ def check_next_include_order(self, header_type, file_is_header, primary_header_exists):
+ """Returns a non-empty error message if the next header is out of order.
+
+ This function also updates the internal state to be ready to check
+ the next include.
+
+ Args:
+ header_type: One of the _XXX_HEADER constants defined above.
+            file_is_header: Whether the file that owns this _IncludeState is itself a header.
+            primary_header_exists: Whether a primary header for this file exists.
+
+ Returns:
+ The empty string if the header is in the right order, or an
+ error message describing what's wrong.
+
+ """
+ if header_type == _CONFIG_HEADER and file_is_header:
+ return 'Header file should not contain WebCore config.h.'
+ if header_type == _PRIMARY_HEADER and file_is_header:
+ return 'Header file should not contain itself.'
+ if header_type == _MOC_HEADER:
+ return ''
+
+ error_message = ''
+ if self._section != self._OTHER_SECTION:
+ before_error_message = ('Found %s before %s' %
+ (self._TYPE_NAMES[header_type],
+ self._SECTION_NAMES[self._section + 1]))
+ after_error_message = ('Found %s after %s' %
+ (self._TYPE_NAMES[header_type],
+ self._SECTION_NAMES[self._section]))
+
+ if header_type == _CONFIG_HEADER:
+ if self._section >= self._CONFIG_SECTION:
+ error_message = after_error_message
+ self._section = self._CONFIG_SECTION
+ elif header_type == _PRIMARY_HEADER:
+ if self._section >= self._PRIMARY_SECTION:
+ error_message = after_error_message
+ elif self._section < self._CONFIG_SECTION:
+ error_message = before_error_message
+ self._section = self._PRIMARY_SECTION
+ self._visited_primary_section = True
+ else:
+ assert header_type == _OTHER_HEADER
+ if not file_is_header and self._section < self._PRIMARY_SECTION:
+ if primary_header_exists:
+ error_message = before_error_message
+ self._section = self._OTHER_SECTION
+
+ return error_message
+
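+# Illustrative ordering implied by the sections above: in Foo.cpp, WebCore's
+# config.h should come first, then the primary header Foo.h, then any other
+# headers; moc headers are exempt from the ordering check.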
+
+class Position(object):
+ """Holds the position of something."""
+ def __init__(self, row, column):
+ self.row = row
+ self.column = column
+
+ def __str__(self):
+ return '(%s, %s)' % (self.row, self.column)
+
+ def __cmp__(self, other):
+ return self.row.__cmp__(other.row) or self.column.__cmp__(other.column)
+
+
+class Parameter(object):
+ """Information about one function parameter."""
+ def __init__(self, parameter, parameter_name_index, row):
+ self.type = parameter[:parameter_name_index].strip()
+ # Remove any initializers from the parameter name (e.g. int i = 5).
+ self.name = sub(r'=.*', '', parameter[parameter_name_index:]).strip()
+ self.row = row
+
+ @memoized
+ def lower_with_underscores_name(self):
+ """Returns the parameter name in the lower with underscores format."""
+ return _convert_to_lower_with_underscores(self.name)
+
+
+class SingleLineView(object):
+ """Converts multiple lines into a single line (with line breaks replaced by a
+ space) to allow for easier searching."""
+ def __init__(self, lines, start_position, end_position):
+ """Create a SingleLineView instance.
+
+ Args:
+ lines: a list of multiple lines to combine into a single line.
+ start_position: offset within lines of where to start the single line.
+ end_position: just after where to end (like a slice operation).
+ """
+ # Get the rows of interest.
+ trimmed_lines = lines[start_position.row:end_position.row + 1]
+
+ # Remove the columns on the last line that aren't included.
+ trimmed_lines[-1] = trimmed_lines[-1][:end_position.column]
+
+ # Remove the columns on the first line that aren't included.
+ trimmed_lines[0] = trimmed_lines[0][start_position.column:]
+
+ # Create a single line with all of the parameters.
+ self.single_line = ' '.join(trimmed_lines)
+
+ # Keep the row lengths, so we can calculate the original row number
+ # given a column in the single line (adding 1 due to the space added
+ # during the join).
+ self._row_lengths = [len(line) + 1 for line in trimmed_lines]
+ self._starting_row = start_position.row
+
+ def convert_column_to_row(self, single_line_column_number):
+ """Convert the column number from the single line into the original
+ line number.
+
+ Special cases:
+ * Columns in the added spaces are considered part of the previous line.
+    * Columns beyond the end of the line are considered part of the last
+      line in the view."""
+ total_columns = 0
+ row_offset = 0
+ while row_offset < len(self._row_lengths) - 1 and single_line_column_number >= total_columns + self._row_lengths[row_offset]:
+ total_columns += self._row_lengths[row_offset]
+ row_offset += 1
+ return self._starting_row + row_offset
+
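+# Hedged example (editorial sketch, not part of the original module):
+#   >>> view = SingleLineView(['foo(int a,', '    int b)'],
+#   ...                       Position(0, 4), Position(1, 9))
+#   >>> view.single_line
+#   'int a,     int b'
+#   >>> view.convert_column_to_row(7)   # maps back to the second source row
+#   1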
+
+def create_skeleton_parameters(all_parameters):
+ """Converts a parameter list to a skeleton version.
+
+    The skeleton only has one word for the type, one word for the parameter
+    name, and a comma after each parameter (and nowhere else). Everything in
+    the skeleton remains in the same columns as the original."""
+ all_simplifications = (
+ # Remove template parameters, function declaration parameters, etc.
+ r'(<[^<>]*?>)|(\([^\(\)]*?\))|(\{[^\{\}]*?\})',
+ # Remove all initializers.
+ r'=[^,]*',
+ # Remove :: and everything before it.
+ r'[^,]*::',
+ # Remove modifiers like &, *.
+ r'[&*]',
+ # Remove const modifiers.
+ r'\bconst\s+(?=[A-Za-z])',
+ # Remove numerical modifiers like long.
+ r'\b(unsigned|long|short)\s+(?=unsigned|long|short|int|char|double|float)')
+
+ skeleton_parameters = all_parameters
+ for simplification in all_simplifications:
+ skeleton_parameters = iteratively_replace_matches_with_char(simplification, ' ', skeleton_parameters)
+ # If there are any parameters, then add a , after the last one to
+ # make a regular pattern of a , following every parameter.
+ if skeleton_parameters.strip():
+ skeleton_parameters += ','
+ return skeleton_parameters
+
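+# Hedged example (editorial sketch; assumes, as the docstring above states,
+# that matches are blanked out in place so columns are preserved):
+#   >>> create_skeleton_parameters('const Vector<String>& names, int count = 0')
+#   '      Vector          names, int count    ,'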
+
+def find_parameter_name_index(skeleton_parameter):
+    """Determines where the parameter name starts given the skeleton parameter."""
+    # The last space in the skeleton parameter marks where the parameter name
+    # starts, unless everything before that space is blank, in which case the
+    # parameter has no name.
+ before_name_index = skeleton_parameter.rstrip().rfind(' ')
+ if before_name_index != -1 and skeleton_parameter[:before_name_index].strip():
+ return before_name_index + 1
+ return len(skeleton_parameter)
+
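+# Hedged examples (editorial sketch, not part of the original module):
+#   >>> find_parameter_name_index('int count')
+#   4
+#   >>> find_parameter_name_index(' int ')   # no name: falls through to len()
+#   5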
+
+def parameter_list(elided_lines, start_position, end_position):
+ """Generator for a function's parameters."""
+ # Create new positions that omit the outer parenthesis of the parameters.
+ start_position = Position(row=start_position.row, column=start_position.column + 1)
+ end_position = Position(row=end_position.row, column=end_position.column - 1)
+ single_line_view = SingleLineView(elided_lines, start_position, end_position)
+ skeleton_parameters = create_skeleton_parameters(single_line_view.single_line)
+ end_index = -1
+
+ while True:
+ # Find the end of the next parameter.
+ start_index = end_index + 1
+ end_index = skeleton_parameters.find(',', start_index)
+
+ # No comma means that all parameters have been parsed.
+ if end_index == -1:
+ return
+ row = single_line_view.convert_column_to_row(end_index)
+
+ # Parse the parameter into a type and parameter name.
+ skeleton_parameter = skeleton_parameters[start_index:end_index]
+ name_offset = find_parameter_name_index(skeleton_parameter)
+ parameter = single_line_view.single_line[start_index:end_index]
+ yield Parameter(parameter, name_offset, row)
+
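+# Hedged usage sketch (editorial note): for elided lines spanning
+# '(int count, const String& name)', the generator yields Parameter objects
+# whose (type, name) pairs are ('int', 'count') and ('const String&', 'name').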
+
+class _FunctionState(object):
+ """Tracks current function name and the number of lines in its body.
+
+ Attributes:
+ min_confidence: The minimum confidence level to use while checking style.
+
+ """
+
+ _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
+    _TEST_TRIGGER = 400  # about 60% more than _NORMAL_TRIGGER.
+
+ def __init__(self, min_confidence):
+ self.min_confidence = min_confidence
+ self.current_function = ''
+ self.in_a_function = False
+ self.lines_in_function = 0
+ # Make sure these will not be mistaken for real positions (even when a
+ # small amount is added to them).
+ self.body_start_position = Position(-1000, 0)
+ self.end_position = Position(-1000, 0)
+
+ def begin(self, function_name, function_name_start_position, body_start_position, end_position,
+ parameter_start_position, parameter_end_position, clean_lines):
+ """Start analyzing function body.
+
+ Args:
+ function_name: The name of the function being tracked.
+ function_name_start_position: Position in elided where the function name starts.
+ body_start_position: Position in elided of the { or the ; for a prototype.
+          end_position: Position in elided just after the final } (or the ; for a prototype).
+ parameter_start_position: Position in elided of the '(' for the parameters.
+ parameter_end_position: Position in elided just after the ')' for the parameters.
+ clean_lines: A CleansedLines instance containing the file.
+ """
+ self.in_a_function = True
+ self.lines_in_function = -1 # Don't count the open brace line.
+ self.current_function = function_name
+ self.function_name_start_position = function_name_start_position
+ self.body_start_position = body_start_position
+ self.end_position = end_position
+ self.is_declaration = clean_lines.elided[body_start_position.row][body_start_position.column] == ';'
+ self.parameter_start_position = parameter_start_position
+ self.parameter_end_position = parameter_end_position
+ self.is_pure = False
+ if self.is_declaration:
+ characters_after_parameters = SingleLineView(clean_lines.elided, parameter_end_position, body_start_position).single_line
+ self.is_pure = bool(match(r'\s*=\s*0\s*', characters_after_parameters))
+ self._clean_lines = clean_lines
+ self._parameter_list = None
+
+ def modifiers_and_return_type(self):
+ """Returns the modifiers and the return type."""
+ # Go backwards from where the function name is until we encounter one of several things:
+        # ';', '{', '}', an access specifier such as 'private:', or a '#'
+        # (preprocessor line); failing that, stop at Position(0, 0).
+ elided = self._clean_lines.elided
+ start_modifiers = _rfind_in_lines(r';|\{|\}|((private|public|protected):)|(#.*)',
+ elided, self.parameter_start_position, Position(0, 0))
+ return SingleLineView(elided, start_modifiers, self.function_name_start_position).single_line.strip()
+
+ def parameter_list(self):
+ if not self._parameter_list:
+ # Store the final result as a tuple since that is immutable.
+ self._parameter_list = tuple(parameter_list(self._clean_lines.elided, self.parameter_start_position, self.parameter_end_position))
+
+ return self._parameter_list
+
+ def count(self, line_number):
+ """Count line in current function body."""
+ if self.in_a_function and line_number >= self.body_start_position.row:
+ self.lines_in_function += 1
+
+ def check(self, error, line_number):
+ """Report if too many lines in function body.
+
+ Args:
+ error: The function to call with any errors found.
+ line_number: The number of the line to check.
+ """
+ if match(r'T(EST|est)', self.current_function):
+ base_trigger = self._TEST_TRIGGER
+ else:
+ base_trigger = self._NORMAL_TRIGGER
+ trigger = base_trigger * 2 ** self.min_confidence
+
+ if self.lines_in_function > trigger:
+ error_level = int(math.log(self.lines_in_function / base_trigger, 2))
+            # With the normal base trigger of 250: 250 => 0, 500 => 1,
+            # 1000 => 2, 2000 => 3, 4000 => 4, 8000 => 5, ...
+ if error_level > 5:
+ error_level = 5
+ error(line_number, 'readability/fn_size', error_level,
+ 'Small and focused functions are preferred:'
+ ' %s has %d non-comment lines'
+ ' (error triggered by exceeding %d lines).' % (
+ self.current_function, self.lines_in_function, trigger))
+
+ def end(self):
+ """Stop analyzing function body."""
+ self.in_a_function = False
+
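+# Hedged arithmetic sketch (editorial note): with min_confidence = 1 the
+# trigger doubles once, so an ordinary function is flagged above
+# 250 * 2**1 = 500 non-comment lines, and a TEST function above
+# 400 * 2**1 = 800.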
+
+class _IncludeError(Exception):
+ """Indicates a problem with the include order in a file."""
+ pass
+
+
+class FileInfo:
+ """Provides utility functions for filenames.
+
+ FileInfo provides easy access to the components of a file's path
+ relative to the project root.
+ """
+
+ def __init__(self, filename):
+ self._filename = filename
+
+ def full_name(self):
+ """Make Windows paths like Unix."""
+ return os.path.abspath(self._filename).replace('\\', '/')
+
+ def repository_name(self):
+ """Full name after removing the local path to the repository.
+
+ If we have a real absolute path name here we can try to do something smart:
+ detecting the root of the checkout and truncating /path/to/checkout from
+ the name so that we get header guards that don't include things like
+ "C:\Documents and Settings\..." or "/home/username/..." in them and thus
+ people on different computers who have checked the source out to different
+ locations won't see bogus errors.
+ """
+ fullname = self.full_name()
+
+ if os.path.exists(fullname):
+ project_dir = os.path.dirname(fullname)
+
+ if os.path.exists(os.path.join(project_dir, ".svn")):
+ # If there's a .svn file in the current directory, we
+ # recursively look up the directory tree for the top
+ # of the SVN checkout
+ root_dir = project_dir
+ one_up_dir = os.path.dirname(root_dir)
+ while os.path.exists(os.path.join(one_up_dir, ".svn")):
+ root_dir = os.path.dirname(root_dir)
+ one_up_dir = os.path.dirname(one_up_dir)
+
+ prefix = os.path.commonprefix([root_dir, project_dir])
+ return fullname[len(prefix) + 1:]
+
+ # Not SVN? Try to find a git top level directory by
+ # searching up from the current path.
+ root_dir = os.path.dirname(fullname)
+ while (root_dir != os.path.dirname(root_dir)
+ and not os.path.exists(os.path.join(root_dir, ".git"))):
+ root_dir = os.path.dirname(root_dir)
+ if os.path.exists(os.path.join(root_dir, ".git")):
+ prefix = os.path.commonprefix([root_dir, project_dir])
+ return fullname[len(prefix) + 1:]
+
+ # Don't know what to do; header guard warnings may be wrong...
+ return fullname
+
+ def split(self):
+ """Splits the file into the directory, basename, and extension.
+
+        For 'chrome/browser/browser.cpp', split() would
+        return ('chrome/browser', 'browser', '.cpp').
+
+ Returns:
+ A tuple of (directory, basename, extension).
+ """
+
+ googlename = self.repository_name()
+ project, rest = os.path.split(googlename)
+ return (project,) + os.path.splitext(rest)
+
+ def base_name(self):
+ """File base name - text after the final slash, before the final period."""
+ return self.split()[1]
+
+ def extension(self):
+ """File extension - text following the final period."""
+ return self.split()[2]
+
+ def no_extension(self):
+        """File path with the extension removed."""
+ return '/'.join(self.split()[0:2])
+
+ def is_source(self):
+ """File has a source file extension."""
+ return self.extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
+
+
+# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
+_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
+ r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
+# Matches strings. Escape codes should already be removed by ESCAPES.
+_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
+# Matches characters. Escape codes should already be removed by ESCAPES.
+_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
+# Matches multi-line C++ comments.
+# This RE is a little bit more complicated than one might expect, because we
+# also have to take care of the surrounding whitespace so that comments
+# inside statements are handled better.
+# The current rule is: we only clear spaces from both sides when we're at the
+# end of the line. Otherwise, we try to remove spaces from the right side;
+# if this doesn't work, we try the left side, but only if there's a
+# non-word character on the right.
+_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
+ r"""(\s*/\*.*\*/\s*$|
+ /\*.*\*/\s+|
+ \s+/\*.*\*/(?=\W)|
+ /\*.*\*/)""", re.VERBOSE)
+
+
+def is_cpp_string(line):
+    """Does the line end such that the next character is inside a string constant?
+
+    This function does not consider single-line or multi-line comments.
+
+    Args:
+      line: a partial line of code starting from column 0.
+
+ Returns:
+ True, if next character appended to 'line' is inside a
+ string constant.
+ """
+
+ line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
+ return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
+
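+# Hedged examples (editorial sketch, not part of the original module):
+#   >>> is_cpp_string('a = "foo')    # the next character lands in the string
+#   True
+#   >>> is_cpp_string('a = "foo"')   # the string was closed again
+#   False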
+
+def find_next_multi_line_comment_start(lines, line_index):
+ """Find the beginning marker for a multiline comment."""
+ while line_index < len(lines):
+ if lines[line_index].strip().startswith('/*'):
+ # Only return this marker if the comment goes beyond this line
+ if lines[line_index].strip().find('*/', 2) < 0:
+ return line_index
+ line_index += 1
+ return len(lines)
+
+
+def find_next_multi_line_comment_end(lines, line_index):
+ """We are inside a comment, find the end marker."""
+ while line_index < len(lines):
+ if lines[line_index].strip().endswith('*/'):
+ return line_index
+ line_index += 1
+ return len(lines)
+
+
+def remove_multi_line_comments_from_range(lines, begin, end):
+ """Clears a range of lines for multi-line comments."""
+ # Having // dummy comments makes the lines non-empty, so we will not get
+ # unnecessary blank line warnings later in the code.
+ for i in range(begin, end):
+ lines[i] = '// dummy'
+
+
+def remove_multi_line_comments(lines, error):
+ """Removes multiline (c-style) comments from lines."""
+ line_index = 0
+ while line_index < len(lines):
+ line_index_begin = find_next_multi_line_comment_start(lines, line_index)
+ if line_index_begin >= len(lines):
+ return
+ line_index_end = find_next_multi_line_comment_end(lines, line_index_begin)
+ if line_index_end >= len(lines):
+ error(line_index_begin + 1, 'readability/multiline_comment', 5,
+ 'Could not find end of multi-line comment')
+ return
+ remove_multi_line_comments_from_range(lines, line_index_begin, line_index_end + 1)
+ line_index = line_index_end + 1
+
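+# Hedged example (editorial note): for
+# lines = ['int a;', '/* start', 'middle', 'end */', 'int b;'],
+# remove_multi_line_comments rewrites rows 1-3 as '// dummy', so later
+# blank-line checks still see non-empty lines.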
+
+def cleanse_comments(line):
+ """Removes //-comments and single-line C-style /* */ comments.
+
+ Args:
+ line: A line of C++ source.
+
+ Returns:
+ The line with single-line comments removed.
+ """
+ comment_position = line.find('//')
+ if comment_position != -1 and not is_cpp_string(line[:comment_position]):
+ line = line[:comment_position]
+ # get rid of /* ... */
+ return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
+
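+# Hedged examples (editorial sketch, not part of the original module):
+#   >>> cleanse_comments('f(); // trailing')
+#   'f(); '
+#   >>> cleanse_comments('a /* note */ b')
+#   'a b'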
+
+class CleansedLines(object):
+ """Holds 3 copies of all lines with different preprocessing applied to them.
+
+ 1) elided member contains lines without strings and comments,
+ 2) lines member contains lines without comments, and
+ 3) raw member contains all the lines without processing.
+ All these three members are of <type 'list'>, and of the same length.
+ """
+
+ def __init__(self, lines):
+ self.elided = []
+ self.lines = []
+ self.raw_lines = lines
+ self._num_lines = len(lines)
+ for line_number in range(len(lines)):
+ self.lines.append(cleanse_comments(lines[line_number]))
+ elided = self.collapse_strings(lines[line_number])
+ self.elided.append(cleanse_comments(elided))
+
+ def num_lines(self):
+ """Returns the number of lines represented."""
+ return self._num_lines
+
+ @staticmethod
+ def collapse_strings(elided):
+ """Collapses strings and chars on a line to simple "" or '' blocks.
+
+ We nix strings first so we're not fooled by text like '"http://"'
+
+ Args:
+ elided: The line being processed.
+
+ Returns:
+ The line with collapsed strings.
+ """
+ if not _RE_PATTERN_INCLUDE.match(elided):
+ # Remove escaped characters first to make quote/single quote collapsing
+ # basic. Things that look like escaped characters shouldn't occur
+ # outside of strings and chars.
+ elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
+ elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
+ elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
+ return elided
+
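+# Hedged example (editorial note): given the line
+#     a = "hi"; c = 'x';
+# collapse_strings returns
+#     a = ""; c = '';
+# so later checks are not fooled by quoted text.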
+
+def close_expression(elided, position):
+ """If input points to ( or { or [, finds the position that closes it.
+
+ If elided[position.row][position.column] points to a '(' or '{' or '[',
+ finds the line_number/pos that correspond to the closing of the expression.
+
+ Args:
+ elided: A CleansedLines.elided instance containing the file.
+ position: The position of the opening item.
+
+ Returns:
+ The Position *past* the closing brace, or Position(len(elided), -1)
+ if we never find a close. Note we ignore strings and comments when matching.
+ """
+ line = elided[position.row]
+ start_character = line[position.column]
+ if start_character == '(':
+ enclosing_character_regex = r'[\(\)]'
+ elif start_character == '[':
+ enclosing_character_regex = r'[\[\]]'
+ elif start_character == '{':
+ enclosing_character_regex = r'[\{\}]'
+ else:
+ return Position(len(elided), -1)
+
+ current_column = position.column + 1
+ line_number = position.row
+ net_open = 1
+ for line in elided[position.row:]:
+ line = line[current_column:]
+
+ # Search the current line for opening and closing characters.
+ while True:
+ next_enclosing_character = search(enclosing_character_regex, line)
+ # No more on this line.
+ if not next_enclosing_character:
+ break
+ current_column += next_enclosing_character.end(0)
+ line = line[next_enclosing_character.end(0):]
+ if next_enclosing_character.group(0) == start_character:
+ net_open += 1
+ else:
+ net_open -= 1
+ if not net_open:
+ return Position(line_number, current_column)
+
+ # Proceed to the next line.
+ line_number += 1
+ current_column = 0
+
+ # The given item was not closed.
+ return Position(len(elided), -1)
+
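+# Hedged example (editorial sketch): for elided = ['f(a, (b + c))'] and the
+# opening '(' at Position(0, 1), close_expression returns Position(0, 13),
+# one past the matching ')'.
+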
+def check_for_copyright(lines, error):
+ """Logs an error if no Copyright message appears at the top of the file."""
+
+ # We'll say it should occur by line 10. Don't forget there's a
+ # dummy line at the front.
+ for line in xrange(1, min(len(lines), 11)):
+ if re.search(r'Copyright', lines[line], re.I):
+ break
+ else: # means no copyright line was found
+ error(0, 'legal/copyright', 5,
+ 'No copyright message found. '
+ 'You should have a line: "Copyright [year] <Copyright Owner>"')
+
+
+# TODO(jww) After the transition of Blink into the Chromium repo, this function
+# should be removed. This will strictly enforce Chromium-style header guards,
+# rather than allowing traditional WebKit header guards and Chromium-style
+# simultaneously.
+def get_legacy_header_guard_cpp_variable(filename):
+ """Returns the CPP variable that should be used as a header guard.
+
+ Args:
+ filename: The name of a C++ header file.
+
+ Returns:
+ The CPP variable that should be used as a header guard in the
+ named file.
+
+ """
+
+    # Restore the original filename in case the style checker was invoked
+    # from Emacs's flymake.
+ filename = re.sub(r'_flymake\.h$', '.h', filename)
+
+ standard_name = sub(r'[-.\s]', '_', os.path.basename(filename))
+
+ # Files under WTF typically have header guards that start with WTF_.
+ if '/wtf/' in filename:
+ special_name = "WTF_" + standard_name
+ else:
+ special_name = standard_name
+ return (special_name, standard_name)
+
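+# Hedged example (editorial note): for 'Source/wtf/Vector.h' this returns
+# ('WTF_Vector_h', 'Vector_h'); outside /wtf/ both tuple entries are the
+# plain basename-derived guard, e.g. ('Document_h', 'Document_h').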
+
+def get_header_guard_cpp_variable(filename):
+ """Returns the CPP variable that should be used as a header guard in Chromium-style.
+
+ Args:
+ filename: The name of a C++ header file.
+
+ Returns:
+ The CPP variable that should be used as a header guard in the
+ named file in Chromium-style.
+
+ """
+
+    # Restore the original filename in case the style checker was invoked
+    # from Emacs's flymake.
+ filename = re.sub(r'_flymake\.h$', '.h', filename)
+
+ # If it's a full path and starts with Source/, replace Source with blink
+ # since that will be the new style directory.
+ filename = sub(r'^Source\/', 'blink/', filename)
+
+ standard_name = sub(r'[-.\s\/]', '_', filename).upper() + '_'
+
+ return standard_name
+
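+# Hedged example (editorial note):
+# get_header_guard_cpp_variable('Source/core/dom/Document.h') returns
+# 'BLINK_CORE_DOM_DOCUMENT_H_'.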
+
+def check_for_header_guard(filename, lines, error):
+ """Checks that the file contains a header guard.
+
+    Logs an error if no #ifndef header guard is present, or if the guard
+    does not match one of the accepted styles (legacy WebKit or Chromium).
+
+ Args:
+ filename: The name of the C++ header file.
+ lines: An array of strings, each representing a line of the file.
+ error: The function to call with any errors found.
+ """
+
+ legacy_cpp_var = get_legacy_header_guard_cpp_variable(filename)
+ cpp_var = get_header_guard_cpp_variable(filename)
+
+ ifndef = None
+ ifndef_line_number = 0
+ define = None
+ for line_number, line in enumerate(lines):
+ line_split = line.split()
+ if len(line_split) >= 2:
+ # find the first occurrence of #ifndef and #define, save arg
+ if not ifndef and line_split[0] == '#ifndef':
+ # set ifndef to the header guard presented on the #ifndef line.
+ ifndef = line_split[1]
+ ifndef_line_number = line_number
+ if not define and line_split[0] == '#define':
+ define = line_split[1]
+ if define and ifndef:
+ break
+
+ if not ifndef or not define or ifndef != define:
+ error(0, 'build/header_guard', 5,
+ 'No #ifndef header guard found, suggested CPP variable is: %s' %
+ legacy_cpp_var[0])
+ return
+
+ # The guard should be File_h or, for Chromium style, BLINK_PATH_TO_FILE_H_.
+ if ifndef not in legacy_cpp_var and ifndef != cpp_var:
+ error(ifndef_line_number, 'build/header_guard', 5,
+ '#ifndef header guard has wrong style, please use: %s' % legacy_cpp_var[0])
+
+
+def check_for_unicode_replacement_characters(lines, error):
+ """Logs an error for each line containing Unicode replacement characters.
+
+ These indicate that either the file contained invalid UTF-8 (likely)
+ or Unicode replacement characters (which it shouldn't). Note that
+ it's possible for this to throw off line numbering if the invalid
+ UTF-8 occurred adjacent to a newline.
+
+ Args:
+ lines: An array of strings, each representing a line of the file.
+ error: The function to call with any errors found.
+ """
+ for line_number, line in enumerate(lines):
+ if u'\ufffd' in line:
+ error(line_number, 'readability/utf8', 5,
+ 'Line contains invalid UTF-8 (or Unicode replacement character).')
+
+
+def check_for_new_line_at_eof(lines, error):
+ """Logs an error if there is no newline char at the end of the file.
+
+ Args:
+ lines: An array of strings, each representing a line of the file.
+ error: The function to call with any errors found.
+ """
+
+ # The array lines() was created by adding two newlines to the
+ # original file (go figure), then splitting on \n.
+ # To verify that the file ends in \n, we just have to make sure the
+ # last-but-two element of lines() exists and is empty.
+ if len(lines) < 3 or lines[-2]:
+ error(len(lines) - 2, 'whitespace/ending_newline', 5,
+ 'Could not find a newline character at the end of the file.')
+
+
+def check_for_multiline_comments_and_strings(clean_lines, line_number, error):
+ """Logs an error if we see /* ... */ or "..." that extend past one line.
+
+ /* ... */ comments are legit inside macros, for one line.
+ Otherwise, we prefer // comments, so it's ok to warn about the
+ other. Likewise, it's ok for strings to extend across multiple
+ lines, as long as a line continuation character (backslash)
+ terminates each line. Although not currently prohibited by the C++
+ style guide, it's ugly and unnecessary. We don't do well with either
+ in this lint program, so we warn about both.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[line_number]
+
+ # Remove all \\ (escaped backslashes) from the line. They are OK, and the
+ # second (escaped) slash may trigger later \" detection erroneously.
+ line = line.replace('\\\\', '')
+
+ if line.count('/*') > line.count('*/'):
+ error(line_number, 'readability/multiline_comment', 5,
+ 'Complex multi-line /*...*/-style comment found. '
+ 'Lint may give bogus warnings. '
+ 'Consider replacing these with //-style comments, '
+ 'with #if 0...#endif, '
+ 'or with more clearly structured multi-line comments.')
+
+ if (line.count('"') - line.count('\\"')) % 2:
+ error(line_number, 'readability/multiline_string', 5,
+ 'Multi-line string ("...") found. This lint script doesn\'t '
+ 'do well with such strings, and may give bogus warnings. They\'re '
+              'ugly and unnecessary, and you should use concatenation instead.')
+
+
+_THREADING_LIST = (
+ ('asctime(', 'asctime_r('),
+ ('ctime(', 'ctime_r('),
+ ('getgrgid(', 'getgrgid_r('),
+ ('getgrnam(', 'getgrnam_r('),
+ ('getlogin(', 'getlogin_r('),
+ ('getpwnam(', 'getpwnam_r('),
+ ('getpwuid(', 'getpwuid_r('),
+ ('gmtime(', 'gmtime_r('),
+ ('localtime(', 'localtime_r('),
+ ('rand(', 'rand_r('),
+ ('readdir(', 'readdir_r('),
+ ('strtok(', 'strtok_r('),
+ ('ttyname(', 'ttyname_r('),
+ )
+
+
+def check_posix_threading(clean_lines, line_number, error):
+ """Checks for calls to thread-unsafe functions.
+
+    Much code was originally written without consideration for
+    multi-threading, and many engineers learned POSIX before the threading
+    extensions were added. These checks guide engineers toward the
+    thread-safe variants (when using POSIX directly).
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[line_number]
+ for single_thread_function, multithread_safe_function in _THREADING_LIST:
+ index = line.find(single_thread_function)
+ # Comparisons made explicit for clarity
+ if index >= 0 and (index == 0 or (not line[index - 1].isalnum()
+ and line[index - 1] not in ('_', '.', '>'))):
+ error(line_number, 'runtime/threadsafe_fn', 2,
+ 'Consider using ' + multithread_safe_function +
+ '...) instead of ' + single_thread_function +
+ '...) for improved thread safety.')
+
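+# Hedged examples (editorial note): 'now = localtime(&t);' is flagged with a
+# suggestion to use localtime_r(...), while 'mylocaltime(&t)' is not, because
+# the character before the match must not be part of a longer identifier.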
+
+# Matches invalid increment: *count++, which moves the pointer instead of
+# incrementing the value.
+_RE_PATTERN_INVALID_INCREMENT = re.compile(
+ r'^\s*\*\w+(\+\+|--);')
+
+
+def check_invalid_increment(clean_lines, line_number, error):
+ """Checks for invalid increment *count++.
+
+    For example, the following function:
+ void increment_counter(int* count) {
+ *count++;
+ }
+    is invalid, because it effectively does count++ (moving the pointer) and
+    should be replaced with ++*count, (*count)++, or *count += 1.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[line_number]
+ if _RE_PATTERN_INVALID_INCREMENT.match(line):
+ error(line_number, 'runtime/invalid_increment', 5,
+ 'Changing pointer instead of value (or unused value of operator*).')
+
+
+class _ClassInfo(object):
+ """Stores information about a class."""
+
+ def __init__(self, name, line_number):
+ self.name = name
+ self.line_number = line_number
+ self.seen_open_brace = False
+ self.is_derived = False
+ self.virtual_method_line_number = None
+ self.has_virtual_destructor = False
+ self.brace_depth = 0
+ self.unsigned_bitfields = []
+ self.bool_bitfields = []
+
+
+class _ClassState(object):
+ """Holds the current state of the parse relating to class declarations.
+
+ It maintains a stack of _ClassInfos representing the parser's guess
+ as to the current nesting of class declarations. The innermost class
+ is at the top (back) of the stack. Typically, the stack will either
+ be empty or have exactly one entry.
+ """
+
+ def __init__(self):
+ self.classinfo_stack = []
+
+ def check_finished(self, error):
+ """Checks that all classes have been completely parsed.
+
+ Call this when all lines in a file have been processed.
+ Args:
+ error: The function to call with any errors found.
+ """
+ if self.classinfo_stack:
+ # Note: This test can result in false positives if #ifdef constructs
+ # get in the way of brace matching. See the testBuildClass test in
+ # cpp_style_unittest.py for an example of this.
+ error(self.classinfo_stack[0].line_number, 'build/class', 5,
+ 'Failed to find complete declaration of class %s' %
+ self.classinfo_stack[0].name)
+
+
+class _FileState(object):
+ def __init__(self, clean_lines, file_extension):
+ self._did_inside_namespace_indent_warning = False
+ self._clean_lines = clean_lines
+ if file_extension in ['m', 'mm']:
+ self._is_objective_c = True
+ self._is_c = False
+ elif file_extension == 'h':
+            # In the case of header files, it is unknown whether the file
+            # is C / Objective-C, so set this value to None and, if it is
+            # requested, use heuristics to guess the value.
+ self._is_objective_c = None
+ self._is_c = None
+ elif file_extension == 'c':
+ self._is_c = True
+ self._is_objective_c = False
+ else:
+ self._is_objective_c = False
+ self._is_c = False
+
+ def set_did_inside_namespace_indent_warning(self):
+ self._did_inside_namespace_indent_warning = True
+
+ def did_inside_namespace_indent_warning(self):
+ return self._did_inside_namespace_indent_warning
+
+ def is_objective_c(self):
+ if self._is_objective_c is None:
+ for line in self._clean_lines.elided:
+                # Starting with @ or #import seems like the best indication
+                # that we have an Objective-C file.
+ if line.startswith("@") or line.startswith("#import"):
+ self._is_objective_c = True
+ break
+ else:
+ self._is_objective_c = False
+ return self._is_objective_c
+
+ def is_c(self):
+ if self._is_c is None:
+ for line in self._clean_lines.lines:
+                # If extern "C" is found, then it is a good indication
+                # that we have a C header file.
+ if line.startswith('extern "C"'):
+ self._is_c = True
+ break
+ else:
+ self._is_c = False
+ return self._is_c
+
+ def is_c_or_objective_c(self):
+ """Return whether the file extension corresponds to C or Objective-C."""
+ return self.is_c() or self.is_objective_c()
+
+
+class _EnumState(object):
+ """Maintains whether currently in an enum declaration, and checks whether
+ enum declarations follow the style guide.
+ """
+
+ def __init__(self):
+ self.in_enum_decl = False
+ self.is_webidl_enum = False
+
+ def process_clean_line(self, line):
+ # FIXME: The regular expressions for expr_all_uppercase and expr_enum_end only accept integers
+ # and identifiers for the value of the enumerator, but do not accept any other constant
+ # expressions. However, this is sufficient for now (11/27/2012).
+ expr_all_uppercase = r'\s*[A-Z0-9_]+\s*(?:=\s*[a-zA-Z0-9]+\s*)?,?\s*$'
+ expr_starts_lowercase = r'\s*[a-z]'
+ expr_enum_end = r'}\s*(?:[a-zA-Z0-9]+\s*(?:=\s*[a-zA-Z0-9]+)?)?\s*;\s*'
+ expr_enum_start = r'\s*enum(?:\s+[a-zA-Z0-9]+)?\s*\{?\s*'
+ if self.in_enum_decl:
+ if match(r'\s*' + expr_enum_end + r'$', line):
+ self.in_enum_decl = False
+ self.is_webidl_enum = False
+ elif match(expr_all_uppercase, line):
+ return self.is_webidl_enum
+ elif match(expr_starts_lowercase, line):
+ return False
+ else:
+ matched = match(expr_enum_start + r'$', line)
+ if matched:
+ self.in_enum_decl = True
+ else:
+ matched = match(expr_enum_start + r'(?P<members>.*)' + expr_enum_end + r'$', line)
+ if matched:
+ members = matched.group('members').split(',')
+ found_invalid_member = False
+ for member in members:
+ if match(expr_all_uppercase, member):
+ found_invalid_member = not self.is_webidl_enum
+ if match(expr_starts_lowercase, member):
+ found_invalid_member = True
+ if found_invalid_member:
+ self.is_webidl_enum = False
+ return False
+ return True
+ return True
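+
+# Hedged example (editorial note): feeding 'enum Foo { FOO_ONE, FOO_TWO };'
+# to process_clean_line returns False while is_webidl_enum is unset, because
+# all-uppercase enumerators are rejected for ordinary C++ enums.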
+
+def check_for_non_standard_constructs(clean_lines, line_number,
+ class_state, error):
+ """Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
+
+ Complain about several constructs which gcc-2 accepts, but which are
+ not standard C++. Warning about these in lint is one way to ease the
+ transition to new compilers.
+ - put storage class first (e.g. "static const" instead of "const static").
+    - "%lld" instead of "%qd" in printf-type functions.
+ - "%1$d" is non-standard in printf-type functions.
+ - "\%" is an undefined character escape sequence.
+ - text after #endif is not allowed.
+ - invalid inner-style forward declaration.
+ - >? and <? operators, and their >?= and <?= cousins.
+ - classes with virtual methods need virtual destructors (compiler warning
+ available, but not turned on yet.)
+
+ Additionally, check for constructor/destructor style violations as it
+ is very convenient to do so while checking for gcc-2 compliance.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ class_state: A _ClassState instance which maintains information about
+ the current stack of nested class declarations being parsed.
+      error: A callable to which errors are reported, which takes parameters:
+             line number, category, confidence, and message.
+ """
+
+ # Remove comments from the line, but leave in strings for now.
+ line = clean_lines.lines[line_number]
+
+ if search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
+ error(line_number, 'runtime/printf_format', 3,
+ '%q in format strings is deprecated. Use %ll instead.')
+
+ if search(r'printf\s*\(.*".*%\d+\$', line):
+ error(line_number, 'runtime/printf_format', 2,
+ '%N$ formats are unconventional. Try rewriting to avoid them.')
+
+ # Remove escaped backslashes before looking for undefined escapes.
+ line = line.replace('\\\\', '')
+
+ if search(r'("|\').*\\(%|\[|\(|{)', line):
+ error(line_number, 'build/printf_format', 3,
+ '%, [, (, and { are undefined character escapes. Unescape them.')
+
+ # For the rest, work with both comments and strings removed.
+ line = clean_lines.elided[line_number]
+
+ if search(r'\b(const|volatile|void|char|short|int|long'
+ r'|float|double|signed|unsigned'
+ r'|schar|u?int8|u?int16|u?int32|u?int64)'
+ r'\s+(auto|register|static|extern|typedef)\b',
+ line):
+ error(line_number, 'build/storage_class', 5,
+ 'Storage class (static, extern, typedef, etc) should be first.')
+
+ if match(r'\s*#\s*endif\s*[^/\s]+', line):
+ error(line_number, 'build/endif_comment', 5,
+ 'Uncommented text after #endif is non-standard. Use a comment.')
+
+ if match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
+ error(line_number, 'build/forward_decl', 5,
+ 'Inner-style forward declarations are invalid. Remove this line.')
+
+ if search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line):
+ error(line_number, 'build/deprecated', 3,
+ '>? and <? (max and min) operators are non-standard and deprecated.')
+
+ # Track class entry and exit, and attempt to find cases within the
+ # class declaration that don't meet the C++ style
+ # guidelines. Tracking is very dependent on the code matching Google
+ # style guidelines, but it seems to perform well enough in testing
+ # to be a worthwhile addition to the checks.
+ classinfo_stack = class_state.classinfo_stack
+ # Look for a class declaration
+ class_decl_match = match(
+ r'\s*(template\s*<[\w\s<>,:]*>\s*)?(class|struct)\s+(\w+(::\w+)*)', line)
+ if class_decl_match:
+ classinfo_stack.append(_ClassInfo(class_decl_match.group(3), line_number))
+
+ # Everything else in this function uses the top of the stack if it's
+ # not empty.
+ if not classinfo_stack:
+ return
+
+ classinfo = classinfo_stack[-1]
+
+ # If the opening brace hasn't been seen look for it and also
+ # parent class declarations.
+ if not classinfo.seen_open_brace:
+ # If the line has a ';' in it, assume it's a forward declaration or
+ # a single-line class declaration, which we won't process.
+ if line.find(';') != -1:
+ classinfo_stack.pop()
+ return
+ classinfo.seen_open_brace = (line.find('{') != -1)
+ # Look for a bare ':'
+ if search('(^|[^:]):($|[^:])', line):
+ classinfo.is_derived = True
+ if not classinfo.seen_open_brace:
+ return # Everything else in this function is for after open brace
+
+ # The class may have been declared with namespace or classname qualifiers.
+ # The constructor and destructor will not have those qualifiers.
+ base_classname = classinfo.name.split('::')[-1]
+
+ # Look for single-argument constructors that aren't marked explicit.
+ # Technically a valid construct, but against style.
+ args = match(r'(?<!explicit)\s+%s\s*\(([^,()]+)\)'
+ % re.escape(base_classname),
+ line)
+ if (args
+ and args.group(1) != 'void'
+ and not match(r'(const\s+)?%s\s*&' % re.escape(base_classname),
+ args.group(1).strip())):
+ error(line_number, 'runtime/explicit', 5,
+ 'Single-argument constructors should be marked explicit.')
+
+ # Look for methods declared virtual.
+ if search(r'\bvirtual\b', line):
+ classinfo.virtual_method_line_number = line_number
+ # Only look for a destructor declaration on the same line. It would
+ # be extremely unlikely for the destructor declaration to occupy
+ # more than one line.
+ if search(r'~%s\s*\(' % base_classname, line):
+ classinfo.has_virtual_destructor = True
+
+ # Look for class end.
+ brace_depth = classinfo.brace_depth
+ brace_depth = brace_depth + line.count('{') - line.count('}')
+ if brace_depth <= 0:
+ classinfo = classinfo_stack.pop()
+ # Try to detect missing virtual destructor declarations.
+ # For now, only warn if a non-derived class with virtual methods lacks
+ # a virtual destructor. This is to make it less likely that people will
+ # declare derived virtual destructors without declaring the base
+ # destructor virtual.
+ if ((classinfo.virtual_method_line_number is not None)
+ and (not classinfo.has_virtual_destructor)
+ and (not classinfo.is_derived)): # Only warn for base classes
+ error(classinfo.line_number, 'runtime/virtual', 4,
+ 'The class %s probably needs a virtual destructor due to '
+ 'having virtual method(s), one declared at line %d.'
+ % (classinfo.name, classinfo.virtual_method_line_number))
+ # Look for mixed bool and unsigned bitfields.
+ if (classinfo.bool_bitfields and classinfo.unsigned_bitfields):
+ bool_list = ', '.join(classinfo.bool_bitfields)
+ unsigned_list = ', '.join(classinfo.unsigned_bitfields)
+ error(classinfo.line_number, 'runtime/bitfields', 5,
+ 'The class %s contains mixed unsigned and bool bitfields, '
+ 'which will pack into separate words on the MSVC compiler.\n'
+ 'Bool bitfields are [%s].\nUnsigned bitfields are [%s].\n'
+ 'Consider converting bool bitfields to unsigned.'
+ % (classinfo.name, bool_list, unsigned_list))
+ else:
+ classinfo.brace_depth = brace_depth
+
+    well_typed_bitfield = False
+ # Look for bool <name> : 1 declarations.
+ args = search(r'\bbool\s+(\S*)\s*:\s*\d+\s*;', line)
+ if args:
+ classinfo.bool_bitfields.append('%d: %s' % (line_number, args.group(1)))
+        well_typed_bitfield = True
+
+ # Look for unsigned <name> : n declarations.
+ args = search(r'\bunsigned\s+(?:int\s+)?(\S+)\s*:\s*\d+\s*;', line)
+ if args:
+ classinfo.unsigned_bitfields.append('%d: %s' % (line_number, args.group(1)))
+        well_typed_bitfield = True
+
+ # Look for other bitfield declarations. We don't care about those in
+ # size-matching structs.
+ if not (well_typed_bitfield or classinfo.name.startswith('SameSizeAs') or
+ classinfo.name.startswith('Expected')):
+ args = match(r'\s*(\S+)\s+(\S+)\s*:\s*\d+\s*;', line)
+ if args:
+ error(line_number, 'runtime/bitfields', 4,
+ 'Member %s of class %s defined as a bitfield of type %s. '
+ 'Please declare all bitfields as unsigned.'
+ % (args.group(2), classinfo.name, args.group(1)))
+
+def check_spacing_for_function_call(line, line_number, error):
+ """Checks for the correctness of various spacing around function calls.
+
+ Args:
+ line: The text of the line to check.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # Since function calls often occur inside if/for/foreach/while/switch
+ # expressions - which have their own, more liberal conventions - we
+ # first see if we should be looking inside such an expression for a
+ # function call, to which we can apply more strict standards.
+ function_call = line # if there's no control flow construct, look at whole line
+ for pattern in (r'\bif\s*\((.*)\)\s*{',
+ r'\bfor\s*\((.*)\)\s*{',
+ r'\bforeach\s*\((.*)\)\s*{',
+ r'\bwhile\s*\((.*)\)\s*[{;]',
+ r'\bswitch\s*\((.*)\)\s*{'):
+ matched = search(pattern, line)
+ if matched:
+ function_call = matched.group(1) # look inside the parens for function calls
+ break
+
+ # Except in if/for/foreach/while/switch, there should never be space
+ # immediately inside parens (eg "f( 3, 4 )"). We make an exception
+ # for nested parens ( (a+b) + c ). Likewise, there should never be
+ # a space before a ( when it's a function argument. I assume it's a
+ # function argument when the char before the whitespace is legal in
+ # a function name (alnum + _) and we're not starting a macro. Also ignore
+    # pointers and references to arrays and functions because they're too
+    # tricky: we use a very simple way to recognize these:
+ # " (something)(maybe-something)" or
+ # " (something)(maybe-something," or
+ # " (something)[something]"
+ # Note that we assume the contents of [] to be short enough that
+ # they'll never need to wrap.
+ if ( # Ignore control structures.
+ not search(r'\b(if|for|foreach|while|switch|return|new|delete)\b', function_call)
+ # Ignore pointers/references to functions.
+ and not search(r' \([^)]+\)\([^)]*(\)|,$)', function_call)
+ # Ignore pointers/references to arrays.
+ and not search(r' \([^)]+\)\[[^\]]+\]', function_call)):
+ if search(r'\w\s*\([ \t](?!\s*\\$)', function_call): # a ( used for a fn call
+ error(line_number, 'whitespace/parens', 4,
+ 'Extra space after ( in function call')
+ elif search(r'\([ \t]+(?!(\s*\\)|\()', function_call):
+ error(line_number, 'whitespace/parens', 2,
+ 'Extra space after (')
+ if (search(r'\w\s+\(', function_call)
+ and not match(r'\s*(#|typedef)', function_call)):
+ error(line_number, 'whitespace/parens', 4,
+ 'Extra space before ( in function call')
+ # If the ) is followed only by a newline or a { + newline, assume it's
+ # part of a control statement (if/while/etc), and don't complain
+ if search(r'[^)\s]\s+\)(?!\s*$|{\s*$)', function_call):
+ error(line_number, 'whitespace/parens', 2,
+ 'Extra space before )')
+
+
+def is_blank_line(line):
+ """Returns true if the given line is blank.
+
+ We consider a line to be blank if the line is empty or consists of
+ only white spaces.
+
+ Args:
+ line: A line of a string.
+
+ Returns:
+ True, if the given line is blank.
+ """
+ return not line or line.isspace()
+
+
+def detect_functions(clean_lines, line_number, function_state, error):
+ """Finds where functions start and end.
+
+ Uses a simplistic algorithm assuming other style guidelines
+ (especially spacing) are followed.
+ Trivial bodies are unchecked, so constructors with huge initializer lists
+ may be missed.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ function_state: Current function name and lines in body so far.
+ error: The function to call with any errors found.
+ """
+ # Are we now past the end of a function?
+ if function_state.end_position.row + 1 == line_number:
+ function_state.end()
+
+ # If we're in a function, don't try to detect a new one.
+ if function_state.in_a_function:
+ return
+
+ lines = clean_lines.lines
+ line = lines[line_number]
+ raw = clean_lines.raw_lines
+ raw_line = raw[line_number]
+
+ # Lines ending with a \ indicate a macro. Don't try to check them.
+ if raw_line.endswith('\\'):
+ return
+
+ regexp = r'\s*(\w(\w|::|\*|\&|\s|<|>|,|~|(operator\s*(/|-|=|!|\+)+))*)\(' # decls * & space::name( ...
+ match_result = match(regexp, line)
+ if not match_result:
+ return
+
+ # If the name is all caps and underscores, figure it's a macro and
+ # ignore it, unless it's TEST or TEST_F.
+ function_name = match_result.group(1).split()[-1]
+ if function_name != 'TEST' and function_name != 'TEST_F' and match(r'[A-Z_]+$', function_name):
+ return
+
+ joined_line = ''
+ for start_line_number in xrange(line_number, clean_lines.num_lines()):
+ start_line = clean_lines.elided[start_line_number]
+ joined_line += ' ' + start_line.lstrip()
+ body_match = search(r'{|;', start_line)
+ if body_match:
+ body_start_position = Position(start_line_number, body_match.start(0))
+
+ # Replace template constructs with _ so that no spaces remain in the function name,
+ # while keeping the column numbers of other characters the same as "line".
+ line_with_no_templates = iteratively_replace_matches_with_char(r'<[^<>]*>', '_', line)
+ match_function = search(r'((\w|:|<|>|,|~|(operator\s*(/|-|=|!|\+)+))*)\(', line_with_no_templates)
+ if not match_function:
+ return # The '(' must have been inside of a template.
+
+ # Use the column numbers from the modified line to find the
+ # function name in the original line.
+ function = line[match_function.start(1):match_function.end(1)]
+ function_name_start_position = Position(line_number, match_function.start(1))
+
+ if match(r'TEST', function): # Handle TEST... macros
+ parameter_regexp = search(r'(\(.*\))', joined_line)
+ if parameter_regexp: # Ignore bad syntax
+ function += parameter_regexp.group(1)
+ else:
+ function += '()'
+
+ parameter_start_position = Position(line_number, match_function.end(1))
+ parameter_end_position = close_expression(clean_lines.elided, parameter_start_position)
+ if parameter_end_position.row == len(clean_lines.elided):
+ # No end was found.
+ return
+
+ if start_line[body_start_position.column] == ';':
+ end_position = Position(body_start_position.row, body_start_position.column + 1)
+ else:
+ end_position = close_expression(clean_lines.elided, body_start_position)
+
+ # Check for nonsensical positions. (This happens in test cases which check code snippets.)
+ if parameter_end_position > body_start_position:
+ return
+
+ function_state.begin(function, function_name_start_position, body_start_position, end_position,
+ parameter_start_position, parameter_end_position, clean_lines)
+ return
+
+ # No body for the function (or evidence of a non-function) was found.
+ error(line_number, 'readability/fn_size', 5,
+ 'Lint failed to find start of function body.')
+
+
+def check_for_function_lengths(clean_lines, line_number, function_state, error):
+    """Reports errors for long function bodies.
+
+ For an overview why this is done, see:
+ http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
+
+ Blank/comment lines are not counted so as to avoid encouraging the removal
+    of vertical space and comments just to get through a lint check.
+ NOLINT *on the last line of a function* disables this check.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ function_state: Current function name and lines in body so far.
+ error: The function to call with any errors found.
+ """
+ lines = clean_lines.lines
+ line = lines[line_number]
+ raw = clean_lines.raw_lines
+ raw_line = raw[line_number]
+
+ if function_state.end_position.row == line_number: # last line
+ if not search(r'\bNOLINT\b', raw_line):
+ function_state.check(error, line_number)
+ elif not match(r'^\s*$', line):
+ function_state.count(line_number) # Count non-blank/non-comment lines.
+
+
+def _check_parameter_name_against_text(parameter, text, error):
+ """Checks to see if the parameter name is contained within the text.
+
+ Return false if the check failed (i.e. an error was produced).
+ """
+
+ # Treat 'lower with underscores' as a canonical form because it is
+ # case insensitive while still retaining word breaks. (This ensures that
+    # 'elate' doesn't look like it is a duplicate of 'NateLate'.)
+ canonical_parameter_name = parameter.lower_with_underscores_name()
+
+    # Append "Object" to every word in the text to catch parameter names that
+    # did the same (but only when the parameter name is more than a single
+    # character, to avoid flagging 'b', which may be a fine name when used in
+    # an rgba function).
+ if len(canonical_parameter_name) > 1:
+ text = sub(r'(\w)\b', r'\1Object', text)
+ canonical_text = _convert_to_lower_with_underscores(text)
+
+ # Used to detect cases like ec for ExceptionCode.
+ acronym = _create_acronym(text).lower()
+ if canonical_text.find(canonical_parameter_name) != -1 or acronym.find(canonical_parameter_name) != -1:
+ error(parameter.row, 'readability/parameter_name', 5,
+ 'The parameter name "%s" adds no information, so it should be removed.' % parameter.name)
+ return False
+ return True
+
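+# Hedged example (editorial note): for a parameter named 'ec' whose type text
+# is 'ExceptionCode', the acronym check matches ('ec' for ExceptionCode), so
+# the name is reported as adding no information.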
+
+def check_function_definition_and_pass_ptr(type_text, row, location_description, error):
+    """Check that function definitions use Pass*Ptr instead of *Ptr.
+
+ Args:
+ type_text: A string containing the type. (For return values, it may contain more than the type.)
+ row: The row number of the type.
+ location_description: Used to indicate where the type is. This is either 'parameter' or 'return'.
+ error: The function to call with any errors found.
+ """
+    match_ref_or_own_ptr = r'(?=\W|^)(Ref|Own)Ptr(?=\W)'
+    exceptions = r'(?:&|\*|\*\s*=\s*0)$'
+ bad_type_usage = search(match_ref_or_own_ptr, type_text)
+ exception_usage = search(exceptions, type_text)
+ if not bad_type_usage or exception_usage:
+ return
+ type_name = bad_type_usage.group(0)
+ error(row, 'readability/pass_ptr', 5,
+ 'The %s type should use Pass%s instead of %s.' % (location_description, type_name, type_name))
+
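+# Hedged examples (editorial note): a type text of 'RefPtr<Node>' is flagged
+# with a suggestion to use PassRefPtr, while 'RefPtr<Node>&' and 'OwnPtr<Foo>*'
+# match the exceptions pattern and are left alone.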
+
+def check_function_definition(filename, file_extension, clean_lines, line_number, function_state, error):
+    """Check function definitions for style issues.
+
+ Specifically, check that parameter names in declarations add information.
+
+ Args:
+ filename: Filename of the file that is being processed.
+ file_extension: The current file extension, without the leading dot.
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ function_state: Current function name and lines in body so far.
+ error: The function to call with any errors found.
+ """
+ if line_number != function_state.body_start_position.row:
+ return
+
+ modifiers_and_return_type = function_state.modifiers_and_return_type()
+ if filename.find('/chromium/') != -1 and search(r'\bWEBKIT_EXPORT\b', modifiers_and_return_type):
+ if filename.find('/chromium/public/') == -1 and filename.find('/chromium/tests/') == -1 and filename.find('chromium/platform') == -1:
+ error(function_state.function_name_start_position.row, 'readability/webkit_export', 5,
+ 'WEBKIT_EXPORT should only appear in the chromium public (or tests) directory.')
+ elif not file_extension == "h":
+ error(function_state.function_name_start_position.row, 'readability/webkit_export', 5,
+ 'WEBKIT_EXPORT should only be used in header files.')
+ elif not function_state.is_declaration or search(r'\binline\b', modifiers_and_return_type):
+ error(function_state.function_name_start_position.row, 'readability/webkit_export', 5,
+ 'WEBKIT_EXPORT should not be used on a function with a body.')
+ elif function_state.is_pure:
+ error(function_state.function_name_start_position.row, 'readability/webkit_export', 5,
+ 'WEBKIT_EXPORT should not be used with a pure virtual function.')
+
+ check_function_definition_and_pass_ptr(modifiers_and_return_type, function_state.function_name_start_position.row, 'return', error)
+
+ parameter_list = function_state.parameter_list()
+ for parameter in parameter_list:
+ check_function_definition_and_pass_ptr(parameter.type, parameter.row, 'parameter', error)
+
+ # Do checks specific to function declarations and parameter names.
+ if not function_state.is_declaration or not parameter.name:
+ continue
+
+ # Check the parameter name against the function name for single parameter set functions.
+ if len(parameter_list) == 1 and match('set[A-Z]', function_state.current_function):
+ trimmed_function_name = function_state.current_function[len('set'):]
+ if not _check_parameter_name_against_text(parameter, trimmed_function_name, error):
+ continue # Since an error was noted for this name, move to the next parameter.
+
+ # Check the parameter name against the type.
+ if not _check_parameter_name_against_text(parameter, parameter.type, error):
+ continue # Since an error was noted for this name, move to the next parameter.
+
+
+def check_pass_ptr_usage(clean_lines, line_number, function_state, error):
+ """Check for proper usage of Pass*Ptr.
+
+ Currently this is limited to detecting declarations of Pass*Ptr
+ variables inside of functions.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ function_state: Current function name and lines in body so far.
+ error: The function to call with any errors found.
+ """
+ if not function_state.in_a_function:
+ return
+
+ lines = clean_lines.lines
+ line = lines[line_number]
+ if line_number > function_state.body_start_position.row:
+ matched_pass_ptr = match(r'^\s*Pass([A-Z][A-Za-z]*)Ptr<', line)
+ if matched_pass_ptr:
+ type_name = 'Pass%sPtr' % matched_pass_ptr.group(1)
+ error(line_number, 'readability/pass_ptr', 5,
+ 'Local variables should never be %s (see '
+ 'http://webkit.org/coding/RefPtr.html).' % type_name)
+
+
+def check_for_leaky_patterns(clean_lines, line_number, function_state, error):
+    """Check for constructs known to be leak prone.
+
+    Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ function_state: Current function name and lines in body so far.
+ error: The function to call with any errors found.
+ """
+ lines = clean_lines.lines
+ line = lines[line_number]
+
+ matched_get_dc = search(r'\b(?P<function_name>GetDC(Ex)?)\s*\(', line)
+ if matched_get_dc:
+ error(line_number, 'runtime/leaky_pattern', 5,
+ 'Use the class HWndDC instead of calling %s to avoid potential '
+ 'memory leaks.' % matched_get_dc.group('function_name'))
+
+ matched_create_dc = search(r'\b(?P<function_name>Create(Compatible)?DC)\s*\(', line)
+ matched_own_dc = search(r'\badoptPtr\b', line)
+ if matched_create_dc and not matched_own_dc:
+ error(line_number, 'runtime/leaky_pattern', 5,
+ 'Use adoptPtr and OwnPtr<HDC> when calling %s to avoid potential '
+ 'memory leaks.' % matched_create_dc.group('function_name'))
+
+
+def check_spacing(file_extension, clean_lines, line_number, error):
+ """Checks for the correctness of various spacing issues in the code.
+
+ Things we check for: spaces around operators, spaces after
+ if/for/while/switch, no spaces around parens in function calls, two
+ spaces between code and comment, don't start a block with a blank
+ line, don't end a function with a blank line, don't have too many
+ blank lines in a row.
+
+ Args:
+ file_extension: The current file extension, without the leading dot.
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ raw = clean_lines.raw_lines
+ line = raw[line_number]
+
+ # Before nixing comments, check if the line is blank for no good
+ # reason. This includes the first line after a block is opened, and
+ # blank lines at the end of a function (ie, right before a line like '}').
+ if is_blank_line(line):
+ elided = clean_lines.elided
+ previous_line = elided[line_number - 1]
+ previous_brace = previous_line.rfind('{')
+ # FIXME: Don't complain if line before blank line, and line after,
+ # both start with alnums and are indented the same amount.
+ # This ignores whitespace at the start of a namespace block
+ # because those are not usually indented.
+ if (previous_brace != -1 and previous_line[previous_brace:].find('}') == -1
+ and previous_line[:previous_brace].find('namespace') == -1):
+ # OK, we have a blank line at the start of a code block. Before we
+ # complain, we check if it is an exception to the rule: The previous
+ # non-empty line has the parameters of a function header that are indented
+            # 4 spaces (because they did not fit in an 80 column line when placed on
+ # the same line as the function name). We also check for the case where
+ # the previous line is indented 6 spaces, which may happen when the
+            # initializers of a constructor do not fit into an 80 column line.
+ exception = False
+ if match(r' {6}\w', previous_line): # Initializer list?
+ # We are looking for the opening column of initializer list, which
+ # should be indented 4 spaces to cause 6 space indentation afterwards.
+ search_position = line_number - 2
+ while (search_position >= 0
+ and match(r' {6}\w', elided[search_position])):
+ search_position -= 1
+ exception = (search_position >= 0
+ and elided[search_position][:5] == ' :')
+ else:
+ # Search for the function arguments or an initializer list. We use a
+                # simple heuristic here: if the line is indented 4 spaces and we have
+                # a closing paren without the opening paren, followed by an opening brace
+                # or colon (for initializer lists), we assume that it is the last line of
+ # a function header. If we have a colon indented 4 spaces, it is an
+ # initializer list.
+ exception = (match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
+ previous_line)
+ or match(r' {4}:', previous_line))
+
+ if not exception:
+ error(line_number, 'whitespace/blank_line', 2,
+ 'Blank line at the start of a code block. Is this needed?')
+ # This doesn't ignore whitespace at the end of a namespace block
+ # because that is too hard without pairing open/close braces;
+ # however, a special exception is made for namespace closing
+ # brackets which have a comment containing "namespace".
+ #
+ # Also, ignore blank lines at the end of a block in a long if-else
+ # chain, like this:
+ # if (condition1) {
+ # // Something followed by a blank line
+ #
+ # } else if (condition2) {
+ # // Something else
+ # }
+ if line_number + 1 < clean_lines.num_lines():
+ next_line = raw[line_number + 1]
+ if (next_line
+ and match(r'\s*}', next_line)
+ and next_line.find('namespace') == -1
+ and next_line.find('} else ') == -1):
+ error(line_number, 'whitespace/blank_line', 3,
+ 'Blank line at the end of a code block. Is this needed?')
+
+ # Next, we check for proper spacing with respect to comments.
+ comment_position = line.find('//')
+ if comment_position != -1:
+ # Check if the // may be in quotes. If so, ignore it
+ # Comparisons made explicit for clarity
+ if (line.count('"', 0, comment_position) - line.count('\\"', 0, comment_position)) % 2 == 0: # not in quotes
+ # Allow one space before end of line comment.
+ if (not match(r'^\s*$', line[:comment_position])
+ and (comment_position >= 1
+ and ((line[comment_position - 1] not in string.whitespace)
+ or (comment_position >= 2
+ and line[comment_position - 2] in string.whitespace)))):
+ error(line_number, 'whitespace/comments', 5,
+ 'One space before end of line comments')
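+ # Illustrative examples (assumed, in the spirit of this check):
+ # "int i; // comment" - ok: exactly one space before //
+ # "int i;// comment" - flagged: no space before //
+ # "int i;  // comment" - flagged: two spaces before //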
+ # There should always be a space between the // and the comment
+ commentend = comment_position + 2
+ if commentend < len(line) and not line[commentend] == ' ':
+ # but some lines are exceptions -- e.g. if they're big
+ # comment delimiters like:
+ # //----------------------------------------------------------
+ # or they begin with multiple slashes followed by a space:
+ # //////// Header comment
+ matched = (search(r'[=/-]{4,}\s*$', line[commentend:])
+ or search(r'^/+ ', line[commentend:]))
+ if not matched:
+ error(line_number, 'whitespace/comments', 4,
+ 'Should have a space between // and comment')
+
+ line = clean_lines.elided[line_number] # get rid of comments and strings
+
+ # Don't try to do spacing checks for operator methods
+ line = sub(r'operator(==|!=|<|<<|<=|>=|>>|>|\+=|-=|\*=|/=|%=|&=|\|=|^=|<<=|>>=|/)\(', 'operator\(', line)
+ # Don't try to do spacing checks for #include or #import statements at
+ # minimum because it messes up checks for spacing around /
+ if match(r'\s*#\s*(?:include|import)', line):
+ return
+ if search(r'[\w.]=[\w.]', line):
+ error(line_number, 'whitespace/operators', 4,
+ 'Missing spaces around =')
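+ # e.g. (illustrative): "i=0" and "a.b=c" are flagged, while "i == 0" and
+ # "i = 0" are not; #include lines were already skipped above.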
+
+ # FIXME: It's not ok to have spaces around binary operators like .
+
+ # You should always have whitespace around binary operators.
+ # Alas, we can't test < or > because they're legitimately used sans spaces
+ # (a->b, vector<int> a). The only time we can tell is a < with no >, and
+ # only if it's not template params list spilling into the next line.
+ matched = search(r'[^<>=!\s](==|!=|\+=|-=|\*=|/=|/|\|=|&=|<<=|>>=|<=|>=|\|\||\||&&|<<)[^<>=!\s]', line)
+ if not matched:
+ # Note that while it seems that the '<[^<]*' term in the following
+ # regexp could be simplified to '<.*', which would indeed match
+ # the same class of strings, the [^<] means that searching for the
+ # regexp takes linear rather than quadratic time.
+ if not search(r'<[^<]*,\s*$', line): # template params spill
+ matched = search(r'[^<>=!\s](<)[^<>=!\s]([^>]|->)*$', line)
+ if not matched:
+ # Regardless of template arguments or operator>>, \w should not
+ # follow >>.
+ matched = search(r'(>>)\w', line)
+ # If the line has no template arguments, >> is operator>>.
+ # FIXME: This doesn't handle line-breaks inside template arguments.
+ if not matched and not search(r'<', line):
+ matched = search(r'\w(>>)', line)
+
+ if matched:
+ error(line_number, 'whitespace/operators', 3,
+ 'Missing spaces around %s' % matched.group(1))
+
+ # There shouldn't be space around unary operators
+ matched = search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
+ if matched:
+ error(line_number, 'whitespace/operators', 4,
+ 'Extra space for operator %s' % matched.group(1))
+
+ # A pet peeve of mine: no spaces after an if, while, switch, or for
+ matched = search(r' (if\(|for\(|foreach\(|while\(|switch\()', line)
+ if matched:
+ error(line_number, 'whitespace/parens', 5,
+ 'Missing space before ( in %s' % matched.group(1))
+
+ # For if/for/foreach/while/switch, the left and right parens should be
+ # consistent about how many spaces are inside the parens, and
+ # there should either be zero or one spaces inside the parens.
+ # We don't want: "if ( foo)" or "if ( foo )".
+ # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
+ matched = search(r'\b(?P<statement>if|for|foreach|while|switch)\s*\((?P<remainder>.*)$', line)
+ if matched:
+ statement = matched.group('statement')
+ condition, rest = up_to_unmatched_closing_paren(matched.group('remainder'))
+ if condition is not None:
+ condition_match = search(r'(?P<leading>[ ]*)(?P<separator>.).*[^ ]+(?P<trailing>[ ]*)', condition)
+ if condition_match:
+ n_leading = len(condition_match.group('leading'))
+ n_trailing = len(condition_match.group('trailing'))
+ if n_leading != 0:
+ for_exception = statement == 'for' and condition.startswith(' ;')
+ if not for_exception:
+ error(line_number, 'whitespace/parens', 5,
+ 'Extra space after ( in %s' % statement)
+ if n_trailing != 0:
+ for_exception = statement == 'for' and condition.endswith('; ')
+ if not for_exception:
+ error(line_number, 'whitespace/parens', 5,
+ 'Extra space before ) in %s' % statement)
+
+ # Do not check for more than one command in macros
+ in_preprocessor_directive = match(r'\s*#', line)
+ if not in_preprocessor_directive and not match(r'((\s*{\s*}?)|(\s*;?))\s*\\?$', rest):
+ error(line_number, 'whitespace/parens', 4,
+ 'More than one command on the same line in %s' % statement)
+
+ # You should always have a space after a comma (either as fn arg or operator)
+ if search(r',[^\s]', line):
+ error(line_number, 'whitespace/comma', 3,
+ 'Missing space after ,')
+
+ matched = search(r'^\s*(?P<token1>[a-zA-Z0-9_\*&]+)\s\s+(?P<token2>[a-zA-Z0-9_\*&]+)', line)
+ if matched:
+ error(line_number, 'whitespace/declaration', 3,
+ 'Extra space between %s and %s' % (matched.group('token1'), matched.group('token2')))
+
+ if file_extension == 'cpp':
+ # C++ should have the & or * beside the type not the variable name.
+ matched = match(r'\s*\w+(?<!\breturn|\bdelete)\s+(?P<pointer_operator>\*|\&)\w+', line)
+ if matched:
+ error(line_number, 'whitespace/declaration', 3,
+ 'Declaration has space between type name and %s in %s' % (matched.group('pointer_operator'), matched.group(0).strip()))
+
+ elif file_extension == 'c':
+ # C Pointer declaration should have the * beside the variable not the type name.
+ matched = search(r'^\s*\w+\*\s+\w+', line)
+ if matched:
+ error(line_number, 'whitespace/declaration', 3,
+ 'Declaration has space between * and variable name in %s' % matched.group(0).strip())
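+ # Illustrative summary of the two declaration rules above (examples assumed):
+ # .cpp: "Foo *bar;" is flagged, "Foo* bar;" is expected.
+ # .c: "Foo* bar;" is flagged, "Foo *bar;" is expected.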
+
+ # Next we will look for issues with function calls.
+ check_spacing_for_function_call(line, line_number, error)
+
+ # Except after an opening paren, you should have spaces before your braces.
+ # And since you should never have braces at the beginning of a line, this is
+ # an easy test.
+ if search(r'[^ ({]{', line):
+ error(line_number, 'whitespace/braces', 5,
+ 'Missing space before {')
+
+ # Make sure '} else {' has spaces.
+ if search(r'}else', line):
+ error(line_number, 'whitespace/braces', 5,
+ 'Missing space before else')
+
+ # You shouldn't have spaces before your brackets, except maybe after
+ # 'delete []' or 'new char * []'.
+ if search(r'\w\s+\[', line) and not search(r'delete\s+\[', line):
+ error(line_number, 'whitespace/braces', 5,
+ 'Extra space before [')
+
+ # There should always be a single space in between braces on the same line.
+ if search(r'\{\}', line):
+ error(line_number, 'whitespace/braces', 5, 'Missing space inside { }.')
+ if search(r'\{\s\s+\}', line):
+ error(line_number, 'whitespace/braces', 5, 'Too many spaces inside { }.')
+
+ # You shouldn't have a space before a semicolon at the end of the line.
+ # There's a special case for "for" since the style guide allows space before
+ # the semicolon there.
+ if search(r':\s*;\s*$', line):
+ error(line_number, 'whitespace/semicolon', 5,
+ 'Semicolon defining empty statement. Use { } instead.')
+ elif search(r'^\s*;\s*$', line):
+ error(line_number, 'whitespace/semicolon', 5,
+ 'Line contains only semicolon. If this should be an empty statement, '
+ 'use { } instead.')
+ elif (search(r'\s+;\s*$', line) and not search(r'\bfor\b', line)):
+ error(line_number, 'whitespace/semicolon', 5,
+ 'Extra space before last semicolon. If this should be an empty '
+ 'statement, use { } instead.')
+ elif (search(r'\b(for|while)\s*\(.*\)\s*;\s*$', line)
+ and line.count('(') == line.count(')')
+ # Allow do {} while();
+ and not search(r'}\s*while', line)):
+ error(line_number, 'whitespace/semicolon', 5,
+ 'Semicolon defining empty statement for this loop. Use { } instead.')
+
+
+def get_previous_non_blank_line(clean_lines, line_number):
+ """Return the most recent non-blank line and its line number.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file contents.
+ line_number: The number of the line to check.
+
+ Returns:
+ A tuple with two elements. The first element is the contents of the last
+ non-blank line before the current line, or the empty string if this is the
+ first non-blank line. The second is the line number of that line, or -1
+ if this is the first non-blank line.
+ """
+
+ previous_line_number = line_number - 1
+ while previous_line_number >= 0:
+ previous_line = clean_lines.elided[previous_line_number]
+ if not is_blank_line(previous_line): # if not a blank line...
+ return (previous_line, previous_line_number)
+ previous_line_number -= 1
+ return ('', -1)
+
+
+def check_namespace_indentation(clean_lines, line_number, file_extension, file_state, error):
+ """Looks for indentation errors inside of namespaces.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ file_extension: The extension (dot not included) of the file.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
+ error: The function to call with any errors found.
+ """
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ namespace_match = match(r'(?P<namespace_indentation>\s*)namespace\s+\S+\s*{\s*$', line)
+ if not namespace_match:
+ return
+
+ current_indentation_level = len(namespace_match.group('namespace_indentation'))
+ if current_indentation_level > 0:
+ # Don't warn about an indented namespace if we already warned about indented code.
+ if not file_state.did_inside_namespace_indent_warning():
+ error(line_number, 'whitespace/indent', 4,
+ 'namespace should never be indented.')
+ return
+ looking_for_semicolon = False
+ line_offset = 0
+ in_preprocessor_directive = False
+ for current_line in clean_lines.elided[line_number + 1:]:
+ line_offset += 1
+ if not current_line.strip():
+ continue
+ if not current_indentation_level:
+ if not (in_preprocessor_directive or looking_for_semicolon):
+ if not match(r'\S', current_line) and not file_state.did_inside_namespace_indent_warning():
+ file_state.set_did_inside_namespace_indent_warning()
+ error(line_number + line_offset, 'whitespace/indent', 4,
+ 'Code inside a namespace should not be indented.')
+ if in_preprocessor_directive or (current_line.strip()[0] == '#'): # This takes care of preprocessor directive syntax.
+ in_preprocessor_directive = current_line[-1] == '\\'
+ else:
+ looking_for_semicolon = ((current_line.find(';') == -1) and (current_line.strip()[-1] != '}')) or (current_line[-1] == '\\')
+ else:
+ looking_for_semicolon = False # If we have a brace we may not need a semicolon.
+ current_indentation_level += current_line.count('{') - current_line.count('}')
+ if current_indentation_level < 0:
+ break
+
+
+def check_enum_casing(clean_lines, line_number, enum_state, error):
+ """Looks for incorrectly named enum values.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ enum_state: A _EnumState instance which maintains enum declaration state.
+ error: The function to call with any errors found.
+ """
+
+ enum_state.is_webidl_enum |= bool(match(r'\s*// Web(?:Kit)?IDL enum\s*$', clean_lines.raw_lines[line_number]))
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+ if not enum_state.process_clean_line(line):
+ error(line_number, 'readability/enum_casing', 4,
+ 'enum members should use InterCaps with an initial capital letter.')
+
+
+def check_directive_indentation(clean_lines, line_number, file_state, error):
+ """Looks for indentation of preprocessor directives.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
+ error: The function to call with any errors found.
+ """
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ indented_preprocessor_directives = match(r'\s+#', line)
+ if not indented_preprocessor_directives:
+ return
+
+ error(line_number, 'whitespace/indent', 4, 'preprocessor directives (e.g., #ifdef, #define, #import) should never be indented.')
+
+
+def get_initial_spaces_for_line(clean_line):
+ initial_spaces = 0
+ while initial_spaces < len(clean_line) and clean_line[initial_spaces] == ' ':
+ initial_spaces += 1
+ return initial_spaces
+
+
+def check_indentation_amount(clean_lines, line_number, error):
+ line = clean_lines.elided[line_number]
+ initial_spaces = get_initial_spaces_for_line(line)
+
+ if initial_spaces % 4:
+ error(line_number, 'whitespace/indent', 3,
+ 'Weird number of spaces at line-start. Are you using a 4-space indent?')
+ return
+
+ previous_line = get_previous_non_blank_line(clean_lines, line_number)[0]
+ if not previous_line.strip() or match(r'\s*\w+\s*:\s*$', previous_line) or previous_line[0] == '#':
+ return
+
+ previous_line_initial_spaces = get_initial_spaces_for_line(previous_line)
+ if initial_spaces > previous_line_initial_spaces + 4:
+ error(line_number, 'whitespace/indent', 3, 'When wrapping a line, only indent 4 spaces.')
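+ # e.g. (illustrative): an initial indent of 6 spaces triggers the first
+ # warning above; a continuation indented 12 spaces under a 4-space parent
+ # triggers the second.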
+
+
+def check_using_std(clean_lines, line_number, file_state, error):
+ """Looks for 'using std::foo;' statements which should be replaced with 'using namespace std;'.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
+ error: The function to call with any errors found.
+ """
+
+ # This check doesn't apply to C or Objective-C implementation files.
+ if file_state.is_c_or_objective_c():
+ return
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ using_std_match = match(r'\s*using\s+std::(?P<method_name>\S+)\s*;\s*$', line)
+ if not using_std_match:
+ return
+
+ method_name = using_std_match.group('method_name')
+ # Exception for the established idiom for swapping objects in generic code.
+ if method_name == 'swap':
+ return
+ error(line_number, 'build/using_std', 4,
+ "Use 'using namespace std;' instead of 'using std::%s;'." % method_name)
+
+
+def check_max_min_macros(clean_lines, line_number, file_state, error):
+ """Looks use of MAX() and MIN() macros that should be replaced with std::max() and std::min().
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
+ error: The function to call with any errors found.
+ """
+
+ # This check doesn't apply to C or Objective-C implementation files.
+ if file_state.is_c_or_objective_c():
+ return
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ max_min_macros_search = search(r'\b(?P<max_min_macro>(MAX|MIN))\s*\(', line)
+ if not max_min_macros_search:
+ return
+
+ max_min_macro = max_min_macros_search.group('max_min_macro')
+ max_min_macro_lower = max_min_macro.lower()
+ error(line_number, 'runtime/max_min_macros', 4,
+ 'Use std::%s() or std::%s<type>() instead of the %s() macro.'
+ % (max_min_macro_lower, max_min_macro_lower, max_min_macro))
+
+
+def check_ctype_functions(clean_lines, line_number, file_state, error):
+ """Looks for use of the standard functions in ctype.h and suggest they be replaced
+ by use of equivilent ones in <wtf/ASCIICType.h>?.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
+ error: The function to call with any errors found.
+ """
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ ctype_function_search = search(r'\b(?P<ctype_function>(isalnum|isalpha|isascii|isblank|iscntrl|isdigit|isgraph|islower|isprint|ispunct|isspace|isupper|isxdigit|toascii|tolower|toupper))\s*\(', line)
+ if not ctype_function_search:
+ return
+
+ ctype_function = ctype_function_search.group('ctype_function')
+ error(line_number, 'runtime/ctype_function', 4,
+ 'Use the equivalent function in <wtf/ASCIICType.h> instead of the %s() function.'
+ % (ctype_function))
+
+
+def check_switch_indentation(clean_lines, line_number, error):
+ """Looks for indentation errors inside of switch statements.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ switch_match = match(r'(?P<switch_indentation>\s*)switch\s*\(.+\)\s*{\s*$', line)
+ if not switch_match:
+ return
+
+ switch_indentation = switch_match.group('switch_indentation')
+ inner_indentation = switch_indentation + ' ' * 4
+ line_offset = 0
+ encountered_nested_switch = False
+
+ for current_line in clean_lines.elided[line_number + 1:]:
+ line_offset += 1
+
+ # Skip not only empty lines but also those with preprocessor directives.
+ if current_line.strip() == '' or current_line.startswith('#'):
+ continue
+
+ if match(r'\s*switch\s*\(.+\)\s*{\s*$', current_line):
+ # Complexity alarm - another switch statement nested inside the one
+ # that we're currently testing. We'll need to track the extent of
+ # that inner switch if the upcoming label tests are still supposed
+ # to work correctly. Let's not do that; instead, we'll finish
+ # checking this line, and then leave it like that. Assuming the
+ # indentation is done consistently (even if incorrectly), this will
+ # still catch all indentation issues in practice.
+ encountered_nested_switch = True
+
+ current_indentation_match = match(r'(?P<indentation>\s*)(?P<remaining_line>.*)$', current_line)
+ current_indentation = current_indentation_match.group('indentation')
+ remaining_line = current_indentation_match.group('remaining_line')
+
+ # End the check at the end of the switch statement.
+ if remaining_line.startswith('}') and current_indentation == switch_indentation:
+ break
+ # Case and default branches should not be indented. The regexp also
+ # catches single-line cases like "default: break;" but does not trigger
+ # on stuff like "Document::Foo();".
+ elif match(r'(default|case\s+.*)\s*:([^:].*)?$', remaining_line):
+ if current_indentation != switch_indentation:
+ error(line_number + line_offset, 'whitespace/indent', 4,
+ 'A case label should not be indented, but should line up with its switch statement.')
+ # Don't throw an error for multiple badly indented labels,
+ # one should be enough to figure out the problem.
+ break
+ # We ignore goto labels at the very beginning of a line.
+ elif match(r'\w+\s*:\s*$', remaining_line):
+ continue
+ # It's not a goto label, so check if it's indented at least as far as
+ # the switch statement plus one more level of indentation.
+ elif not current_indentation.startswith(inner_indentation):
+ error(line_number + line_offset, 'whitespace/indent', 4,
+ 'Non-label code inside switch statements should be indented.')
+ # Don't throw an error for multiple badly indented statements,
+ # one should be enough to figure out the problem.
+ break
+
+ if encountered_nested_switch:
+ break
+
+
+def check_braces(clean_lines, line_number, error):
+ """Looks for misplaced braces (e.g. at the end of line).
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ if match(r'\s*{\s*$', line):
+ # We allow an open brace to start a line in the case where someone
+ # is using braces for function definition or in a block to
+ # explicitly create a new scope, which is commonly used to control
+ # the lifetime of stack-allocated variables. We don't detect this
+ # perfectly: we just don't complain if the last non-whitespace
+ # character on the previous non-blank line is ';', ':', '{', '}',
+ # ')', or ') const' and doesn't begin with 'if|for|while|switch|else'.
+ # We also allow '#' for #endif and '=' for array initialization.
+ previous_line = get_previous_non_blank_line(clean_lines, line_number)[0]
+ if ((not search(r'[;:}{)=]\s*$|\)\s*((const|override|final)\s*)*\s*$', previous_line)
+ or search(r'\b(if|for|foreach|while|switch|else)\b', previous_line))
+ and previous_line.find('#') < 0):
+ error(line_number, 'whitespace/braces', 4,
+ 'This { should be at the end of the previous line')
+ elif (search(r'\)\s*(((const|override|final)\s*)*\s*)?{\s*$', line)
+ and line.count('(') == line.count(')')
+ and not search(r'\b(if|for|foreach|while|switch)\b', line)
+ and not match(r'\s+[A-Z_][A-Z_0-9]+\b', line)):
+ error(line_number, 'whitespace/braces', 4,
+ 'Place brace on its own line for function definitions.')
+
+ # An else clause should be on the same line as the preceding closing brace.
+ if match(r'\s*else\s*', line):
+ previous_line = get_previous_non_blank_line(clean_lines, line_number)[0]
+ if match(r'\s*}\s*$', previous_line):
+ error(line_number, 'whitespace/newline', 4,
+ 'An else should appear on the same line as the preceding }')
+
+ # Likewise, an else should never have the else clause on the same line
+ if search(r'\belse [^\s{]', line) and not search(r'\belse if\b', line):
+ error(line_number, 'whitespace/newline', 4,
+ 'Else clause should never be on same line as else (use 2 lines)')
+
+ # In the same way, a do/while should never be on one line
+ if match(r'\s*do [^\s{]', line):
+ error(line_number, 'whitespace/newline', 4,
+ 'do/while clauses should not be on a single line')
+
+ # Braces shouldn't be followed by a ; unless they're defining a struct
+ # or initializing an array.
+ # We can't tell in general, but we can for some common cases.
+ previous_line_number = line_number
+ while True:
+ (previous_line, previous_line_number) = get_previous_non_blank_line(clean_lines, previous_line_number)
+ if match(r'\s+{.*}\s*;', line) and not previous_line.count(';'):
+ line = previous_line + line
+ else:
+ break
+ if (search(r'{.*}\s*;', line)
+ and line.count('{') == line.count('}')
+ and not search(r'struct|class|enum|\s*=\s*{', line)):
+ error(line_number, 'readability/braces', 4,
+ "You don't need a ; after a }")
+
+
+def check_exit_statement_simplifications(clean_lines, line_number, error):
+ """Looks for else or else-if statements that should be written as an
+ if statement when the prior if concludes with a return, break, continue or
+ goto statement.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ else_match = match(r'(?P<else_indentation>\s*)(\}\s*)?else(\s+if\s*\(|(?P<else>\s*(\{\s*)?\Z))', line)
+ if not else_match:
+ return
+
+ else_indentation = else_match.group('else_indentation')
+ inner_indentation = else_indentation + ' ' * 4
+
+ previous_lines = clean_lines.elided[:line_number]
+ previous_lines.reverse()
+ line_offset = 0
+ encountered_exit_statement = False
+
+ for current_line in previous_lines:
+ line_offset -= 1
+
+ # Skip not only empty lines but also those with preprocessor directives
+ # and goto labels.
+ if current_line.strip() == '' or current_line.startswith('#') or match(r'\w+\s*:\s*$', current_line):
+ continue
+
+ # Skip lines with closing braces on the original indentation level.
+ # Even though the styleguide says they should be on the same line as
+ # the "else if" statement, we also want to check for instances where
+ # the current code does not comply with the coding style. Thus, ignore
+ # these lines and proceed to the line before that.
+ if current_line == else_indentation + '}':
+ continue
+
+ current_indentation_match = match(r'(?P<indentation>\s*)(?P<remaining_line>.*)$', current_line)
+ current_indentation = current_indentation_match.group('indentation')
+ remaining_line = current_indentation_match.group('remaining_line')
+
+ # As we're going up the lines, the first real statement to encounter
+ # has to be an exit statement (return, break, continue or goto) -
+ # otherwise, this check doesn't apply.
+ if not encountered_exit_statement:
+ # We only want to find exit statements if they are on exactly
+ # the same level of indentation as expected from the code inside
+ # the block. If the indentation doesn't strictly match then we
+ # might have a nested if or something, which must be ignored.
+ if current_indentation != inner_indentation:
+ break
+ if match(r'(return(\W+.*)|(break|continue)\s*;|goto\s*\w+;)$', remaining_line):
+ encountered_exit_statement = True
+ continue
+ break
+
+ # When code execution reaches this point, we've found an exit statement
+ # as last statement of the previous block. Now we only need to make
+ # sure that the block belongs to an "if", then we can throw an error.
+
+ # Skip lines with opening braces on the original indentation level,
+ # similar to the closing braces check above. ("if (condition)\n{")
+ if current_line == else_indentation + '{':
+ continue
+
+ # Skip everything that's further indented than our "else" or "else if".
+ if current_indentation.startswith(else_indentation) and current_indentation != else_indentation:
+ continue
+
+ # So we've got a line with same (or less) indentation. Is it an "if"?
+ # If yes: throw an error. If no: don't throw an error.
+ # Whatever the outcome, this is the end of our loop.
+ if match(r'if\s*\(', remaining_line):
+ if else_match.start('else') != -1:
+ error(line_number + line_offset, 'readability/control_flow', 4,
+ 'An else statement can be removed when the prior "if" '
+ 'concludes with a return, break, continue or goto statement.')
+ else:
+ error(line_number + line_offset, 'readability/control_flow', 4,
+ 'An else if statement should be written as an if statement '
+ 'when the prior "if" concludes with a return, break, '
+ 'continue or goto statement.')
+ break
+
+
+def replaceable_check(operator, macro, line):
+ """Determine whether a basic CHECK can be replaced with a more specific one.
+
+ For example suggest using CHECK_EQ instead of CHECK(a == b) and
+ similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE.
+
+ Args:
+ operator: The C++ operator used in the CHECK.
+ macro: The CHECK or EXPECT macro being called.
+ line: The current source line.
+
+ Returns:
+ True if the CHECK can be replaced with a more specific one.
+ """
+
+ # This matches decimal and hex integers, strings, and chars (in that order).
+ match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')'
+
+ # Expression to match two sides of the operator with something that
+ # looks like a literal, since CHECK(x == iterator) won't compile.
+ # This means we can't catch all the cases where a more specific
+ # CHECK is possible, but it's less annoying than dealing with
+ # extraneous warnings.
+ match_this = (r'\s*' + macro + r'\((\s*' +
+ match_constant + r'\s*' + operator + r'[^<>].*|'
+ r'.*[^<>]' + operator + r'\s*' + match_constant +
+ r'\s*\))')
+
+ # Don't complain about CHECK(x == NULL) or similar because
+ # CHECK_EQ(x, NULL) won't compile (requires a cast).
+ # Also, don't complain about more complex boolean expressions
+ # involving && or || such as CHECK(a == b || c == d).
+ return match(match_this, line) and not search(r'NULL|&&|\|\|', line)
+
+
+def check_check(clean_lines, line_number, error):
+ """Checks the use of CHECK and EXPECT macros.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # Decide the set of replacement macros that should be suggested
+ raw_lines = clean_lines.raw_lines
+ current_macro = ''
+ for macro in _CHECK_MACROS:
+ if raw_lines[line_number].find(macro) >= 0:
+ current_macro = macro
+ break
+ if not current_macro:
+ # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
+ return
+
+ line = clean_lines.elided[line_number] # get rid of comments and strings
+
+ # Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc.
+ for operator in ['==', '!=', '>=', '>', '<=', '<']:
+ if replaceable_check(operator, current_macro, line):
+ error(line_number, 'readability/check', 2,
+ 'Consider using %s instead of %s(a %s b)' % (
+ _CHECK_REPLACEMENT[current_macro][operator],
+ current_macro, operator))
+ break
+
+
+def check_for_comparisons_to_boolean(clean_lines, line_number, error):
+ # Get the line without comments and strings.
+ line = clean_lines.elided[line_number]
+
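+ # e.g. (illustrative): "if (ptr == nullptr)" and "if (x != false)" are
+ # flagged below; "if (!ptr)" and "if (x)" are the preferred forms.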
+ # Must include NULL here, as otherwise users will convert NULL to 0 and
+ # then we can't catch it, since it looks like a valid integer comparison.
+ if search(r'[=!]=\s*(NULL|nullptr|true|false)[^\w.]', line) or search(r'[^\w.](NULL|nullptr|true|false)\s*[=!]=', line):
+ if not search('LIKELY', line) and not search('UNLIKELY', line):
+ error(line_number, 'readability/comparison_to_boolean', 5,
+ 'Tests for true/false and null/non-null should be done without equality comparisons.')
+
+
+def check_for_null(clean_lines, line_number, file_state, error):
+ # This check doesn't apply to C or Objective-C implementation files.
+ if file_state.is_c_or_objective_c():
+ return
+
+ line = clean_lines.elided[line_number]
+
+ # Don't warn about NULL usage in g_*(). See Bug 32858 and 39372.
+ if search(r'\bg(_[a-z]+)+\b', line):
+ return
+
+ # Don't warn about NULL usage in gst_*(). See Bug 70498.
+ if search(r'\bgst(_[a-z]+)+\b', line):
+ return
+
+ # Don't warn about NULL usage in gdk_pixbuf_save_to_*{join,concat}(). See Bug 43090.
+ if search(r'\bgdk_pixbuf_save_to\w+\b', line):
+ return
+
+ # Don't warn about NULL usage in gtk_widget_style_get(), gtk_style_context_get_style(), or gtk_style_context_get(). See Bug 51758
+ if search(r'\bgtk_widget_style_get\(\w+\b', line) or search(r'\bgtk_style_context_get_style\(\w+\b', line) or search(r'\bgtk_style_context_get\(\w+\b', line):
+ return
+
+ # Don't warn about NULL usage in soup_server_new(). See Bug 77890.
+ if search(r'\bsoup_server_new\(\w+\b', line):
+ return
+
+ if search(r'\bNULL\b', line):
+ error(line_number, 'readability/null', 5, 'Use 0 instead of NULL.')
+ return
+
+ line = clean_lines.raw_lines[line_number]
+ # See if NULL occurs in any comments in the line. If the search for NULL using the raw line
+ # matches, then do the check with strings collapsed to avoid giving errors for
+ # NULLs occurring in strings.
+ if search(r'\bNULL\b', line) and search(r'\bNULL\b', CleansedLines.collapse_strings(line)):
+ error(line_number, 'readability/null', 4, 'Use 0 or null instead of NULL (even in *comments*).')
+
+
+def get_line_width(line):
+ """Determines the width of the line in column positions.
+
+ Args:
+ line: A string, which may be a Unicode string.
+
+ Returns:
+ The width of the line in column positions, accounting for Unicode
+ combining characters and wide characters.
+ """
+ if isinstance(line, unicode):
+ width = 0
+ for c in unicodedata.normalize('NFC', line):
+ if unicodedata.east_asian_width(c) in ('W', 'F'):
+ width += 2
+ elif not unicodedata.combining(c):
+ width += 1
+ return width
+ return len(line)
+
+
+def check_conditional_and_loop_bodies_for_brace_violations(clean_lines, line_number, error):
+ """Scans the bodies of conditionals and loops, and in particular
+ all the arms of conditionals, for violations in the use of braces.
+
+ Specifically:
+
+ (1) If an arm omits braces, then the following statement must be on one
+ physical line.
+ (2) If any arm uses braces, all arms must use them.
+
+ These checks are only done here if we find the start of an
+ 'if/for/foreach/while' statement, because this function fails fast
+ if it encounters constructs it doesn't understand. Checks
+ elsewhere validate other constraints, such as requiring '}' and
+ 'else' to be on the same line.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # We work with the elided lines. Comments have been removed, but line
+ # numbers are preserved, so we can still find situations where
+ # single-expression control clauses span multiple lines, or when a
+ # comment preceded the expression.
+ lines = clean_lines.elided
+ line = lines[line_number]
+
+ # Match control structures.
+ control_match = match(r'\s*(if|foreach|for|while)\s*\(', line)
+ if not control_match:
+ return
+
+ # Found the start of a conditional or loop.
+
+ # The following loop handles all potential arms of the control clause.
+ # The initial conditions are the following:
+ # - We start on the opening paren '(' of the condition, *unless* we are
+ # handling an 'else' block, in which case there is no condition.
+ # - In the latter case, we start at the position just beyond the 'else'
+ # token.
+ expect_conditional_expression = True
+ know_whether_using_braces = False
+ using_braces = False
+ search_for_else_clause = control_match.group(1) == "if"
+ current_pos = Position(line_number, control_match.end() - 1)
+
+ while True:
+ if expect_conditional_expression:
+ # Try to find the end of the conditional expression,
+ # potentially spanning multiple lines.
+ open_paren_pos = current_pos
+ close_paren_pos = close_expression(lines, open_paren_pos)
+ if close_paren_pos.column < 0:
+ return
+ current_pos = close_paren_pos
+
+ end_line_of_conditional = current_pos.row
+
+ # Find the start of the body.
+ current_pos = _find_in_lines(r'\S', lines, current_pos, None)
+ if not current_pos:
+ return
+
+ current_arm_uses_brace = False
+ if lines[current_pos.row][current_pos.column] == '{':
+ current_arm_uses_brace = True
+ if know_whether_using_braces:
+ if using_braces != current_arm_uses_brace:
+ error(current_pos.row, 'whitespace/braces', 4,
+ 'If one part of an if-else statement uses curly braces, the other part must too.')
+ return
+ know_whether_using_braces = True
+ using_braces = current_arm_uses_brace
+
+ if using_braces:
+ # Skip over the entire arm.
+ current_pos = close_expression(lines, current_pos)
+ if current_pos.column < 0:
+ return
+ else:
+ # Skip over the current expression.
+ current_line_number = current_pos.row
+ current_pos = _find_in_lines(r';', lines, current_pos, None)
+ if not current_pos:
+ return
+ # If the end of the expression is beyond the line just after
+ # the close parenthesis or control clause, we've found a
+ # single-expression arm that spans multiple lines. (We don't
+ # fire this error for expressions ending on the same line; that
+ # is a different error, handled elsewhere.)
+ if current_pos.row > 1 + end_line_of_conditional:
+ error(current_pos.row, 'whitespace/braces', 4,
+ 'A conditional or loop body must use braces if the statement is more than one line long.')
+ return
+ current_pos = Position(current_pos.row, 1 + current_pos.column)
+
+ # At this point current_pos points just past the end of the last
+ # arm. If we just handled the last control clause, we're done.
+ if not search_for_else_clause:
+ return
+
+ # Scan forward for the next non-whitespace character, and see
+ # whether we are continuing a conditional (with an 'else' or
+ # 'else if'), or are done.
+ current_pos = _find_in_lines(r'\S', lines, current_pos, None)
+ if not current_pos:
+ return
+ next_nonspace_string = lines[current_pos.row][current_pos.column:]
+ next_conditional = match(r'(else\s*if|else)', next_nonspace_string)
+ if not next_conditional:
+ # Done processing this 'if' and all arms.
+ return
+ if next_conditional.group(1) == "else if":
+ current_pos = _find_in_lines(r'\(', lines, current_pos, None)
+ else:
+ current_pos.column += 4 # skip 'else'
+ expect_conditional_expression = False
+ search_for_else_clause = False
+ # End while loop
+
+
+def check_style(clean_lines, line_number, file_extension, class_state, file_state, enum_state, error):
+ """Checks rules from the 'C++ style rules' section of cppguide.html.
+
+ Most of these rules are hard to test (naming, comment style), but we
+ do what we can. In particular we check for 4-space indents, line lengths,
+ tab usage, spaces inside code, etc.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ file_extension: The extension (without the dot) of the filename.
+ class_state: A _ClassState instance which maintains information about
+ the current stack of nested class declarations being parsed.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
+ enum_state: A _EnumState instance which maintains the current enum state.
+ error: The function to call with any errors found.
+ """
+
+ raw_lines = clean_lines.raw_lines
+ line = raw_lines[line_number]
+
+ if line.find('\t') != -1:
+ error(line_number, 'whitespace/tab', 1,
+ 'Tab found; better to use spaces')
+
+ cleansed_line = clean_lines.elided[line_number]
+ if line and line[-1].isspace():
+ error(line_number, 'whitespace/end_of_line', 4,
+ 'Line ends in whitespace. Consider deleting these extra spaces.')
+
+ if (cleansed_line.count(';') > 1
+ # for loops are allowed two ;'s (and may run over two lines).
+ and cleansed_line.find('for') == -1
+ and (get_previous_non_blank_line(clean_lines, line_number)[0].find('for') == -1
+ or get_previous_non_blank_line(clean_lines, line_number)[0].find(';') != -1)
+ # It's ok to have many commands in a switch case that fits in 1 line
+ and not ((cleansed_line.find('case ') != -1
+ or cleansed_line.find('default:') != -1)
+ and cleansed_line.find('break;') != -1)
+ # Also it's ok to have many commands in trivial single-line accessors in class definitions.
+ and not (match(r'.*\(.*\).*{.*.}', line)
+ and class_state.classinfo_stack
+ and line.count('{') == line.count('}'))
+ and not cleansed_line.startswith('#define ')
+ # It's ok to use WTF_MAKE_NONCOPYABLE and WTF_MAKE_FAST_ALLOCATED macros in 1 line
+ and not (cleansed_line.find("WTF_MAKE_NONCOPYABLE") != -1
+ and cleansed_line.find("WTF_MAKE_FAST_ALLOCATED") != -1)):
+ error(line_number, 'whitespace/newline', 4,
+ 'More than one command on the same line')
+
+ if cleansed_line.strip().endswith('||') or cleansed_line.strip().endswith('&&'):
+ error(line_number, 'whitespace/operators', 4,
+ 'Boolean expressions that span multiple lines should have their '
+ 'operators on the left side of the line instead of the right side.')
+
+ # Some more style checks
+ check_namespace_indentation(clean_lines, line_number, file_extension, file_state, error)
+ check_directive_indentation(clean_lines, line_number, file_state, error)
+ check_using_std(clean_lines, line_number, file_state, error)
+ check_max_min_macros(clean_lines, line_number, file_state, error)
+ check_ctype_functions(clean_lines, line_number, file_state, error)
+ check_switch_indentation(clean_lines, line_number, error)
+ check_braces(clean_lines, line_number, error)
+ check_exit_statement_simplifications(clean_lines, line_number, error)
+ check_spacing(file_extension, clean_lines, line_number, error)
+ check_check(clean_lines, line_number, error)
+ check_for_comparisons_to_boolean(clean_lines, line_number, error)
+ check_for_null(clean_lines, line_number, file_state, error)
+ check_indentation_amount(clean_lines, line_number, error)
+ check_enum_casing(clean_lines, line_number, enum_state, error)
+
+
+_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
+_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
+# Matches the first component of a filename delimited by -s and _s. That is:
+# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
+# _RE_FIRST_COMPONENT.match('foo.cpp').group(0) == 'foo'
+# _RE_FIRST_COMPONENT.match('foo-bar_baz.cpp').group(0) == 'foo'
+# _RE_FIRST_COMPONENT.match('foo_bar-baz.cpp').group(0) == 'foo'
+_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
+
+
+def _drop_common_suffixes(filename):
+ """Drops common suffixes like _test.cpp or -inl.h from filename.
+
+ For example:
+ >>> _drop_common_suffixes('foo/foo-inl.h')
+ 'foo/foo'
+ >>> _drop_common_suffixes('foo/bar/foo.cpp')
+ 'foo/bar/foo'
+ >>> _drop_common_suffixes('foo/foo_internal.h')
+ 'foo/foo'
+ >>> _drop_common_suffixes('foo/foo_unusualinternal.h')
+ 'foo/foo_unusualinternal'
+
+ Args:
+ filename: The input filename.
+
+ Returns:
+ The filename with the common suffix removed.
+ """
+ for suffix in ('test.cpp', 'regtest.cpp', 'unittest.cpp',
+ 'inl.h', 'impl.h', 'internal.h'):
+ if (filename.endswith(suffix) and len(filename) > len(suffix)
+ and filename[-len(suffix) - 1] in ('-', '_')):
+ return filename[:-len(suffix) - 1]
+ return os.path.splitext(filename)[0]
+
+
+def _classify_include(filename, include, is_system, include_state):
+ """Figures out what kind of header 'include' is.
+
+ Args:
+ filename: The current file cpp_style is running over.
+ include: The path to a #included file.
+ is_system: True if the #include used <> rather than "".
+ include_state: An _IncludeState instance in which the headers are inserted.
+
+ Returns:
+ One of the _XXX_HEADER constants.
+
+ For example (include_state omitted from these illustrations):
+ >>> _classify_include('foo.cpp', 'config.h', False)
+ _CONFIG_HEADER
+ >>> _classify_include('foo.cpp', 'foo.h', False)
+ _PRIMARY_HEADER
+ >>> _classify_include('foo.cpp', 'bar.h', False)
+ _OTHER_HEADER
+ """
+
+ # If it is a system header we know it is classified as _OTHER_HEADER.
+ if is_system and not include.startswith('public/'):
+ return _OTHER_HEADER
+
+ # If the include is named config.h then this is WebCore/config.h.
+ if include == "config.h":
+ return _CONFIG_HEADER
+
+ # There cannot be primary includes in header files themselves. Only an
+ # include that exactly matches the header filename will be flagged as
+ # primary, so that it triggers the "don't include yourself" check.
+ if filename.endswith('.h') and filename != include:
+ return _OTHER_HEADER
+
+ # Qt's moc files do not follow the naming and ordering rules, so they should be skipped
+ if include.startswith('moc_') and include.endswith('.cpp'):
+ return _MOC_HEADER
+
+ if include.endswith('.moc'):
+ return _MOC_HEADER
+
+ # If the target file basename starts with the include we're checking
+ # then we consider it the primary header.
+ target_base = FileInfo(filename).base_name()
+ include_base = FileInfo(include).base_name()
+
+ # If we haven't encountered a primary header, then be lenient in checking.
+ if not include_state.visited_primary_section():
+ if target_base.find(include_base) != -1:
+ return _PRIMARY_HEADER
+ # Qt private APIs use _p.h suffix.
+ if include_base.find(target_base) != -1 and include_base.endswith('_p'):
+ return _PRIMARY_HEADER
+
+ # If we already encountered a primary header, perform a strict comparison.
+ # In case the two filename bases are the same then the above lenient check
+ # probably was a false positive.
+ elif include_state.visited_primary_section() and target_base == include_base:
+ if include == "ResourceHandleWin.h":
+ # FIXME: Thus far, we've only seen one example of these, but if we
+ # start to see more, please consider generalizing this check
+ # somehow.
+ return _OTHER_HEADER
+ return _PRIMARY_HEADER
+
+ return _OTHER_HEADER
+
+
+def _does_primary_header_exist(filename):
+ """Return a primary header file name for a file, or empty string
+ if the file is not source file or primary header does not exist.
+ """
+ fileinfo = FileInfo(filename)
+ if not fileinfo.is_source():
+ return False
+ primary_header = fileinfo.no_extension() + ".h"
+ return os.path.isfile(primary_header)
+
+
+def check_include_line(filename, file_extension, clean_lines, line_number, include_state, error):
+ """Check rules that are applicable to #include lines.
+
+ Strings on #include lines are NOT removed from elided line, to make
+ certain tasks easier. However, to prevent false positives, checks
+ applicable to #include lines in CheckLanguage must be put here.
+
+ Args:
+ filename: The name of the current file.
+ file_extension: The current file extension, without the leading dot.
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ include_state: An _IncludeState instance in which the headers are inserted.
+ error: The function to call with any errors found.
+ """
+ # FIXME: For readability or as a possible optimization, consider
+ # exiting early here by checking whether the "build/include"
+ # category should be checked for the given filename. This
+ # may involve having the error handler classes expose a
+ # should_check() method, in addition to the usual __call__
+ # method.
+ line = clean_lines.lines[line_number]
+
+ matched = _RE_PATTERN_INCLUDE.search(line)
+ if not matched:
+ return
+
+ include = matched.group(2)
+ is_system = (matched.group(1) == '<')
+
+ # Look for any of the stream classes that are part of standard C++.
+ if match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
+ error(line_number, 'readability/streams', 3,
+ 'Streams are highly discouraged.')
+
+ # Look for specific includes to fix.
+ if include.startswith('wtf/') and is_system:
+ error(line_number, 'build/include', 4,
+ 'wtf includes should be "wtf/file.h" instead of <wtf/file.h>.')
+
+ if filename.find('/chromium/') != -1 and include.startswith('cc/CC'):
+ error(line_number, 'build/include', 4,
+ 'cc includes should be "CCFoo.h" instead of "cc/CCFoo.h".')
+
+ duplicate_header = include in include_state
+ if duplicate_header:
+ error(line_number, 'build/include', 4,
+ '"%s" already included at %s:%s' %
+ (include, filename, include_state[include]))
+ else:
+ include_state[include] = line_number
+
+ header_type = _classify_include(filename, include, is_system, include_state)
+ primary_header_exists = _does_primary_header_exist(filename)
+ include_state.header_types[line_number] = header_type
+
+ # Only proceed if this isn't a duplicate header.
+ if duplicate_header:
+ return
+
+ # We want to ensure that headers appear in the right order:
+ # 1) for implementation files: config.h, primary header, blank line, alphabetically sorted
+ # 2) for header files: alphabetically sorted
+ # The include_state object keeps track of the last type seen
+ # and complains if the header types are out of order or missing.
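+ # e.g. for Foo.cpp (illustrative): config.h, then "Foo.h", a blank line,
+ # then the remaining headers sorted alphabetically.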
+ error_message = include_state.check_next_include_order(header_type,
+ file_extension == "h",
+ primary_header_exists)
+
+ # Check to make sure we have a blank line after primary header.
+ if not error_message and header_type == _PRIMARY_HEADER:
+ next_line = clean_lines.raw_lines[line_number + 1]
+ if not is_blank_line(next_line):
+ error(line_number, 'build/include_order', 4,
+ 'You should add a blank line after implementation file\'s own header.')
+
+ # Check to make sure all headers besides config.h and the primary header are
+ # alphabetically sorted. Skip Qt's moc files.
+ if not error_message and header_type == _OTHER_HEADER:
+ previous_line_number = line_number - 1
+ previous_line = clean_lines.lines[previous_line_number]
+ previous_match = _RE_PATTERN_INCLUDE.search(previous_line)
+ while (not previous_match and previous_line_number > 0
+ and not search(r'\A(#if|#ifdef|#ifndef|#else|#elif|#endif)', previous_line)):
+ previous_line_number -= 1
+ previous_line = clean_lines.lines[previous_line_number]
+ previous_match = _RE_PATTERN_INCLUDE.search(previous_line)
+ if previous_match:
+ previous_header_type = include_state.header_types[previous_line_number]
+ if previous_header_type == _OTHER_HEADER and previous_line.strip() > line.strip():
+ # This type of error is potentially a problem with this line or the previous one,
+ # so if the error is filtered for one line, report it for the next. This is so that
+ # we properly handle patches, for which only modified lines produce errors.
+ if not error(line_number - 1, 'build/include_order', 4, 'Alphabetical sorting problem.'):
+ error(line_number, 'build/include_order', 4, 'Alphabetical sorting problem.')
+
+ if error_message:
+ if file_extension == 'h':
+ error(line_number, 'build/include_order', 4,
+ '%s Should be: alphabetically sorted.' %
+ error_message)
+ else:
+ error(line_number, 'build/include_order', 4,
+ '%s Should be: config.h, primary header, blank line, and then alphabetically sorted.' %
+ error_message)
+
+
+def check_language(filename, clean_lines, line_number, file_extension, include_state,
+ file_state, error):
+ """Checks rules from the 'C++ language rules' section of cppguide.html.
+
+ Some of these rules are hard to test (function overloading, using
+ uint32 inappropriately), but we do the best we can.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ file_extension: The extension (without the dot) of the filename.
+ include_state: An _IncludeState instance in which the headers are inserted.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
+ error: The function to call with any errors found.
+ """
+ # If the line is empty or consists of entirely a comment, no need to
+ # check it.
+ line = clean_lines.elided[line_number]
+ if not line:
+ return
+
+ matched = _RE_PATTERN_INCLUDE.search(line)
+ if matched:
+ check_include_line(filename, file_extension, clean_lines, line_number, include_state, error)
+ return
+
+ # FIXME: figure out if they're using default arguments in fn proto.
+
+ # Check to see if they're using a conversion function cast.
+ # I just try to capture the most common basic types, though there are more.
+ # Parameterless conversion functions, such as bool(), are allowed as they are
+ # probably a member operator declaration or default constructor.
+ matched = search(
+ r'\b(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
+ if matched:
+ # gMock methods are defined using some variant of MOCK_METHODx(name, type)
+ # where type may be float(), int(string), etc. Without context they are
+ # virtually indistinguishable from int(x) casts.
+ if not match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line):
+ error(line_number, 'readability/casting', 4,
+ 'Using deprecated casting style. '
+ 'Use static_cast<%s>(...) instead' %
+ matched.group(1))
+
+ check_c_style_cast(line_number, line, clean_lines.raw_lines[line_number],
+ 'static_cast',
+ r'\((int|float|double|bool|char|u?int(16|32|64))\)',
+ error)
+ # This doesn't catch all cases. Consider (const char * const)"hello".
+ check_c_style_cast(line_number, line, clean_lines.raw_lines[line_number],
+ 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
+
+ # In addition, we look for people taking the address of a cast. This
+ # is dangerous -- casts can assign to temporaries, so the pointer doesn't
+ # point where you think.
+ if search(
+ r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line):
+ error(line_number, 'runtime/casting', 4,
+ ('Are you taking an address of a cast? '
+ 'This is dangerous: could be a temp var. '
+ 'Take the address before doing the cast, rather than after'))
+
+ # Check for people declaring static/global STL strings at the top level.
+ # This is dangerous because the C++ language does not guarantee that
+ # globals with constructors are initialized before the first access.
+ matched = match(
+ r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
+ line)
+ # Make sure it's not a function.
+ # Function template specialization looks like: "string foo<Type>(...".
+ # Class template definitions look like: "string Foo<Type>::Method(...".
+ if matched and not match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)',
+ matched.group(3)):
+ error(line_number, 'runtime/string', 4,
+ 'For a static/global string constant, use a C style string instead: '
+ '"%schar %s[]".' %
+ (matched.group(1), matched.group(2)))
+
+ # Check that we're not using RTTI outside of testing code.
+ if search(r'\bdynamic_cast<', line):
+ error(line_number, 'runtime/rtti', 5,
+ 'Do not use dynamic_cast<>. If you need to cast within a class '
+ "hierarchy, use static_cast<> to upcast. Google doesn't support "
+ 'RTTI.')
+
+ if search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
+ error(line_number, 'runtime/init', 4,
+ 'You seem to be initializing a member variable with itself.')
+
+ if file_extension == 'h':
+ # FIXME: check that 1-arg constructors are explicit.
+ # How to tell it's a constructor?
+ # (handled in check_for_non_standard_constructs for now)
+ pass
+
+ # Check if people are using the verboten C basic types. The only exception
+ # we regularly allow is "unsigned short port" for port.
+ if search(r'\bshort port\b', line):
+ if not search(r'\bunsigned short port\b', line):
+ error(line_number, 'runtime/int', 4,
+ 'Use "unsigned short" for ports, not "short"')
+
+ # When snprintf is used, the second argument shouldn't be a literal.
+ matched = search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
+ if matched:
+ error(line_number, 'runtime/printf', 3,
+ 'If you can, use sizeof(%s) instead of %s as the 2nd arg '
+ 'to snprintf.' % (matched.group(1), matched.group(2)))
+
+ # Check if some verboten C functions are being used.
+ if search(r'\bsprintf\b', line):
+ error(line_number, 'runtime/printf', 5,
+ 'Never use sprintf. Use snprintf instead.')
+ matched = search(r'\b(strcpy|strcat)\b', line)
+ if matched:
+ error(line_number, 'runtime/printf', 4,
+ 'Almost always, snprintf is better than %s' % matched.group(1))
+
+ if search(r'\bsscanf\b', line):
+ error(line_number, 'runtime/printf', 1,
+ 'sscanf can be ok, but is slow and can overflow buffers.')
+
+ # Check for suspicious usage of "if" like
+ # } if (a == b) {
+ if search(r'\}\s*if\s*\(', line):
+ error(line_number, 'readability/braces', 4,
+ 'Did you mean "else if"? If not, start a new line for "if".')
+
+ # Check for potential format string bugs like printf(foo).
+ # We constrain the pattern not to pick things like DocidForPrintf(foo).
+ # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
+ matched = re.search(r'\b((?:string)?printf)\s*\(([\w.\->()]+)\)', line, re.I)
+ if matched:
+ error(line_number, 'runtime/printf', 4,
+ 'Potential format string bug. Do %s("%%s", %s) instead.'
+ % (matched.group(1), matched.group(2)))
+
+ # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
+ matched = search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
+ if matched and not match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", matched.group(2)):
+ error(line_number, 'runtime/memset', 4,
+ 'Did you mean "memset(%s, 0, %s)"?'
+ % (matched.group(1), matched.group(2)))
+
+ # Detect variable-length arrays.
+ matched = match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
+ if (matched and matched.group(2) != 'return' and matched.group(2) != 'delete' and
+ matched.group(3).find(']') == -1):
+ # Split the size using space and arithmetic operators as delimiters.
+ # If any of the resulting tokens are not compile time constants then
+ # report the error.
+ tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', matched.group(3))
+ is_const = True
+ skip_next = False
+ for tok in tokens:
+ if skip_next:
+ skip_next = False
+ continue
+
+ if search(r'sizeof\(.+\)', tok):
+ continue
+ if search(r'arraysize\(\w+\)', tok):
+ continue
+
+ tok = tok.lstrip('(')
+ tok = tok.rstrip(')')
+ if not tok:
+ continue
+ if match(r'\d+', tok):
+ continue
+ if match(r'0[xX][0-9a-fA-F]+', tok):
+ continue
+ if match(r'k[A-Z0-9]\w*', tok):
+ continue
+ if match(r'(.+::)?k[A-Z0-9]\w*', tok):
+ continue
+ if match(r'(.+::)?[A-Z][A-Z0-9_]*', tok):
+ continue
+            # A catch-all for tricky sizeof cases, including 'sizeof expression',
+            # 'sizeof(*type)', 'sizeof(const type)', and 'sizeof(struct StructName)',
+            # which require skipping the next token because we split on ' ' and '*'.
+ if tok.startswith('sizeof'):
+ skip_next = True
+ continue
+ is_const = False
+ break
+ if not is_const:
+ error(line_number, 'runtime/arrays', 1,
+ 'Do not use variable-length arrays. Use an appropriately named '
+ "('k' followed by CamelCase) compile-time constant for the size.")
+
+ # Check for use of unnamed namespaces in header files. Registration
+ # macros are typically OK, so we allow use of "namespace {" on lines
+ # that end with backslashes.
+ if (file_extension == 'h'
+ and search(r'\bnamespace\s*{', line)
+ and line[-1] != '\\'):
+ error(line_number, 'build/namespaces', 4,
+ 'Do not use unnamed namespaces in header files. See '
+ 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
+ ' for more information.')
+
+    # Check for plain bitfields declared without either "signed" or "unsigned".
+ # Most compilers treat such bitfields as signed, but there are still compilers like
+ # RVCT 4.0 that use unsigned by default.
+ matched = re.match(r'\s*((const|mutable)\s+)?(char|(short(\s+int)?)|int|long(\s+(long|int))?)\s+[a-zA-Z_][a-zA-Z0-9_]*\s*:\s*\d+\s*;', line)
+ if matched:
+ error(line_number, 'runtime/bitfields', 5,
+ 'Please declare integral type bitfields with either signed or unsigned.')
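+    # e.g. "int m_isActive : 1;" is flagged, while "unsigned m_isActive : 1;"
+    # passes because the signedness is explicit.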
+
+ check_identifier_name_in_declaration(filename, line_number, line, file_state, error)
+
+ # Check for unsigned int (should be just 'unsigned')
+ if search(r'\bunsigned int\b', line):
+ error(line_number, 'runtime/unsigned', 1,
+ 'Omit int when using unsigned')
+
+ # Check for usage of static_cast<Classname*>.
+ check_for_object_static_cast(filename, line_number, line, error)
+
+
+def check_identifier_name_in_declaration(filename, line_number, line, file_state, error):
+ """Checks if identifier names contain any underscores.
+
+ As identifiers in libraries we are using have a bunch of
+ underscores, we only warn about the declarations of identifiers
+ and don't check use of identifiers.
+
+ Args:
+ filename: The name of the current file.
+ line_number: The number of the line to check.
+ line: The line of code to check.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
+ error: The function to call with any errors found.
+ """
+ # We don't check return and delete statements and conversion operator declarations.
+ if match(r'\s*(return|delete|operator)\b', line):
+ return
+
+ # Basically, a declaration is a type name followed by whitespaces
+ # followed by an identifier. The type name can be complicated
+ # due to type adjectives and templates. We remove them first to
+ # simplify the process to find declarations of identifiers.
+
+ # Convert "long long", "long double", and "long long int" to
+ # simple types, but don't remove simple "long".
+ line = sub(r'long (long )?(?=long|double|int)', '', line)
+ # Convert unsigned/signed types to simple types, too.
+ line = sub(r'(unsigned|signed) (?=char|short|int|long)', '', line)
+ line = sub(r'\b(inline|using|static|const|volatile|auto|register|extern|typedef|restrict|struct|class|virtual)(?=\W)', '', line)
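+    # e.g. "unsigned long long int a;" is reduced to "int a;" by the
+    # substitutions above, and "static const int b = 1;" to "  int b = 1;".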
+
+ # Remove "new" and "new (expr)" to simplify, too.
+ line = sub(r'new\s*(\([^)]*\))?', '', line)
+
+ # Remove all template parameters by removing matching < and >.
+ # Loop until no templates are removed to remove nested templates.
+ while True:
+ line, number_of_replacements = subn(r'<([\w\s:]|::)+\s*[*&]*\s*>', '', line)
+ if not number_of_replacements:
+ break
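+    # e.g. "Vector<RefPtr<Node> > nodes;" becomes "Vector nodes;" after two
+    # iterations of the loop above.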
+
+ # Declarations of local variables can be in condition expressions
+ # of control flow statements (e.g., "if (RenderObject* p = o->parent())").
+ # We remove the keywords and the first parenthesis.
+ #
+ # Declarations in "while", "if", and "switch" are different from
+ # other declarations in two aspects:
+ #
+ # - There can be only one declaration between the parentheses.
+ # (i.e., you cannot write "if (int i = 0, j = 1) {}")
+ # - The variable must be initialized.
+ # (i.e., you cannot write "if (int i) {}")
+ #
+ # and we will need different treatments for them.
+ line = sub(r'^\s*for\s*\(', '', line)
+ line, control_statement = subn(r'^\s*(while|else if|if|switch)\s*\(', '', line)
+
+ # Detect variable and functions.
+ type_regexp = r'\w([\w]|\s*[*&]\s*|::)+'
+ identifier_regexp = r'(?P<identifier>[\w:]+)'
+ maybe_bitfield_regexp = r'(:\s*\d+\s*)?'
+ character_after_identifier_regexp = r'(?P<character_after_identifier>[[;()=,])(?!=)'
+ declaration_without_type_regexp = r'\s*' + identifier_regexp + r'\s*' + maybe_bitfield_regexp + character_after_identifier_regexp
+ declaration_with_type_regexp = r'\s*' + type_regexp + r'\s' + declaration_without_type_regexp
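+    # e.g. for "RenderObject* renderer = object->parent();",
+    # declaration_with_type_regexp captures identifier "renderer" with
+    # character_after_identifier "=".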
+ is_function_arguments = False
+ number_of_identifiers = 0
+ while True:
+ # If we are seeing the first identifier or arguments of a
+ # function, there should be a type name before an identifier.
+ if not number_of_identifiers or is_function_arguments:
+ declaration_regexp = declaration_with_type_regexp
+ else:
+ declaration_regexp = declaration_without_type_regexp
+
+ matched = match(declaration_regexp, line)
+ if not matched:
+ return
+ identifier = matched.group('identifier')
+ character_after_identifier = matched.group('character_after_identifier')
+
+ # If we removed a non-for-control statement, the character after
+ # the identifier should be '='. With this rule, we can avoid
+ # warning for cases like "if (val & INT_MAX) {".
+ if control_statement and character_after_identifier != '=':
+ return
+
+ is_function_arguments = is_function_arguments or character_after_identifier == '('
+
+ # Remove "m_" and "s_" to allow them.
+ modified_identifier = sub(r'(^|(?<=::))[ms]_', '', identifier)
+ if not file_state.is_objective_c() and modified_identifier.find('_') >= 0:
+ # Various exceptions to the rule: JavaScript op codes functions, const_iterator.
+ if (not (filename.find('JavaScriptCore') >= 0 and modified_identifier.find('op_') >= 0)
+            and not (filename.find('gtk') >= 0 and modified_identifier.startswith('webkit_'))
+ and not modified_identifier.startswith('tst_')
+ and not modified_identifier.startswith('webkit_dom_object_')
+ and not modified_identifier.startswith('webkit_soup')
+ and not modified_identifier.startswith('NPN_')
+ and not modified_identifier.startswith('NPP_')
+ and not modified_identifier.startswith('NP_')
+ and not modified_identifier.startswith('qt_')
+ and not modified_identifier.startswith('_q_')
+ and not modified_identifier.startswith('cairo_')
+ and not modified_identifier.startswith('Ecore_')
+ and not modified_identifier.startswith('Eina_')
+ and not modified_identifier.startswith('Evas_')
+ and not modified_identifier.startswith('Ewk_')
+ and not modified_identifier.startswith('cti_')
+ and not modified_identifier.find('::qt_') >= 0
+ and not modified_identifier.find('::_q_') >= 0
+ and not modified_identifier == "const_iterator"
+ and not modified_identifier == "vm_throw"
+ and not modified_identifier == "DFG_OPERATION"):
+ error(line_number, 'readability/naming/underscores', 4, identifier + " is incorrectly named. Don't use underscores in your identifier names.")
+
+            # Check for variables named 'l'; these are too easy to confuse with '1' in some fonts.
+ if modified_identifier == 'l':
+ error(line_number, 'readability/naming', 4, identifier + " is incorrectly named. Don't use the single letter 'l' as an identifier name.")
+
+ # There can be only one declaration in non-for-control statements.
+ if control_statement:
+ return
+ # We should continue checking if this is a function
+ # declaration because we need to check its arguments.
+ # Also, we need to check multiple declarations.
+ if character_after_identifier != '(' and character_after_identifier != ',':
+ return
+
+ number_of_identifiers += 1
+ line = line[matched.end():]
+
+
+def check_for_toFoo_definition(filename, pattern, error):
+ """ Reports for using static_cast instead of toFoo convenience function.
+
+    This function will output warnings to make sure you are actually using
+    the added toFoo conversion functions rather than directly hard-coding
+    the static_cast<Classname*> call. For example, you should use
+    toHTMLElement(Node*) to convert Node* to HTMLElement*, instead of
+    static_cast<HTMLElement*>(Node*).
+
+ Args:
+ filename: The name of the header file in which to check for toFoo definition.
+ pattern: The conversion function pattern to grep for.
+ error: The function to call with any errors found.
+ """
+ def get_abs_filepath(filename):
+ fileSystem = FileSystem()
+ base_dir = fileSystem.path_to_module(FileSystem.__module__).split('WebKit', 1)[0]
+ base_dir = ''.join((base_dir, 'WebKit/Source'))
+ for root, dirs, names in os.walk(base_dir):
+ if filename in names:
+ return os.path.join(root, filename)
+ return None
+
+ def grep(lines, pattern, error):
+ matches = []
+ function_state = None
+ for line_number in xrange(lines.num_lines()):
+ line = (lines.elided[line_number]).rstrip()
+ try:
+ if pattern in line:
+ if not function_state:
+ function_state = _FunctionState(1)
+ detect_functions(lines, line_number, function_state, error)
+                    # Exclude the match of the dummy conversion function. The dummy
+                    # function exists just to catch invalid conversions and shouldn't
+                    # be part of the possible alternatives.
+ result = re.search(r'%s(\s+)%s' % ("void", pattern), line)
+ if not result:
+ matches.append([line, function_state.body_start_position.row, function_state.end_position.row + 1])
+ function_state = None
+ except UnicodeDecodeError:
+                # There should be no non-ASCII characters in the codebase; the
+                # only exception is comments/copyright text, which might contain
+                # them. Hence it is perfectly safe to catch the UnicodeDecodeError
+                # and just skip the line.
+ pass
+
+ return matches
+
+ def check_in_mock_header(filename, matches=None):
+        if filename != 'Foo.h':
+ return False
+
+ header_file = None
+ try:
+ header_file = CppChecker.fs.read_text_file(filename)
+ except IOError:
+ return False
+ line_number = 0
+        for line in header_file.splitlines():
+ line_number += 1
+ matched = re.search(r'\btoFoo\b', line)
+ if matched:
+ matches.append(['toFoo', line_number, line_number + 3])
+ return True
+
+ # For unit testing only, avoid header search and lookup locally.
+ matches = []
+ mock_def_found = check_in_mock_header(filename, matches)
+ if mock_def_found:
+ return matches
+
+ # Regular style check flow. Search for actual header file & defs.
+ file_path = get_abs_filepath(filename)
+ if not file_path:
+ return None
+    with open(file_path) as f:
+        clean_lines = CleansedLines(f.readlines())
+
+ # Make a list of all genuine alternatives to static_cast.
+ matches = grep(clean_lines, pattern, error)
+ return matches
+
+
+def check_for_object_static_cast(processing_file, line_number, line, error):
+ """Checks for a Cpp-style static cast on objects by looking for the pattern.
+
+ Args:
+ processing_file: The name of the processing file.
+ line_number: The number of the line to check.
+ line: The line of code to check.
+ error: The function to call with any errors found.
+ """
+ matched = search(r'\bstatic_cast<(\s*\w*:?:?\w+\s*\*+\s*)>', line)
+ if not matched:
+ return
+
+    class_name = re.sub(r'\*', '', matched.group(1))
+ class_name = class_name.strip()
+    # Ignore (for now) when the cast is to void*.
+ if class_name == 'void':
+ return
+
+ namespace_pos = class_name.find(':')
+    if namespace_pos != -1:
+ class_name = class_name[namespace_pos + 2:]
+
+ header_file = ''.join((class_name, '.h'))
+ matches = check_for_toFoo_definition(header_file, ''.join(('to', class_name)), error)
+ # Ignore (for now) if not able to find the header where toFoo might be defined.
+ # TODO: Handle cases where Classname might be defined in some other header or cpp file.
+ if matches is None:
+ return
+
+ report_error = True
+ # Ensure found static_cast instance is not from within toFoo definition itself.
+    if os.path.basename(processing_file) == header_file:
+ for item in matches:
+ if line_number in range(item[1], item[2]):
+ report_error = False
+ break
+
+ if report_error:
+        if matches:
+ # toFoo is defined - enforce using it.
+ # TODO: Suggest an appropriate toFoo from the alternatives present in matches.
+ error(line_number, 'runtime/casting', 4,
+ 'static_cast of class objects is not allowed. Use to%s defined in %s.' %
+ (class_name, header_file))
+ else:
+ # No toFoo defined - enforce definition & usage.
+ # TODO: Automate the generation of toFoo() to avoid any slippages ever.
+ error(line_number, 'runtime/casting', 4,
+ 'static_cast of class objects is not allowed. Add to%s in %s and use it instead.' %
+ (class_name, header_file))
+
+
+def check_c_style_cast(line_number, line, raw_line, cast_type, pattern,
+ error):
+ """Checks for a C-style cast by looking for the pattern.
+
+ This also handles sizeof(type) warnings, due to similarity of content.
+
+ Args:
+ line_number: The number of the line to check.
+ line: The line of code to check.
+ raw_line: The raw line of code to check, with comments.
+ cast_type: The string for the C++ cast to recommend. This is either
+ reinterpret_cast or static_cast, depending.
+ pattern: The regular expression used to find C-style casts.
+ error: The function to call with any errors found.
+ """
+ matched = search(pattern, line)
+ if not matched:
+ return
+
+ # e.g., sizeof(int)
+ sizeof_match = match(r'.*sizeof\s*$', line[0:matched.start(1) - 1])
+ if sizeof_match:
+ error(line_number, 'runtime/sizeof', 1,
+ 'Using sizeof(type). Use sizeof(varname) instead if possible')
+ return
+
+ remainder = line[matched.end(0):]
+
+ # The close paren is for function pointers as arguments to a function.
+ # eg, void foo(void (*bar)(int));
+ # The semicolon check is a more basic function check; also possibly a
+ # function pointer typedef.
+ # eg, void foo(int); or void foo(int) const;
+ # The equals check is for function pointer assignment.
+ # eg, void *(*foo)(int) = ...
+ #
+ # Right now, this will only catch cases where there's a single argument, and
+ # it's unnamed. It should probably be expanded to check for multiple
+ # arguments with some unnamed.
+ function_match = match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)))', remainder)
+ if function_match:
+ if (not function_match.group(3)
+ or function_match.group(3) == ';'
+ or raw_line.find('/*') < 0):
+ error(line_number, 'readability/function', 3,
+ 'All parameters should be named in a function')
+ return
+
+ # At this point, all that should be left is actual casts.
+ error(line_number, 'readability/casting', 4,
+ 'Using C-style cast. Use %s<%s>(...) instead' %
+ (cast_type, matched.group(1)))
+
+
+_HEADERS_CONTAINING_TEMPLATES = (
+ ('<deque>', ('deque',)),
+ ('<functional>', ('unary_function', 'binary_function',
+ 'plus', 'minus', 'multiplies', 'divides', 'modulus',
+ 'negate',
+ 'equal_to', 'not_equal_to', 'greater', 'less',
+ 'greater_equal', 'less_equal',
+ 'logical_and', 'logical_or', 'logical_not',
+ 'unary_negate', 'not1', 'binary_negate', 'not2',
+ 'bind1st', 'bind2nd',
+ 'pointer_to_unary_function',
+ 'pointer_to_binary_function',
+ 'ptr_fun',
+ 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
+ 'mem_fun_ref_t',
+ 'const_mem_fun_t', 'const_mem_fun1_t',
+ 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
+ 'mem_fun_ref',
+ )),
+ ('<limits>', ('numeric_limits',)),
+ ('<list>', ('list',)),
+ ('<map>', ('map', 'multimap',)),
+ ('<memory>', ('allocator',)),
+ ('<queue>', ('queue', 'priority_queue',)),
+ ('<set>', ('set', 'multiset',)),
+ ('<stack>', ('stack',)),
+ ('<string>', ('char_traits', 'basic_string',)),
+ ('<utility>', ('pair',)),
+ ('<vector>', ('vector',)),
+
+ # gcc extensions.
+ # Note: std::hash is their hash, ::hash is our hash
+ ('<hash_map>', ('hash_map', 'hash_multimap',)),
+ ('<hash_set>', ('hash_set', 'hash_multiset',)),
+ ('<slist>', ('slist',)),
+ )
+
+_HEADERS_ACCEPTED_BUT_NOT_PROMOTED = {
+ # We can trust with reasonable confidence that map gives us pair<>, too.
+ 'pair<>': ('map', 'multimap', 'hash_map', 'hash_multimap')
+}
+
+_RE_PATTERN_STRING = re.compile(r'\bstring\b')
+
+_re_pattern_algorithm_header = []
+for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
+ 'transform'):
+ # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
+ # type::max().
+ _re_pattern_algorithm_header.append(
+ (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
+ _template,
+ '<algorithm>'))
+
+_re_pattern_templates = []
+for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
+ for _template in _templates:
+ _re_pattern_templates.append(
+ (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
+ _template + '<>',
+ _header))
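+# e.g. for _template 'map', the compiled pattern matches "map<String, int>",
+# which is reported as 'map<>' with '<map>' as the suggested header.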
+
+
+def files_belong_to_same_module(filename_cpp, filename_h):
+ """Check if these two filenames belong to the same module.
+
+    The concept of a 'module' here is as follows:
+ foo.h, foo-inl.h, foo.cpp, foo_test.cpp and foo_unittest.cpp belong to the
+ same 'module' if they are in the same directory.
+ some/path/public/xyzzy and some/path/internal/xyzzy are also considered
+ to belong to the same module here.
+
+ If the filename_cpp contains a longer path than the filename_h, for example,
+ '/absolute/path/to/base/sysinfo.cpp', and this file would include
+ 'base/sysinfo.h', this function also produces the prefix needed to open the
+ header. This is used by the caller of this function to more robustly open the
+ header file. We don't have access to the real include paths in this context,
+ so we need this guesswork here.
+
+ Known bugs: tools/base/bar.cpp and base/bar.h belong to the same module
+ according to this implementation. Because of this, this function gives
+ some false positives. This should be sufficiently rare in practice.
+
+ Args:
+ filename_cpp: is the path for the .cpp file
+ filename_h: is the path for the header path
+
+ Returns:
+ Tuple with a bool and a string:
+ bool: True if filename_cpp and filename_h belong to the same module.
+ string: the additional prefix needed to open the header file.
+ """
+
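+    # e.g. files_belong_to_same_module('/path/to/base/sysinfo.cpp',
+    #                                  'base/sysinfo.h')
+    # returns (True, '/path/to/'), the prefix needed to open the header.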
+ if not filename_cpp.endswith('.cpp'):
+ return (False, '')
+ filename_cpp = filename_cpp[:-len('.cpp')]
+ if filename_cpp.endswith('_unittest'):
+ filename_cpp = filename_cpp[:-len('_unittest')]
+ elif filename_cpp.endswith('_test'):
+ filename_cpp = filename_cpp[:-len('_test')]
+ filename_cpp = filename_cpp.replace('/public/', '/')
+ filename_cpp = filename_cpp.replace('/internal/', '/')
+
+ if not filename_h.endswith('.h'):
+ return (False, '')
+ filename_h = filename_h[:-len('.h')]
+ if filename_h.endswith('-inl'):
+ filename_h = filename_h[:-len('-inl')]
+ filename_h = filename_h.replace('/public/', '/')
+ filename_h = filename_h.replace('/internal/', '/')
+
+ files_belong_to_same_module = filename_cpp.endswith(filename_h)
+ common_path = ''
+ if files_belong_to_same_module:
+ common_path = filename_cpp[:-len(filename_h)]
+ return files_belong_to_same_module, common_path
+
+
+def update_include_state(filename, include_state):
+ """Fill up the include_state with new includes found from the file.
+
+ Args:
+ filename: the name of the header to read.
+ include_state: an _IncludeState instance in which the headers are inserted.
+
+ Returns:
+        True if a header was successfully added. False otherwise.
+ """
+ header_file = None
+ try:
+ header_file = CppChecker.fs.read_text_file(filename)
+ except IOError:
+ return False
+ line_number = 0
+    for line in header_file.splitlines():
+ line_number += 1
+ clean_line = cleanse_comments(line)
+ matched = _RE_PATTERN_INCLUDE.search(clean_line)
+ if matched:
+ include = matched.group(2)
+ # The value formatting is cute, but not really used right now.
+ # What matters here is that the key is in include_state.
+ include_state.setdefault(include, '%s:%d' % (filename, line_number))
+ return True
+
+
+def check_for_include_what_you_use(filename, clean_lines, include_state, error):
+ """Reports for missing stl includes.
+
+ This function will output warnings to make sure you are including the headers
+ necessary for the stl containers and functions that you use. We only give one
+ reason to include a header. For example, if you use both equal_to<> and
+ less<> in a .h file, only one (the latter in the file) of these will be
+ reported as a reason to include the <functional>.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ include_state: An _IncludeState instance.
+ error: The function to call with any errors found.
+ """
+ required = {} # A map of header name to line_number and the template entity.
+ # Example of required: { '<functional>': (1219, 'less<>') }
+
+ for line_number in xrange(clean_lines.num_lines()):
+ line = clean_lines.elided[line_number]
+ if not line or line[0] == '#':
+ continue
+
+ # String is special -- it is a non-templatized type in STL.
+ if _RE_PATTERN_STRING.search(line):
+ required['<string>'] = (line_number, 'string')
+
+ for pattern, template, header in _re_pattern_algorithm_header:
+ if pattern.search(line):
+ required[header] = (line_number, template)
+
+        # The following check is just a speed-up; no semantics are changed.
+        if '<' not in line:  # Reduces the cpu time usage by skipping lines.
+ continue
+
+ for pattern, template, header in _re_pattern_templates:
+ if pattern.search(line):
+ required[header] = (line_number, template)
+
+ # The policy is that if you #include something in foo.h you don't need to
+ # include it again in foo.cpp. Here, we will look at possible includes.
+ # Let's copy the include_state so it is only messed up within this function.
+ include_state = include_state.copy()
+
+    # Did we find the header for this file (if any) and successfully load it?
+ header_found = False
+
+ # Use the absolute path so that matching works properly.
+ abs_filename = os.path.abspath(filename)
+
+ # For Emacs's flymake.
+ # If cpp_style is invoked from Emacs's flymake, a temporary file is generated
+ # by flymake and that file name might end with '_flymake.cpp'. In that case,
+ # restore original file name here so that the corresponding header file can be
+ # found.
+ # e.g. If the file name is 'foo_flymake.cpp', we should search for 'foo.h'
+ # instead of 'foo_flymake.h'
+ abs_filename = re.sub(r'_flymake\.cpp$', '.cpp', abs_filename)
+
+ # include_state is modified during iteration, so we iterate over a copy of
+ # the keys.
+ for header in include_state.keys(): #NOLINT
+ (same_module, common_path) = files_belong_to_same_module(abs_filename, header)
+ fullpath = common_path + header
+ if same_module and update_include_state(fullpath, include_state):
+ header_found = True
+
+ # If we can't find the header file for a .cpp, assume it's because we don't
+ # know where to look. In that case we'll give up as we're not sure they
+ # didn't include it in the .h file.
+ # FIXME: Do a better job of finding .h files so we are confident that
+ # not having the .h file means there isn't one.
+ if filename.endswith('.cpp') and not header_found:
+ return
+
+ # All the lines have been processed, report the errors found.
+ for required_header_unstripped in required:
+ template = required[required_header_unstripped][1]
+ if template in _HEADERS_ACCEPTED_BUT_NOT_PROMOTED:
+ headers = _HEADERS_ACCEPTED_BUT_NOT_PROMOTED[template]
+            if any(header in include_state for header in headers):
+ continue
+ if required_header_unstripped.strip('<>"') not in include_state:
+ error(required[required_header_unstripped][0],
+ 'build/include_what_you_use', 4,
+ 'Add #include ' + required_header_unstripped + ' for ' + template)
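+    # e.g. a file that uses std::map but never includes <map> is reported
+    # with 'Add #include <map> for map<>'.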
+
+
+def process_line(filename, file_extension,
+ clean_lines, line, include_state, function_state,
+ class_state, file_state, enum_state, error):
+ """Processes a single line in the file.
+
+ Args:
+ filename: Filename of the file that is being processed.
+ file_extension: The extension (dot not included) of the file.
+        clean_lines: A CleansedLines instance containing the file, with
+            comments stripped.
+ line: Number of line being processed.
+ include_state: An _IncludeState instance in which the headers are inserted.
+ function_state: A _FunctionState instance which counts function lines, etc.
+ class_state: A _ClassState instance which maintains information about
+ the current stack of nested class declarations being parsed.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
+ enum_state: A _EnumState instance which maintains an enum declaration
+ state.
+ error: A callable to which errors are reported, which takes arguments:
+               line number, category, confidence, and message
+
+ """
+ raw_lines = clean_lines.raw_lines
+ detect_functions(clean_lines, line, function_state, error)
+ check_for_function_lengths(clean_lines, line, function_state, error)
+ if search(r'\bNOLINT\b', raw_lines[line]): # ignore nolint lines
+ return
+ if match(r'\s*\b__asm\b', raw_lines[line]): # Ignore asm lines as they format differently.
+ return
+ check_function_definition(filename, file_extension, clean_lines, line, function_state, error)
+ check_pass_ptr_usage(clean_lines, line, function_state, error)
+ check_for_leaky_patterns(clean_lines, line, function_state, error)
+ check_for_multiline_comments_and_strings(clean_lines, line, error)
+ check_style(clean_lines, line, file_extension, class_state, file_state, enum_state, error)
+ check_language(filename, clean_lines, line, file_extension, include_state,
+ file_state, error)
+ check_for_non_standard_constructs(clean_lines, line, class_state, error)
+ check_posix_threading(clean_lines, line, error)
+ check_invalid_increment(clean_lines, line, error)
+ check_conditional_and_loop_bodies_for_brace_violations(clean_lines, line, error)
+
+
+def _process_lines(filename, file_extension, lines, error, min_confidence):
+ """Performs lint checks and reports any errors to the given error function.
+
+ Args:
+ filename: Filename of the file that is being processed.
+ file_extension: The extension (dot not included) of the file.
+ lines: An array of strings, each representing a line of the file, with the
+            last element being empty if the file is terminated with a newline.
+        error: A callable to which errors are reported, which takes 4 arguments:
+            line number, category, confidence, and message.
+    """
+ lines = (['// marker so line numbers and indices both start at 1'] + lines +
+ ['// marker so line numbers end in a known way'])
+
+ include_state = _IncludeState()
+ function_state = _FunctionState(min_confidence)
+ class_state = _ClassState()
+
+ check_for_copyright(lines, error)
+
+ if file_extension == 'h':
+ check_for_header_guard(filename, lines, error)
+
+ remove_multi_line_comments(lines, error)
+ clean_lines = CleansedLines(lines)
+ file_state = _FileState(clean_lines, file_extension)
+ enum_state = _EnumState()
+ for line in xrange(clean_lines.num_lines()):
+ process_line(filename, file_extension, clean_lines, line,
+ include_state, function_state, class_state, file_state,
+ enum_state, error)
+ class_state.check_finished(error)
+
+ check_for_include_what_you_use(filename, clean_lines, include_state, error)
+
+ # We check here rather than inside process_line so that we see raw
+ # lines rather than "cleaned" lines.
+ check_for_unicode_replacement_characters(lines, error)
+
+ check_for_new_line_at_eof(lines, error)
+
+
+class CppChecker(object):
+
+ """Processes C++ lines for checking style."""
+
+ # This list is used to--
+ #
+ # (1) generate an explicit list of all possible categories,
+ # (2) unit test that all checked categories have valid names, and
+ # (3) unit test that all categories are getting unit tested.
+ #
+ categories = set([
+ 'build/class',
+ 'build/deprecated',
+ 'build/endif_comment',
+ 'build/forward_decl',
+ 'build/header_guard',
+ 'build/include',
+ 'build/include_order',
+ 'build/include_what_you_use',
+ 'build/namespaces',
+ 'build/printf_format',
+ 'build/storage_class',
+ 'build/using_std',
+ 'legal/copyright',
+ 'readability/braces',
+ 'readability/casting',
+ 'readability/check',
+ 'readability/comparison_to_boolean',
+ 'readability/constructors',
+ 'readability/control_flow',
+ 'readability/enum_casing',
+ 'readability/fn_size',
+ 'readability/function',
+ 'readability/multiline_comment',
+ 'readability/multiline_string',
+ 'readability/parameter_name',
+ 'readability/naming',
+ 'readability/naming/underscores',
+ 'readability/null',
+ 'readability/pass_ptr',
+ 'readability/streams',
+ 'readability/todo',
+ 'readability/utf8',
+ 'readability/webkit_export',
+ 'runtime/arrays',
+ 'runtime/bitfields',
+ 'runtime/casting',
+ 'runtime/ctype_function',
+ 'runtime/explicit',
+ 'runtime/init',
+ 'runtime/int',
+ 'runtime/invalid_increment',
+ 'runtime/leaky_pattern',
+ 'runtime/max_min_macros',
+ 'runtime/memset',
+ 'runtime/printf',
+ 'runtime/printf_format',
+ 'runtime/references',
+ 'runtime/rtti',
+ 'runtime/sizeof',
+ 'runtime/string',
+ 'runtime/threadsafe_fn',
+ 'runtime/unsigned',
+ 'runtime/virtual',
+ 'whitespace/blank_line',
+ 'whitespace/braces',
+ 'whitespace/comma',
+ 'whitespace/comments',
+ 'whitespace/declaration',
+ 'whitespace/end_of_line',
+ 'whitespace/ending_newline',
+ 'whitespace/indent',
+ 'whitespace/line_length',
+ 'whitespace/newline',
+ 'whitespace/operators',
+ 'whitespace/parens',
+ 'whitespace/semicolon',
+ 'whitespace/tab',
+ 'whitespace/todo',
+ ])
+
+ fs = None
+
+ def __init__(self, file_path, file_extension, handle_style_error,
+ min_confidence, fs=None):
+ """Create a CppChecker instance.
+
+ Args:
+          file_path: A string that is the path of the file being checked.
+          file_extension: A string that is the file extension, without
+              the leading dot.
+          handle_style_error: The function to call with any errors found.
+          min_confidence: An integer that is the minimum confidence level.
+          fs: An optional FileSystem instance (defaults to a new FileSystem).
+
+ """
+ self.file_extension = file_extension
+ self.file_path = file_path
+ self.handle_style_error = handle_style_error
+ self.min_confidence = min_confidence
+ CppChecker.fs = fs or FileSystem()
+
+ # Useful for unit testing.
+ def __eq__(self, other):
+ """Return whether this CppChecker instance is equal to another."""
+ if self.file_extension != other.file_extension:
+ return False
+ if self.file_path != other.file_path:
+ return False
+ if self.handle_style_error != other.handle_style_error:
+ return False
+ if self.min_confidence != other.min_confidence:
+ return False
+
+ return True
+
+ # Useful for unit testing.
+ def __ne__(self, other):
+ # Python does not automatically deduce __ne__() from __eq__().
+ return not self.__eq__(other)
+
+ def check(self, lines):
+ _process_lines(self.file_path, self.file_extension, lines,
+ self.handle_style_error, self.min_confidence)
+
+
+# FIXME: Remove this function (requires refactoring unit tests).
+def process_file_data(filename, file_extension, lines, error, min_confidence, fs=None):
+ checker = CppChecker(filename, file_extension, error, min_confidence, fs)
+ checker.check(lines)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
new file mode 100644
index 0000000..ac9bdba
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
@@ -0,0 +1,5205 @@
+# -*- coding: utf-8; -*-
+#
+# Copyright (C) 2011 Google Inc. All rights reserved.
+# Copyright (C) 2009 Torch Mobile Inc.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for cpp_style.py."""
+
+# FIXME: Add a good test that tests UpdateIncludeState.
+
+import os
+import random
+import re
+import unittest
+
+import cpp as cpp_style
+from cpp import CppChecker
+from ..filter import FilterConfiguration
+from webkitpy.common.system.filesystem import FileSystem
+
+# This class works as an error collector and replaces the error handler
+# passed to cpp_style for the unit tests. We also verify each category we
+# see is in CppChecker.categories, to help keep that list up to date.
+class ErrorCollector:
+ _all_style_categories = CppChecker.categories
+    # This is a dict of all categories seen in any unit test.
+ _seen_style_categories = {}
+
+ def __init__(self, assert_fn, filter=None, lines_to_check=None):
+ """assert_fn: a function to call when we notice a problem.
+ filter: filters the errors that we are concerned about."""
+ self._assert_fn = assert_fn
+ self._errors = []
+ self._lines_to_check = lines_to_check
+ if not filter:
+ filter = FilterConfiguration()
+ self._filter = filter
+
+ def __call__(self, line_number, category, confidence, message):
+ self._assert_fn(category in self._all_style_categories,
+ 'Message "%s" has category "%s",'
+                        ' which is not in CppChecker.categories' % (message, category))
+
+        if self._lines_to_check and line_number not in self._lines_to_check:
+ return False
+
+ if self._filter.should_check(category, ""):
+ self._seen_style_categories[category] = 1
+ self._errors.append('%s [%s] [%d]' % (message, category, confidence))
+ return True
+
+ def results(self):
+ if len(self._errors) < 2:
+ return ''.join(self._errors) # Most tests expect to have a string.
+ else:
+ return self._errors # Let's give a list if there is more than one.
+
+ def result_list(self):
+ return self._errors
+
+ def verify_all_categories_are_seen(self):
+ """Fails if there's a category in _all_style_categories - _seen_style_categories.
+
+ This should only be called after all tests are run, so
+ _seen_style_categories has had a chance to fully populate. Since
+ this isn't called from within the normal unittest framework, we
+ can't use the normal unittest assert macros. Instead we just exit
+ when we see an error. Good thing this test is always run last!
+ """
+ for category in self._all_style_categories:
+ if category not in self._seen_style_categories:
+ import sys
+ sys.exit('FATAL ERROR: There are no tests for category "%s"' % category)
+
+
+class CppFunctionsTest(unittest.TestCase):
+
+ """Supports testing functions that do not need CppStyleTestBase."""
+
+ def test_convert_to_lower_with_underscores(self):
+ self.assertEqual(cpp_style._convert_to_lower_with_underscores('ABC'), 'abc')
+ self.assertEqual(cpp_style._convert_to_lower_with_underscores('aB'), 'a_b')
+ self.assertEqual(cpp_style._convert_to_lower_with_underscores('isAName'), 'is_a_name')
+ self.assertEqual(cpp_style._convert_to_lower_with_underscores('AnotherTest'), 'another_test')
+ self.assertEqual(cpp_style._convert_to_lower_with_underscores('PassRefPtr<MyClass>'), 'pass_ref_ptr<my_class>')
+ self.assertEqual(cpp_style._convert_to_lower_with_underscores('_ABC'), '_abc')
+
+ def test_create_acronym(self):
+ self.assertEqual(cpp_style._create_acronym('ABC'), 'ABC')
+ self.assertEqual(cpp_style._create_acronym('IsAName'), 'IAN')
+ self.assertEqual(cpp_style._create_acronym('PassRefPtr<MyClass>'), 'PRP<MC>')
+
+ def test_is_c_or_objective_c(self):
+ clean_lines = cpp_style.CleansedLines([''])
+ clean_objc_lines = cpp_style.CleansedLines(['#import "header.h"'])
+ self.assertTrue(cpp_style._FileState(clean_lines, 'c').is_c_or_objective_c())
+ self.assertTrue(cpp_style._FileState(clean_lines, 'm').is_c_or_objective_c())
+ self.assertFalse(cpp_style._FileState(clean_lines, 'cpp').is_c_or_objective_c())
+ self.assertFalse(cpp_style._FileState(clean_lines, 'cc').is_c_or_objective_c())
+ self.assertFalse(cpp_style._FileState(clean_lines, 'h').is_c_or_objective_c())
+ self.assertTrue(cpp_style._FileState(clean_objc_lines, 'h').is_c_or_objective_c())
+
+ def test_parameter(self):
+ # Test type.
+ parameter = cpp_style.Parameter('ExceptionCode', 13, 1)
+ self.assertEqual(parameter.type, 'ExceptionCode')
+ self.assertEqual(parameter.name, '')
+ self.assertEqual(parameter.row, 1)
+
+ # Test type and name.
+ parameter = cpp_style.Parameter('PassRefPtr<MyClass> parent', 19, 1)
+ self.assertEqual(parameter.type, 'PassRefPtr<MyClass>')
+ self.assertEqual(parameter.name, 'parent')
+ self.assertEqual(parameter.row, 1)
+
+ # Test type, no name, with default value.
+ parameter = cpp_style.Parameter('MyClass = 0', 7, 0)
+ self.assertEqual(parameter.type, 'MyClass')
+ self.assertEqual(parameter.name, '')
+ self.assertEqual(parameter.row, 0)
+
+ # Test type, name, and default value.
+ parameter = cpp_style.Parameter('MyClass a = 0', 7, 0)
+ self.assertEqual(parameter.type, 'MyClass')
+ self.assertEqual(parameter.name, 'a')
+ self.assertEqual(parameter.row, 0)
+
+ def test_single_line_view(self):
+ start_position = cpp_style.Position(row=1, column=1)
+ end_position = cpp_style.Position(row=3, column=1)
+ single_line_view = cpp_style.SingleLineView(['0', 'abcde', 'fgh', 'i'], start_position, end_position)
+ self.assertEqual(single_line_view.single_line, 'bcde fgh i')
+ self.assertEqual(single_line_view.convert_column_to_row(0), 1)
+ self.assertEqual(single_line_view.convert_column_to_row(4), 1)
+ self.assertEqual(single_line_view.convert_column_to_row(5), 2)
+ self.assertEqual(single_line_view.convert_column_to_row(8), 2)
+ self.assertEqual(single_line_view.convert_column_to_row(9), 3)
+ self.assertEqual(single_line_view.convert_column_to_row(100), 3)
+
+ start_position = cpp_style.Position(row=0, column=3)
+ end_position = cpp_style.Position(row=0, column=4)
+ single_line_view = cpp_style.SingleLineView(['abcdef'], start_position, end_position)
+ self.assertEqual(single_line_view.single_line, 'd')
+
+ def test_create_skeleton_parameters(self):
+ self.assertEqual(cpp_style.create_skeleton_parameters(''), '')
+ self.assertEqual(cpp_style.create_skeleton_parameters(' '), ' ')
+ self.assertEqual(cpp_style.create_skeleton_parameters('long'), 'long,')
+ self.assertEqual(cpp_style.create_skeleton_parameters('const unsigned long int'), ' int,')
+ self.assertEqual(cpp_style.create_skeleton_parameters('long int*'), ' int ,')
+ self.assertEqual(cpp_style.create_skeleton_parameters('PassRefPtr<Foo> a'), 'PassRefPtr a,')
+ self.assertEqual(cpp_style.create_skeleton_parameters(
+ 'ComplexTemplate<NestedTemplate1<MyClass1, MyClass2>, NestedTemplate1<MyClass1, MyClass2> > param, int second'),
+ 'ComplexTemplate param, int second,')
+ self.assertEqual(cpp_style.create_skeleton_parameters('int = 0, Namespace::Type& a'), 'int , Type a,')
+        # create_skeleton_parameters is a bit too aggressive with function pointers,
+        # but it allows parsing the other parameters, and declarations like this are rare.
+ self.assertEqual(cpp_style.create_skeleton_parameters('void (*fn)(int a, int b), Namespace::Type& a'),
+ 'void , Type a,')
+
+    # This doesn't look like a function declaration, but the simplifications
+    # help to eliminate false positives.
+ self.assertEqual(cpp_style.create_skeleton_parameters('b{d}'), 'b ,')
+
+ def test_find_parameter_name_index(self):
+ self.assertEqual(cpp_style.find_parameter_name_index(' int a '), 5)
+ self.assertEqual(cpp_style.find_parameter_name_index(' PassRefPtr '), 16)
+ self.assertEqual(cpp_style.find_parameter_name_index('double'), 6)
+
+ def test_parameter_list(self):
+ elided_lines = ['int blah(PassRefPtr<MyClass> paramName,',
+ 'const Other1Class& foo,',
+ 'const ComplexTemplate<Class1, NestedTemplate<P1, P2> >* const * param = new ComplexTemplate<Class1, NestedTemplate<P1, P2> >(34, 42),',
+ 'int* myCount = 0);']
+ start_position = cpp_style.Position(row=0, column=8)
+ end_position = cpp_style.Position(row=3, column=16)
+
+ expected_parameters = ({'type': 'PassRefPtr<MyClass>', 'name': 'paramName', 'row': 0},
+ {'type': 'const Other1Class&', 'name': 'foo', 'row': 1},
+ {'type': 'const ComplexTemplate<Class1, NestedTemplate<P1, P2> >* const *', 'name': 'param', 'row': 2},
+ {'type': 'int*', 'name': 'myCount', 'row': 3})
+ index = 0
+ for parameter in cpp_style.parameter_list(elided_lines, start_position, end_position):
+ expected_parameter = expected_parameters[index]
+ self.assertEqual(parameter.type, expected_parameter['type'])
+ self.assertEqual(parameter.name, expected_parameter['name'])
+ self.assertEqual(parameter.row, expected_parameter['row'])
+ index += 1
+ self.assertEqual(index, len(expected_parameters))
+
+ def test_check_parameter_against_text(self):
+ error_collector = ErrorCollector(self.assertTrue)
+ parameter = cpp_style.Parameter('FooF ooF', 4, 1)
+ self.assertFalse(cpp_style._check_parameter_name_against_text(parameter, 'FooF', error_collector))
+ self.assertEqual(error_collector.results(),
+ 'The parameter name "ooF" adds no information, so it should be removed. [readability/parameter_name] [5]')
+
+
+class CppStyleTestBase(unittest.TestCase):
+ """Provides some useful helper functions for cpp_style tests.
+
+ Attributes:
+ min_confidence: An integer that is the current minimum confidence
+ level for the tests.
+
+ """
+
+ # FIXME: Refactor the unit tests so the confidence level is passed
+ # explicitly, just like it is in the real code.
+    min_confidence = 1
+
+ # Helper function to avoid needing to explicitly pass confidence
+ # in all the unit test calls to cpp_style.process_file_data().
+ def process_file_data(self, filename, file_extension, lines, error, fs=None):
+ """Call cpp_style.process_file_data() with the min_confidence."""
+ return cpp_style.process_file_data(filename, file_extension, lines,
+ error, self.min_confidence, fs)
+
+ def perform_lint(self, code, filename, basic_error_rules, fs=None, lines_to_check=None):
+ error_collector = ErrorCollector(self.assertTrue, FilterConfiguration(basic_error_rules), lines_to_check)
+ lines = code.split('\n')
+ extension = filename.split('.')[1]
+ self.process_file_data(filename, extension, lines, error_collector, fs)
+ return error_collector.results()
+
+ # Perform lint on single line of input and return the error message.
+ def perform_single_line_lint(self, code, filename):
+ basic_error_rules = ('-build/header_guard',
+ '-legal/copyright',
+ '-readability/fn_size',
+ '-readability/parameter_name',
+ '-readability/pass_ptr',
+ '-whitespace/ending_newline')
+ return self.perform_lint(code, filename, basic_error_rules)
+
+ # Perform lint over multiple lines and return the error message.
+ def perform_multi_line_lint(self, code, file_extension):
+ basic_error_rules = ('-build/header_guard',
+ '-legal/copyright',
+ '-readability/parameter_name',
+ '-whitespace/ending_newline')
+ return self.perform_lint(code, 'test.' + file_extension, basic_error_rules)
+
+ # Only keep some errors related to includes, namespaces and rtti.
+ def perform_language_rules_check(self, filename, code, lines_to_check=None):
+ basic_error_rules = ('-',
+ '+build/include',
+ '+build/include_order',
+ '+build/namespaces',
+ '+runtime/rtti')
+ return self.perform_lint(code, filename, basic_error_rules, lines_to_check=lines_to_check)
+
+ # Only keep function length errors.
+ def perform_function_lengths_check(self, code):
+ basic_error_rules = ('-',
+ '+readability/fn_size')
+ return self.perform_lint(code, 'test.cpp', basic_error_rules)
+
+ # Only keep pass ptr errors.
+ def perform_pass_ptr_check(self, code):
+ basic_error_rules = ('-',
+ '+readability/pass_ptr')
+ return self.perform_lint(code, 'test.cpp', basic_error_rules)
+
+ # Only keep leaky pattern errors.
+ def perform_leaky_pattern_check(self, code):
+ basic_error_rules = ('-',
+ '+runtime/leaky_pattern')
+ return self.perform_lint(code, 'test.cpp', basic_error_rules)
+
+ # Only include what you use errors.
+ def perform_include_what_you_use(self, code, filename='foo.h', fs=None):
+ basic_error_rules = ('-',
+ '+build/include_what_you_use')
+ return self.perform_lint(code, filename, basic_error_rules, fs)
+
+ def perform_avoid_static_cast_of_objects(self, code, filename='foo.cpp', fs=None):
+ basic_error_rules = ('-',
+ '+runtime/casting')
+ return self.perform_lint(code, filename, basic_error_rules, fs)
+
+ # Perform lint and compare the error message with "expected_message".
+ def assert_lint(self, code, expected_message, file_name='foo.cpp'):
+ self.assertEqual(expected_message, self.perform_single_line_lint(code, file_name))
+
+ def assert_lint_one_of_many_errors_re(self, code, expected_message_re, file_name='foo.cpp'):
+ messages = self.perform_single_line_lint(code, file_name)
+ for message in messages:
+ if re.search(expected_message_re, message):
+ return
+
+ self.assertEqual(expected_message_re, messages)
+
+ def assert_multi_line_lint(self, code, expected_message, file_name='foo.h'):
+ file_extension = file_name[file_name.rfind('.') + 1:]
+ self.assertEqual(expected_message, self.perform_multi_line_lint(code, file_extension))
+
+ def assert_multi_line_lint_re(self, code, expected_message_re, file_name='foo.h'):
+ file_extension = file_name[file_name.rfind('.') + 1:]
+ message = self.perform_multi_line_lint(code, file_extension)
+ if not re.search(expected_message_re, message):
+ self.fail('Message was:\n' + message + 'Expected match to "' + expected_message_re + '"')
+
+ def assert_language_rules_check(self, file_name, code, expected_message, lines_to_check=None):
+ self.assertEqual(expected_message,
+ self.perform_language_rules_check(file_name, code, lines_to_check))
+
+ def assert_include_what_you_use(self, code, expected_message):
+ self.assertEqual(expected_message,
+ self.perform_include_what_you_use(code))
+
+ def assert_blank_lines_check(self, lines, start_errors, end_errors):
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data('foo.cpp', 'cpp', lines, error_collector)
+ self.assertEqual(
+ start_errors,
+ error_collector.results().count(
+ 'Blank line at the start of a code block. Is this needed?'
+ ' [whitespace/blank_line] [2]'))
+ self.assertEqual(
+ end_errors,
+ error_collector.results().count(
+ 'Blank line at the end of a code block. Is this needed?'
+ ' [whitespace/blank_line] [3]'))
+
+ def assert_positions_equal(self, position, tuple_position):
+ """Checks if the two positions are equal.
+
+ position: a cpp_style.Position object.
+ tuple_position: a tuple (row, column) to compare against."""
+ self.assertEqual(position, cpp_style.Position(tuple_position[0], tuple_position[1]),
+ 'position %s, tuple_position %s' % (position, tuple_position))
+
+
+class FunctionDetectionTest(CppStyleTestBase):
+ def perform_function_detection(self, lines, function_information, detection_line=0):
+ clean_lines = cpp_style.CleansedLines(lines)
+ function_state = cpp_style._FunctionState(5)
+ error_collector = ErrorCollector(self.assertTrue)
+ cpp_style.detect_functions(clean_lines, detection_line, function_state, error_collector)
+ if not function_information:
+ self.assertEqual(function_state.in_a_function, False)
+ return
+ self.assertEqual(function_state.in_a_function, True)
+ self.assertEqual(function_state.current_function, function_information['name'] + '()')
+ self.assertEqual(function_state.modifiers_and_return_type(), function_information['modifiers_and_return_type'])
+ self.assertEqual(function_state.is_pure, function_information['is_pure'])
+ self.assertEqual(function_state.is_declaration, function_information['is_declaration'])
+ self.assert_positions_equal(function_state.function_name_start_position, function_information['function_name_start_position'])
+ self.assert_positions_equal(function_state.parameter_start_position, function_information['parameter_start_position'])
+ self.assert_positions_equal(function_state.parameter_end_position, function_information['parameter_end_position'])
+ self.assert_positions_equal(function_state.body_start_position, function_information['body_start_position'])
+ self.assert_positions_equal(function_state.end_position, function_information['end_position'])
+ expected_parameters = function_information.get('parameter_list')
+ if expected_parameters:
+ actual_parameters = function_state.parameter_list()
+ self.assertEqual(len(actual_parameters), len(expected_parameters))
+ for index in range(len(expected_parameters)):
+ actual_parameter = actual_parameters[index]
+ expected_parameter = expected_parameters[index]
+ self.assertEqual(actual_parameter.type, expected_parameter['type'])
+ self.assertEqual(actual_parameter.name, expected_parameter['name'])
+ self.assertEqual(actual_parameter.row, expected_parameter['row'])
+
+ def test_basic_function_detection(self):
+ self.perform_function_detection(
+ ['void theTestFunctionName(int) {',
+ '}'],
+ {'name': 'theTestFunctionName',
+ 'modifiers_and_return_type': 'void',
+ 'function_name_start_position': (0, 5),
+ 'parameter_start_position': (0, 24),
+ 'parameter_end_position': (0, 29),
+ 'body_start_position': (0, 30),
+ 'end_position': (1, 1),
+ 'is_pure': False,
+ 'is_declaration': False})
+
+ def test_function_declaration_detection(self):
+ self.perform_function_detection(
+ ['void aFunctionName(int);'],
+ {'name': 'aFunctionName',
+ 'modifiers_and_return_type': 'void',
+ 'function_name_start_position': (0, 5),
+ 'parameter_start_position': (0, 18),
+ 'parameter_end_position': (0, 23),
+ 'body_start_position': (0, 23),
+ 'end_position': (0, 24),
+ 'is_pure': False,
+ 'is_declaration': True})
+
+ self.perform_function_detection(
+ ['CheckedInt<T> operator /(const CheckedInt<T> &lhs, const CheckedInt<T> &rhs);'],
+ {'name': 'operator /',
+ 'modifiers_and_return_type': 'CheckedInt<T>',
+ 'function_name_start_position': (0, 14),
+ 'parameter_start_position': (0, 24),
+ 'parameter_end_position': (0, 76),
+ 'body_start_position': (0, 76),
+ 'end_position': (0, 77),
+ 'is_pure': False,
+ 'is_declaration': True})
+
+ self.perform_function_detection(
+ ['CheckedInt<T> operator -(const CheckedInt<T> &lhs, const CheckedInt<T> &rhs);'],
+ {'name': 'operator -',
+ 'modifiers_and_return_type': 'CheckedInt<T>',
+ 'function_name_start_position': (0, 14),
+ 'parameter_start_position': (0, 24),
+ 'parameter_end_position': (0, 76),
+ 'body_start_position': (0, 76),
+ 'end_position': (0, 77),
+ 'is_pure': False,
+ 'is_declaration': True})
+
+ self.perform_function_detection(
+ ['CheckedInt<T> operator !=(const CheckedInt<T> &lhs, const CheckedInt<T> &rhs);'],
+ {'name': 'operator !=',
+ 'modifiers_and_return_type': 'CheckedInt<T>',
+ 'function_name_start_position': (0, 14),
+ 'parameter_start_position': (0, 25),
+ 'parameter_end_position': (0, 77),
+ 'body_start_position': (0, 77),
+ 'end_position': (0, 78),
+ 'is_pure': False,
+ 'is_declaration': True})
+
+ self.perform_function_detection(
+ ['CheckedInt<T> operator +(const CheckedInt<T> &lhs, const CheckedInt<T> &rhs);'],
+ {'name': 'operator +',
+ 'modifiers_and_return_type': 'CheckedInt<T>',
+ 'function_name_start_position': (0, 14),
+ 'parameter_start_position': (0, 24),
+ 'parameter_end_position': (0, 76),
+ 'body_start_position': (0, 76),
+ 'end_position': (0, 77),
+ 'is_pure': False,
+ 'is_declaration': True})
+
+ def test_pure_function_detection(self):
+ self.perform_function_detection(
+ ['virtual void theTestFunctionName(int = 0);'],
+ {'name': 'theTestFunctionName',
+ 'modifiers_and_return_type': 'virtual void',
+ 'function_name_start_position': (0, 13),
+ 'parameter_start_position': (0, 32),
+ 'parameter_end_position': (0, 41),
+ 'body_start_position': (0, 41),
+ 'end_position': (0, 42),
+ 'is_pure': False,
+ 'is_declaration': True})
+
+ self.perform_function_detection(
+ ['virtual void theTestFunctionName(int) = 0;'],
+ {'name': 'theTestFunctionName',
+ 'modifiers_and_return_type': 'virtual void',
+ 'function_name_start_position': (0, 13),
+ 'parameter_start_position': (0, 32),
+ 'parameter_end_position': (0, 37),
+ 'body_start_position': (0, 41),
+ 'end_position': (0, 42),
+ 'is_pure': True,
+ 'is_declaration': True})
+
+ # Hopefully, no one writes code like this but it is a tricky case.
+ self.perform_function_detection(
+ ['virtual void theTestFunctionName(int)',
+ ' = ',
+ ' 0 ;'],
+ {'name': 'theTestFunctionName',
+ 'modifiers_and_return_type': 'virtual void',
+ 'function_name_start_position': (0, 13),
+ 'parameter_start_position': (0, 32),
+ 'parameter_end_position': (0, 37),
+ 'body_start_position': (2, 3),
+ 'end_position': (2, 4),
+ 'is_pure': True,
+ 'is_declaration': True})
+
+ def test_ignore_macros(self):
+ self.perform_function_detection(['void aFunctionName(int); \\'], None)
+
+ def test_non_functions(self):
+ # This case exposed an error because the open brace was in quotes.
+ self.perform_function_detection(
+ ['asm(',
+ ' "stmdb sp!, {r1-r3}" "\n"',
+ ');'],
+ # This isn't a function but it looks like one to our simple
+ # algorithm and that is ok.
+ {'name': 'asm',
+ 'modifiers_and_return_type': '',
+ 'function_name_start_position': (0, 0),
+ 'parameter_start_position': (0, 3),
+ 'parameter_end_position': (2, 1),
+ 'body_start_position': (2, 1),
+ 'end_position': (2, 2),
+ 'is_pure': False,
+ 'is_declaration': True})
+
+ # Simple test case with something that is not a function.
+ self.perform_function_detection(['class Stuff;'], None)
+
+ def test_parameter_list(self):
+ # A function with no arguments.
+ function_state = self.perform_function_detection(
+ ['void functionName();'],
+ {'name': 'functionName',
+ 'modifiers_and_return_type': 'void',
+ 'function_name_start_position': (0, 5),
+ 'parameter_start_position': (0, 17),
+ 'parameter_end_position': (0, 19),
+ 'body_start_position': (0, 19),
+ 'end_position': (0, 20),
+ 'is_pure': False,
+ 'is_declaration': True,
+ 'parameter_list': ()})
+
+ # A function with one argument.
+ function_state = self.perform_function_detection(
+ ['void functionName(int);'],
+ {'name': 'functionName',
+ 'modifiers_and_return_type': 'void',
+ 'function_name_start_position': (0, 5),
+ 'parameter_start_position': (0, 17),
+ 'parameter_end_position': (0, 22),
+ 'body_start_position': (0, 22),
+ 'end_position': (0, 23),
+ 'is_pure': False,
+ 'is_declaration': True,
+ 'parameter_list':
+ ({'type': 'int', 'name': '', 'row': 0},)})
+
+ # A function with unsigned and short arguments
+ function_state = self.perform_function_detection(
+ ['void functionName(unsigned a, short b, long c, long long short unsigned int);'],
+ {'name': 'functionName',
+ 'modifiers_and_return_type': 'void',
+ 'function_name_start_position': (0, 5),
+ 'parameter_start_position': (0, 17),
+ 'parameter_end_position': (0, 76),
+ 'body_start_position': (0, 76),
+ 'end_position': (0, 77),
+ 'is_pure': False,
+ 'is_declaration': True,
+ 'parameter_list':
+ ({'type': 'unsigned', 'name': 'a', 'row': 0},
+ {'type': 'short', 'name': 'b', 'row': 0},
+ {'type': 'long', 'name': 'c', 'row': 0},
+ {'type': 'long long short unsigned int', 'name': '', 'row': 0})})
+
+ # Some parameter type with modifiers and no parameter names.
+ function_state = self.perform_function_detection(
+ ['virtual void determineARIADropEffects(Vector<String>*&, const unsigned long int*&, const MediaPlayer::Preload, Other<Other2, Other3<P1, P2> >, int);'],
+ {'name': 'determineARIADropEffects',
+ 'modifiers_and_return_type': 'virtual void',
+ 'parameter_start_position': (0, 37),
+ 'function_name_start_position': (0, 13),
+ 'parameter_end_position': (0, 147),
+ 'body_start_position': (0, 147),
+ 'end_position': (0, 148),
+ 'is_pure': False,
+ 'is_declaration': True,
+ 'parameter_list':
+ ({'type': 'Vector<String>*&', 'name': '', 'row': 0},
+ {'type': 'const unsigned long int*&', 'name': '', 'row': 0},
+ {'type': 'const MediaPlayer::Preload', 'name': '', 'row': 0},
+ {'type': 'Other<Other2, Other3<P1, P2> >', 'name': '', 'row': 0},
+ {'type': 'int', 'name': '', 'row': 0})})
+
+ # Try parsing a function with a very complex definition.
+ function_state = self.perform_function_detection(
+ ['#define MyMacro(a) a',
+ 'virtual',
+ 'AnotherTemplate<Class1, Class2> aFunctionName(PassRefPtr<MyClass> paramName,',
+ 'const Other1Class& foo,',
+ 'const ComplexTemplate<Class1, NestedTemplate<P1, P2> >* const * param = new ComplexTemplate<Class1, NestedTemplate<P1, P2> >(34, 42),',
+ 'int* myCount = 0);'],
+ {'name': 'aFunctionName',
+ 'modifiers_and_return_type': 'virtual AnotherTemplate<Class1, Class2>',
+ 'function_name_start_position': (2, 32),
+ 'parameter_start_position': (2, 45),
+ 'parameter_end_position': (5, 17),
+ 'body_start_position': (5, 17),
+ 'end_position': (5, 18),
+ 'is_pure': False,
+ 'is_declaration': True,
+ 'parameter_list':
+ ({'type': 'PassRefPtr<MyClass>', 'name': 'paramName', 'row': 2},
+ {'type': 'const Other1Class&', 'name': 'foo', 'row': 3},
+ {'type': 'const ComplexTemplate<Class1, NestedTemplate<P1, P2> >* const *', 'name': 'param', 'row': 4},
+ {'type': 'int*', 'name': 'myCount', 'row': 5})},
+ detection_line=2)
+
+
+class CppStyleTest(CppStyleTestBase):
+
+ def test_asm_lines_ignored(self):
+ self.assert_lint(
+ '__asm mov [registration], eax',
+ '')
+
+ # Test get line width.
+ def test_get_line_width(self):
+ self.assertEqual(0, cpp_style.get_line_width(''))
+ self.assertEqual(10, cpp_style.get_line_width(u'x' * 10))
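+ # Fullwidth CJK characters count as two columns each; the string below
+ # has six wide characters and four '|' separators: 6 * 2 + 4 = 16.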
+ self.assertEqual(16, cpp_style.get_line_width(u'都|道|府|県|支庁'))
+
+ def test_find_next_multi_line_comment_start(self):
+ self.assertEqual(1, cpp_style.find_next_multi_line_comment_start([''], 0))
+
+ lines = ['a', 'b', '/* c']
+ self.assertEqual(2, cpp_style.find_next_multi_line_comment_start(lines, 0))
+
+ lines = ['char a[] = "/*";'] # not recognized as comment.
+ self.assertEqual(1, cpp_style.find_next_multi_line_comment_start(lines, 0))
+
+ def test_find_next_multi_line_comment_end(self):
+ self.assertEqual(1, cpp_style.find_next_multi_line_comment_end([''], 0))
+ lines = ['a', 'b', ' c */']
+ self.assertEqual(2, cpp_style.find_next_multi_line_comment_end(lines, 0))
+
+ def test_remove_multi_line_comments_from_range(self):
+ lines = ['a', ' /* comment ', ' * still comment', ' comment */ ', 'b']
+ cpp_style.remove_multi_line_comments_from_range(lines, 1, 4)
+ self.assertEqual(['a', '// dummy', '// dummy', '// dummy', 'b'], lines)
+
+ def test_position(self):
+ position = cpp_style.Position(3, 4)
+ self.assert_positions_equal(position, (3, 4))
+ self.assertEqual(position.row, 3)
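+ # Comparisons are row-major: a position on a later row is greater than
+ # any position on an earlier row, regardless of column.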
+ self.assertTrue(position > cpp_style.Position(position.row - 1, position.column + 1))
+ self.assertTrue(position > cpp_style.Position(position.row, position.column - 1))
+ self.assertTrue(position < cpp_style.Position(position.row, position.column + 1))
+ self.assertTrue(position < cpp_style.Position(position.row + 1, position.column - 1))
+ self.assertEqual(position.__str__(), '(3, 4)')
+
+ def test_rfind_in_lines(self):
+ not_found_position = cpp_style.Position(10, 11)
+ start_position = cpp_style.Position(2, 2)
+ lines = ['ab', 'ace', 'test']
+ self.assertEqual(not_found_position, cpp_style._rfind_in_lines('st', lines, start_position, not_found_position))
+ self.assertTrue(cpp_style.Position(1, 1) == cpp_style._rfind_in_lines('a', lines, start_position, not_found_position))
+ self.assertEqual(cpp_style.Position(2, 2), cpp_style._rfind_in_lines('(te|a)', lines, start_position, not_found_position))
+
+ def test_close_expression(self):
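+ # close_expression appears to return the position just past the matching
+ # closer, or a position past the last line (column -1) when none is found.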
+ self.assertEqual(cpp_style.Position(1, -1), cpp_style.close_expression([')('], cpp_style.Position(0, 1)))
+ self.assertEqual(cpp_style.Position(1, -1), cpp_style.close_expression([') ()'], cpp_style.Position(0, 1)))
+ self.assertEqual(cpp_style.Position(0, 4), cpp_style.close_expression([')[)]'], cpp_style.Position(0, 1)))
+ self.assertEqual(cpp_style.Position(0, 5), cpp_style.close_expression(['}{}{}'], cpp_style.Position(0, 3)))
+ self.assertEqual(cpp_style.Position(1, 1), cpp_style.close_expression(['}{}{', '}'], cpp_style.Position(0, 3)))
+ self.assertEqual(cpp_style.Position(2, -1), cpp_style.close_expression(['][][', ' '], cpp_style.Position(0, 3)))
+
+ def test_spaces_at_end_of_line(self):
+ self.assert_lint(
+ '// Hello there ',
+ 'Line ends in whitespace. Consider deleting these extra spaces.'
+ ' [whitespace/end_of_line] [4]')
+
+ # Test C-style cast cases.
+ def test_cstyle_cast(self):
+ self.assert_lint(
+ 'int a = (int)1.0;',
+ 'Using C-style cast. Use static_cast<int>(...) instead'
+ ' [readability/casting] [4]')
+ self.assert_lint(
+ 'int *a = (int *)DEFINED_VALUE;',
+ 'Using C-style cast. Use reinterpret_cast<int *>(...) instead'
+ ' [readability/casting] [4]', 'foo.c')
+ self.assert_lint(
+ 'uint16 a = (uint16)1.0;',
+ 'Using C-style cast. Use static_cast<uint16>(...) instead'
+ ' [readability/casting] [4]')
+ self.assert_lint(
+ 'int32 a = (int32)1.0;',
+ 'Using C-style cast. Use static_cast<int32>(...) instead'
+ ' [readability/casting] [4]')
+ self.assert_lint(
+ 'uint64 a = (uint64)1.0;',
+ 'Using C-style cast. Use static_cast<uint64>(...) instead'
+ ' [readability/casting] [4]')
+
+ # Test taking address of casts (runtime/casting)
+ def test_runtime_casting(self):
+ self.assert_lint(
+ 'int* x = &static_cast<int*>(foo);',
+ 'Are you taking an address of a cast? '
+ 'This is dangerous: could be a temp var. '
+ 'Take the address before doing the cast, rather than after'
+ ' [runtime/casting] [4]')
+
+ self.assert_lint(
+ 'int* x = &dynamic_cast<int *>(foo);',
+ ['Are you taking an address of a cast? '
+ 'This is dangerous: could be a temp var. '
+ 'Take the address before doing the cast, rather than after'
+ ' [runtime/casting] [4]',
+ 'Do not use dynamic_cast<>. If you need to cast within a class '
+ 'hierarchy, use static_cast<> to upcast. Google doesn\'t support '
+ 'RTTI. [runtime/rtti] [5]'])
+
+ self.assert_lint(
+ 'int* x = &reinterpret_cast<int *>(foo);',
+ 'Are you taking an address of a cast? '
+ 'This is dangerous: could be a temp var. '
+ 'Take the address before doing the cast, rather than after'
+ ' [runtime/casting] [4]')
+
+ # It's OK to cast an address.
+ self.assert_lint(
+ 'int* x = reinterpret_cast<int *>(&foo);',
+ '')
+
+ def test_runtime_selfinit(self):
+ self.assert_lint(
+ 'Foo::Foo(Bar r, Bel l) : r_(r_), l_(l_) { }',
+ 'You seem to be initializing a member variable with itself.'
+ ' [runtime/init] [4]')
+ self.assert_lint(
+ 'Foo::Foo(Bar r, Bel l) : r_(r), l_(l) { }',
+ '')
+ self.assert_lint(
+ 'Foo::Foo(Bar r) : r_(r), l_(r_), ll_(l_) { }',
+ '')
+
+ def test_runtime_rtti(self):
+ statement = 'int* x = dynamic_cast<int*>(&foo);'
+ error_message = (
+ 'Do not use dynamic_cast<>. If you need to cast within a class '
+ 'hierarchy, use static_cast<> to upcast. Google doesn\'t support '
+ 'RTTI. [runtime/rtti] [5]')
+ # dynamic_cast is disallowed in most files.
+ self.assert_language_rules_check('foo.cpp', statement, error_message)
+ self.assert_language_rules_check('foo.h', statement, error_message)
+
+ # Tests for static_cast readability.
+ def test_static_cast_on_objects_with_toFoo(self):
+ mock_header_contents = ['inline Foo* toFoo(Bar* bar)']
+ fs = FileSystem()
+ orig_read_text_file_fn = fs.read_text_file
+
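+ # Stub out the filesystem read so the checker sees the fake header above
+ # instead of touching disk; the real reader is restored in the finally block.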
+ def mock_read_text_file_fn(path):
+ return mock_header_contents
+
+ try:
+ fs.read_text_file = mock_read_text_file_fn
+ message = self.perform_avoid_static_cast_of_objects(
+ 'Foo* x = static_cast<Foo*>(bar);',
+ filename='casting.cpp',
+ fs=fs)
+ self.assertEqual(message, 'static_cast of class objects is not allowed. Use toFoo defined in Foo.h.'
+ ' [runtime/casting] [4]')
+ finally:
+ fs.read_text_file = orig_read_text_file_fn
+
+ def test_static_cast_on_objects_without_toFoo(self):
+ mock_header_contents = ['inline FooBar* toFooBar(Bar* bar)']
+ fs = FileSystem()
+ orig_read_text_file_fn = fs.read_text_file
+
+ def mock_read_text_file_fn(path):
+ return mock_header_contents
+
+ try:
+ fs.read_text_file = mock_read_text_file_fn
+ message = self.perform_avoid_static_cast_of_objects(
+ 'Foo* x = static_cast<Foo*>(bar);',
+ filename='casting.cpp',
+ fs=fs)
+ self.assertEqual(message, 'static_cast of class objects is not allowed. Add toFoo in Foo.h and use it instead.'
+ ' [runtime/casting] [4]')
+ finally:
+ fs.read_text_file = orig_read_text_file_fn
+
+ # We cannot test this functionality because of differences in the
+ # function definitions. In any case, we may never enable this.
+ #
+ # # Test for unnamed arguments in a method.
+ # def test_check_for_unnamed_params(self):
+ # message = ('All parameters should be named in a function'
+ # ' [readability/function] [3]')
+ # self.assert_lint('virtual void A(int*) const;', message)
+ # self.assert_lint('virtual void B(void (*fn)(int*));', message)
+ # self.assert_lint('virtual void C(int*);', message)
+ # self.assert_lint('void *(*f)(void *) = x;', message)
+ # self.assert_lint('void Method(char*) {', message)
+ # self.assert_lint('void Method(char*);', message)
+ # self.assert_lint('void Method(char* /*x*/);', message)
+ # self.assert_lint('typedef void (*Method)(int32);', message)
+ # self.assert_lint('static void operator delete[](void*) throw();', message)
+ #
+ # self.assert_lint('virtual void D(int* p);', '')
+ # self.assert_lint('void operator delete(void* x) throw();', '')
+ # self.assert_lint('void Method(char* x)\n{', '')
+ # self.assert_lint('void Method(char* /*x*/)\n{', '')
+ # self.assert_lint('void Method(char* x);', '')
+ # self.assert_lint('typedef void (*Method)(int32 x);', '')
+ # self.assert_lint('static void operator delete[](void* x) throw();', '')
+ # self.assert_lint('static void operator delete[](void* /*x*/) throw();', '')
+ #
+ # # This one should technically warn, but doesn't because the function
+ # # pointer is confusing.
+ # self.assert_lint('virtual void E(void (*fn)(int* p));', '')
+
+ # Test deprecated casts such as int(d)
+ def test_deprecated_cast(self):
+ self.assert_lint(
+ 'int a = int(2.2);',
+ 'Using deprecated casting style. '
+ 'Use static_cast<int>(...) instead'
+ ' [readability/casting] [4]')
+ # Checks for false positives...
+ self.assert_lint(
+ 'int a = int(); // Constructor, o.k.',
+ '')
+ self.assert_lint(
+ 'X::X() : a(int()) { } // default Constructor, o.k.',
+ '')
+ self.assert_lint(
+ 'operator bool(); // Conversion operator, o.k.',
+ '')
+
+ # The second parameter to a gMock method definition is a function signature
+ # that often looks like a bad cast but should not be picked up by lint.
+ def test_mock_method(self):
+ self.assert_lint(
+ 'MOCK_METHOD0(method, int());',
+ '')
+ self.assert_lint(
+ 'MOCK_CONST_METHOD1(method, float(string));',
+ '')
+ self.assert_lint(
+ 'MOCK_CONST_METHOD2_T(method, double(float, float));',
+ '')
+
+ # Test sizeof(type) cases.
+ def test_sizeof_type(self):
+ self.assert_lint(
+ 'sizeof(int);',
+ 'Using sizeof(type). Use sizeof(varname) instead if possible'
+ ' [runtime/sizeof] [1]')
+ self.assert_lint(
+ 'sizeof(int *);',
+ 'Using sizeof(type). Use sizeof(varname) instead if possible'
+ ' [runtime/sizeof] [1]')
+
+ # Test typedef cases. There was a bug where cpp_style misidentified a
+ # typedef for a pointer to a function as a C-style cast and produced
+ # false-positive error messages.
+ def test_typedef_for_pointer_to_function(self):
+ self.assert_lint(
+ 'typedef void (*Func)(int x);',
+ '')
+ self.assert_lint(
+ 'typedef void (*Func)(int *x);',
+ '')
+ self.assert_lint(
+ 'typedef void Func(int x);',
+ '')
+ self.assert_lint(
+ 'typedef void Func(int *x);',
+ '')
+
+ def test_include_what_you_use_no_implementation_files(self):
+ code = 'std::vector<int> foo;'
+ self.assertEqual('Add #include <vector> for vector<>'
+ ' [build/include_what_you_use] [4]',
+ self.perform_include_what_you_use(code, 'foo.h'))
+ self.assertEqual('',
+ self.perform_include_what_you_use(code, 'foo.cpp'))
+
+ def test_include_what_you_use(self):
+ self.assert_include_what_you_use(
+ '''#include <vector>
+ std::vector<int> foo;
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include <map>
+ std::pair<int,int> foo;
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include <multimap>
+ std::pair<int,int> foo;
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include <hash_map>
+ std::pair<int,int> foo;
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include <utility>
+ std::pair<int,int> foo;
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include <vector>
+ DECLARE_string(foobar);
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include <vector>
+ DEFINE_string(foobar, "", "");
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include <vector>
+ std::pair<int,int> foo;
+ ''',
+ 'Add #include <utility> for pair<>'
+ ' [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include "base/foobar.h"
+ std::vector<int> foo;
+ ''',
+ 'Add #include <vector> for vector<>'
+ ' [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include <vector>
+ std::set<int> foo;
+ ''',
+ 'Add #include <set> for set<>'
+ ' [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include "base/foobar.h"
+ hash_map<int, int> foobar;
+ ''',
+ 'Add #include <hash_map> for hash_map<>'
+ ' [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include "base/foobar.h"
+ bool foobar = std::less<int>(0,1);
+ ''',
+ 'Add #include <functional> for less<>'
+ ' [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include "base/foobar.h"
+ bool foobar = min<int>(0,1);
+ ''',
+ 'Add #include <algorithm> for min [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ 'void a(const string &foobar);',
+ 'Add #include <string> for string [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include "base/foobar.h"
+ bool foobar = swap(0,1);
+ ''',
+ 'Add #include <algorithm> for swap [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include "base/foobar.h"
+ bool foobar = transform(a.begin(), a.end(), b.start(), Foo);
+ ''',
+ 'Add #include <algorithm> for transform '
+ '[build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include "base/foobar.h"
+ bool foobar = min_element(a.begin(), a.end());
+ ''',
+ 'Add #include <algorithm> for min_element '
+ '[build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''foo->swap(0,1);
+ foo.swap(0,1);
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include <string>
+ void a(const std::multimap<int,string> &foobar);
+ ''',
+ 'Add #include <map> for multimap<>'
+ ' [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include <queue>
+ void a(const std::priority_queue<int> &foobar);
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include "base/basictypes.h"
+ #include "base/port.h"
+ #include <assert.h>
+ #include <string>
+ #include <vector>
+ vector<string> hajoa;''', '')
+ self.assert_include_what_you_use(
+ '''#include <string>
+ int i = numeric_limits<int>::max()
+ ''',
+ 'Add #include <limits> for numeric_limits<>'
+ ' [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include <limits>
+ int i = numeric_limits<int>::max()
+ ''',
+ '')
+
+ # Test the UpdateIncludeState code path.
+ mock_header_contents = ['#include "blah/foo.h"', '#include "blah/bar.h"']
+ fs = FileSystem()
+ orig_read_text_file_fn = fs.read_text_file
+
+ def mock_read_text_file_fn(path):
+ return mock_header_contents
+
+ try:
+ fs.read_text_file = mock_read_text_file_fn
+ message = self.perform_include_what_you_use(
+ '#include "config.h"\n'
+ '#include "blah/a.h"\n',
+ filename='blah/a.cpp',
+ fs=fs)
+ self.assertEqual(message, '')
+
+ mock_header_contents = ['#include <set>']
+ message = self.perform_include_what_you_use(
+ '''#include "config.h"
+ #include "blah/a.h"
+
+ std::set<int> foo;''',
+ filename='blah/a.cpp',
+ fs=fs)
+ self.assertEqual(message, '')
+
+ # If there's just a .cpp and the header can't be found then it's ok.
+ message = self.perform_include_what_you_use(
+ '''#include "config.h"
+ #include "blah/a.h"
+
+ std::set<int> foo;''',
+ filename='blah/a.cpp')
+ self.assertEqual(message, '')
+
+ # Make sure we find the headers with relative paths.
+ mock_header_contents = ['']
+ message = self.perform_include_what_you_use(
+ '''#include "config.h"
+ #include "%s%sa.h"
+
+ std::set<int> foo;''' % (os.path.basename(os.getcwd()), os.path.sep),
+ filename='a.cpp',
+ fs=fs)
+ self.assertEqual(message, 'Add #include <set> for set<> '
+ '[build/include_what_you_use] [4]')
+ finally:
+ fs.read_text_file = orig_read_text_file_fn
+
+ def test_files_belong_to_same_module(self):
+ f = cpp_style.files_belong_to_same_module
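+ # files_belong_to_same_module returns (same_module, common_prefix); the
+ # prefix is apparently the leading path that must be added to the header
+ # to line the two paths up, as the 'xxx/yyy/' cases below show.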
+ self.assertEqual((True, ''), f('a.cpp', 'a.h'))
+ self.assertEqual((True, ''), f('base/google.cpp', 'base/google.h'))
+ self.assertEqual((True, ''), f('base/google_test.cpp', 'base/google.h'))
+ self.assertEqual((True, ''),
+ f('base/google_unittest.cpp', 'base/google.h'))
+ self.assertEqual((True, ''),
+ f('base/internal/google_unittest.cpp',
+ 'base/public/google.h'))
+ self.assertEqual((True, 'xxx/yyy/'),
+ f('xxx/yyy/base/internal/google_unittest.cpp',
+ 'base/public/google.h'))
+ self.assertEqual((True, 'xxx/yyy/'),
+ f('xxx/yyy/base/google_unittest.cpp',
+ 'base/public/google.h'))
+ self.assertEqual((True, ''),
+ f('base/google_unittest.cpp', 'base/google-inl.h'))
+ self.assertEqual((False, ''), f('a.cpp', 'b.h'))
+
+ def test_cleanse_line(self):
+ self.assertEqual('int foo = 0; ',
+ cpp_style.cleanse_comments('int foo = 0; // danger!'))
+ self.assertEqual('int o = 0;',
+ cpp_style.cleanse_comments('int /* foo */ o = 0;'))
+ self.assertEqual('foo(int a, int b);',
+ cpp_style.cleanse_comments('foo(int a /* abc */, int b);'))
+ self.assertEqual('f(a, b);',
+ cpp_style.cleanse_comments('f(a, /* name */ b);'))
+ self.assertEqual('f(a, b);',
+ cpp_style.cleanse_comments('f(a /* name */, b);'))
+ self.assertEqual('f(a, b);',
+ cpp_style.cleanse_comments('f(a, /* name */b);'))
+
+ def test_multi_line_comments(self):
+ # missing explicit is bad
+ self.assert_multi_line_lint(
+ r'''int a = 0;
+ /* multi-liner
+ class Foo {
+ Foo(int f); // should cause a lint warning in code
+ }
+ */ ''',
+ '')
+ self.assert_multi_line_lint(
+ '''\
+ /* int a = 0; multi-liner
+ static const int b = 0;''',
+ ['Could not find end of multi-line comment'
+ ' [readability/multiline_comment] [5]',
+ 'Complex multi-line /*...*/-style comment found. '
+ 'Lint may give bogus warnings. Consider replacing these with '
+ '//-style comments, with #if 0...#endif, or with more clearly '
+ 'structured multi-line comments. [readability/multiline_comment] [5]'])
+ self.assert_multi_line_lint(r''' /* multi-line comment''',
+ ['Could not find end of multi-line comment'
+ ' [readability/multiline_comment] [5]',
+ 'Complex multi-line /*...*/-style comment found. '
+ 'Lint may give bogus warnings. Consider replacing these with '
+ '//-style comments, with #if 0...#endif, or with more clearly '
+ 'structured multi-line comments. [readability/multiline_comment] [5]'])
+ self.assert_multi_line_lint(r''' // /* comment, but not multi-line''', '')
+
+ def test_multiline_strings(self):
+ multiline_string_error_message = (
+ 'Multi-line string ("...") found. This lint script doesn\'t '
+ 'do well with such strings, and may give bogus warnings. They\'re '
+ 'ugly and unnecessary, and you should use concatenation instead".'
+ ' [readability/multiline_string] [5]')
+
+ file_path = 'mydir/foo.cpp'
+
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data(file_path, 'cpp',
+ ['const char* str = "This is a\\',
+ ' multiline string.";'],
+ error_collector)
+ self.assertEqual(
+ 2, # One per line.
+ error_collector.result_list().count(multiline_string_error_message))
+
+ # Test non-explicit single-argument constructors
+ def test_explicit_single_argument_constructors(self):
+ # missing explicit is bad
+ self.assert_multi_line_lint(
+ '''\
+ class Foo {
+ Foo(int f);
+ };''',
+ 'Single-argument constructors should be marked explicit.'
+ ' [runtime/explicit] [5]')
+ # missing explicit is bad, even with whitespace
+ self.assert_multi_line_lint(
+ '''\
+ class Foo {
+ Foo (int f);
+ };''',
+ ['Extra space before ( in function call [whitespace/parens] [4]',
+ 'Single-argument constructors should be marked explicit.'
+ ' [runtime/explicit] [5]'])
+ # missing explicit, with distracting comment, is still bad
+ self.assert_multi_line_lint(
+ '''\
+ class Foo {
+ Foo(int f); // simpler than Foo(blargh, blarg)
+ };''',
+ 'Single-argument constructors should be marked explicit.'
+ ' [runtime/explicit] [5]')
+ # missing explicit, with qualified classname
+ self.assert_multi_line_lint(
+ '''\
+ class Qualifier::AnotherOne::Foo {
+ Foo(int f);
+ };''',
+ 'Single-argument constructors should be marked explicit.'
+ ' [runtime/explicit] [5]')
+ # structs are caught as well.
+ self.assert_multi_line_lint(
+ '''\
+ struct Foo {
+ Foo(int f);
+ };''',
+ 'Single-argument constructors should be marked explicit.'
+ ' [runtime/explicit] [5]')
+ # Templatized classes are caught as well.
+ self.assert_multi_line_lint(
+ '''\
+ template<typename T> class Foo {
+ Foo(int f);
+ };''',
+ 'Single-argument constructors should be marked explicit.'
+ ' [runtime/explicit] [5]')
+ # proper style is okay
+ self.assert_multi_line_lint(
+ '''\
+ class Foo {
+ explicit Foo(int f);
+ };''',
+ '')
+ # two argument constructor is okay
+ self.assert_multi_line_lint(
+ '''\
+ class Foo {
+ Foo(int f, int b);
+ };''',
+ '')
+ # two argument constructor, across two lines, is okay
+ self.assert_multi_line_lint(
+ '''\
+ class Foo {
+ Foo(int f,
+ int b);
+ };''',
+ '')
+ # non-constructor (but similar name), is okay
+ self.assert_multi_line_lint(
+ '''\
+ class Foo {
+ aFoo(int f);
+ };''',
+ '')
+ # constructor with void argument is okay
+ self.assert_multi_line_lint(
+ '''\
+ class Foo {
+ Foo(void);
+ };''',
+ '')
+ # single argument method is okay
+ self.assert_multi_line_lint(
+ '''\
+ class Foo {
+ Bar(int b);
+ };''',
+ '')
+ # comments should be ignored
+ self.assert_multi_line_lint(
+ '''\
+ class Foo {
+ // Foo(int f);
+ };''',
+ '')
+ # single argument function following class definition is okay
+ # (okay, it's not actually valid, but we don't want a false positive)
+ self.assert_multi_line_lint(
+ '''\
+ class Foo {
+ Foo(int f, int b);
+ };
+ Foo(int f);''',
+ '')
+ # single argument function is okay
+ self.assert_multi_line_lint(
+ '''static Foo(int f);''',
+ '')
+ # single argument copy constructor is okay.
+ self.assert_multi_line_lint(
+ '''\
+ class Foo {
+ Foo(const Foo&);
+ };''',
+ '')
+ self.assert_multi_line_lint(
+ '''\
+ class Foo {
+ Foo(Foo&);
+ };''',
+ '')
+
+ def test_slash_star_comment_on_single_line(self):
+ self.assert_multi_line_lint(
+ '''/* static */ Foo(int f);''',
+ '')
+ self.assert_multi_line_lint(
+ '''/*/ static */ Foo(int f);''',
+ '')
+ self.assert_multi_line_lint(
+ '''/*/ static Foo(int f);''',
+ 'Could not find end of multi-line comment'
+ ' [readability/multiline_comment] [5]')
+ self.assert_multi_line_lint(
+ ''' /*/ static Foo(int f);''',
+ 'Could not find end of multi-line comment'
+ ' [readability/multiline_comment] [5]')
+
+ # Test suspicious usage of "if" like this:
+ # if (a == b) {
+ # DoSomething();
+ # } if (a == c) { // Should be "else if".
+ # DoSomething(); // This gets called twice if a == b && a == c.
+ # }
+ def test_suspicious_usage_of_if(self):
+ self.assert_lint(
+ ' if (a == b) {',
+ '')
+ self.assert_lint(
+ ' } if (a == b) {',
+ 'Did you mean "else if"? If not, start a new line for "if".'
+ ' [readability/braces] [4]')
+
+ # Test suspicious usage of memset. Specifically, a 0
+ # as the final argument is almost certainly an error.
+ def test_suspicious_usage_of_memset(self):
+ # Normal use is okay.
+ self.assert_lint(
+ ' memset(buf, 0, sizeof(buf))',
+ '')
+
+ # A 0 as the final argument is almost certainly an error.
+ self.assert_lint(
+ ' memset(buf, sizeof(buf), 0)',
+ 'Did you mean "memset(buf, 0, sizeof(buf))"?'
+ ' [runtime/memset] [4]')
+ self.assert_lint(
+ ' memset(buf, xsize * ysize, 0)',
+ 'Did you mean "memset(buf, 0, xsize * ysize)"?'
+ ' [runtime/memset] [4]')
+
+ # There is legitimate test code that uses this form.
+ # This is okay since the second argument is a literal.
+ self.assert_lint(
+ " memset(buf, 'y', 0)",
+ '')
+ self.assert_lint(
+ ' memset(buf, 4, 0)',
+ '')
+ self.assert_lint(
+ ' memset(buf, -1, 0)',
+ '')
+ self.assert_lint(
+ ' memset(buf, 0xF1, 0)',
+ '')
+ self.assert_lint(
+ ' memset(buf, 0xcd, 0)',
+ '')
+
+ def test_check_posix_threading(self):
+ self.assert_lint('sctime_r()', '')
+ self.assert_lint('strtok_r()', '')
+ self.assert_lint(' strtok_r(foo, ba, r)', '')
+ self.assert_lint('brand()', '')
+ self.assert_lint('_rand()', '')
+ self.assert_lint('.rand()', '')
+ self.assert_lint('>rand()', '')
+ self.assert_lint('rand()',
+ 'Consider using rand_r(...) instead of rand(...)'
+ ' for improved thread safety.'
+ ' [runtime/threadsafe_fn] [2]')
+ self.assert_lint('strtok()',
+ 'Consider using strtok_r(...) '
+ 'instead of strtok(...)'
+ ' for improved thread safety.'
+ ' [runtime/threadsafe_fn] [2]')
+
+ # Test potential format string bugs like printf(foo).
+ def test_format_strings(self):
+ self.assert_lint('printf("foo")', '')
+ self.assert_lint('printf("foo: %s", foo)', '')
+ self.assert_lint('DocidForPrintf(docid)', '') # Should not trigger.
+ self.assert_lint(
+ 'printf(foo)',
+ 'Potential format string bug. Do printf("%s", foo) instead.'
+ ' [runtime/printf] [4]')
+ self.assert_lint(
+ 'printf(foo.c_str())',
+ 'Potential format string bug. '
+ 'Do printf("%s", foo.c_str()) instead.'
+ ' [runtime/printf] [4]')
+ self.assert_lint(
+ 'printf(foo->c_str())',
+ 'Potential format string bug. '
+ 'Do printf("%s", foo->c_str()) instead.'
+ ' [runtime/printf] [4]')
+ self.assert_lint(
+ 'StringPrintf(foo)',
+ 'Potential format string bug. Do StringPrintf("%s", foo) instead.'
+ ' [runtime/printf] [4]')
+
+ # Variable-length arrays are not permitted.
+ def test_variable_length_array_detection(self):
+ errmsg = ('Do not use variable-length arrays. Use an appropriately named '
+ "('k' followed by CamelCase) compile-time constant for the size."
+ ' [runtime/arrays] [1]')
+
+ self.assert_lint('int a[any_old_variable];', errmsg)
+ self.assert_lint('int doublesize[some_var * 2];', errmsg)
+ self.assert_lint('int a[afunction()];', errmsg)
+ self.assert_lint('int a[function(kMaxFooBars)];', errmsg)
+ self.assert_lint('bool aList[items_->size()];', errmsg)
+ self.assert_lint('namespace::Type buffer[len+1];', errmsg)
+
+ self.assert_lint('int a[64];', '')
+ self.assert_lint('int a[0xFF];', '')
+ self.assert_lint('int first[256], second[256];', '')
+ self.assert_lint('int arrayName[kCompileTimeConstant];', '')
+ self.assert_lint('char buf[somenamespace::kBufSize];', '')
+ self.assert_lint('int arrayName[ALL_CAPS];', '')
+ self.assert_lint('AClass array1[foo::bar::ALL_CAPS];', '')
+ self.assert_lint('int a[kMaxStrLen + 1];', '')
+ self.assert_lint('int a[sizeof(foo)];', '')
+ self.assert_lint('int a[sizeof(*foo)];', '')
+ self.assert_lint('int a[sizeof foo];', '')
+ self.assert_lint('int a[sizeof(struct Foo)];', '')
+ self.assert_lint('int a[128 - sizeof(const bar)];', '')
+ self.assert_lint('int a[(sizeof(foo) * 4)];', '')
+ self.assert_lint('int a[(arraysize(fixed_size_array)/2) << 1];', 'Missing spaces around / [whitespace/operators] [3]')
+ self.assert_lint('delete a[some_var];', '')
+ self.assert_lint('return a[some_var];', '')
+
+ # Brace usage
+ def test_braces(self):
+ # Braces shouldn't be followed by a ; unless they're defining a struct
+ # or initializing an array
+ self.assert_lint('int a[3] = { 1, 2, 3 };', '')
+ self.assert_lint(
+ '''\
+ const int foo[] =
+ {1, 2, 3 };''',
+ '')
+ # For single line, unmatched '}' with a ';' is ignored (not enough context)
+ self.assert_multi_line_lint(
+ '''\
+ int a[3] = { 1,
+ 2,
+ 3 };''',
+ '')
+ self.assert_multi_line_lint(
+ '''\
+ int a[2][3] = { { 1, 2 },
+ { 3, 4 } };''',
+ '')
+ self.assert_multi_line_lint(
+ '''\
+ int a[2][3] =
+ { { 1, 2 },
+ { 3, 4 } };''',
+ '')
+
+ # CHECK/EXPECT_TRUE/EXPECT_FALSE replacements
+ def test_check_check(self):
+ self.assert_lint('CHECK(x == 42)',
+ 'Consider using CHECK_EQ instead of CHECK(a == b)'
+ ' [readability/check] [2]')
+ self.assert_lint('CHECK(x != 42)',
+ 'Consider using CHECK_NE instead of CHECK(a != b)'
+ ' [readability/check] [2]')
+ self.assert_lint('CHECK(x >= 42)',
+ 'Consider using CHECK_GE instead of CHECK(a >= b)'
+ ' [readability/check] [2]')
+ self.assert_lint('CHECK(x > 42)',
+ 'Consider using CHECK_GT instead of CHECK(a > b)'
+ ' [readability/check] [2]')
+ self.assert_lint('CHECK(x <= 42)',
+ 'Consider using CHECK_LE instead of CHECK(a <= b)'
+ ' [readability/check] [2]')
+ self.assert_lint('CHECK(x < 42)',
+ 'Consider using CHECK_LT instead of CHECK(a < b)'
+ ' [readability/check] [2]')
+
+ self.assert_lint('DCHECK(x == 42)',
+ 'Consider using DCHECK_EQ instead of DCHECK(a == b)'
+ ' [readability/check] [2]')
+ self.assert_lint('DCHECK(x != 42)',
+ 'Consider using DCHECK_NE instead of DCHECK(a != b)'
+ ' [readability/check] [2]')
+ self.assert_lint('DCHECK(x >= 42)',
+ 'Consider using DCHECK_GE instead of DCHECK(a >= b)'
+ ' [readability/check] [2]')
+ self.assert_lint('DCHECK(x > 42)',
+ 'Consider using DCHECK_GT instead of DCHECK(a > b)'
+ ' [readability/check] [2]')
+ self.assert_lint('DCHECK(x <= 42)',
+ 'Consider using DCHECK_LE instead of DCHECK(a <= b)'
+ ' [readability/check] [2]')
+ self.assert_lint('DCHECK(x < 42)',
+ 'Consider using DCHECK_LT instead of DCHECK(a < b)'
+ ' [readability/check] [2]')
+
+ self.assert_lint(
+ 'EXPECT_TRUE("42" == x)',
+ 'Consider using EXPECT_EQ instead of EXPECT_TRUE(a == b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'EXPECT_TRUE("42" != x)',
+ 'Consider using EXPECT_NE instead of EXPECT_TRUE(a != b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'EXPECT_TRUE(+42 >= x)',
+ 'Consider using EXPECT_GE instead of EXPECT_TRUE(a >= b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'EXPECT_TRUE_M(-42 > x)',
+ 'Consider using EXPECT_GT_M instead of EXPECT_TRUE_M(a > b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'EXPECT_TRUE_M(42U <= x)',
+ 'Consider using EXPECT_LE_M instead of EXPECT_TRUE_M(a <= b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'EXPECT_TRUE_M(42L < x)',
+ 'Consider using EXPECT_LT_M instead of EXPECT_TRUE_M(a < b)'
+ ' [readability/check] [2]')
+
+ self.assert_lint(
+ 'EXPECT_FALSE(x == 42)',
+ 'Consider using EXPECT_NE instead of EXPECT_FALSE(a == b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'EXPECT_FALSE(x != 42)',
+ 'Consider using EXPECT_EQ instead of EXPECT_FALSE(a != b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'EXPECT_FALSE(x >= 42)',
+ 'Consider using EXPECT_LT instead of EXPECT_FALSE(a >= b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'ASSERT_FALSE(x > 42)',
+ 'Consider using ASSERT_LE instead of ASSERT_FALSE(a > b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'ASSERT_FALSE(x <= 42)',
+ 'Consider using ASSERT_GT instead of ASSERT_FALSE(a <= b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'ASSERT_FALSE_M(x < 42)',
+ 'Consider using ASSERT_GE_M instead of ASSERT_FALSE_M(a < b)'
+ ' [readability/check] [2]')
+
+ self.assert_lint('CHECK(some_iterator == obj.end())', '')
+ self.assert_lint('EXPECT_TRUE(some_iterator == obj.end())', '')
+ self.assert_lint('EXPECT_FALSE(some_iterator == obj.end())', '')
+
+ self.assert_lint('CHECK(CreateTestFile(dir, (1 << 20)));', '')
+ self.assert_lint('CHECK(CreateTestFile(dir, (1 >> 20)));', '')
+
+ self.assert_lint('CHECK(x<42)',
+ ['Missing spaces around <'
+ ' [whitespace/operators] [3]',
+ 'Consider using CHECK_LT instead of CHECK(a < b)'
+ ' [readability/check] [2]'])
+ self.assert_lint('CHECK(x>42)',
+ 'Consider using CHECK_GT instead of CHECK(a > b)'
+ ' [readability/check] [2]')
+
+ self.assert_lint(
+ ' EXPECT_TRUE(42 < x) // Random comment.',
+ 'Consider using EXPECT_LT instead of EXPECT_TRUE(a < b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'EXPECT_TRUE( 42 < x )',
+ ['Extra space after ( in function call'
+ ' [whitespace/parens] [4]',
+ 'Consider using EXPECT_LT instead of EXPECT_TRUE(a < b)'
+ ' [readability/check] [2]'])
+ self.assert_lint(
+ 'CHECK("foo" == "foo")',
+ 'Consider using CHECK_EQ instead of CHECK(a == b)'
+ ' [readability/check] [2]')
+
+ self.assert_lint('CHECK_EQ("foo", "foo")', '')
+
+ def test_brace_at_begin_of_line(self):
+ self.assert_lint('{',
+ 'This { should be at the end of the previous line'
+ ' [whitespace/braces] [4]')
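+ # An opening brace directly after a preprocessor line is exempt.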
+ self.assert_multi_line_lint(
+ '#endif\n'
+ '{\n'
+ '}\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition) {',
+ '')
+ self.assert_multi_line_lint(
+ ' MACRO1(macroArg) {',
+ '')
+ self.assert_multi_line_lint(
+ 'ACCESSOR_GETTER(MessageEventPorts) {',
+ 'Place brace on its own line for function definitions. [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'int foo() {',
+ 'Place brace on its own line for function definitions. [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'int foo() const {',
+ 'Place brace on its own line for function definitions. [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'int foo() override {',
+ 'Place brace on its own line for function definitions. [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'int foo() final {',
+ 'Place brace on its own line for function definitions. [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'int foo() const\n'
+ '{\n'
+ '}\n',
+ '')
+ self.assert_multi_line_lint(
+ 'int foo() override\n'
+ '{\n'
+ '}\n',
+ '')
+ self.assert_multi_line_lint(
+ 'int foo() final\n'
+ '{\n'
+ '}\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition\n'
+ ' && condition2\n'
+ ' && condition3) {\n'
+ '}\n',
+ '')
+
+ def test_mismatching_spaces_in_parens(self):
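+ # Note: 'for' statements with an empty clause, e.g. 'for ( ; foo; bar)',
+ # may legitimately keep a space next to the parenthesis; see below.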
+ self.assert_lint('if (foo ) {', 'Extra space before ) in if'
+ ' [whitespace/parens] [5]')
+ self.assert_lint('switch ( foo) {', 'Extra space after ( in switch'
+ ' [whitespace/parens] [5]')
+ self.assert_lint('for (foo; ba; bar ) {', 'Extra space before ) in for'
+ ' [whitespace/parens] [5]')
+ self.assert_lint('for ((foo); (ba); (bar) ) {', 'Extra space before ) in for'
+ ' [whitespace/parens] [5]')
+ self.assert_lint('for (; foo; bar) {', '')
+ self.assert_lint('for (; (foo); (bar)) {', '')
+ self.assert_lint('for ( ; foo; bar) {', '')
+ self.assert_lint('for ( ; (foo); (bar)) {', '')
+ self.assert_lint('for ( ; foo; bar ) {', 'Extra space before ) in for'
+ ' [whitespace/parens] [5]')
+ self.assert_lint('for ( ; (foo); (bar) ) {', 'Extra space before ) in for'
+ ' [whitespace/parens] [5]')
+ self.assert_lint('for (foo; bar; ) {', '')
+ self.assert_lint('for ((foo); (bar); ) {', '')
+ self.assert_lint('foreach (foo, foos ) {', 'Extra space before ) in foreach'
+ ' [whitespace/parens] [5]')
+ self.assert_lint('foreach ( foo, foos) {', 'Extra space after ( in foreach'
+ ' [whitespace/parens] [5]')
+ self.assert_lint('while ( foo) {', 'Extra space after ( in while'
+ ' [whitespace/parens] [5]')
+
+ def test_spacing_for_fncall(self):
+ self.assert_lint('if (foo) {', '')
+ self.assert_lint('for (foo;bar;baz) {', '')
+ self.assert_lint('foreach (foo, foos) {', '')
+ self.assert_lint('while (foo) {', '')
+ self.assert_lint('switch (foo) {', '')
+ self.assert_lint('new (RenderArena()) RenderInline(document())', '')
+ self.assert_lint('foo( bar)', 'Extra space after ( in function call'
+ ' [whitespace/parens] [4]')
+ self.assert_lint('foobar( \\', '')
+ self.assert_lint('foobar(     \\', '')
+ self.assert_lint('( a + b)', 'Extra space after ('
+ ' [whitespace/parens] [2]')
+ self.assert_lint('((a+b))', '')
+ self.assert_lint('foo (foo)', 'Extra space before ( in function call'
+ ' [whitespace/parens] [4]')
+ self.assert_lint('#elif (foo(bar))', '')
+ self.assert_lint('#elif (foo(bar) && foo(baz))', '')
+ self.assert_lint('typedef foo (*foo)(foo)', '')
+ self.assert_lint('typedef foo (*foo12bar_)(foo)', '')
+ self.assert_lint('typedef foo (Foo::*bar)(foo)', '')
+ self.assert_lint('foo (Foo::*bar)(',
+ 'Extra space before ( in function call'
+ ' [whitespace/parens] [4]')
+ self.assert_lint('typedef foo (Foo::*bar)(', '')
+ self.assert_lint('(foo)(bar)', '')
+ self.assert_lint('Foo (*foo)(bar)', '')
+ self.assert_lint('Foo (*foo)(Bar bar,', '')
+ self.assert_lint('char (*p)[sizeof(foo)] = &foo', '')
+ self.assert_lint('char (&ref)[sizeof(foo)] = &foo', '')
+ self.assert_lint('const char32 (*table[])[6];', '')
+
+ def test_spacing_before_braces(self):
+ self.assert_lint('if (foo){', 'Missing space before {'
+ ' [whitespace/braces] [5]')
+ self.assert_lint('for{', 'Missing space before {'
+ ' [whitespace/braces] [5]')
+ self.assert_lint('for {', '')
+ self.assert_lint('EXPECT_DEBUG_DEATH({', '')
+
+ def test_spacing_between_braces(self):
+ self.assert_lint(' { }', '')
+ self.assert_lint(' {}', 'Missing space inside { }. [whitespace/braces] [5]')
+ self.assert_lint(' {  }', 'Too many spaces inside { }. [whitespace/braces] [5]')
+
+ def test_spacing_around_else(self):
+ self.assert_lint('}else {', 'Missing space before else'
+ ' [whitespace/braces] [5]')
+ self.assert_lint('} else{', 'Missing space before {'
+ ' [whitespace/braces] [5]')
+ self.assert_lint('} else {', '')
+ self.assert_lint('} else if', '')
+
+ def test_spacing_for_binary_ops(self):
+ self.assert_lint('if (foo<=bar) {', 'Missing spaces around <='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('if (foo<bar) {', 'Missing spaces around <'
+ ' [whitespace/operators] [3]')
+ self.assert_lint('if (foo<bar->baz) {', 'Missing spaces around <'
+ ' [whitespace/operators] [3]')
+ self.assert_lint('if (foo<bar->bar) {', 'Missing spaces around <'
+ ' [whitespace/operators] [3]')
+ self.assert_lint('typedef hash_map<Foo, Bar', 'Missing spaces around <'
+ ' [whitespace/operators] [3]')
+ self.assert_lint('typedef hash_map<FoooooType, BaaaaarType,', '')
+ self.assert_lint('a<Foo> t+=b;', 'Missing spaces around +='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo> t-=b;', 'Missing spaces around -='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t*=b;', 'Missing spaces around *='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t/=b;', 'Missing spaces around /='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t|=b;', 'Missing spaces around |='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t&=b;', 'Missing spaces around &='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t<<=b;', 'Missing spaces around <<='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t>>=b;', 'Missing spaces around >>='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t>>=&b|c;', 'Missing spaces around >>='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t<<=*b/c;', 'Missing spaces around <<='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo> t -= b;', '')
+ self.assert_lint('a<Foo> t += b;', '')
+ self.assert_lint('a<Foo*> t *= b;', '')
+ self.assert_lint('a<Foo*> t /= b;', '')
+ self.assert_lint('a<Foo*> t |= b;', '')
+ self.assert_lint('a<Foo*> t &= b;', '')
+ self.assert_lint('a<Foo*> t <<= b;', '')
+ self.assert_lint('a<Foo*> t >>= b;', '')
+ self.assert_lint('a<Foo*> t >>= &b|c;', 'Missing spaces around |'
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t <<= *b/c;', 'Missing spaces around /'
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t <<= b/c; //Test', [
+ 'Should have a space between // and comment '
+ '[whitespace/comments] [4]', 'Missing'
+ ' spaces around / [whitespace/operators] [3]'])
+ self.assert_lint('a<Foo*> t <<= b||c; //Test', ['One space before end'
+ ' of line comments [whitespace/comments] [5]',
+ 'Should have a space between // and comment '
+ '[whitespace/comments] [4]',
+ 'Missing spaces around || [whitespace/operators] [3]'])
+ self.assert_lint('a<Foo*> t <<= b&&c; // Test', 'Missing spaces around'
+ ' && [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t <<= b&&&c; // Test', 'Missing spaces around'
+ ' && [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t <<= b&&*c; // Test', 'Missing spaces around'
+ ' && [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t <<= b && *c; // Test', '')
+ self.assert_lint('a<Foo*> t <<= b && &c; // Test', '')
+ self.assert_lint('a<Foo*> t <<= b || &c; /*Test', 'Complex multi-line '
+ '/*...*/-style comment found. Lint may give bogus '
+ 'warnings. Consider replacing these with //-style'
+ ' comments, with #if 0...#endif, or with more clearly'
+ ' structured multi-line comments. [readability/multiline_comment] [5]')
+ self.assert_lint('a<Foo&> t <<= &b | &c;', '')
+ self.assert_lint('a<Foo*> t <<= &b & &c; // Test', '')
+ self.assert_lint('a<Foo*> t <<= *b / &c; // Test', '')
+ self.assert_lint('if (a=b == 1)', 'Missing spaces around = [whitespace/operators] [4]')
+ self.assert_lint('a = 1<<20', 'Missing spaces around << [whitespace/operators] [3]')
+ self.assert_lint('a = 1>> 20', 'Missing spaces around >> [whitespace/operators] [3]')
+ self.assert_lint('a = 1 >>20', 'Missing spaces around >> [whitespace/operators] [3]')
+ self.assert_lint('a = 1>>20', 'Missing spaces around >> [whitespace/operators] [3]')
+ self.assert_lint('func(OwnPtr<Vector<Foo>>)', '')
+ self.assert_lint('func(OwnPtr<Vector<Foo>> foo)', '')
+ self.assert_lint('func(OwnPtr<HashMap<Foo, Member<Bar>>>)', '')
+ # FIXME: The following test should not show any error.
+ self.assert_lint('func(OwnPtr<HashMap<Foo, Member<Bar\n >>>)',
+ 'Missing spaces around < [whitespace/operators] [3]')
+ self.assert_lint('if (a = b == 1)', '')
+ self.assert_lint('a = 1 << 20', '')
+ self.assert_multi_line_lint('#include <sys/io.h>\n', '')
+ self.assert_multi_line_lint('#import <foo/bar.h>\n', '')
+
+ def test_operator_methods(self):
+ self.assert_lint('String operator+(const String&, const String&);', '')
+ self.assert_lint('String operator/(const String&, const String&);', '')
+ self.assert_lint('bool operator==(const String&, const String&);', '')
+ self.assert_lint('String& operator-=(const String&, const String&);', '')
+ self.assert_lint('String& operator+=(const String&, const String&);', '')
+ self.assert_lint('String& operator*=(const String&, const String&);', '')
+ self.assert_lint('String& operator%=(const String&, const String&);', '')
+ self.assert_lint('String& operator&=(const String&, const String&);', '')
+ self.assert_lint('String& operator<<=(const String&, const String&);', '')
+ self.assert_lint('String& operator>>=(const String&, const String&);', '')
+ self.assert_lint('String& operator|=(const String&, const String&);', '')
+ self.assert_lint('String& operator^=(const String&, const String&);', '')
+
+ def test_spacing_before_last_semicolon(self):
+ self.assert_lint('call_function() ;',
+ 'Extra space before last semicolon. If this should be an '
+ 'empty statement, use { } instead.'
+ ' [whitespace/semicolon] [5]')
+ self.assert_lint('while (true) ;',
+ 'Extra space before last semicolon. If this should be an '
+ 'empty statement, use { } instead.'
+ ' [whitespace/semicolon] [5]')
+ self.assert_lint('default:;',
+ 'Semicolon defining empty statement. Use { } instead.'
+ ' [whitespace/semicolon] [5]')
+ self.assert_lint(' ;',
+ 'Line contains only semicolon. If this should be an empty '
+ 'statement, use { } instead.'
+ ' [whitespace/semicolon] [5]')
+ self.assert_lint('for (int i = 0; ;', '')
+
+ # Static or global STL strings.
+ def test_static_or_global_stlstrings(self):
+ self.assert_lint('string foo;',
+ 'For a static/global string constant, use a C style '
+ 'string instead: "char foo[]".'
+ ' [runtime/string] [4]')
+ self.assert_lint('string kFoo = "hello"; // English',
+ 'For a static/global string constant, use a C style '
+ 'string instead: "char kFoo[]".'
+ ' [runtime/string] [4]')
+ self.assert_lint('static string foo;',
+ 'For a static/global string constant, use a C style '
+ 'string instead: "static char foo[]".'
+ ' [runtime/string] [4]')
+ self.assert_lint('static const string foo;',
+ 'For a static/global string constant, use a C style '
+ 'string instead: "static const char foo[]".'
+ ' [runtime/string] [4]')
+ self.assert_lint('string Foo::bar;',
+ 'For a static/global string constant, use a C style '
+ 'string instead: "char Foo::bar[]".'
+ ' [runtime/string] [4]')
+ # Rare case.
+ self.assert_lint('string foo("foobar");',
+ 'For a static/global string constant, use a C style '
+ 'string instead: "char foo[]".'
+ ' [runtime/string] [4]')
+ # Should not catch local or member variables.
+ self.assert_lint(' string foo', '')
+ # Should not catch functions.
+ self.assert_lint('string EmptyString() { return ""; }', '')
+ self.assert_lint('string EmptyString () { return ""; }', '')
+ self.assert_lint('string VeryLongNameFunctionSometimesEndsWith(\n'
+ ' VeryLongNameType veryLongNameVariable) { }', '')
+ self.assert_lint('template<>\n'
+ 'string FunctionTemplateSpecialization<SomeType>(\n'
+ ' int x) { return ""; }', '')
+ self.assert_lint('template<>\n'
+ 'string FunctionTemplateSpecialization<vector<A::B>* >(\n'
+ ' int x) { return ""; }', '')
+
+ # Should not catch methods of template classes.
+ self.assert_lint('string Class<Type>::Method() const\n'
+ '{\n'
+ ' return "";\n'
+ '}\n', '')
+ self.assert_lint('string Class<Type>::Method(\n'
+ ' int arg) const\n'
+ '{\n'
+ ' return "";\n'
+ '}\n', '')
+
+ def test_no_spaces_in_function_calls(self):
+ self.assert_lint('TellStory(1, 3);',
+ '')
+ self.assert_lint('TellStory(1, 3 );',
+ 'Extra space before )'
+ ' [whitespace/parens] [2]')
+ self.assert_lint('TellStory(1 /* wolf */, 3 /* pigs */);',
+ '')
+ self.assert_multi_line_lint('#endif\n );',
+ '')
+
+ def test_one_spaces_between_code_and_comments(self):
+ self.assert_lint('} // namespace foo',
+ '')
+ self.assert_lint('}// namespace foo',
+ 'One space before end of line comments'
+ ' [whitespace/comments] [5]')
+ self.assert_lint('printf("foo"); // Outside quotes.',
+ '')
+ self.assert_lint('int i = 0; // Having one space is fine.', '')
+ self.assert_lint('int i = 0;  // Having two spaces is bad.',
+ 'One space before end of line comments'
+ ' [whitespace/comments] [5]')
+ self.assert_lint('int i = 0;   // Having three spaces is bad.',
+ 'One space before end of line comments'
+ ' [whitespace/comments] [5]')
+ self.assert_lint('// Top level comment', '')
+ self.assert_lint('    // Line starts with four spaces.', '')
+ self.assert_lint('foo();\n'
+ '{ // A scope is opening.', '')
+ self.assert_lint('    foo();\n'
+ '    { // An indented scope is opening.', '')
+ self.assert_lint('if (foo) { // not a pure scope',
+ '')
+ self.assert_lint('printf("// In quotes.")', '')
+ self.assert_lint('printf("\\"%s // In quotes.")', '')
+ self.assert_lint('printf("%s", "// In quotes.")', '')
+
+ def test_line_ending_in_whitespace(self):
+ self.assert_lint('int a; // This is a sentence.',
+ '')
+ self.assert_lint('int a; // This is a sentence. ',
+ 'Line ends in whitespace. Consider deleting these extra spaces. [whitespace/end_of_line] [4]')
+
+ def test_space_after_comment_marker(self):
+ self.assert_lint('//', '')
+ self.assert_lint('//x', 'Should have a space between // and comment'
+ ' [whitespace/comments] [4]')
+ self.assert_lint('// x', '')
+ self.assert_lint('//----', '')
+ self.assert_lint('//====', '')
+ self.assert_lint('//////', '')
+ self.assert_lint('////// x', '')
+ self.assert_lint('/// x', '')
+ self.assert_lint('////x', 'Should have a space between // and comment'
+ ' [whitespace/comments] [4]')
+
+ def test_newline_at_eof(self):
+ def do_test(self, data, is_missing_eof):
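+ # Helper: lint the data and expect the missing-newline error exactly
+ # once if and only if is_missing_eof is set.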
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data('foo.cpp', 'cpp', data.split('\n'),
+ error_collector)
+ # The warning appears only once.
+ self.assertEqual(
+ int(is_missing_eof),
+ error_collector.results().count(
+ 'Could not find a newline character at the end of the file.'
+ ' [whitespace/ending_newline] [5]'))
+
+ do_test(self, '// Newline\n// at EOF\n', False)
+ do_test(self, '// No newline\n// at EOF', True)
+
+ def test_invalid_utf8(self):
+ def do_test(self, raw_bytes, has_invalid_utf8):
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data('foo.cpp', 'cpp',
+ unicode(raw_bytes, 'utf8', 'replace').split('\n'),
+ error_collector)
+ # The warning appears only once.
+ self.assertEqual(
+ int(has_invalid_utf8),
+ error_collector.results().count(
+ 'Line contains invalid UTF-8'
+ ' (or Unicode replacement character).'
+ ' [readability/utf8] [5]'))
+
+ do_test(self, 'Hello world\n', False)
+ do_test(self, '\xe9\x8e\xbd\n', False)
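+ # '\xe9\x8e\xbd' is one complete UTF-8 sequence; the stray 'x' in the
+ # next case splits it, so the bytes no longer decode cleanly.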
+ do_test(self, '\xe9x\x8e\xbd\n', True)
+ # This is the encoding of the replacement character itself (which
+ # you can see by evaluating codecs.getencoder('utf8')(u'\ufffd')).
+ do_test(self, '\xef\xbf\xbd\n', True)
+
+ def test_is_blank_line(self):
+ self.assertTrue(cpp_style.is_blank_line(''))
+ self.assertTrue(cpp_style.is_blank_line(' '))
+ self.assertTrue(cpp_style.is_blank_line(' \t\r\n'))
+ self.assertTrue(not cpp_style.is_blank_line('int a;'))
+ self.assertTrue(not cpp_style.is_blank_line('{'))
+
+ def test_blank_lines_check(self):
+ self.assert_blank_lines_check(['{\n', '\n', '\n', '}\n'], 1, 1)
+ self.assert_blank_lines_check([' if (foo) {\n', '\n', ' }\n'], 1, 1)
+ self.assert_blank_lines_check(
+ ['\n', '// {\n', '\n', '\n', '// Comment\n', '{\n', '}\n'], 0, 0)
+ self.assert_blank_lines_check(['\n', 'run("{");\n', '\n'], 0, 0)
+ self.assert_blank_lines_check(['\n', ' if (foo) { return 0; }\n', '\n'], 0, 0)
+
+ def test_allow_blank_line_before_closing_namespace(self):
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data('foo.cpp', 'cpp',
+ ['namespace {', '', '} // namespace'],
+ error_collector)
+ self.assertEqual(0, error_collector.results().count(
+ 'Blank line at the end of a code block. Is this needed?'
+ ' [whitespace/blank_line] [3]'))
+
+ def test_allow_blank_line_before_if_else_chain(self):
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data('foo.cpp', 'cpp',
+ ['if (hoge) {',
+ '', # No warning
+ '} else if (piyo) {',
+ '', # No warning
+ '} else if (piyopiyo) {',
+ ' hoge = true;', # No warning
+ '} else {',
+ '', # Warning on this line
+ '}'],
+ error_collector)
+ self.assertEqual(1, error_collector.results().count(
+ 'Blank line at the end of a code block. Is this needed?'
+ ' [whitespace/blank_line] [3]'))
+
+ def test_else_on_same_line_as_closing_braces(self):
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data('foo.cpp', 'cpp',
+ ['if (hoge) {',
+ '',
+ '}',
+ ' else {', # Warning on this line
+ '',
+ '}'],
+ error_collector)
+ self.assertEqual(1, error_collector.results().count(
+ 'An else should appear on the same line as the preceding }'
+ ' [whitespace/newline] [4]'))
+
+ def test_else_clause_not_on_same_line_as_else(self):
+ self.assert_lint(' else DoSomethingElse();',
+ 'Else clause should never be on same line as else '
+ '(use 2 lines) [whitespace/newline] [4]')
+ self.assert_lint(' else ifDoSomethingElse();',
+ 'Else clause should never be on same line as else '
+ '(use 2 lines) [whitespace/newline] [4]')
+ self.assert_lint(' else if (blah) {', '')
+ self.assert_lint(' variable_ends_in_else = true;', '')
+
+ def test_comma(self):
+ self.assert_lint('a = f(1,2);',
+ 'Missing space after , [whitespace/comma] [3]')
+ self.assert_lint('int tmp=a,a=b,b=tmp;',
+ ['Missing spaces around = [whitespace/operators] [4]',
+ 'Missing space after , [whitespace/comma] [3]'])
+ self.assert_lint('f(a, /* name */ b);', '')
+ self.assert_lint('f(a, /* name */b);', '')
+
+ def test_declaration(self):
+ self.assert_lint('int a;', '')
+ self.assert_lint('int  a;', 'Extra space between int and a [whitespace/declaration] [3]')
+ self.assert_lint('int*  a;', 'Extra space between int* and a [whitespace/declaration] [3]')
+ self.assert_lint('else if { }', '')
+ self.assert_lint('else  if { }', 'Extra space between else and if [whitespace/declaration] [3]')
+
+ def test_pointer_reference_marker_location(self):
+ self.assert_lint('int* b;', '', 'foo.cpp')
+ self.assert_lint('int *b;',
+ 'Declaration has space between type name and * in int *b [whitespace/declaration] [3]',
+ 'foo.cpp')
+ self.assert_lint('return *b;', '', 'foo.cpp')
+ self.assert_lint('delete *b;', '', 'foo.cpp')
+ self.assert_lint('int *b;', '', 'foo.c')
+ self.assert_lint('int* b;',
+ 'Declaration has space between * and variable name in int* b [whitespace/declaration] [3]',
+ 'foo.c')
+ self.assert_lint('int& b;', '', 'foo.cpp')
+ self.assert_lint('int &b;',
+ 'Declaration has space between type name and & in int &b [whitespace/declaration] [3]',
+ 'foo.cpp')
+ self.assert_lint('return &b;', '', 'foo.cpp')
+
+ def test_indent(self):
+ self.assert_lint('static int noindent;', '')
+ self.assert_lint('    int fourSpaceIndent;', '')
+ self.assert_lint(' int oneSpaceIndent;',
+ 'Weird number of spaces at line-start. '
+ 'Are you using a 4-space indent? [whitespace/indent] [3]')
+ self.assert_lint('   int threeSpaceIndent;',
+ 'Weird number of spaces at line-start. '
+ 'Are you using a 4-space indent? [whitespace/indent] [3]')
+ self.assert_lint(' char* oneSpaceIndent = "public:";',
+ 'Weird number of spaces at line-start. '
+ 'Are you using a 4-space indent? [whitespace/indent] [3]')
+ self.assert_lint(' public:',
+ 'Weird number of spaces at line-start. '
+ 'Are you using a 4-space indent? [whitespace/indent] [3]')
+ self.assert_lint('  public:',
+ 'Weird number of spaces at line-start. '
+ 'Are you using a 4-space indent? [whitespace/indent] [3]')
+ self.assert_lint('   public:',
+ 'Weird number of spaces at line-start. '
+ 'Are you using a 4-space indent? [whitespace/indent] [3]')
+ self.assert_multi_line_lint(
+ 'class Foo {\n'
+ 'public:\n'
+ '    enum Bar {\n'
+ '        Alpha,\n'
+ '        Beta,\n'
+ '#if ENABLED_BETZ\n'
+ '        Charlie,\n'
+ '#endif\n'
+ '    };\n'
+ '};',
+ '')
+ self.assert_multi_line_lint(
+ 'if (true) {\n'
+ '    myFunction(reallyLongParam1, reallyLongParam2,\n'
+ '               reallyLongParam3);\n'
+ '}\n',
+ 'Weird number of spaces at line-start. Are you using a 4-space indent? [whitespace/indent] [3]')
+
+ self.assert_multi_line_lint(
+ 'if (true) {\n'
+ '    myFunction(reallyLongParam1, reallyLongParam2,\n'
+ '            reallyLongParam3);\n'
+ '}\n',
+ 'When wrapping a line, only indent 4 spaces. [whitespace/indent] [3]')
+
+
+ def test_not_alabel(self):
+ self.assert_lint('MyVeryLongNamespace::MyVeryLongClassName::', '')
+
+ def test_tab(self):
+ self.assert_lint('\tint a;',
+ 'Tab found; better to use spaces [whitespace/tab] [1]')
+ self.assert_lint('int a = 5;\t// set a to 5',
+ 'Tab found; better to use spaces [whitespace/tab] [1]')
+
+ def test_unnamed_namespaces_in_headers(self):
+ self.assert_language_rules_check(
+ 'foo.h', 'namespace {',
+ 'Do not use unnamed namespaces in header files. See'
+ ' http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
+ ' for more information. [build/namespaces] [4]')
+ # namespace registration macros are OK.
+ self.assert_language_rules_check('foo.h', 'namespace { \\', '')
+ # named namespaces are OK.
+ self.assert_language_rules_check('foo.h', 'namespace foo {', '')
+ self.assert_language_rules_check('foo.h', 'namespace foonamespace {', '')
+ self.assert_language_rules_check('foo.cpp', 'namespace {', '')
+ self.assert_language_rules_check('foo.cpp', 'namespace foo {', '')
+
+ def test_build_class(self):
+ # Test that the linter can parse to the end of class definitions,
+ # and that it will report when it can't.
+ # Use multi-line linter because it performs the ClassState check.
+ self.assert_multi_line_lint(
+ 'class Foo {',
+ 'Failed to find complete declaration of class Foo'
+ ' [build/class] [5]')
+ # Don't warn on forward declarations of various types.
+ self.assert_multi_line_lint(
+ 'class Foo;',
+ '')
+ self.assert_multi_line_lint(
+ '''\
+ struct Foo*
+ foo = NewFoo();''',
+ '')
+ # Here is an example where the linter gets confused, even though
+ # the code doesn't violate the style guide.
+ self.assert_multi_line_lint(
+ 'class Foo\n'
+ '#ifdef DERIVE_FROM_GOO\n'
+ ' : public Goo {\n'
+ '#else\n'
+ ' : public Hoo {\n'
+ '#endif\n'
+ '};',
+ 'Failed to find complete declaration of class Foo'
+ ' [build/class] [5]')
+
+ def test_build_end_comment(self):
+ # The crosstool compiler we currently use will fail to compile the
+ # code in this test, so we might consider removing the lint check.
+ self.assert_lint('#endif Not a comment',
+ 'Uncommented text after #endif is non-standard.'
+ ' Use a comment.'
+ ' [build/endif_comment] [5]')
+
+ def test_build_forward_decl(self):
+ # The crosstool compiler we currently use will fail to compile the
+ # code in this test, so we might consider removing the lint check.
+ self.assert_lint('class Foo::Goo;',
+ 'Inner-style forward declarations are invalid.'
+ ' Remove this line.'
+ ' [build/forward_decl] [5]')
+
+ def test_build_header_guard(self):
+ file_path = 'mydir/Foo.h'
+
+ # We can't rely on our internal stuff to get a sane path on the open source
+ # side of things, so just parse out the suggested header guard. This
+ # doesn't allow us to test the suggested header guard, but it does let us
+ # test all the other header tests.
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data(file_path, 'h', [], error_collector)
+ expected_guard = ''
+ matcher = re.compile(
+ 'No #ifndef header guard found, suggested CPP variable is: ([A-Za-z_0-9]+) ')
+ for error in error_collector.result_list():
+ matches = matcher.match(error)
+ if matches:
+ expected_guard = matches.group(1)
+ break
+
+ # Make sure we extracted something for our header guard.
+ self.assertNotEqual(expected_guard, '')
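+ # (For mydir/Foo.h the suggestion is presumably a WebKit-style guard
+ # such as "Foo_h", but the exact value depends on how the path is
+ # canonicalized, which is why it is parsed out of the message above.)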
+
+ # Wrong guard
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data(file_path, 'h',
+ ['#ifndef FOO_H', '#define FOO_H'], error_collector)
+ self.assertEqual(
+ 1,
+ error_collector.result_list().count(
+ '#ifndef header guard has wrong style, please use: %s'
+ ' [build/header_guard] [5]' % expected_guard),
+ error_collector.result_list())
+
+ # No define
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data(file_path, 'h',
+ ['#ifndef %s' % expected_guard], error_collector)
+ self.assertEqual(
+ 1,
+ error_collector.result_list().count(
+ 'No #ifndef header guard found, suggested CPP variable is: %s'
+ ' [build/header_guard] [5]' % expected_guard),
+ error_collector.result_list())
+
+ # Mismatched define
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data(file_path, 'h',
+ ['#ifndef %s' % expected_guard,
+ '#define FOO_H'],
+ error_collector)
+ self.assertEqual(
+ 1,
+ error_collector.result_list().count(
+ 'No #ifndef header guard found, suggested CPP variable is: %s'
+ ' [build/header_guard] [5]' % expected_guard),
+ error_collector.result_list())
+
+ # No header guard errors
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data(file_path, 'h',
+ ['#ifndef %s' % expected_guard,
+ '#define %s' % expected_guard,
+ '#endif // %s' % expected_guard],
+ error_collector)
+ for line in error_collector.result_list():
+ if line.find('build/header_guard') != -1:
+ self.fail('Unexpected error: %s' % line)
+
+ # Completely incorrect header guard
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data(file_path, 'h',
+ ['#ifndef FOO',
+ '#define FOO',
+ '#endif // FOO'],
+ error_collector)
+ self.assertEqual(
+ 1,
+ error_collector.result_list().count(
+ '#ifndef header guard has wrong style, please use: %s'
+ ' [build/header_guard] [5]' % expected_guard),
+ error_collector.result_list())
+
+ # Special case for flymake
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data('mydir/Foo_flymake.h', 'h',
+ ['#ifndef %s' % expected_guard,
+ '#define %s' % expected_guard,
+ '#endif // %s' % expected_guard],
+ error_collector)
+ for line in error_collector.result_list():
+ if line.find('build/header_guard') != -1:
+ self.fail('Unexpected error: %s' % line)
+
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data('mydir/Foo_flymake.h', 'h', [], error_collector)
+ self.assertEqual(
+ 1,
+ error_collector.result_list().count(
+ 'No #ifndef header guard found, suggested CPP variable is: %s'
+ ' [build/header_guard] [5]' % expected_guard),
+ error_collector.result_list())
+
+ # Verify that we don't blindly suggest the WTF prefix for all headers.
+ self.assertFalse(expected_guard.startswith('WTF_'))
+
+ # Allow the WTF_ prefix for files in that directory.
+ header_guard_filter = FilterConfiguration(('-', '+build/header_guard'))
+ error_collector = ErrorCollector(self.assertTrue, header_guard_filter)
+ self.process_file_data('Source/JavaScriptCore/wtf/TestName.h', 'h',
+ ['#ifndef WTF_TestName_h', '#define WTF_TestName_h'],
+ error_collector)
+ self.assertEqual(0, len(error_collector.result_list()),
+ error_collector.result_list())
+
+ # Also allow the non-WTF_ prefix for files in that directory.
+ error_collector = ErrorCollector(self.assertTrue, header_guard_filter)
+ self.process_file_data('Source/JavaScriptCore/wtf/TestName.h', 'h',
+ ['#ifndef TestName_h', '#define TestName_h'],
+ error_collector)
+ self.assertEqual(0, len(error_collector.result_list()),
+ error_collector.result_list())
+
+ # Verify that we suggest the WTF prefix version.
+ error_collector = ErrorCollector(self.assertTrue, header_guard_filter)
+ self.process_file_data('Source/JavaScriptCore/wtf/TestName.h', 'h',
+ ['#ifndef BAD_TestName_h', '#define BAD_TestName_h'],
+ error_collector)
+ self.assertEqual(
+ 1,
+ error_collector.result_list().count(
+ '#ifndef header guard has wrong style, please use: WTF_TestName_h'
+ ' [build/header_guard] [5]'),
+ error_collector.result_list())
+
+ # Verify that the Chromium-style header guard is allowed as well.
+ error_collector = ErrorCollector(self.assertTrue, header_guard_filter)
+ self.process_file_data('Source/foo/testname.h', 'h',
+ ['#ifndef BLINK_FOO_TESTNAME_H_',
+ '#define BLINK_FOO_TESTNAME_H_'],
+ error_collector)
+ self.assertEqual(0, len(error_collector.result_list()),
+ error_collector.result_list())
+
+ def test_build_printf_format(self):
+ self.assert_lint(
+ r'printf("\%%d", value);',
+ '%, [, (, and { are undefined character escapes. Unescape them.'
+ ' [build/printf_format] [3]')
+
+ self.assert_lint(
+ r'snprintf(buffer, sizeof(buffer), "\[%d", value);',
+ '%, [, (, and { are undefined character escapes. Unescape them.'
+ ' [build/printf_format] [3]')
+
+ self.assert_lint(
+ r'fprintf(file, "\(%d", value);',
+ '%, [, (, and { are undefined character escapes. Unescape them.'
+ ' [build/printf_format] [3]')
+
+ self.assert_lint(
+ r'vsnprintf(buffer, sizeof(buffer), "\\\{%d", ap);',
+ '%, [, (, and { are undefined character escapes. Unescape them.'
+ ' [build/printf_format] [3]')
+
+ # Don't warn if a double backslash precedes the symbol.
+ self.assert_lint(r'printf("\\%%%d", value);',
+ '')
+
+ def test_runtime_printf_format(self):
+ self.assert_lint(
+ r'fprintf(file, "%q", value);',
+ '%q in format strings is deprecated. Use %ll instead.'
+ ' [runtime/printf_format] [3]')
+
+ self.assert_lint(
+ r'aprintf(file, "The number is %12q", value);',
+ '%q in format strings is deprecated. Use %ll instead.'
+ ' [runtime/printf_format] [3]')
+
+ self.assert_lint(
+ r'printf(file, "The number is" "%-12q", value);',
+ '%q in format strings is deprecated. Use %ll instead.'
+ ' [runtime/printf_format] [3]')
+
+ self.assert_lint(
+ r'printf(file, "The number is" "%+12q", value);',
+ '%q in format strings is deprecated. Use %ll instead.'
+ ' [runtime/printf_format] [3]')
+
+ self.assert_lint(
+ r'printf(file, "The number is" "% 12q", value);',
+ '%q in format strings is deprecated. Use %ll instead.'
+ ' [runtime/printf_format] [3]')
+
+ self.assert_lint(
+ r'snprintf(file, "Never mix %d and %1$d parmaeters!", value);',
+ '%N$ formats are unconventional. Try rewriting to avoid them.'
+ ' [runtime/printf_format] [2]')
+
+ def assert_lintLogCodeOnError(self, code, expected_message):
+ # Special assert_lint which logs the input code on error.
+ result = self.perform_single_line_lint(code, 'foo.cpp')
+ if result != expected_message:
+ self.fail('For code: "%s"\nGot: "%s"\nExpected: "%s"'
+ % (code, result, expected_message))
+
+ def test_build_storage_class(self):
+ qualifiers = [None, 'const', 'volatile']
+ signs = [None, 'signed', 'unsigned']
+ types = ['void', 'char', 'int', 'float', 'double',
+ 'schar', 'int8', 'uint8', 'int16', 'uint16',
+ 'int32', 'uint32', 'int64', 'uint64']
+ storage_classes = ['auto', 'extern', 'register', 'static', 'typedef']
+
+ build_storage_class_error_message = (
+ 'Storage class (static, extern, typedef, etc) should be first.'
+ ' [build/storage_class] [5]')
+
+ # Some explicit cases. Legal in C++, deprecated in C99.
+ self.assert_lint('const int static foo = 5;',
+ build_storage_class_error_message)
+
+ self.assert_lint('char static foo;',
+ build_storage_class_error_message)
+
+ self.assert_lint('double const static foo = 2.0;',
+ build_storage_class_error_message)
+
+ self.assert_lint('uint64 typedef unsignedLongLong;',
+ build_storage_class_error_message)
+
+ self.assert_lint('int register foo = 0;',
+ build_storage_class_error_message)
+
+ # Since there are a very large number of possibilities, randomly
+ # construct declarations.
+ # Make sure that the declaration is logged if there's an error.
+ # Seed generator with an integer for absolute reproducibility.
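+ # A generated failing case might look like 'const static int;' (storage
+ # class not first), while the passing case puts it first: 'static const int'.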
+ random.seed(25)
+ for unused_i in range(10):
+ # Build up random list of non-storage-class declaration specs.
+ other_decl_specs = [random.choice(qualifiers), random.choice(signs),
+ random.choice(types)]
+ # Remove the None placeholders so only real declaration specs remain.
+ other_decl_specs = [x for x in other_decl_specs if x is not None]
+
+ # shuffle
+ random.shuffle(other_decl_specs)
+
+ # insert storage class after the first
+ storage_class = random.choice(storage_classes)
+ insertion_point = random.randint(1, len(other_decl_specs))
+ decl_specs = (other_decl_specs[0:insertion_point]
+ + [storage_class]
+ + other_decl_specs[insertion_point:])
+
+ self.assert_lintLogCodeOnError(
+ ' '.join(decl_specs) + ';',
+ build_storage_class_error_message)
+
+ # but no error if storage class is first
+ self.assert_lintLogCodeOnError(
+ storage_class + ' ' + ' '.join(other_decl_specs),
+ '')
+
+ def test_legal_copyright(self):
+ legal_copyright_message = (
+ 'No copyright message found. '
+ 'You should have a line: "Copyright [year] <Copyright Owner>"'
+ ' [legal/copyright] [5]')
+
+ copyright_line = '// Copyright 2008 Google Inc. All Rights Reserved.'
+
+ file_path = 'mydir/googleclient/foo.cpp'
+
+ # There should be a copyright message in the first 10 lines
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data(file_path, 'cpp', [], error_collector)
+ self.assertEqual(
+ 1,
+ error_collector.result_list().count(legal_copyright_message))
+
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data(
+ file_path, 'cpp',
+ ['' for unused_i in range(10)] + [copyright_line],
+ error_collector)
+ self.assertEqual(
+ 1,
+ error_collector.result_list().count(legal_copyright_message))
+
+ # Test that warning isn't issued if Copyright line appears early enough.
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data(file_path, 'cpp', [copyright_line], error_collector)
+ for message in error_collector.result_list():
+ if message.find('legal/copyright') != -1:
+ self.fail('Unexpected error: %s' % message)
+
+ error_collector = ErrorCollector(self.assertTrue)
+ self.process_file_data(
+ file_path, 'cpp',
+ ['' for unused_i in range(9)] + [copyright_line],
+ error_collector)
+ for message in error_collector.result_list():
+ if message.find('legal/copyright') != -1:
+ self.fail('Unexpected error: %s' % message)
+
+ def test_invalid_increment(self):
+ self.assert_lint('*count++;',
+ 'Changing pointer instead of value (or unused value of '
+ 'operator*). [runtime/invalid_increment] [5]')
+
+ # Integral bitfields must be declared with either the signed or unsigned keyword.
+ def test_plain_integral_bitfields(self):
+ errmsg = ('Please declare integral type bitfields with either signed or unsigned. [runtime/bitfields] [5]')
+
+ self.assert_lint('int a : 30;', errmsg)
+ self.assert_lint('mutable short a : 14;', errmsg)
+ self.assert_lint('const char a : 6;', errmsg)
+ self.assert_lint('long int a : 30;', errmsg)
+ self.assert_lint('int a = 1 ? 0 : 30;', '')
+
+ # A mixture of unsigned and bool bitfields in a class will generate a warning.
+ def test_mixing_unsigned_bool_bitfields(self):
+ def errmsg(bool_bitfields, unsigned_bitfields, name):
+ bool_list = ', '.join(bool_bitfields)
+ unsigned_list = ', '.join(unsigned_bitfields)
+ return ('The class %s contains mixed unsigned and bool bitfields, '
+ 'which will pack into separate words on the MSVC compiler.\n'
+ 'Bool bitfields are [%s].\nUnsigned bitfields are [%s].\n'
+ 'Consider converting bool bitfields to unsigned. [runtime/bitfields] [5]'
+ % (name, bool_list, unsigned_list))
+
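+ # build_test_case([('bool', 'm_b', 1), ('unsigned', 'm_u', 2)], 'C', True)
+ # lints a class body like 'class C {\n bool m_b : 1;\n unsigned m_u : 2;\n}\n'
+ # and expects the mixed bool/unsigned warning; will_warn=False expects none.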
+ def build_test_case(bitfields, name, will_warn, extra_warnings=None):
+ bool_bitfields = []
+ unsigned_bitfields = []
+ test_string = 'class %s {\n' % (name,)
+ line = 2
+ for bitfield in bitfields:
+ test_string += ' %s %s : %d;\n' % bitfield
+ if bitfield[0] == 'bool':
+ bool_bitfields.append('%d: %s' % (line, bitfield[1]))
+ elif bitfield[0].startswith('unsigned'):
+ unsigned_bitfields.append('%d: %s' % (line, bitfield[1]))
+ line += 1
+ test_string += '}\n'
+ error = ''
+ if will_warn:
+ error = errmsg(bool_bitfields, unsigned_bitfields, name)
+ if extra_warnings and error:
+ error = extra_warnings + [error]
+ self.assert_multi_line_lint(test_string, error)
+
+ build_test_case([('bool', 'm_boolMember', 4), ('unsigned', 'm_unsignedMember', 3)],
+ 'MyClass', True)
+ build_test_case([('bool', 'm_boolMember', 4), ('bool', 'm_anotherBool', 3)],
+ 'MyClass', False)
+ build_test_case([('unsigned', 'm_unsignedMember', 4), ('unsigned', 'm_anotherUnsigned', 3)],
+ 'MyClass', False)
+
+ build_test_case([('bool', 'm_boolMember', 4), ('bool', 'm_anotherbool', 3),
+ ('bool', 'm_moreBool', 1), ('bool', 'm_lastBool', 1),
+ ('unsigned int', 'm_tokenUnsigned', 4)],
+ 'MyClass', True, ['Omit int when using unsigned [runtime/unsigned] [1]'])
+
+ self.assert_multi_line_lint('class NoProblemsHere {\n'
+ ' bool m_boolMember;\n'
+ ' unsigned m_unsignedMember;\n'
+ ' unsigned m_bitField1 : 1;\n'
+ ' unsigned m_bitField4 : 4;\n'
+ '}\n', '')
+
+ # Bitfields which are not declared unsigned or bool will generate a warning.
+ def test_unsigned_bool_bitfields(self):
+ def errmsg(member, name, bit_type):
+ return ('Member %s of class %s defined as a bitfield of type %s. '
+ 'Please declare all bitfields as unsigned. [runtime/bitfields] [4]'
+ % (member, name, bit_type))
+
+ def warning_bitfield_test(member, name, bit_type, bits):
+ self.assert_multi_line_lint('class %s {\n%s %s: %d;\n}\n'
+ % (name, bit_type, member, bits),
+ errmsg(member, name, bit_type))
+
+ def safe_bitfield_test(member, name, bit_type, bits):
+ self.assert_multi_line_lint('class %s {\n%s %s: %d;\n}\n'
+ % (name, bit_type, member, bits),
+ '')
+
+ warning_bitfield_test('a', 'A', 'int32_t', 25)
+ warning_bitfield_test('m_someField', 'SomeClass', 'signed', 4)
+ warning_bitfield_test('m_someField', 'SomeClass', 'SomeEnum', 2)
+
+ safe_bitfield_test('a', 'A', 'unsigned', 22)
+ safe_bitfield_test('m_someField', 'SomeClass', 'bool', 1)
+ safe_bitfield_test('m_someField', 'SomeClass', 'unsigned', 2)
+
+ # Declarations in 'Expected' or 'SameSizeAs' classes are OK.
+ warning_bitfield_test('m_bitfields', 'SomeClass', 'int32_t', 32)
+ safe_bitfield_test('m_bitfields', 'ExpectedSomeClass', 'int32_t', 32)
+ safe_bitfield_test('m_bitfields', 'SameSizeAsSomeClass', 'int32_t', 32)
+
+
+class CleansedLinesTest(unittest.TestCase):
+ def test_init(self):
+ lines = ['Line 1',
+ 'Line 2',
+ 'Line 3 // Comment test',
+ 'Line 4 "foo"']
+
+ clean_lines = cpp_style.CleansedLines(lines)
+ self.assertEqual(lines, clean_lines.raw_lines)
+ self.assertEqual(4, clean_lines.num_lines())
+
+ self.assertEqual(['Line 1',
+ 'Line 2',
+ 'Line 3 ',
+ 'Line 4 "foo"'],
+ clean_lines.lines)
+
+ self.assertEqual(['Line 1',
+ 'Line 2',
+ 'Line 3 ',
+ 'Line 4 ""'],
+ clean_lines.elided)
+
+ def test_init_empty(self):
+ clean_lines = cpp_style.CleansedLines([])
+ self.assertEqual([], clean_lines.raw_lines)
+ self.assertEqual(0, clean_lines.num_lines())
+
+ def test_collapse_strings(self):
+ collapse = cpp_style.CleansedLines.collapse_strings
+ self.assertEqual('""', collapse('""')) # "" (empty)
+ self.assertEqual('"""', collapse('"""')) # """ (bad)
+ self.assertEqual('""', collapse('"xyz"')) # "xyz" (string)
+ self.assertEqual('""', collapse('"\\\""')) # "\"" (string)
+ self.assertEqual('""', collapse('"\'"')) # "'" (string)
+ self.assertEqual('"\"', collapse('"\"')) # "\" (bad)
+ self.assertEqual('""', collapse('"\\\\"')) # "\\" (string)
+ self.assertEqual('"', collapse('"\\\\\\"')) # "\\\" (bad)
+ self.assertEqual('""', collapse('"\\\\\\\\"')) # "\\\\" (string)
+
+ self.assertEqual('\'\'', collapse('\'\'')) # '' (empty)
+ self.assertEqual('\'\'', collapse('\'a\'')) # 'a' (char)
+ self.assertEqual('\'\'', collapse('\'\\\'\'')) # '\'' (char)
+ self.assertEqual('\'', collapse('\'\\\'')) # '\' (bad)
+ self.assertEqual('', collapse('\\012')) # '\012' (char)
+ self.assertEqual('', collapse('\\xfF0')) # '\xfF0' (char)
+ self.assertEqual('', collapse('\\n')) # '\n' (char)
+ self.assertEqual('\#', collapse('\\#')) # '\#' (bad)
+
+ self.assertEqual('StringReplace(body, "", "");',
+ collapse('StringReplace(body, "\\\\", "\\\\\\\\");'))
+ self.assertEqual('\'\' ""',
+ collapse('\'"\' "foo"'))
+
+
+class OrderOfIncludesTest(CppStyleTestBase):
+ def setUp(self):
+ self.include_state = cpp_style._IncludeState()
+
+ # Cheat os.path.abspath called in FileInfo class.
+ self.os_path_abspath_orig = os.path.abspath
+ self.os_path_isfile_orig = os.path.isfile
+ os.path.abspath = lambda value: value
+
+ def tearDown(self):
+ os.path.abspath = self.os_path_abspath_orig
+ os.path.isfile = self.os_path_isfile_orig
+
+ def test_check_next_include_order__no_config(self):
+ self.assertEqual('Header file should not contain WebCore config.h.',
+ self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, True, True))
+
+ def test_check_next_include_order__no_self(self):
+ self.assertEqual('Header file should not contain itself.',
+ self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, True, True))
+ # Test actual code to make sure that header types are correctly assigned.
+ self.assert_language_rules_check('Foo.h',
+ '#include "Foo.h"\n',
+ 'Header file should not contain itself. Should be: alphabetically sorted.'
+ ' [build/include_order] [4]')
+ self.assert_language_rules_check('FooBar.h',
+ '#include "Foo.h"\n',
+ '')
+
+ def test_check_next_include_order__likely_then_config(self):
+ self.assertEqual('Found header this file implements before WebCore config.h.',
+ self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, False, True))
+ self.assertEqual('Found WebCore config.h after a header this file implements.',
+ self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False, True))
+
+ def test_check_next_include_order__other_then_config(self):
+ self.assertEqual('Found other header before WebCore config.h.',
+ self.include_state.check_next_include_order(cpp_style._OTHER_HEADER, False, True))
+ self.assertEqual('Found WebCore config.h after other header.',
+ self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False, True))
+
+ def test_check_next_include_order__config_then_other_then_likely(self):
+ self.assertEqual('', self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False, True))
+ self.assertEqual('Found other header before a header this file implements.',
+ self.include_state.check_next_include_order(cpp_style._OTHER_HEADER, False, True))
+ self.assertEqual('Found header this file implements after other header.',
+ self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, False, True))
+
+ def test_check_alphabetical_include_order(self):
+ self.assert_language_rules_check('foo.h',
+ '#include "a.h"\n'
+ '#include "c.h"\n'
+ '#include "b.h"\n',
+ 'Alphabetical sorting problem. [build/include_order] [4]')
+
+ self.assert_language_rules_check('foo.h',
+ '#include "a.h"\n'
+ '#include "b.h"\n'
+ '#include "c.h"\n',
+ '')
+
+ self.assert_language_rules_check('foo.h',
+ '#include <assert.h>\n'
+ '#include "bar.h"\n',
+ 'Alphabetical sorting problem. [build/include_order] [4]')
+
+ self.assert_language_rules_check('foo.h',
+ '#include "bar.h"\n'
+ '#include <assert.h>\n',
+ '')
+
+ def test_check_alphabetical_include_order_errors_reported_for_both_lines(self):
+ # If one of the two lines of out of order headers are filtered, the error should be
+ # reported on the other line.
+ self.assert_language_rules_check('foo.h',
+ '#include "a.h"\n'
+ '#include "c.h"\n'
+ '#include "b.h"\n',
+ 'Alphabetical sorting problem. [build/include_order] [4]',
+ lines_to_check=[2])
+
+ self.assert_language_rules_check('foo.h',
+ '#include "a.h"\n'
+ '#include "c.h"\n'
+ '#include "b.h"\n',
+ 'Alphabetical sorting problem. [build/include_order] [4]',
+ lines_to_check=[3])
+
+ # If no lines are filtered, the error should be reported only once.
+ self.assert_language_rules_check('foo.h',
+ '#include "a.h"\n'
+ '#include "c.h"\n'
+ '#include "b.h"\n',
+ 'Alphabetical sorting problem. [build/include_order] [4]')
+
+ def test_check_line_break_after_own_header(self):
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '#include "bar.h"\n',
+ 'You should add a blank line after implementation file\'s own header. [build/include_order] [4]')
+
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#include "bar.h"\n',
+ '')
+
+ def test_check_preprocessor_in_include_section(self):
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#ifdef BAZ\n'
+ '#include "baz.h"\n'
+ '#else\n'
+ '#include "foobar.h"\n'
+ '#endif"\n'
+ '#include "bar.h"\n', # No flag because previous is in preprocessor section
+ '')
+
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#ifdef BAZ\n'
+ '#include "baz.h"\n'
+ '#endif"\n'
+ '#include "bar.h"\n'
+ '#include "a.h"\n', # Should still flag this.
+ 'Alphabetical sorting problem. [build/include_order] [4]')
+
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#ifdef BAZ\n'
+ '#include "baz.h"\n'
+ '#include "bar.h"\n' #Should still flag this
+ '#endif"\n',
+ 'Alphabetical sorting problem. [build/include_order] [4]')
+
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#ifdef BAZ\n'
+ '#include "baz.h"\n'
+ '#endif"\n'
+ '#ifdef FOOBAR\n'
+ '#include "foobar.h"\n'
+ '#endif"\n'
+ '#include "bar.h"\n'
+ '#include "a.h"\n', # Should still flag this.
+ 'Alphabetical sorting problem. [build/include_order] [4]')
+
+ # Check that after an already included error, the sorting rules still work.
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#include "foo.h"\n'
+ '#include "g.h"\n',
+ '"foo.h" already included at foo.cpp:2 [build/include] [4]')
+
+ def test_primary_header(self):
+ # File with non-existing primary header should not produce errors.
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '\n'
+ '#include "bar.h"\n',
+ '')
+ # Pretend that header files exist.
+ os.path.isfile = lambda filename: True
+ # Missing include for existing primary header -> error.
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '\n'
+ '#include "bar.h"\n',
+ 'Found other header before a header this file implements. '
+ 'Should be: config.h, primary header, blank line, and then '
+ 'alphabetically sorted. [build/include_order] [4]')
+ # Having include for existing primary header -> no error.
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#include "bar.h"\n',
+ '')
+
+ os.path.isfile = self.os_path_isfile_orig
+
+ def test_public_primary_header(self):
+ # System header is not considered a primary header.
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include <other/foo.h>\n'
+ '\n'
+ '#include "a.h"\n',
+ 'Alphabetical sorting problem. [build/include_order] [4]')
+
+ # ...except that it starts with public/.
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include <public/foo.h>\n'
+ '\n'
+ '#include "a.h"\n',
+ '')
+
+ # Even if it starts with public/ its base part must match with the source file name.
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include <public/foop.h>\n'
+ '\n'
+ '#include "a.h"\n',
+ 'Alphabetical sorting problem. [build/include_order] [4]')
+
+ def test_check_wtf_includes(self):
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#include <wtf/Assertions.h>\n',
+ 'wtf includes should be "wtf/file.h" instead of <wtf/file.h>.'
+ ' [build/include] [4]')
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#include "wtf/Assertions.h"\n',
+ '')
+
+ def test_check_cc_includes(self):
+ self.assert_language_rules_check('bar/chromium/foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#include "cc/CCProxy.h"\n',
+ 'cc includes should be "CCFoo.h" instead of "cc/CCFoo.h".'
+ ' [build/include] [4]')
+
+ def test_classify_include(self):
+ classify_include = cpp_style._classify_include
+ include_state = cpp_style._IncludeState()
+ self.assertEqual(cpp_style._CONFIG_HEADER,
+ classify_include('foo/foo.cpp',
+ 'config.h',
+ False, include_state))
+ self.assertEqual(cpp_style._PRIMARY_HEADER,
+ classify_include('foo/internal/foo.cpp',
+ 'foo/public/foo.h',
+ False, include_state))
+ self.assertEqual(cpp_style._PRIMARY_HEADER,
+ classify_include('foo/internal/foo.cpp',
+ 'foo/other/public/foo.h',
+ False, include_state))
+ self.assertEqual(cpp_style._OTHER_HEADER,
+ classify_include('foo/internal/foo.cpp',
+ 'foo/other/public/foop.h',
+ False, include_state))
+ self.assertEqual(cpp_style._OTHER_HEADER,
+ classify_include('foo/foo.cpp',
+ 'string',
+ True, include_state))
+ self.assertEqual(cpp_style._PRIMARY_HEADER,
+ classify_include('fooCustom.cpp',
+ 'foo.h',
+ False, include_state))
+ self.assertEqual(cpp_style._PRIMARY_HEADER,
+ classify_include('PrefixFooCustom.cpp',
+ 'Foo.h',
+ False, include_state))
+ self.assertEqual(cpp_style._MOC_HEADER,
+ classify_include('foo.cpp',
+ 'foo.moc',
+ False, include_state))
+ self.assertEqual(cpp_style._MOC_HEADER,
+ classify_include('foo.cpp',
+ 'moc_foo.cpp',
+ False, include_state))
+ # <public/foo.h> must be considered as primary even if is_system is True.
+ self.assertEqual(cpp_style._PRIMARY_HEADER,
+ classify_include('foo/foo.cpp',
+ 'public/foo.h',
+ True, include_state))
+ self.assertEqual(cpp_style._OTHER_HEADER,
+ classify_include('foo.cpp',
+ 'foo.h',
+ True, include_state))
+ self.assertEqual(cpp_style._OTHER_HEADER,
+ classify_include('foo.cpp',
+ 'public/foop.h',
+ True, include_state))
+ # Qt private APIs use _p.h suffix.
+ self.assertEqual(cpp_style._PRIMARY_HEADER,
+ classify_include('foo.cpp',
+ 'foo_p.h',
+ False, include_state))
+ # Tricky example where both includes might be classified as primary.
+ self.assert_language_rules_check('ScrollbarThemeWince.cpp',
+ '#include "config.h"\n'
+ '#include "ScrollbarThemeWince.h"\n'
+ '\n'
+ '#include "Scrollbar.h"\n',
+ '')
+ self.assert_language_rules_check('ScrollbarThemeWince.cpp',
+ '#include "config.h"\n'
+ '#include "Scrollbar.h"\n'
+ '\n'
+ '#include "ScrollbarThemeWince.h"\n',
+ 'Found header this file implements after a header this file implements.'
+ ' Should be: config.h, primary header, blank line, and then alphabetically sorted.'
+ ' [build/include_order] [4]')
+ self.assert_language_rules_check('ResourceHandleWin.cpp',
+ '#include "config.h"\n'
+ '#include "ResourceHandle.h"\n'
+ '\n'
+ '#include "ResourceHandleWin.h"\n',
+ '')
+
+ def test_try_drop_common_suffixes(self):
+ self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo-inl.h'))
+ self.assertEqual('foo/bar/foo',
+ cpp_style._drop_common_suffixes('foo/bar/foo_inl.h'))
+ self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo.cpp'))
+ self.assertEqual('foo/foo_unusualinternal',
+ cpp_style._drop_common_suffixes('foo/foo_unusualinternal.h'))
+ self.assertEqual('',
+ cpp_style._drop_common_suffixes('_test.cpp'))
+ self.assertEqual('test',
+ cpp_style._drop_common_suffixes('test.cpp'))
+
+
+class CheckForFunctionLengthsTest(CppStyleTestBase):
+ def setUp(self):
+ # Reducing these thresholds for the tests speeds up tests significantly.
+ self.old_normal_trigger = cpp_style._FunctionState._NORMAL_TRIGGER
+ self.old_test_trigger = cpp_style._FunctionState._TEST_TRIGGER
+
+ cpp_style._FunctionState._NORMAL_TRIGGER = 10
+ cpp_style._FunctionState._TEST_TRIGGER = 25
+
+ def tearDown(self):
+ cpp_style._FunctionState._NORMAL_TRIGGER = self.old_normal_trigger
+ cpp_style._FunctionState._TEST_TRIGGER = self.old_test_trigger
+
+ # FIXME: Eliminate the need for this function.
+ def set_min_confidence(self, min_confidence):
+ """Set new test confidence and return old test confidence."""
+ old_min_confidence = self.min_confidence
+ self.min_confidence = min_confidence
+ return old_min_confidence
+
+ def assert_function_lengths_check(self, code, expected_message):
+ """Check warnings for long function bodies are as expected.
+
+ Args:
+ code: C++ source code expected to generate a warning message.
+ expected_message: Message expected to be generated by the C++ code.
+ """
+ self.assertEqual(expected_message,
+ self.perform_function_lengths_check(code))
+
+ def trigger_lines(self, error_level):
+ """Return number of lines needed to trigger a function length warning.
+
+ Args:
+ error_level: --v setting for cpp_style.
+
+ Returns:
+ Number of lines needed to trigger a function length warning.
+ """
+ return cpp_style._FunctionState._NORMAL_TRIGGER * 2 ** error_level
+
+ def trigger_test_lines(self, error_level):
+ """Return number of lines needed to trigger a test function length warning.
+
+ Args:
+ error_level: --v setting for cpp_style.
+
+ Returns:
+ Number of lines needed to trigger a test function length warning.
+ """
+ return cpp_style._FunctionState._TEST_TRIGGER * 2 ** error_level
+
+ def assert_function_length_check_definition(self, lines, error_level):
+ """Generate long function definition and check warnings are as expected.
+
+ Args:
+ lines: Number of lines to generate.
+ error_level: --v setting for cpp_style.
+ """
+ trigger_level = self.trigger_lines(self.min_confidence)
+ self.assert_function_lengths_check(
+ 'void test(int x)' + self.function_body(lines),
+ ('Small and focused functions are preferred: '
+ 'test() has %d non-comment lines '
+ '(error triggered by exceeding %d lines).'
+ ' [readability/fn_size] [%d]'
+ % (lines, trigger_level, error_level)))
+
+ def assert_function_length_check_definition_ok(self, lines):
+ """Generate shorter function definition and check no warning is produced.
+
+ Args:
+ lines: Number of lines to generate.
+ """
+ self.assert_function_lengths_check(
+ 'void test(int x)' + self.function_body(lines),
+ '')
+
+ def assert_function_length_check_at_error_level(self, error_level):
+ """Generate and check function at the trigger level for --v setting.
+
+ Args:
+ error_level: --v setting for cpp_style.
+ """
+ self.assert_function_length_check_definition(self.trigger_lines(error_level),
+ error_level)
+
+ def assert_function_length_check_below_error_level(self, error_level):
+ """Generate and check function just below the trigger level for --v setting.
+
+ Args:
+ error_level: --v setting for cpp_style.
+ """
+ self.assert_function_length_check_definition(self.trigger_lines(error_level) - 1,
+ error_level - 1)
+
+ def assert_function_length_check_above_error_level(self, error_level):
+ """Generate and check function just above the trigger level for --v setting.
+
+ Args:
+ error_level: --v setting for cpp_style.
+ """
+ self.assert_function_length_check_definition(self.trigger_lines(error_level) + 1,
+ error_level)
+
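+ # Body builders: function_body(2) returns ' {' plus two
+ # 'this_is_just_a_test();' statement lines and a closing '}'; the
+ # variants below add blank lines or trailing NOLINT comments.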
+ def function_body(self, number_of_lines):
+ return ' {\n' + ' this_is_just_a_test();\n' * number_of_lines + '}'
+
+ def function_body_with_blank_lines(self, number_of_lines):
+ return ' {\n' + ' this_is_just_a_test();\n\n' * number_of_lines + '}'
+
+ def function_body_with_no_lints(self, number_of_lines):
+ return ' {\n' + ' this_is_just_a_test(); // NOLINT\n' * number_of_lines + '}'
+
+ # Test function length checks.
+ def test_function_length_check_declaration(self):
+ self.assert_function_lengths_check(
+ 'void test();', # Not a function definition
+ '')
+
+ def test_function_length_check_declaration_with_block_following(self):
+ self.assert_function_lengths_check(
+ ('void test();\n'
+ + self.function_body(66)), # Not a function definition
+ '')
+
+ def test_function_length_check_class_definition(self):
+ self.assert_function_lengths_check( # Not a function definition
+ 'class Test' + self.function_body(66) + ';',
+ '')
+
+ def test_function_length_check_trivial(self):
+ self.assert_function_lengths_check(
+ 'void test() {}', # Not counted
+ '')
+
+ def test_function_length_check_empty(self):
+ self.assert_function_lengths_check(
+ 'void test() {\n}',
+ '')
+
+ def test_function_length_check_definition_below_severity0(self):
+ old_min_confidence = self.set_min_confidence(0)
+ self.assert_function_length_check_definition_ok(self.trigger_lines(0) - 1)
+ self.set_min_confidence(old_min_confidence)
+
+ def test_function_length_check_definition_at_severity0(self):
+ old_min_confidence = self.set_min_confidence(0)
+ self.assert_function_length_check_definition_ok(self.trigger_lines(0))
+ self.set_min_confidence(old_min_confidence)
+
+ def test_function_length_check_definition_above_severity0(self):
+ old_min_confidence = self.set_min_confidence(0)
+ self.assert_function_length_check_above_error_level(0)
+ self.set_min_confidence(old_min_confidence)
+
+ def test_function_length_check_definition_below_severity1v0(self):
+ old_min_confidence = self.set_min_confidence(0)
+ self.assert_function_length_check_below_error_level(1)
+ self.set_min_confidence(old_min_confidence)
+
+ def test_function_length_check_definition_at_severity1v0(self):
+ old_min_confidence = self.set_min_confidence(0)
+ self.assert_function_length_check_at_error_level(1)
+ self.set_min_confidence(old_min_confidence)
+
+ def test_function_length_check_definition_below_severity1(self):
+ self.assert_function_length_check_definition_ok(self.trigger_lines(1) - 1)
+
+ def test_function_length_check_definition_at_severity1(self):
+ self.assert_function_length_check_definition_ok(self.trigger_lines(1))
+
+ def test_function_length_check_definition_above_severity1(self):
+ self.assert_function_length_check_above_error_level(1)
+
+ def test_function_length_check_definition_severity1_plus_indented(self):
+ error_level = 1
+ error_lines = self.trigger_lines(error_level) + 1
+ trigger_level = self.trigger_lines(self.min_confidence)
+ indent_spaces = ' '
+ self.assert_function_lengths_check(
+ re.sub(r'(?m)^(.)', indent_spaces + r'\1',
+ 'void test_indent(int x)\n' + self.function_body(error_lines)),
+ ('Small and focused functions are preferred: '
+ 'test_indent() has %d non-comment lines '
+ '(error triggered by exceeding %d lines).'
+ ' [readability/fn_size] [%d]')
+ % (error_lines, trigger_level, error_level))
+
+ def test_function_length_check_definition_severity1_plus_blanks(self):
+ error_level = 1
+ error_lines = self.trigger_lines(error_level) + 1
+ trigger_level = self.trigger_lines(self.min_confidence)
+ self.assert_function_lengths_check(
+ 'void test_blanks(int x)' + self.function_body(error_lines),
+ ('Small and focused functions are preferred: '
+ 'test_blanks() has %d non-comment lines '
+ '(error triggered by exceeding %d lines).'
+ ' [readability/fn_size] [%d]')
+ % (error_lines, trigger_level, error_level))
+
+ def test_function_length_check_complex_definition_severity1(self):
+ error_level = 1
+ error_lines = self.trigger_lines(error_level) + 1
+ trigger_level = self.trigger_lines(self.min_confidence)
+ self.assert_function_lengths_check(
+ ('my_namespace::my_other_namespace::MyVeryLongTypeName<Type1, bool func(const Element*)>*\n'
+ 'my_namespace::my_other_namespace<Type3, Type4>::~MyFunction<Type5<Type6, Type7> >(int arg1, char* arg2)'
+ + self.function_body(error_lines)),
+ ('Small and focused functions are preferred: '
+ 'my_namespace::my_other_namespace<Type3, Type4>::~MyFunction<Type5<Type6, Type7> >()'
+ ' has %d non-comment lines '
+ '(error triggered by exceeding %d lines).'
+ ' [readability/fn_size] [%d]')
+ % (error_lines, trigger_level, error_level))
+
+ def test_function_length_check_definition_severity1_for_test(self):
+ error_level = 1
+ error_lines = self.trigger_test_lines(error_level) + 1
+ trigger_level = self.trigger_test_lines(self.min_confidence)
+ self.assert_function_lengths_check(
+ 'TEST_F(Test, Mutator)' + self.function_body(error_lines),
+ ('Small and focused functions are preferred: '
+ 'TEST_F(Test, Mutator) has %d non-comment lines '
+ '(error triggered by exceeding %d lines).'
+ ' [readability/fn_size] [%d]')
+ % (error_lines, trigger_level, error_level))
+
+ def test_function_length_check_definition_severity1_for_split_line_test(self):
+ error_level = 1
+ error_lines = self.trigger_test_lines(error_level) + 1
+ trigger_level = self.trigger_test_lines(self.min_confidence)
+ self.assert_function_lengths_check(
+ ('TEST_F(GoogleUpdateRecoveryRegistryProtectedTest,\n'
+ ' FixGoogleUpdate_AllValues_MachineApp)' # note: 4 spaces
+ + self.function_body(error_lines)),
+ ('Small and focused functions are preferred: '
+ 'TEST_F(GoogleUpdateRecoveryRegistryProtectedTest, ' # 1 space
+ 'FixGoogleUpdate_AllValues_MachineApp) has %d non-comment lines '
+ '(error triggered by exceeding %d lines).'
+ ' [readability/fn_size] [%d]')
+ % (error_lines, trigger_level, error_level))
+
+ def test_function_length_check_definition_severity1_for_bad_test_doesnt_break(self):
+ error_level = 1
+ error_lines = self.trigger_test_lines(error_level) + 1
+ trigger_level = self.trigger_test_lines(self.min_confidence)
+ # Since the function name isn't valid, the function detection algorithm
+ # will skip it, so no error is produced.
+ self.assert_function_lengths_check(
+ ('TEST_F('
+ + self.function_body(error_lines)),
+ '')
+
+ def test_function_length_check_definition_severity1_with_embedded_no_lints(self):
+ error_level = 1
+ error_lines = self.trigger_lines(error_level) + 1
+ trigger_level = self.trigger_lines(self.min_confidence)
+ self.assert_function_lengths_check(
+ 'void test(int x)' + self.function_body_with_no_lints(error_lines),
+ ('Small and focused functions are preferred: '
+ 'test() has %d non-comment lines '
+ '(error triggered by exceeding %d lines).'
+ ' [readability/fn_size] [%d]')
+ % (error_lines, trigger_level, error_level))
+
+ def test_function_length_check_definition_severity1_with_no_lint(self):
+ self.assert_function_lengths_check(
+ ('void test(int x)' + self.function_body(self.trigger_lines(1))
+ + ' // NOLINT -- long function'),
+ '')
+
+ def test_function_length_check_definition_below_severity2(self):
+ self.assert_function_length_check_below_error_level(2)
+
+ def test_function_length_check_definition_severity2(self):
+ self.assert_function_length_check_at_error_level(2)
+
+ def test_function_length_check_definition_above_severity2(self):
+ self.assert_function_length_check_above_error_level(2)
+
+ def test_function_length_check_definition_below_severity3(self):
+ self.assert_function_length_check_below_error_level(3)
+
+ def test_function_length_check_definition_severity3(self):
+ self.assert_function_length_check_at_error_level(3)
+
+ def test_function_length_check_definition_above_severity3(self):
+ self.assert_function_length_check_above_error_level(3)
+
+ def test_function_length_check_definition_below_severity4(self):
+ self.assert_function_length_check_below_error_level(4)
+
+ def test_function_length_check_definition_severity4(self):
+ self.assert_function_length_check_at_error_level(4)
+
+ def test_function_length_check_definition_above_severity4(self):
+ self.assert_function_length_check_above_error_level(4)
+
+ def test_function_length_check_definition_below_severity5(self):
+ self.assert_function_length_check_below_error_level(5)
+
+ def test_function_length_check_definition_at_severity5(self):
+ self.assert_function_length_check_at_error_level(5)
+
+ def test_function_length_check_definition_above_severity5(self):
+ self.assert_function_length_check_above_error_level(5)
+
+ def test_function_length_check_definition_huge_lines(self):
+ # 5 is the limit
+ self.assert_function_length_check_definition(self.trigger_lines(6), 5)
+
+ def test_function_length_not_determinable(self):
+ # Macro invocation without terminating semicolon.
+ self.assert_function_lengths_check(
+ 'MACRO(arg)',
+ '')
+
+ # Macro with underscores
+ self.assert_function_lengths_check(
+ 'MACRO_WITH_UNDERSCORES(arg1, arg2, arg3)',
+ '')
+
+ self.assert_function_lengths_check(
+ 'NonMacro(arg)',
+ 'Lint failed to find start of function body.'
+ ' [readability/fn_size] [5]')
+
+
+class NoNonVirtualDestructorsTest(CppStyleTestBase):
+
+ def test_no_error(self):
+ self.assert_multi_line_lint(
+ '''\
+ class Foo {
+ virtual ~Foo();
+ virtual void foo();
+ };''',
+ '')
+
+ self.assert_multi_line_lint(
+ '''\
+ class Foo {
+ virtual inline ~Foo();
+ virtual void foo();
+ };''',
+ '')
+
+ self.assert_multi_line_lint(
+ '''\
+ class Foo {
+ inline virtual ~Foo();
+ virtual void foo();
+ };''',
+ '')
+
+ self.assert_multi_line_lint(
+ '''\
+ class Foo::Goo {
+ virtual ~Goo();
+ virtual void goo();
+ };''',
+ '')
+ self.assert_multi_line_lint(
+ 'class Foo { void foo(); };',
+ 'More than one command on the same line [whitespace/newline] [4]')
+ self.assert_multi_line_lint(
+ 'class MyClass {\n'
+ ' int getIntValue() { ASSERT(m_ptr); return *m_ptr; }\n'
+ '};\n',
+ '')
+ self.assert_multi_line_lint(
+ 'class MyClass {\n'
+ ' int getIntValue()\n'
+ ' {\n'
+ ' ASSERT(m_ptr); return *m_ptr;\n'
+ ' }\n'
+ '};\n',
+ 'More than one command on the same line [whitespace/newline] [4]')
+
+ self.assert_multi_line_lint(
+ '''\
+ class Qualified::Goo : public Foo {
+ virtual void goo();
+ };''',
+ '')
+
+ def test_no_destructor_when_virtual_needed(self):
+ self.assert_multi_line_lint_re(
+ '''\
+ class Foo {
+ virtual void foo();
+ };''',
+ 'The class Foo probably needs a virtual destructor')
+
+ def test_enum_casing(self):
+ self.assert_multi_line_lint(
+ '''\
+ enum Foo {
+ FOO_ONE = 1,
+ FOO_TWO
+ };
+ enum { FOO_ONE };
+ enum {FooOne, fooTwo};
+ enum {
+ FOO_ONE
+ };''',
+ ['enum members should use InterCaps with an initial capital letter. [readability/enum_casing] [4]'] * 5)
+
+ self.assert_multi_line_lint(
+ '''\
+ enum Foo {
+ fooOne = 1,
+ FooTwo = 2
+ };''',
+ 'enum members should use InterCaps with an initial capital letter. [readability/enum_casing] [4]')
+
+ self.assert_multi_line_lint(
+ '''\
+ enum Foo {
+ FooOne = 1,
+ FooTwo
+ } fooVar = FooOne;
+ enum { FooOne, FooTwo };
+ enum { FooOne, FooTwo } fooVar = FooTwo;
+ enum { FooOne= FooTwo } foo;
+ enum Enum123 {
+ FooOne,
+ FooTwo = FooOne,
+ };''',
+ '')
+
+ self.assert_multi_line_lint(
+ '''\
+ // WebIDL enum
+ enum Foo {
+ FOO_ONE = 1,
+ FOO_TWO = 2,
+ };''',
+ '')
+
+ self.assert_multi_line_lint(
+ '''\
+ // WebKitIDL enum
+ enum Foo { FOO_ONE, FOO_TWO };''',
+ '')
+
+ def test_destructor_non_virtual_when_virtual_needed(self):
+ self.assert_multi_line_lint_re(
+ '''\
+ class Foo {
+ ~Foo();
+ virtual void foo();
+ };''',
+ 'The class Foo probably needs a virtual destructor')
+
+ def test_no_warn_when_derived(self):
+ self.assert_multi_line_lint(
+ '''\
+ class Foo : public Goo {
+ virtual void foo();
+ };''',
+ '')
+
+ def test_internal_braces(self):
+ self.assert_multi_line_lint_re(
+ '''\
+ class Foo {
+ enum Goo {
+ Goo
+ };
+ virtual void foo();
+ };''',
+ 'The class Foo probably needs a virtual destructor')
+
+ def test_inner_class_needs_virtual_destructor(self):
+ self.assert_multi_line_lint_re(
+ '''\
+ class Foo {
+ class Goo {
+ virtual void goo();
+ };
+ };''',
+ 'The class Goo probably needs a virtual destructor')
+
+ def test_outer_class_needs_virtual_destructor(self):
+ self.assert_multi_line_lint_re(
+ '''\
+ class Foo {
+ class Goo {
+ };
+ virtual void foo();
+ };''',
+ 'The class Foo probably needs a virtual destructor')
+
+ def test_qualified_class_needs_virtual_destructor(self):
+ self.assert_multi_line_lint_re(
+ '''\
+ class Qualified::Foo {
+ virtual void foo();
+ };''',
+ 'The class Qualified::Foo probably needs a virtual destructor')
+
+ def test_multi_line_declaration_no_error(self):
+ self.assert_multi_line_lint_re(
+ '''\
+ class Foo
+ : public Goo {
+ virtual void foo();
+ };''',
+ '')
+
+ def test_multi_line_declaration_with_error(self):
+ self.assert_multi_line_lint(
+ '''\
+ class Foo
+ {
+ virtual void foo();
+ };''',
+ ['This { should be at the end of the previous line '
+ '[whitespace/braces] [4]',
+ 'The class Foo probably needs a virtual destructor due to having '
+ 'virtual method(s), one declared at line 3. [runtime/virtual] [4]'])
+
+
+class PassPtrTest(CppStyleTestBase):
+ # For http://webkit.org/coding/RefPtr.html
+
+ def assert_pass_ptr_check(self, code, expected_message):
+ """Check warnings for Pass*Ptr are as expected.
+
+ Args:
+ code: C++ source code expected to generate a warning message.
+ expected_message: Message expected to be generated by the C++ code.
+ """
+ self.assertEqual(expected_message,
+ self.perform_pass_ptr_check(code))
+
+ def test_pass_ref_ptr_in_function(self):
+ self.assert_pass_ptr_check(
+ 'int myFunction()\n'
+ '{\n'
+ ' PassRefPtr<Type1> variable = variable2;\n'
+ '}',
+ 'Local variables should never be PassRefPtr (see '
+ 'http://webkit.org/coding/RefPtr.html). [readability/pass_ptr] [5]')
+
+ def test_pass_own_ptr_in_function(self):
+ self.assert_pass_ptr_check(
+ 'int myFunction()\n'
+ '{\n'
+ ' PassOwnPtr<Type1> variable = variable2;\n'
+ '}',
+ 'Local variables should never be PassOwnPtr (see '
+ 'http://webkit.org/coding/RefPtr.html). [readability/pass_ptr] [5]')
+
+ def test_pass_other_type_ptr_in_function(self):
+ self.assert_pass_ptr_check(
+ 'int myFunction()\n'
+ '{\n'
+ ' PassOtherTypePtr<Type1> variable;\n'
+ '}',
+ 'Local variables should never be PassOtherTypePtr (see '
+ 'http://webkit.org/coding/RefPtr.html). [readability/pass_ptr] [5]')
+
+ def test_pass_ref_ptr_return_value(self):
+ self.assert_pass_ptr_check(
+ 'PassRefPtr<Type1>\n'
+ 'myFunction(int)\n'
+ '{\n'
+ '}',
+ '')
+ self.assert_pass_ptr_check(
+ 'PassRefPtr<Type1> myFunction(int)\n'
+ '{\n'
+ '}',
+ '')
+ self.assert_pass_ptr_check(
+ 'PassRefPtr<Type1> myFunction();\n',
+ '')
+ self.assert_pass_ptr_check(
+ 'OwnRefPtr<Type1> myFunction();\n',
+ '')
+ self.assert_pass_ptr_check(
+ 'RefPtr<Type1> myFunction(int)\n'
+ '{\n'
+ '}',
+ 'The return type should use PassRefPtr instead of RefPtr. [readability/pass_ptr] [5]')
+ self.assert_pass_ptr_check(
+ 'OwnPtr<Type1> myFunction(int)\n'
+ '{\n'
+ '}',
+ 'The return type should use PassOwnPtr instead of OwnPtr. [readability/pass_ptr] [5]')
+
+ def test_ref_ptr_parameter_value(self):
+ self.assert_pass_ptr_check(
+ 'int myFunction(PassRefPtr<Type1>)\n'
+ '{\n'
+ '}',
+ '')
+ self.assert_pass_ptr_check(
+ 'int myFunction(RefPtr<Type1>)\n'
+ '{\n'
+ '}',
+ 'The parameter type should use PassRefPtr instead of RefPtr. [readability/pass_ptr] [5]')
+ self.assert_pass_ptr_check(
+ 'int myFunction(RefPtr<Type1>&)\n'
+ '{\n'
+ '}',
+ '')
+ self.assert_pass_ptr_check(
+ 'int myFunction(RefPtr<Type1>*)\n'
+ '{\n'
+ '}',
+ '')
+ self.assert_pass_ptr_check(
+ 'int myFunction(RefPtr<Type1>* = 0)\n'
+ '{\n'
+ '}',
+ '')
+
+ def test_own_ptr_parameter_value(self):
+ self.assert_pass_ptr_check(
+ 'int myFunction(PassOwnPtr<Type1>)\n'
+ '{\n'
+ '}',
+ '')
+ self.assert_pass_ptr_check(
+ 'int myFunction(OwnPtr<Type1>)\n'
+ '{\n'
+ '}',
+ 'The parameter type should use PassOwnPtr instead of OwnPtr. [readability/pass_ptr] [5]')
+ self.assert_pass_ptr_check(
+ 'int myFunction(OwnPtr<Type1>& simple)\n'
+ '{\n'
+ '}',
+ '')
+
+ def test_ref_ptr_member_variable(self):
+ self.assert_pass_ptr_check(
+ 'class Foo {'
+ ' RefPtr<Type1> m_other;\n'
+ '};\n',
+ '')
+
+
+class LeakyPatternTest(CppStyleTestBase):
+
+ def assert_leaky_pattern_check(self, code, expected_message):
+ """Check warnings for leaky patterns are as expected.
+
+ Args:
+ code: C++ source code expected to generate a warning message.
+ expected_message: Message expected to be generated by the C++ code.
+ """
+ self.assertEqual(expected_message,
+ self.perform_leaky_pattern_check(code))
+
+ def test_get_dc(self):
+ self.assert_leaky_pattern_check(
+ 'HDC hdc = GetDC(hwnd);',
+ 'Use the class HWndDC instead of calling GetDC to avoid potential '
+ 'memory leaks. [runtime/leaky_pattern] [5]')
+
+ def test_get_dc_ex(self):
+ self.assert_leaky_pattern_check(
+ 'HDC hdc = GetDCEx(hwnd, 0, 0);',
+ 'Use the class HWndDC instead of calling GetDCEx to avoid potential '
+ 'memory leaks. [runtime/leaky_pattern] [5]')
+
+ def test_own_get_dc(self):
+ self.assert_leaky_pattern_check(
+ 'HWndDC hdc(hwnd);',
+ '')
+
+ def test_create_dc(self):
+ self.assert_leaky_pattern_check(
+ 'HDC dc2 = ::CreateDC();',
+ 'Use adoptPtr and OwnPtr<HDC> when calling CreateDC to avoid potential '
+ 'memory leaks. [runtime/leaky_pattern] [5]')
+
+ self.assert_leaky_pattern_check(
+ 'adoptPtr(CreateDC());',
+ '')
+
+ def test_create_compatible_dc(self):
+ self.assert_leaky_pattern_check(
+ 'HDC dc2 = CreateCompatibleDC(dc);',
+ 'Use adoptPtr and OwnPtr<HDC> when calling CreateCompatibleDC to avoid potential '
+ 'memory leaks. [runtime/leaky_pattern] [5]')
+ self.assert_leaky_pattern_check(
+ 'adoptPtr(CreateCompatibleDC(dc));',
+ '')
+
+
+class WebKitStyleTest(CppStyleTestBase):
+
+ # For http://webkit.org/coding/coding-style.html
+ def test_indentation(self):
+ # 1. Use spaces, not tabs. Tabs should only appear in files that
+ # require them for semantic meaning, like Makefiles.
+ self.assert_multi_line_lint(
+ 'class Foo {\n'
+ ' int goo;\n'
+ '};',
+ '')
+ self.assert_multi_line_lint(
+ 'class Foo {\n'
+ '\tint goo;\n'
+ '};',
+ 'Tab found; better to use spaces [whitespace/tab] [1]')
+
+ # 2. The indent size is 4 spaces.
+ self.assert_multi_line_lint(
+ 'class Foo {\n'
+ ' int goo;\n'
+ '};',
+ '')
+ self.assert_multi_line_lint(
+ 'class Foo {\n'
+ ' int goo;\n'
+ '};',
+ 'Weird number of spaces at line-start. Are you using a 4-space indent? [whitespace/indent] [3]')
+
+ # 3. In a header, code inside a namespace should not be indented.
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n\n'
+ 'class Document {\n'
+ ' int myVariable;\n'
+ '};\n'
+ '}',
+ '',
+ 'foo.h')
+ self.assert_multi_line_lint(
+ 'namespace OuterNamespace {\n'
+ ' namespace InnerNamespace {\n'
+ ' class Document {\n'
+ '};\n'
+ '};\n'
+ '}',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.h')
+ self.assert_multi_line_lint(
+ 'namespace OuterNamespace {\n'
+ ' class Document {\n'
+ ' namespace InnerNamespace {\n'
+ '};\n'
+ '};\n'
+ '}',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.h')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n'
+ '#if 0\n'
+ ' class Document {\n'
+ '};\n'
+ '#endif\n'
+ '}',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.h')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n'
+ 'class Document {\n'
+ '};\n'
+ '}',
+ '',
+ 'foo.h')
+
+ # 4. In an implementation file (files with the extension .cpp, .c
+ # or .mm), code inside a namespace should not be indented.
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n\n'
+ 'Document::Foo()\n'
+ ' : foo(bar)\n'
+ ' , boo(far)\n'
+ '{\n'
+ ' stuff();\n'
+ '}',
+ '',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace OuterNamespace {\n'
+ 'namespace InnerNamespace {\n'
+ 'Document::Foo() { }\n'
+ ' void* p;\n'
+ '}\n'
+ '}\n',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace OuterNamespace {\n'
+ 'namespace InnerNamespace {\n'
+ 'Document::Foo() { }\n'
+ '}\n'
+ ' void* p;\n'
+ '}\n',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n\n'
+ ' const char* foo = "start:;"\n'
+ ' "dfsfsfs";\n'
+ '}\n',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n\n'
+ 'const char* foo(void* a = ";", // ;\n'
+ ' void* b);\n'
+ ' void* p;\n'
+ '}\n',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n\n'
+ 'const char* foo[] = {\n'
+ ' "void* b);", // ;\n'
+ ' "asfdf",\n'
+ ' }\n'
+ ' void* p;\n'
+ '}\n',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n\n'
+ 'const char* foo[] = {\n'
+ ' "void* b);", // }\n'
+ ' "asfdf",\n'
+ ' }\n'
+ '}\n',
+ '',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ ' namespace WebCore {\n\n'
+ ' void Document::Foo()\n'
+ ' {\n'
+ 'start: // infinite loops are fun!\n'
+ ' goto start;\n'
+ ' }',
+ 'namespace should never be indented. [whitespace/indent] [4]',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n'
+ ' Document::Foo() { }\n'
+ '}',
+ 'Code inside a namespace should not be indented.'
+ ' [whitespace/indent] [4]',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n'
+ '#define abc(x) x; \\\n'
+ ' x\n'
+ '}',
+ '',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n'
+ '#define abc(x) x; \\\n'
+ ' x\n'
+ ' void* x;'
+ '}',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.cpp')
+
+ # 5. A case label should line up with its switch statement. The
+ # case statement is indented.
+ self.assert_multi_line_lint(
+ ' switch (condition) {\n'
+ ' case fooCondition:\n'
+ ' case barCondition:\n'
+ ' i++;\n'
+ ' break;\n'
+ ' default:\n'
+ ' i--;\n'
+ ' }\n',
+ '')
+ self.assert_multi_line_lint(
+ ' switch (condition) {\n'
+ ' case fooCondition:\n'
+ ' switch (otherCondition) {\n'
+ ' default:\n'
+ ' return;\n'
+ ' }\n'
+ ' default:\n'
+ ' i--;\n'
+ ' }\n',
+ '')
+ self.assert_multi_line_lint(
+ ' switch (condition) {\n'
+ ' case fooCondition: break;\n'
+ ' default: return;\n'
+ ' }\n',
+ '')
+ self.assert_multi_line_lint(
+ ' switch (condition) {\n'
+ ' case fooCondition:\n'
+ ' case barCondition:\n'
+ ' i++;\n'
+ ' break;\n'
+ ' default:\n'
+ ' i--;\n'
+ ' }\n',
+ 'A case label should not be indented, but line up with its switch statement.'
+ ' [whitespace/indent] [4]')
+ self.assert_multi_line_lint(
+ ' switch (condition) {\n'
+ ' case fooCondition:\n'
+ ' break;\n'
+ ' default:\n'
+ ' i--;\n'
+ ' }\n',
+ 'A case label should not be indented, but line up with its switch statement.'
+ ' [whitespace/indent] [4]')
+ self.assert_multi_line_lint(
+ ' switch (condition) {\n'
+ ' case fooCondition:\n'
+ ' case barCondition:\n'
+ ' switch (otherCondition) {\n'
+ ' default:\n'
+ ' return;\n'
+ ' }\n'
+ ' default:\n'
+ ' i--;\n'
+ ' }\n',
+ 'A case label should not be indented, but line up with its switch statement.'
+ ' [whitespace/indent] [4]')
+ self.assert_multi_line_lint(
+ ' switch (condition) {\n'
+ ' case fooCondition:\n'
+ ' case barCondition:\n'
+ ' i++;\n'
+ ' break;\n\n'
+ ' default:\n'
+ ' i--;\n'
+ ' }\n',
+ 'Non-label code inside switch statements should be indented.'
+ ' [whitespace/indent] [4]')
+ self.assert_multi_line_lint(
+ ' switch (condition) {\n'
+ ' case fooCondition:\n'
+ ' case barCondition:\n'
+ ' switch (otherCondition) {\n'
+ ' default:\n'
+ ' return;\n'
+ ' }\n'
+ ' default:\n'
+ ' i--;\n'
+ ' }\n',
+ 'Non-label code inside switch statements should be indented.'
+ ' [whitespace/indent] [4]')
+
+ # 6. Boolean expressions at the same nesting level that span
+ # multiple lines should have their operators on the left side of
+ # the line instead of the right side.
+ self.assert_multi_line_lint(
+ ' return attr->name() == srcAttr\n'
+ ' || attr->name() == lowsrcAttr;\n',
+ '')
+ self.assert_multi_line_lint(
+ ' return attr->name() == srcAttr ||\n'
+ ' attr->name() == lowsrcAttr;\n',
+ 'Boolean expressions that span multiple lines should have their '
+ 'operators on the left side of the line instead of the right side.'
+ ' [whitespace/operators] [4]')
+
+ def test_spacing(self):
+ # 1. Do not place spaces around unary operators.
+ self.assert_multi_line_lint(
+ 'i++;',
+ '')
+ self.assert_multi_line_lint(
+ 'i ++;',
+ 'Extra space for operator ++; [whitespace/operators] [4]')
+
+ # 2. Do place spaces around binary and ternary operators.
+ self.assert_multi_line_lint(
+ 'y = m * x + b;',
+ '')
+ self.assert_multi_line_lint(
+ 'f(a, b);',
+ '')
+ self.assert_multi_line_lint(
+ 'c = a | b;',
+ '')
+ self.assert_multi_line_lint(
+ 'return condition ? 1 : 0;',
+ '')
+ self.assert_multi_line_lint(
+ 'y=m*x+b;',
+ 'Missing spaces around = [whitespace/operators] [4]')
+ self.assert_multi_line_lint(
+ 'f(a,b);',
+ 'Missing space after , [whitespace/comma] [3]')
+ self.assert_multi_line_lint(
+ 'c = a|b;',
+ 'Missing spaces around | [whitespace/operators] [3]')
+ # FIXME: We cannot catch this lint error.
+ # self.assert_multi_line_lint(
+ # 'return condition ? 1:0;',
+ # '')
+
+ # 3. Place spaces between control statements and their parentheses.
+ self.assert_multi_line_lint(
+ ' if (condition)\n'
+ ' doIt();\n',
+ '')
+ self.assert_multi_line_lint(
+ ' if(condition)\n'
+ ' doIt();\n',
+ 'Missing space before ( in if( [whitespace/parens] [5]')
+
+ # 4. Do not place spaces between a function and its parentheses,
+ # or between a parenthesis and its content.
+ self.assert_multi_line_lint(
+ 'f(a, b);',
+ '')
+ self.assert_multi_line_lint(
+ 'f (a, b);',
+ 'Extra space before ( in function call [whitespace/parens] [4]')
+ self.assert_multi_line_lint(
+ 'f( a, b );',
+ ['Extra space after ( in function call [whitespace/parens] [4]',
+ 'Extra space before ) [whitespace/parens] [2]'])
+
+ def test_line_breaking(self):
+ # 1. Each statement should get its own line.
+ self.assert_multi_line_lint(
+ ' x++;\n'
+ ' y++;\n'
+ ' if (condition);\n'
+ ' doIt();\n',
+ '')
+ self.assert_multi_line_lint(
+ ' if (condition) \\\n'
+ ' doIt();\n',
+ '')
+ self.assert_multi_line_lint(
+ ' x++; y++;',
+ 'More than one command on the same line [whitespace/newline] [4]')
+ self.assert_multi_line_lint(
+ ' if (condition) doIt();\n',
+ 'More than one command on the same line in if [whitespace/parens] [4]')
+ # Ensure that having a # in the line doesn't hide the error.
+ self.assert_multi_line_lint(
+ ' x++; char a[] = "#";',
+ 'More than one command on the same line [whitespace/newline] [4]')
+ # Ignore preprocessor #if directives.
+ self.assert_multi_line_lint(
+ '#if (condition) || (condition2)\n',
+ '')
+
+ # 2. An else statement should go on the same line as a preceding
+ # close brace if one is present; otherwise it should line up with
+ # the if statement.
+ self.assert_multi_line_lint(
+ 'if (condition) {\n'
+ ' doSomething();\n'
+ ' doSomethingAgain();\n'
+ '} else {\n'
+ ' doSomethingElse();\n'
+ ' doSomethingElseAgain();\n'
+ '}\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ ' doSomething();\n'
+ 'else\n'
+ ' doSomethingElse();\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition) {\n'
+ ' doSomething();\n'
+ '} else {\n'
+ ' doSomethingElse();\n'
+ ' doSomethingElseAgain();\n'
+ '}\n',
+ '')
+ self.assert_multi_line_lint(
+ '#define TEST_ASSERT(expression) do { if (!(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0)\n',
+ '')
+ self.assert_multi_line_lint(
+ '#define TEST_ASSERT(expression) do { if ( !(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0)\n',
+ 'Extra space after ( in if [whitespace/parens] [5]')
+ # FIXME: currently we only check first conditional, so we cannot detect errors in next ones.
+ # self.assert_multi_line_lint(
+ # '#define TEST_ASSERT(expression) do { if (!(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0 )\n',
+ # 'Mismatching spaces inside () in if [whitespace/parens] [5]')
+ self.assert_multi_line_lint(
+ 'WTF_MAKE_NONCOPYABLE(ClassName); WTF_MAKE_FAST_ALLOCATED;\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition) {\n'
+ ' doSomething();\n'
+ ' doSomethingAgain();\n'
+ '}\n'
+ 'else {\n'
+ ' doSomethingElse();\n'
+ ' doSomethingElseAgain();\n'
+ '}\n',
+ 'An else should appear on the same line as the preceding } [whitespace/newline] [4]')
+ self.assert_multi_line_lint(
+ 'if (condition) doSomething(); else doSomethingElse();\n',
+ ['More than one command on the same line [whitespace/newline] [4]',
+ 'Else clause should never be on same line as else (use 2 lines) [whitespace/newline] [4]',
+ 'More than one command on the same line in if [whitespace/parens] [4]'])
+ self.assert_multi_line_lint(
+ 'if (condition) doSomething(); else {\n'
+ ' doSomethingElse();\n'
+ '}\n',
+ ['More than one command on the same line in if [whitespace/parens] [4]',
+ 'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]'])
+ self.assert_multi_line_lint(
+ 'void func()\n'
+ '{\n'
+ ' while (condition) { }\n'
+ ' return 0;\n'
+ '}\n',
+ '')
+ self.assert_multi_line_lint(
+ 'void func()\n'
+ '{\n'
+ ' for (i = 0; i < 42; i++) { foobar(); }\n'
+ ' return 0;\n'
+ '}\n',
+ 'More than one command on the same line in for [whitespace/parens] [4]')
+
+ # 3. An else if statement should be written as an if statement
+ # when the prior if concludes with a return statement.
+ self.assert_multi_line_lint(
+ 'if (motivated) {\n'
+ ' if (liquid)\n'
+ ' return money;\n'
+ '} else if (tired) {\n'
+ ' break;\n'
+ '}',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ ' doSomething();\n'
+ 'else if (otherCondition)\n'
+ ' doSomethingElse();\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ ' doSomething();\n'
+ 'else\n'
+ ' doSomethingElse();\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ ' returnValue = foo;\n'
+ 'else if (otherCondition)\n'
+ ' returnValue = bar;\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ ' returnValue = foo;\n'
+ 'else\n'
+ ' returnValue = bar;\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ ' doSomething();\n'
+ 'else if (liquid)\n'
+ ' return money;\n'
+ 'else if (broke)\n'
+ ' return favor;\n'
+ 'else\n'
+ ' sleep(28800);\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (liquid) {\n'
+ ' prepare();\n'
+ ' return money;\n'
+ '} else if (greedy) {\n'
+ ' keep();\n'
+ ' return nothing;\n'
+ '}\n',
+ 'An else if statement should be written as an if statement when the '
+ 'prior "if" concludes with a return, break, continue or goto statement.'
+ ' [readability/control_flow] [4]')
+ self.assert_multi_line_lint(
+ ' if (stupid) {\n'
+ 'infiniteLoop:\n'
+ ' goto infiniteLoop;\n'
+ ' } else if (evil)\n'
+ ' goto hell;\n',
+ ['If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]',
+ 'An else if statement should be written as an if statement when the '
+ 'prior "if" concludes with a return, break, continue or goto statement.'
+ ' [readability/control_flow] [4]'])
+ self.assert_multi_line_lint(
+ 'if (liquid)\n'
+ '{\n'
+ ' prepare();\n'
+ ' return money;\n'
+ '}\n'
+ 'else if (greedy)\n'
+ ' keep();\n',
+ ['If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]',
+ 'An else should appear on the same line as the preceding } [whitespace/newline] [4]',
+ 'An else if statement should be written as an if statement when the '
+ 'prior "if" concludes with a return, break, continue or goto statement.'
+ ' [readability/control_flow] [4]'])
+ self.assert_multi_line_lint(
+ 'if (gone)\n'
+ ' return;\n'
+ 'else if (here)\n'
+ ' go();\n',
+ 'An else if statement should be written as an if statement when the '
+ 'prior "if" concludes with a return, break, continue or goto statement.'
+ ' [readability/control_flow] [4]')
+ self.assert_multi_line_lint(
+ 'if (gone)\n'
+ ' return;\n'
+ 'else\n'
+ ' go();\n',
+ 'An else statement can be removed when the prior "if" concludes '
+ 'with a return, break, continue or goto statement.'
+ ' [readability/control_flow] [4]')
+ self.assert_multi_line_lint(
+ 'if (motivated) {\n'
+ ' prepare();\n'
+ ' continue;\n'
+ '} else {\n'
+ ' cleanUp();\n'
+ ' break;\n'
+ '}\n',
+ 'An else statement can be removed when the prior "if" concludes '
+ 'with a return, break, continue or goto statement.'
+ ' [readability/control_flow] [4]')
+ self.assert_multi_line_lint(
+ 'if (tired)\n'
+ ' break;\n'
+ 'else {\n'
+ ' prepare();\n'
+ ' continue;\n'
+ '}\n',
+ ['If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]',
+ 'An else statement can be removed when the prior "if" concludes '
+ 'with a return, break, continue or goto statement.'
+ ' [readability/control_flow] [4]'])
+
+ def test_braces(self):
+ # 1. Function definitions: place each brace on its own line.
+ self.assert_multi_line_lint(
+ 'int main()\n'
+ '{\n'
+ ' doSomething();\n'
+ '}\n',
+ '')
+ self.assert_multi_line_lint(
+ 'int main() {\n'
+ ' doSomething();\n'
+ '}\n',
+ 'Place brace on its own line for function definitions. [whitespace/braces] [4]')
+
+ # 2. Other braces: place the open brace on the line preceding the
+ # code block; place the close brace on its own line.
+ self.assert_multi_line_lint(
+ 'class MyClass {\n'
+ ' int foo;\n'
+ '};\n',
+ '')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n'
+ 'int foo;\n'
+ '};\n',
+ '')
+ self.assert_multi_line_lint(
+ 'for (int i = 0; i < 10; i++) {\n'
+ ' DoSomething();\n'
+ '};\n',
+ '')
+ self.assert_multi_line_lint(
+ 'class MyClass\n'
+ '{\n'
+ ' int foo;\n'
+ '};\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ '{\n'
+ ' int foo;\n'
+ '}\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'for (int i = 0; i < 10; i++)\n'
+ '{\n'
+ ' int foo;\n'
+ '}\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'while (true)\n'
+ '{\n'
+ ' int foo;\n'
+ '}\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'foreach (Foo* foo, foos)\n'
+ '{\n'
+ ' int bar;\n'
+ '}\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'switch (type)\n'
+ '{\n'
+ 'case foo: return;\n'
+ '}\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ '{\n'
+ ' int foo;\n'
+ '}\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'for (int i = 0; i < 10; i++)\n'
+ '{\n'
+ ' int foo;\n'
+ '}\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'while (true)\n'
+ '{\n'
+ ' int foo;\n'
+ '}\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'switch (type)\n'
+ '{\n'
+ 'case foo: return;\n'
+ '}\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'else if (type)\n'
+ '{\n'
+ 'case foo: return;\n'
+ '}\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+
+ # 3. Curly braces are not required for single-line conditionals and
+ # loop bodies, but are required for single-statement bodies that
+ # span multiple lines.
+
+ #
+ # Positive tests
+ #
+ self.assert_multi_line_lint(
+ 'if (condition1)\n'
+ ' statement1();\n'
+ 'else\n'
+ ' statement2();\n',
+ '')
+
+ self.assert_multi_line_lint(
+ 'if (condition1)\n'
+ ' statement1();\n'
+ 'else if (condition2)\n'
+ ' statement2();\n',
+ '')
+
+ self.assert_multi_line_lint(
+ 'if (condition1)\n'
+ ' statement1();\n'
+ 'else if (condition2)\n'
+ ' statement2();\n'
+ 'else\n'
+ ' statement3();\n',
+ '')
+
+ self.assert_multi_line_lint(
+ 'for (; foo; bar)\n'
+ ' int foo;\n',
+ '')
+
+ self.assert_multi_line_lint(
+ 'for (; foo; bar) {\n'
+ ' int foo;\n'
+ '}\n',
+ '')
+
+ self.assert_multi_line_lint(
+ 'foreach (foo, foos) {\n'
+ ' int bar;\n'
+ '}\n',
+ '')
+
+ self.assert_multi_line_lint(
+ 'foreach (foo, foos)\n'
+ ' int bar;\n',
+ '')
+
+ self.assert_multi_line_lint(
+ 'while (true) {\n'
+ ' int foo;\n'
+ '}\n',
+ '')
+
+ self.assert_multi_line_lint(
+ 'while (true)\n'
+ ' int foo;\n',
+ '')
+
+ self.assert_multi_line_lint(
+ 'if (condition1) {\n'
+ ' statement1();\n'
+ '} else {\n'
+ ' statement2();\n'
+ '}\n',
+ '')
+
+ self.assert_multi_line_lint(
+ 'if (condition1) {\n'
+ ' statement1();\n'
+ '} else if (condition2) {\n'
+ ' statement2();\n'
+ '}\n',
+ '')
+
+ self.assert_multi_line_lint(
+ 'if (condition1) {\n'
+ ' statement1();\n'
+ '} else if (condition2) {\n'
+ ' statement2();\n'
+ '} else {\n'
+ ' statement3();\n'
+ '}\n',
+ '')
+
+ self.assert_multi_line_lint(
+ 'if (condition1) {\n'
+ ' statement1();\n'
+ ' statement1_2();\n'
+ '} else if (condition2) {\n'
+ ' statement2();\n'
+ ' statement2_2();\n'
+ '}\n',
+ '')
+
+ self.assert_multi_line_lint(
+ 'if (condition1) {\n'
+ ' statement1();\n'
+ ' statement1_2();\n'
+ '} else if (condition2) {\n'
+ ' statement2();\n'
+ ' statement2_2();\n'
+ '} else {\n'
+ ' statement3();\n'
+ ' statement3_2();\n'
+ '}\n',
+ '')
+
+ #
+ # Negative tests
+ #
+
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ ' doSomething(\n'
+ ' spanningMultipleLines);\n',
+ 'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ ' // Single-line comment\n'
+ ' doSomething();\n',
+ 'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'if (condition1)\n'
+ ' statement1();\n'
+ 'else if (condition2)\n'
+ ' // Single-line comment\n'
+ ' statement2();\n',
+ 'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'if (condition1)\n'
+ ' statement1();\n'
+ 'else if (condition2)\n'
+ ' statement2();\n'
+ 'else\n'
+ ' // Single-line comment\n'
+ ' statement3();\n',
+ 'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'for (; foo; bar)\n'
+ ' // Single-line comment\n'
+ ' int foo;\n',
+ 'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'foreach (foo, foos)\n'
+ ' // Single-line comment\n'
+ ' int bar;\n',
+ 'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'while (true)\n'
+ ' // Single-line comment\n'
+ ' int foo;\n'
+ '\n',
+ 'A conditional or loop body must use braces if the statement is more than one line long. [whitespace/braces] [4]')
+
+ # 4. If one part of an if-else statement uses curly braces, the
+ # other part must too.
+
+ self.assert_multi_line_lint(
+ 'if (condition1) {\n'
+ ' doSomething1();\n'
+ ' doSomething1_2();\n'
+ '} else if (condition2)\n'
+ ' doSomething2();\n'
+ 'else\n'
+ ' doSomething3();\n',
+ 'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'if (condition1)\n'
+ ' doSomething1();\n'
+ 'else if (condition2) {\n'
+ ' doSomething2();\n'
+ ' doSomething2_2();\n'
+ '} else\n'
+ ' doSomething3();\n',
+ 'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'if (condition1) {\n'
+ ' doSomething1();\n'
+ '} else if (condition2) {\n'
+ ' doSomething2();\n'
+ ' doSomething2_2();\n'
+ '} else\n'
+ ' doSomething3();\n',
+ 'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'if (condition1)\n'
+ ' doSomething1();\n'
+ 'else if (condition2)\n'
+ ' doSomething2();\n'
+ 'else {\n'
+ ' doSomething3();\n'
+ ' doSomething3_2();\n'
+ '}\n',
+ 'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'if (condition1) {\n'
+ ' doSomething1();\n'
+ ' doSomething1_2();\n'
+ '} else if (condition2)\n'
+ ' doSomething2();\n'
+ 'else {\n'
+ ' doSomething3();\n'
+ ' doSomething3_2();\n'
+ '}\n',
+ 'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'if (condition1)\n'
+ ' doSomething1();\n'
+ 'else if (condition2) {\n'
+ ' doSomething2();\n'
+ ' doSomething2_2();\n'
+ '} else {\n'
+ ' doSomething3();\n'
+ ' doSomething3_2();\n'
+ '}\n',
+ 'If one part of an if-else statement uses curly braces, the other part must too. [whitespace/braces] [4]')
+
+
+ # 5. Control clauses without a body should use empty braces.
+ self.assert_multi_line_lint(
+ 'for ( ; current; current = current->next) { }\n',
+ '')
+ self.assert_multi_line_lint(
+ 'for ( ; current;\n'
+ ' current = current->next) { }\n',
+ 'Weird number of spaces at line-start. Are you using a 4-space indent? [whitespace/indent] [3]')
+ self.assert_multi_line_lint(
+ 'for ( ; current; current = current->next);\n',
+ 'Semicolon defining empty statement for this loop. Use { } instead. [whitespace/semicolon] [5]')
+ self.assert_multi_line_lint(
+ 'while (true);\n',
+ 'Semicolon defining empty statement for this loop. Use { } instead. [whitespace/semicolon] [5]')
+ self.assert_multi_line_lint(
+ '} while (true);\n',
+ '')
+
+ def test_null_false_zero(self):
+ # 1. In C++, the null pointer value should be written as 0. In C,
+ # it should be written as NULL. In Objective-C and Objective-C++,
+ # follow the guideline for C or C++, respectively, but use nil to
+ # represent a null Objective-C object.
+ self.assert_lint(
+ 'functionCall(NULL)',
+ 'Use 0 instead of NULL.'
+ ' [readability/null] [5]',
+ 'foo.cpp')
+ self.assert_lint(
+ "// Don't use NULL in comments since it isn't in code.",
+ 'Use 0 or null instead of NULL (even in *comments*).'
+ ' [readability/null] [4]',
+ 'foo.cpp')
+ self.assert_lint(
+ '"A string with NULL" // and a comment with NULL is tricky to flag correctly in cpp_style.',
+ 'Use 0 or null instead of NULL (even in *comments*).'
+ ' [readability/null] [4]',
+ 'foo.cpp')
+ self.assert_lint(
+ '"A string containing NULL is ok"',
+ '',
+ 'foo.cpp')
+ self.assert_lint(
+ 'if (aboutNULL)',
+ '',
+ 'foo.cpp')
+ self.assert_lint(
+ 'myVariable = NULLify',
+ '',
+ 'foo.cpp')
+ # Make sure that the NULL check does not apply to C and Objective-C files.
+ self.assert_lint(
+ 'functionCall(NULL)',
+ '',
+ 'foo.c')
+ self.assert_lint(
+ 'functionCall(NULL)',
+ '',
+ 'foo.m')
+
+ # Make sure that the NULL check does not apply to g_object_{set,get} and
+ # g_str{join,concat}
+ self.assert_lint(
+ 'g_object_get(foo, "prop", &bar, NULL);',
+ '')
+ self.assert_lint(
+ 'g_object_set(foo, "prop", bar, NULL);',
+ '')
+ self.assert_lint(
+ 'g_build_filename(foo, bar, NULL);',
+ '')
+ self.assert_lint(
+ 'gst_bin_add_many(foo, bar, boo, NULL);',
+ '')
+ self.assert_lint(
+ 'gst_bin_remove_many(foo, bar, boo, NULL);',
+ '')
+ self.assert_lint(
+ 'gst_element_link_many(foo, bar, boo, NULL);',
+ '')
+ self.assert_lint(
+ 'gst_element_unlink_many(foo, bar, boo, NULL);',
+ '')
+ self.assert_lint(
+ 'gst_structure_get(foo, "value", G_TYPE_INT, &value, NULL);',
+ '')
+ self.assert_lint(
+ 'gst_structure_set(foo, "value", G_TYPE_INT, value, NULL);',
+ '')
+ self.assert_lint(
+ 'gst_structure_remove_fields(foo, "value", "bar", NULL);',
+ '')
+ self.assert_lint(
+ 'gst_structure_new("foo", "value", G_TYPE_INT, value, NULL);',
+ '')
+ self.assert_lint(
+ 'gst_structure_id_new(FOO, VALUE, G_TYPE_INT, value, NULL);',
+ '')
+ self.assert_lint(
+ 'gst_structure_id_set(FOO, VALUE, G_TYPE_INT, value, NULL);',
+ '')
+ self.assert_lint(
+ 'gst_structure_id_get(FOO, VALUE, G_TYPE_INT, &value, NULL);',
+ '')
+ self.assert_lint(
+ 'gst_caps_new_simple(mime, "value", G_TYPE_INT, &value, NULL);',
+ '')
+ self.assert_lint(
+ 'gst_caps_new_full(structure1, structure2, NULL);',
+ '')
+ self.assert_lint(
+ 'gchar* result = g_strconcat("part1", "part2", "part3", NULL);',
+ '')
+ self.assert_lint(
+ 'gchar* result = g_strconcat("part1", NULL);',
+ '')
+ self.assert_lint(
+ 'gchar* result = g_strjoin(",", "part1", "part2", "part3", NULL);',
+ '')
+ self.assert_lint(
+ 'gchar* result = g_strjoin(",", "part1", NULL);',
+ '')
+ self.assert_lint(
+ 'gchar* result = gdk_pixbuf_save_to_callback(pixbuf, function, data, type, error, NULL);',
+ '')
+ self.assert_lint(
+ 'gchar* result = gdk_pixbuf_save_to_buffer(pixbuf, function, data, type, error, NULL);',
+ '')
+ self.assert_lint(
+ 'gchar* result = gdk_pixbuf_save_to_stream(pixbuf, function, data, type, error, NULL);',
+ '')
+ self.assert_lint(
+ 'gtk_widget_style_get(style, "propertyName", &value, "otherName", &otherValue, NULL);',
+ '')
+ self.assert_lint(
+ 'gtk_style_context_get_style(context, "propertyName", &value, "otherName", &otherValue, NULL);',
+ '')
+ self.assert_lint(
+ 'gtk_style_context_get(context, static_cast<GtkStateFlags>(0), "property", &value, NULL);',
+ '')
+ self.assert_lint(
+ 'gtk_widget_style_get_property(style, NULL, NULL);',
+ 'Use 0 instead of NULL. [readability/null] [5]',
+ 'foo.cpp')
+ self.assert_lint(
+ 'gtk_widget_style_get_valist(style, NULL, NULL);',
+ 'Use 0 instead of NULL. [readability/null] [5]',
+ 'foo.cpp')
+
+ # 2. C++ and C bool values should be written as true and
+ # false. Objective-C BOOL values should be written as YES and NO.
+ # FIXME: Implement this.
+
+ # 3. Tests for true/false and null/non-null should be done without
+ # equality comparisons.
+ self.assert_lint_one_of_many_errors_re(
+ 'if (string != NULL)',
+ r'Tests for true/false and null/non-null should be done without equality comparisons\.')
+ self.assert_lint(
+ 'if (p == nullptr)',
+ 'Tests for true/false and null/non-null should be done without equality comparisons.'
+ ' [readability/comparison_to_boolean] [5]')
+ self.assert_lint(
+ 'if (condition == true)',
+ 'Tests for true/false and null/non-null should be done without equality comparisons.'
+ ' [readability/comparison_to_boolean] [5]')
+ self.assert_lint(
+ 'if (myVariable != /* Why would anyone put a comment here? */ false)',
+ 'Tests for true/false and null/non-null should be done without equality comparisons.'
+ ' [readability/comparison_to_boolean] [5]')
+
+ self.assert_lint_one_of_many_errors_re(
+ 'if (NULL == thisMayBeNull)',
+ r'Tests for true/false and null/non-null should be done without equality comparisons\.')
+ self.assert_lint(
+ 'if (nullptr /* funny place for a comment */ == p)',
+ 'Tests for true/false and null/non-null should be done without equality comparisons.'
+ ' [readability/comparison_to_boolean] [5]')
+ self.assert_lint(
+ 'if (true != anotherCondition)',
+ 'Tests for true/false and null/non-null should be done without equality comparisons.'
+ ' [readability/comparison_to_boolean] [5]')
+ self.assert_lint(
+ 'if (false == myBoolValue)',
+ 'Tests for true/false and null/non-null should be done without equality comparisons.'
+ ' [readability/comparison_to_boolean] [5]')
+
+ self.assert_lint(
+ 'if (fontType == trueType)',
+ '')
+ self.assert_lint(
+ 'if (othertrue == fontType)',
+ '')
+ self.assert_lint(
+ 'if (LIKELY(foo == 0))',
+ '')
+ self.assert_lint(
+ 'if (UNLIKELY(foo == 0))',
+ '')
+ self.assert_lint(
+ 'if ((a - b) == 0.5)',
+ '')
+ self.assert_lint(
+ 'if (0.5 == (a - b))',
+ '')
+ self.assert_lint(
+ 'if (LIKELY(foo == NULL))',
+ 'Use 0 instead of NULL. [readability/null] [5]')
+ self.assert_lint(
+ 'if (UNLIKELY(foo == NULL))',
+ 'Use 0 instead of NULL. [readability/null] [5]')
+
+ def test_directive_indentation(self):
+ self.assert_lint(
+ " #if FOO",
+ "preprocessor directives (e.g., #ifdef, #define, #import) should never be indented."
+ " [whitespace/indent] [4]",
+ "foo.cpp")
+
+ def test_using_std(self):
+ self.assert_lint(
+ 'using std::min;',
+ "Use 'using namespace std;' instead of 'using std::min;'."
+ " [build/using_std] [4]",
+ 'foo.cpp')
+
+ def test_using_std_swap_ignored(self):
+ self.assert_lint(
+ 'using std::swap;',
+ '',
+ 'foo.cpp')
+
+ def test_max_macro(self):
+ self.assert_lint(
+ 'int i = MAX(0, 1);',
+ '',
+ 'foo.c')
+
+ self.assert_lint(
+ 'int i = MAX(0, 1);',
+ 'Use std::max() or std::max<type>() instead of the MAX() macro.'
+ ' [runtime/max_min_macros] [4]',
+ 'foo.cpp')
+
+ self.assert_lint(
+ 'inline int foo() { return MAX(0, 1); }',
+ 'Use std::max() or std::max<type>() instead of the MAX() macro.'
+ ' [runtime/max_min_macros] [4]',
+ 'foo.h')
+
+ def test_min_macro(self):
+ self.assert_lint(
+ 'int i = MIN(0, 1);',
+ '',
+ 'foo.c')
+
+ self.assert_lint(
+ 'int i = MIN(0, 1);',
+ 'Use std::min() or std::min<type>() instead of the MIN() macro.'
+ ' [runtime/max_min_macros] [4]',
+ 'foo.cpp')
+
+ self.assert_lint(
+ 'inline int foo() { return MIN(0, 1); }',
+ 'Use std::min() or std::min<type>() instead of the MIN() macro.'
+ ' [runtime/max_min_macros] [4]',
+ 'foo.h')
+
+ def test_ctype_function(self):
+ self.assert_lint(
+ 'int i = isascii(8);',
+ 'Use equivelent function in <wtf/ASCIICType.h> instead of the '
+ 'isascii() function. [runtime/ctype_function] [4]',
+ 'foo.cpp')
+
+ def test_names(self):
+ name_underscore_error_message = " is incorrectly named. Don't use underscores in your identifier names. [readability/naming/underscores] [4]"
+ name_tooshort_error_message = " is incorrectly named. Don't use the single letter 'l' as an identifier name. [readability/naming] [4]"
+
+ # Basic cases from WebKit style guide.
+ self.assert_lint('struct Data;', '')
+ self.assert_lint('size_t bufferSize;', '')
+ self.assert_lint('class HTMLDocument;', '')
+ self.assert_lint('String mimeType();', '')
+ self.assert_lint('size_t buffer_size;',
+ 'buffer_size' + name_underscore_error_message)
+ self.assert_lint('short m_length;', '')
+ self.assert_lint('short _length;',
+ '_length' + name_underscore_error_message)
+ self.assert_lint('short length_;',
+ 'length_' + name_underscore_error_message)
+ self.assert_lint('unsigned _length;',
+ '_length' + name_underscore_error_message)
+ self.assert_lint('unsigned long _length;',
+ '_length' + name_underscore_error_message)
+ self.assert_lint('unsigned long long _length;',
+ '_length' + name_underscore_error_message)
+
+ # Allow underscores in Objective C files.
+ self.assert_lint('unsigned long long _length;',
+ '',
+ 'foo.m')
+ self.assert_lint('unsigned long long _length;',
+ '',
+ 'foo.mm')
+ self.assert_lint('#import "header_file.h"\n'
+ 'unsigned long long _length;',
+ '',
+ 'foo.h')
+ self.assert_lint('unsigned long long _length;\n'
+ '@interface WebFullscreenWindow;',
+ '',
+ 'foo.h')
+ self.assert_lint('unsigned long long _length;\n'
+ '@implementation WebFullscreenWindow;',
+ '',
+ 'foo.h')
+ self.assert_lint('unsigned long long _length;\n'
+ '@class WebWindowFadeAnimation;',
+ '',
+ 'foo.h')
+
+ # Variable name 'l' is easy to confuse with '1'.
+ self.assert_lint('int l;', 'l' + name_tooshort_error_message)
+ self.assert_lint('size_t l;', 'l' + name_tooshort_error_message)
+ self.assert_lint('long long l;', 'l' + name_tooshort_error_message)
+
+ # Pointers, references, functions, templates, and adjectives.
+ self.assert_lint('char* under_score;',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('const int UNDER_SCORE;',
+ 'UNDER_SCORE' + name_underscore_error_message)
+ self.assert_lint('static inline const char const& const under_score;',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('WebCore::RenderObject* under_score;',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('int func_name();',
+ 'func_name' + name_underscore_error_message)
+ self.assert_lint('RefPtr<RenderObject*> under_score;',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('WTF::Vector<WTF::RefPtr<const RenderObject* const> > under_score;',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('int under_score[];',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('struct dirent* under_score;',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('long under_score;',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('long long under_score;',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('long double under_score;',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('long long int under_score;',
+ 'under_score' + name_underscore_error_message)
+
+ # Declarations in control statements.
+ self.assert_lint('if (int under_score = 42) {',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('else if (int under_score = 42) {',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('for (int under_score = 42; cond; i++) {',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('while (foo & under_score = bar) {',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('for (foo * under_score = p; cond; i++) {',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('for (foo * under_score; cond; i++) {',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('while (foo & value_in_thirdparty_library) {', '')
+ self.assert_lint('while (foo * value_in_thirdparty_library) {', '')
+ self.assert_lint('if (mli && S_OK == mli->foo()) {', '')
+
+ # More member variables and functions.
+ self.assert_lint('int SomeClass::s_validName', '')
+ self.assert_lint('int m_under_score;',
+ 'm_under_score' + name_underscore_error_message)
+ self.assert_lint('int SomeClass::s_under_score = 0;',
+ 'SomeClass::s_under_score' + name_underscore_error_message)
+ self.assert_lint('int SomeClass::under_score = 0;',
+ 'SomeClass::under_score' + name_underscore_error_message)
+
+ # Other statements.
+ self.assert_lint('return INT_MAX;', '')
+ self.assert_lint('return_t under_score;',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('goto under_score;',
+ 'under_score' + name_underscore_error_message)
+ self.assert_lint('delete static_cast<Foo*>(p);', '')
+
+ # Multiple variables in one line.
+ self.assert_lint('void myFunction(int variable1, int another_variable);',
+ 'another_variable' + name_underscore_error_message)
+ self.assert_lint('int variable1, another_variable;',
+ 'another_variable' + name_underscore_error_message)
+ self.assert_lint('int first_variable, secondVariable;',
+ 'first_variable' + name_underscore_error_message)
+ self.assert_lint('void my_function(int variable_1, int variable_2);',
+ ['my_function' + name_underscore_error_message,
+ 'variable_1' + name_underscore_error_message,
+ 'variable_2' + name_underscore_error_message])
+ self.assert_lint('for (int variable_1, variable_2;;) {',
+ ['variable_1' + name_underscore_error_message,
+ 'variable_2' + name_underscore_error_message])
+
+ # There is an exception for op code functions, but only in the JavaScriptCore directory.
+ self.assert_lint('void this_op_code(int var1, int var2)', '', 'Source/JavaScriptCore/foo.cpp')
+ self.assert_lint('void op_code(int var1, int var2)', '', 'Source/JavaScriptCore/foo.cpp')
+ self.assert_lint('void this_op_code(int var1, int var2)', 'this_op_code' + name_underscore_error_message)
+
+ # GObject requires certain magical names in class declarations.
+ self.assert_lint('void webkit_dom_object_init();', '')
+ self.assert_lint('void webkit_dom_object_class_init();', '')
+
+ # There is an exception for GTK+ API.
+ self.assert_lint('void webkit_web_view_load(int var1, int var2)', '', 'Source/Webkit/gtk/webkit/foo.cpp')
+ self.assert_lint('void webkit_web_view_load(int var1, int var2)', '', 'Source/Webkit2/UIProcess/gtk/foo.cpp')
+
+ # Test that this doesn't also apply to files not in a 'gtk' directory.
+ self.assert_lint('void webkit_web_view_load(int var1, int var2)',
+ 'webkit_web_view_load is incorrectly named. Don\'t use underscores in your identifier names.'
+ ' [readability/naming/underscores] [4]', 'Source/Webkit/webkit/foo.cpp')
+ # Test that this doesn't also apply to names that don't start with 'webkit_'.
+ self.assert_lint_one_of_many_errors_re('void otherkit_web_view_load(int var1, int var2)',
+ 'otherkit_web_view_load is incorrectly named. Don\'t use underscores in your identifier names.'
+ ' [readability/naming/underscores] [4]', 'Source/Webkit/webkit/foo.cpp')
+
+ # There is an exception for some unit tests that begin with "tst_".
+ self.assert_lint('void tst_QWebFrame::arrayObjectEnumerable(int var1, int var2)', '')
+
+ # The Qt API uses names that begin with "qt_" or "_q_".
+ self.assert_lint('void QTFrame::qt_drt_is_awesome(int var1, int var2)', '')
+ self.assert_lint('void QTFrame::_q_drt_is_awesome(int var1, int var2)', '')
+ self.assert_lint('void qt_drt_is_awesome(int var1, int var2);', '')
+ self.assert_lint('void _q_drt_is_awesome(int var1, int var2);', '')
+
+ # Cairo forward-declarations should not be a failure.
+ self.assert_lint('typedef struct _cairo cairo_t;', '')
+ self.assert_lint('typedef struct _cairo_surface cairo_surface_t;', '')
+ self.assert_lint('typedef struct _cairo_scaled_font cairo_scaled_font_t;', '')
+
+ # EFL forward-declarations should not be a failure.
+ self.assert_lint('typedef struct _Ecore_Evas Ecore_Evas;', '')
+ self.assert_lint('typedef struct _Ecore_Pipe Ecore_Pipe;', '')
+ self.assert_lint('typedef struct _Eina_Rectangle Eina_Rectangle;', '')
+ self.assert_lint('typedef struct _Evas_Object Evas_Object;', '')
+ self.assert_lint('typedef struct _Ewk_History_Item Ewk_History_Item;', '')
+
+ # NPAPI functions that start with NPN_, NPP_ or NP_ are allowed.
+ self.assert_lint('void NPN_Status(NPP, const char*)', '')
+ self.assert_lint('NPError NPP_SetWindow(NPP instance, NPWindow *window)', '')
+ self.assert_lint('NPObject* NP_Allocate(NPP, NPClass*)', '')
+
+ # const_iterator is allowed as well.
+ self.assert_lint('typedef VectorType::const_iterator const_iterator;', '')
+
+ # vm_throw is allowed as well.
+ self.assert_lint('int vm_throw;', '')
+
+ # Bitfields.
+ self.assert_lint('unsigned _fillRule : 1;',
+ '_fillRule' + name_underscore_error_message)
+
+ # New operators in initialization.
+ self.assert_lint('OwnPtr<uint32_t> variable(new uint32_t);', '')
+ self.assert_lint('OwnPtr<uint32_t> variable(new (expr) uint32_t);', '')
+ self.assert_lint('OwnPtr<uint32_t> under_score(new uint32_t);',
+ 'under_score' + name_underscore_error_message)
+
+ # Conversion operator declaration.
+ self.assert_lint('operator int64_t();', '')
+
+ def test_parameter_names(self):
+ # Leave meaningless variable names out of function declarations.
+ meaningless_variable_name_error_message = 'The parameter name "%s" adds no information, so it should be removed. [readability/parameter_name] [5]'
+
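+ # Filter rules: a bare '-' switches every category off, and
+ # '+readability/parameter_name' then re-enables only the category
+ # under test, so other checks cannot interfere with these asserts.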
+ parameter_error_rules = ('-',
+ '+readability/parameter_name')
+ # No variable name, so no error.
+ self.assertEqual('',
+ self.perform_lint('void func(int);', 'test.cpp', parameter_error_rules))
+
+ # Verify that copying the name of the set function causes the error (with some odd casing).
+ self.assertEqual(meaningless_variable_name_error_message % 'itemCount',
+ self.perform_lint('void setItemCount(size_t itemCount);', 'test.cpp', parameter_error_rules))
+ self.assertEqual(meaningless_variable_name_error_message % 'abcCount',
+ self.perform_lint('void setABCCount(size_t abcCount);', 'test.cpp', parameter_error_rules))
+
+ # Verify that copying a type name will trigger the warning (even if the type is a template parameter).
+ self.assertEqual(meaningless_variable_name_error_message % 'context',
+ self.perform_lint('void funct(PassRefPtr<ScriptExecutionContext> context);', 'test.cpp', parameter_error_rules))
+
+ # Verify that acronyms as variable names trigger the error (for both set functions and type names).
+ self.assertEqual(meaningless_variable_name_error_message % 'ec',
+ self.perform_lint('void setExceptionCode(int ec);', 'test.cpp', parameter_error_rules))
+ self.assertEqual(meaningless_variable_name_error_message % 'ec',
+ self.perform_lint('void funct(ExceptionCode ec);', 'test.cpp', parameter_error_rules))
+
+ # 'object' alone, appended, or as part of an acronym is meaningless.
+ self.assertEqual(meaningless_variable_name_error_message % 'object',
+ self.perform_lint('void funct(RenderView object);', 'test.cpp', parameter_error_rules))
+ self.assertEqual(meaningless_variable_name_error_message % 'viewObject',
+ self.perform_lint('void funct(RenderView viewObject);', 'test.cpp', parameter_error_rules))
+ self.assertEqual(meaningless_variable_name_error_message % 'rvo',
+ self.perform_lint('void funct(RenderView rvo);', 'test.cpp', parameter_error_rules))
+
+ # Check that r, g, b, and a are allowed.
+ self.assertEqual('',
+ self.perform_lint('void setRGBAValues(int r, int g, int b, int a);', 'test.cpp', parameter_error_rules))
+
+ # Verify that a simple substring match isn't done, since that would cause false positives.
+ self.assertEqual('',
+ self.perform_lint('void setNateLateCount(size_t elate);', 'test.cpp', parameter_error_rules))
+ self.assertEqual('',
+ self.perform_lint('void funct(NateLate elate);', 'test.cpp', parameter_error_rules))
+
+ # Don't generate warnings for function definitions (only declarations).
+ self.assertEqual('',
+ self.perform_lint('void funct(PassRefPtr<ScriptExecutionContext> context)\n'
+ '{\n'
+ '}\n', 'test.cpp', parameter_error_rules))
+
+ def test_comments(self):
+ # A comment at the beginning of a line is ok.
+ self.assert_lint('// comment', '')
+ self.assert_lint(' // comment', '')
+
+ self.assert_lint('} // namespace WebCore',
+ 'One space before end of line comments'
+ ' [whitespace/comments] [5]')
+
+ def test_webkit_export_check(self):
+ webkit_export_error_rules = ('-',
+ '+readability/webkit_export')
+ self.assertEqual('',
+ self.perform_lint('WEBKIT_EXPORT int foo();\n',
+ 'WebKit/chromium/public/test.h',
+ webkit_export_error_rules))
+ self.assertEqual('',
+ self.perform_lint('WEBKIT_EXPORT int foo();\n',
+ 'WebKit/chromium/tests/test.h',
+ webkit_export_error_rules))
+ self.assertEqual('WEBKIT_EXPORT should only be used in header files. [readability/webkit_export] [5]',
+ self.perform_lint('WEBKIT_EXPORT int foo();\n',
+ 'WebKit/chromium/public/test.cpp',
+ webkit_export_error_rules))
+ self.assertEqual('WEBKIT_EXPORT should only appear in the chromium public (or tests) directory. [readability/webkit_export] [5]',
+ self.perform_lint('WEBKIT_EXPORT int foo();\n',
+ 'WebKit/chromium/src/test.h',
+ webkit_export_error_rules))
+ self.assertEqual('WEBKIT_EXPORT should not be used on a function with a body. [readability/webkit_export] [5]',
+ self.perform_lint('WEBKIT_EXPORT int foo() { }\n',
+ 'WebKit/chromium/public/test.h',
+ webkit_export_error_rules))
+ self.assertEqual('WEBKIT_EXPORT should not be used on a function with a body. [readability/webkit_export] [5]',
+ self.perform_lint('WEBKIT_EXPORT inline int foo()\n'
+ '{\n'
+ '}\n',
+ 'WebKit/chromium/public/test.h',
+ webkit_export_error_rules))
+ self.assertEqual('WEBKIT_EXPORT should not be used with a pure virtual function. [readability/webkit_export] [5]',
+ self.perform_lint('{}\n'
+ 'WEBKIT_EXPORT\n'
+ 'virtual\n'
+ 'int\n'
+ 'foo() = 0;\n',
+ 'WebKit/chromium/public/test.h',
+ webkit_export_error_rules))
+ self.assertEqual('',
+ self.perform_lint('{}\n'
+ 'WEBKIT_EXPORT\n'
+ 'virtual\n'
+ 'int\n'
+ 'foo() = 0;\n',
+ 'test.h',
+ webkit_export_error_rules))
+
+ def test_other(self):
+ # FIXME: Implement this.
+ pass
+
+
+class CppCheckerTest(unittest.TestCase):
+
+ """Tests CppChecker class."""
+
+ def mock_handle_style_error(self):
+ pass
+
+ def _checker(self):
+ return CppChecker("foo", "h", self.mock_handle_style_error, 3)
+
+ def test_init(self):
+ """Test __init__ constructor."""
+ checker = self._checker()
+ self.assertEqual(checker.file_extension, "h")
+ self.assertEqual(checker.file_path, "foo")
+ self.assertEqual(checker.handle_style_error, self.mock_handle_style_error)
+ self.assertEqual(checker.min_confidence, 3)
+
+ def test_eq(self):
+ """Test __eq__ equality function."""
+ checker1 = self._checker()
+ checker2 = self._checker()
+
+ # == calls __eq__.
+ self.assertTrue(checker1 == checker2)
+
+ def mock_handle_style_error2(self):
+ pass
+
+ # Verify that a difference in any argument causes equality to fail.
+ checker = CppChecker("foo", "h", self.mock_handle_style_error, 3)
+ self.assertFalse(checker == CppChecker("bar", "h", self.mock_handle_style_error, 3))
+ self.assertFalse(checker == CppChecker("foo", "c", self.mock_handle_style_error, 3))
+ self.assertFalse(checker == CppChecker("foo", "h", mock_handle_style_error2, 3))
+ self.assertFalse(checker == CppChecker("foo", "h", self.mock_handle_style_error, 4))
+
+ def test_ne(self):
+ """Test __ne__ inequality function."""
+ checker1 = self._checker()
+ checker2 = self._checker()
+
+ # != calls __ne__.
+ # By default, __ne__ always returns true on different objects.
+ # Thus, just check the distinguishing case to verify that the
+ # code defines __ne__.
+ self.assertFalse(checker1 != checker2)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/jsonchecker.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/jsonchecker.py
new file mode 100644
index 0000000..264cbee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/jsonchecker.py
@@ -0,0 +1,49 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Checks WebKit style for JSON files."""
+
+import json
+import re
+
+
+class JSONChecker(object):
+ """Processes JSON lines for checking style."""
+
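+ # Style categories this checker can report.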
+ categories = set(('json/syntax',))
+
+ def __init__(self, file_path, handle_style_error):
+ self._handle_style_error = handle_style_error
+ self._handle_style_error.turn_off_line_filtering()
+
+ def check(self, lines):
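+ # Rejoin the lines and parse the whole file in one json.loads()
+ # call; the first syntax error surfaces as a ValueError carrying a
+ # line/column position that can be mapped back to the input.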
+ try:
+ json.loads('\n'.join(lines) + '\n')
+ except ValueError, e:
+ self._handle_style_error(self.line_number_from_json_exception(e), 'json/syntax', 5, str(e))
+
+ @staticmethod
+ def line_number_from_json_exception(error):
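+ # Python's json module formats errors as, e.g.,
+ # "Expecting property name: line 2 column 1 (char 2)". Fall back
+ # to line 0 when the message carries no position information.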
+ match = re.search(r': line (?P<line>\d+) column \d+', str(error))
+ if not match:
+ return 0
+ return int(match.group('line'))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py
new file mode 100644
index 0000000..35ecdf5
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py
@@ -0,0 +1,111 @@
+# Copyright (C) 2010 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for jsonchecker.py."""
+
+import unittest
+
+import jsonchecker
+
+
+class MockErrorHandler(object):
+ def __init__(self, handle_style_error):
+ self.turned_off_filtering = False
+ self._handle_style_error = handle_style_error
+
+ def turn_off_line_filtering(self):
+ self.turned_off_filtering = True
+
+ def __call__(self, line_number, category, confidence, message):
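+ # Forward ourselves as the first argument so the test callback can
+ # record state (e.g. had_error) on this handler instance.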
+ self._handle_style_error(self, line_number, category, confidence, message)
+ return True
+
+
+class JSONCheckerTest(unittest.TestCase):
+ """Tests JSONChecker class."""
+
+ def test_line_number_from_json_exception(self):
+ tests = (
+ (0, 'No JSON object could be decoded'),
+ (2, 'Expecting property name: line 2 column 1 (char 2)'),
+ (3, 'Expecting object: line 3 column 1 (char 15)'),
+ (9, 'Expecting property name: line 9 column 21 (char 478)'),
+ )
+ for expected_line, message in tests:
+ self.assertEqual(expected_line, jsonchecker.JSONChecker.line_number_from_json_exception(ValueError(message)))
+
+ def assert_no_error(self, json_data):
+ def handle_style_error(mock_error_handler, line_number, category, confidence, message):
+ self.fail('Unexpected error: %d %s %d %s' % (line_number, category, confidence, message))
+
+ error_handler = MockErrorHandler(handle_style_error)
+ checker = jsonchecker.JSONChecker('foo.json', error_handler)
+ checker.check(json_data.split('\n'))
+ self.assertTrue(error_handler.turned_off_filtering)
+
+ def assert_error(self, expected_line_number, expected_category, json_data):
+ def handle_style_error(mock_error_handler, line_number, category, confidence, message):
+ mock_error_handler.had_error = True
+ self.assertEqual(expected_line_number, line_number)
+ self.assertEqual(expected_category, category)
+ self.assertIn(category, jsonchecker.JSONChecker.categories)
+
+ error_handler = MockErrorHandler(handle_style_error)
+ error_handler.had_error = False
+
+ checker = jsonchecker.JSONChecker('foo.json', error_handler)
+ checker.check(json_data.split('\n'))
+ self.assertTrue(error_handler.had_error)
+ self.assertTrue(error_handler.turned_off_filtering)
+
+ def mock_handle_style_error(self):
+ pass
+
+ def test_conflict_marker(self):
+ self.assert_error(0, 'json/syntax', '<<<<<<< HEAD\n{\n}\n')
+
+ def test_single_quote(self):
+ self.assert_error(2, 'json/syntax', "{\n'slaves': []\n}\n")
+
+ def test_init(self):
+ error_handler = MockErrorHandler(self.mock_handle_style_error)
+ checker = jsonchecker.JSONChecker('foo.json', error_handler)
+ self.assertEqual(checker._handle_style_error, error_handler)
+
+ def test_no_error(self):
+ self.assert_no_error("""{
+ "slaves": [ { "name": "test-slave", "platform": "*" },
+ { "name": "apple-xserve-4", "platform": "mac-snowleopard" }
+ ],
+
+ "builders": [ { "name": "SnowLeopard Intel Release (Build)", "type": "Build", "builddir": "snowleopard-intel-release",
+ "platform": "mac-snowleopard", "configuration": "release", "architectures": ["x86_64"],
+ "slavenames": ["apple-xserve-4"]
+ }
+ ],
+
+ "schedulers": [ { "type": "PlatformSpecificScheduler", "platform": "mac-snowleopard", "branch": "trunk", "treeStableTimer": 45.0,
+ "builderNames": ["SnowLeopard Intel Release (Build)", "SnowLeopard Intel Debug (Build)"]
+ }
+ ]
+}
+""")
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/png.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/png.py
new file mode 100644
index 0000000..430d6f0
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/png.py
@@ -0,0 +1,75 @@
+# Copyright (C) 2012 Balazs Ankes (bank@inf.u-szeged.hu) University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Supports checking WebKit style in png files."""
+
+import os
+import re
+
+from webkitpy.common import checksvnconfigfile
+from webkitpy.common import read_checksum_from_png
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.checkout.scm.detection import SCMDetector
+
+class PNGChecker(object):
+ """Check svn:mime-type for checking style"""
+
+ categories = set(['image/png'])
+
+ def __init__(self, file_path, handle_style_error, scm=None, host=None):
+ self._file_path = file_path
+ self._handle_style_error = handle_style_error
+ self._host = host or SystemHost()
+ self._fs = self._host.filesystem
+ self._detector = scm or SCMDetector(self._fs, self._host.executive).detect_scm_system(self._fs.getcwd())
+
+ def check(self, inline=None):
+ errorstr = ""
+ config_file_path = ""
+ detection = self._detector.display_name()
+
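+ # Reference images must embed the expected pixel checksum;
+ # run-webkit-tests writes it when generating -expected.png files.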
+ if self._fs.exists(self._file_path) and self._file_path.endswith("-expected.png"):
+ with self._fs.open_binary_file_for_reading(self._file_path) as filehandle:
+ if not read_checksum_from_png.read_checksum(filehandle):
+ self._handle_style_error(0, 'image/png', 5, "Image lacks a checksum. Generate pngs using run-webkit-tests to ensure they have a checksum.")
+
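+ # A git checkout has no svn:mime-type property to query, so check
+ # that the developer's SVN config applies the property
+ # automatically through auto-props for *.png files.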
+ if detection == "git":
+ (file_missing, autoprop_missing, png_missing) = checksvnconfigfile.check(self._host, self._fs)
+ config_file_path = checksvnconfigfile.config_file_path(self._host, self._fs)
+
+ if file_missing:
+ self._handle_style_error(0, 'image/png', 5, "There is no SVN config file. (%s)" % config_file_path)
+ elif autoprop_missing and png_missing:
+ self._handle_style_error(0, 'image/png', 5, checksvnconfigfile.errorstr_autoprop(config_file_path) + checksvnconfigfile.errorstr_png(config_file_path))
+ elif autoprop_missing:
+ self._handle_style_error(0, 'image/png', 5, checksvnconfigfile.errorstr_autoprop(config_file_path))
+ elif png_missing:
+ self._handle_style_error(0, 'image/png', 5, checksvnconfigfile.errorstr_png(config_file_path))
+
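+ # An svn checkout exposes the property directly via propget.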
+ elif detection == "svn":
+ prop_get = self._detector.propget("svn:mime-type", self._file_path)
+ if prop_get != "image/png":
+ errorstr = "Set the svn:mime-type property (svn propset svn:mime-type image/png %s)." % self._file_path
+ self._handle_style_error(0, 'image/png', 5, errorstr)
+
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/png_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/png_unittest.py
new file mode 100644
index 0000000..0267f54
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/png_unittest.py
@@ -0,0 +1,132 @@
+# Copyright (C) 2012 Balazs Ankes (bank@inf.u-szeged.hu) University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for png.py."""
+
+import unittest
+
+from png import PNGChecker
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+
+class MockSCMDetector(object):
+
+ def __init__(self, scm, prop=None):
+ self._scm = scm
+ self._prop = prop
+
+ def display_name(self):
+ return self._scm
+
+ def propget(self, pname, path):
+ return self._prop
+
+
+class PNGCheckerTest(unittest.TestCase):
+ """Tests PNGChecker class."""
+
+ def test_init(self):
+ """Test __init__() method."""
+
+ def mock_handle_style_error(self):
+ pass
+
+ checker = PNGChecker("test/config", mock_handle_style_error, MockSCMDetector('git'), MockSystemHost())
+ self.assertEqual(checker._file_path, "test/config")
+ self.assertEqual(checker._handle_style_error, mock_handle_style_error)
+
+ def test_check(self):
+ errors = []
+
+ def mock_handle_style_error(line_number, category, confidence, message):
+ error = (line_number, category, confidence, message)
+ errors.append(error)
+
+ file_path = ''
+
+ fs = MockFileSystem()
+
+ scm = MockSCMDetector('svn')
+ checker = PNGChecker(file_path, mock_handle_style_error, scm, MockSystemHost(filesystem=fs))
+ checker.check()
+ self.assertEqual(len(errors), 1)
+ self.assertEqual(errors[0],
+ (0, 'image/png', 5, 'Set the svn:mime-type property (svn propset svn:mime-type image/png ).'))
+
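+ # Git scenarios: whether an error is reported depends entirely on
+ # the auto-props settings in the mocked SVN config file.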
+ files = {'/Users/mock/.subversion/config': 'enable-auto-props = yes\n*.png = svn:mime-type=image/png'}
+ fs = MockFileSystem(files)
+ scm = MockSCMDetector('git')
+ errors = []
+ checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+ checker.check()
+ self.assertEqual(len(errors), 0)
+
+ files = {'/Users/mock/.subversion/config': '#enable-auto-props = yes'}
+ fs = MockFileSystem(files)
+ scm = MockSCMDetector('git')
+ errors = []
+ checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+ checker.check()
+ self.assertEqual(len(errors), 1)
+
+ files = {'/Users/mock/.subversion/config': 'enable-auto-props = yes\n#enable-auto-props = yes\n*.png = svn:mime-type=image/png'}
+ fs = MockFileSystem(files)
+ scm = MockSCMDetector('git')
+ errors = []
+ checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+ checker.check()
+ self.assertEqual(len(errors), 0)
+
+ files = {'/Users/mock/.subversion/config': '#enable-auto-props = yes\nenable-auto-props = yes\n*.png = svn:mime-type=image/png'}
+ fs = MockFileSystem(files)
+ scm = MockSCMDetector('git')
+ errors = []
+ checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+ checker.check()
+ self.assertEqual(len(errors), 0)
+
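+        # auto-props is explicitly disabled, so one error is expected.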
+ files = {'/Users/mock/.subversion/config': 'enable-auto-props = no'}
+ fs = MockFileSystem(files)
+ scm = MockSCMDetector('git')
+ errors = []
+ checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+ checker.check()
+ self.assertEqual(len(errors), 1)
+
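+        # A real PNG file checked while auto-props is still disabled:
+        # one error is expected.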
+ file_path = "foo.png"
+ fs.write_binary_file(file_path, "Dummy binary data")
+ scm = MockSCMDetector('git')
+ errors = []
+ checker = PNGChecker(file_path, mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+ checker.check()
+ self.assertEqual(len(errors), 1)
+
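+        # An -expected.png is additionally required to carry an
+        # embedded checksum, so two errors are expected.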
+ file_path = "foo-expected.png"
+ fs.write_binary_file(file_path, "Dummy binary data")
+ scm = MockSCMDetector('git')
+ errors = []
+ checker = PNGChecker(file_path, mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+ checker.check()
+ self.assertEqual(len(errors), 2)
+ self.assertEqual(errors[0], (0, 'image/png', 5, 'Image lacks a checksum. Generate pngs using run-webkit-tests to ensure they have a checksum.'))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/python.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/python.py
new file mode 100644
index 0000000..f09638c
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/python.py
@@ -0,0 +1,122 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Supports checking WebKit style in Python files."""
+
+import os
+import re
+import sys
+
+from StringIO import StringIO
+
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.webkit_finder import WebKitFinder
+from webkitpy.thirdparty import pep8
+
+
+class PythonChecker(object):
+ """Processes text lines for checking style."""
+ def __init__(self, file_path, handle_style_error):
+ self._file_path = file_path
+ self._handle_style_error = handle_style_error
+
+ def check(self, lines):
+ self._check_pep8(lines)
+ self._check_pylint(lines)
+
+ def _check_pep8(self, lines):
+ # Initialize pep8.options, which is necessary for
+ # Checker.check_all() to execute.
+ pep8.process_options(arglist=[self._file_path])
+
+ pep8_checker = pep8.Checker(self._file_path)
+
+ def _pep8_handle_error(line_number, offset, text, check):
+ # FIXME: Incorporate the character offset into the error output.
+ # This will require updating the error handler __call__
+ # signature to include an optional "offset" parameter.
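+            # pep8 reports errors as, e.g., "E501 line too long": a
+            # four-character code, a space, then the message, which is
+            # what the slicing below relies on.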
+ pep8_code = text[:4]
+ pep8_message = text[5:]
+
+ category = "pep8/" + pep8_code
+
+ self._handle_style_error(line_number, category, 5, pep8_message)
+
+ pep8_checker.report_error = _pep8_handle_error
+ pep8_errors = pep8_checker.check_all()
+
+ def _check_pylint(self, lines):
+ output = self._run_pylint(self._file_path)
+ errors = self._parse_pylint_output(output)
+ for line_number, category, message in errors:
+ self._handle_style_error(line_number, category, 5, message)
+
+ def _run_pylint(self, path):
+ wkf = WebKitFinder(FileSystem())
+ executive = Executive()
+ env = os.environ.copy()
+ env['PYTHONPATH'] = ('%s%s%s%s%s' % (wkf.path_from_webkit_base('Tools', 'Scripts'),
+ os.pathsep,
+ wkf.path_from_webkit_base('Source', 'build', 'scripts'),
+ os.pathsep,
+ wkf.path_from_webkit_base('Tools', 'Scripts', 'webkitpy', 'thirdparty')))
+ return executive.run_command([sys.executable, wkf.path_from_depot_tools_base('pylint.py'),
+ '--output-format=parseable',
+ '--errors-only',
+ '--rcfile=' + wkf.path_from_webkit_base('Tools', 'Scripts', 'webkitpy', 'pylintrc'),
+ path],
+ env=env,
+ error_handler=executive.ignore_error)
+
+ def _parse_pylint_output(self, output):
+ # We filter out these messages because they are bugs in pylint that produce false positives.
+ # FIXME: Does it make sense to combine these rules with the rules in style/checker.py somehow?
+ FALSE_POSITIVES = [
+ # possibly http://www.logilab.org/ticket/98613 ?
+ "Instance of 'Popen' has no 'poll' member",
+ "Instance of 'Popen' has no 'returncode' member",
+ "Instance of 'Popen' has no 'stdin' member",
+ "Instance of 'Popen' has no 'stdout' member",
+ "Instance of 'Popen' has no 'stderr' member",
+ "Instance of 'Popen' has no 'wait' member",
+ ]
+
+ lint_regex = re.compile('([^:]+):([^:]+): \[([^]]+)\] (.*)')
+ errors = []
+ for line in output.splitlines():
+ if any(msg in line for msg in FALSE_POSITIVES):
+ continue
+
+ match_obj = lint_regex.match(line)
+ if not match_obj:
+ continue
+
+ line_number = int(match_obj.group(2))
+ category_and_method = match_obj.group(3).split(', ')
+ category = 'pylint/' + (category_and_method[0])
+ if len(category_and_method) > 1:
+ message = '[%s] %s' % (category_and_method[1], match_obj.group(4))
+ else:
+ message = match_obj.group(4)
+ errors.append((line_number, category, message))
+ return errors
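+
+
+# A sketch of how _parse_pylint_output() maps one line of pylint's
+# "parseable" output to an error tuple (the file name and message here
+# are made-up examples, not real checker output):
+#
+#   "foo.py:42: [E0602, some_function] Undefined variable 'bar'"
+#   -> (42, "pylint/E0602", "[some_function] Undefined variable 'bar'")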
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/python_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/python_unittest.py
new file mode 100644
index 0000000..73bda76
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/python_unittest.py
@@ -0,0 +1,63 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for python.py."""
+
+import os
+import unittest
+
+from python import PythonChecker
+
+
+class PythonCheckerTest(unittest.TestCase):
+
+ """Tests the PythonChecker class."""
+
+ def test_init(self):
+ """Test __init__() method."""
+ def _mock_handle_style_error(self):
+ pass
+
+ checker = PythonChecker("foo.txt", _mock_handle_style_error)
+ self.assertEqual(checker._file_path, "foo.txt")
+ self.assertEqual(checker._handle_style_error,
+ _mock_handle_style_error)
+
+ def test_check(self):
+ """Test check() method."""
+ errors = []
+
+ def _mock_handle_style_error(line_number, category, confidence,
+ message):
+ error = (line_number, category, confidence, message)
+ errors.append(error)
+
+ current_dir = os.path.dirname(__file__)
+ file_path = os.path.join(current_dir, "python_unittest_input.py")
+
+ checker = PythonChecker(file_path, _mock_handle_style_error)
+ checker.check(lines=[])
+
+ self.assertEqual(errors, [
+ (4, "pep8/W291", 5, "trailing whitespace"),
+ (4, "pylint/E0602", 5, "Undefined variable 'error'"),
+ ])
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/python_unittest_input.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/python_unittest_input.py
new file mode 100644
index 0000000..afa1d4e
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/python_unittest_input.py
@@ -0,0 +1,4 @@
+# This file is sample input for python_unittest.py and includes two
+# problems, one that will generate a PEP-8 warning for trailing whitespace
+# and one that will generate a pylint error for an undefined variable.
+print error()
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/test_expectations.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/test_expectations.py
new file mode 100644
index 0000000..605ab7c
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/test_expectations.py
@@ -0,0 +1,86 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Checks WebKit style for test_expectations files."""
+
+import logging
+import optparse
+import os
+import re
+import sys
+
+from common import TabChecker
+from webkitpy.common.host import Host
+from webkitpy.layout_tests.models.test_expectations import TestExpectationParser
+
+
+_log = logging.getLogger(__name__)
+
+
+class TestExpectationsChecker(object):
+ """Processes TestExpectations lines for validating the syntax."""
+
+ categories = set(['test/expectations'])
+
+ def __init__(self, file_path, handle_style_error, host=None):
+ self._file_path = file_path
+ self._handle_style_error = handle_style_error
+ self._tab_checker = TabChecker(file_path, handle_style_error)
+
+ # FIXME: host should be a required parameter, not an optional one.
+ host = host or Host()
+ host.initialize_scm()
+
+ self._port_obj = host.port_factory.get()
+
+ # Suppress error messages of test_expectations module since they will be reported later.
+ log = logging.getLogger("webkitpy.layout_tests.layout_package.test_expectations")
+ log.setLevel(logging.CRITICAL)
+
+ def _handle_error_message(self, lineno, message, confidence):
+ pass
+
+ def check_test_expectations(self, expectations_str, tests=None):
+ parser = TestExpectationParser(self._port_obj, tests, is_lint_mode=True)
+ expectations = parser.parse('expectations', expectations_str)
+
+ level = 5
+ for expectation_line in expectations:
+ for warning in expectation_line.warnings:
+ self._handle_style_error(expectation_line.line_numbers, 'test/expectations', level, warning)
+
+ def check_tabs(self, lines):
+ self._tab_checker.check(lines)
+
+ def check(self, lines):
+ expectations = '\n'.join(lines)
+ if self._port_obj:
+ self.check_test_expectations(expectations_str=expectations, tests=None)
+
+        # Also warn about tab characters in the lines.
+ self.check_tabs(lines)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py
new file mode 100644
index 0000000..d48f9d8
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py
@@ -0,0 +1,101 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import sys
+import unittest
+
+from test_expectations import TestExpectationsChecker
+from webkitpy.common.host_mock import MockHost
+
+
+class ErrorCollector(object):
+ """An error handler class for unit tests."""
+
+ def __init__(self):
+ self._errors = []
+ self.turned_off_filtering = False
+
+ def turn_off_line_filtering(self):
+ self.turned_off_filtering = True
+
+ def __call__(self, lineno, category, confidence, message):
+ self._errors.append('%s [%s] [%d]' % (message, category, confidence))
+ return True
+
+ def get_errors(self):
+ return ''.join(self._errors)
+
+ def reset_errors(self):
+ self._errors = []
+ self.turned_off_filtering = False
+
+
+class TestExpectationsTestCase(unittest.TestCase):
+ """TestCase for test_expectations.py"""
+
+ def setUp(self):
+ self._error_collector = ErrorCollector()
+ self._test_file = 'passes/text.html'
+
+ def assert_lines_lint(self, lines, should_pass, expected_output=None):
+ self._error_collector.reset_errors()
+
+ host = MockHost()
+ checker = TestExpectationsChecker('test/TestExpectations',
+ self._error_collector, host=host)
+
+ # We should have a valid port, but override it with a test port so we
+ # can check the lines.
+ self.assertIsNotNone(checker._port_obj)
+ checker._port_obj = host.port_factory.get('test-mac-leopard')
+
+ checker.check_test_expectations(expectations_str='\n'.join(lines),
+ tests=[self._test_file])
+ checker.check_tabs(lines)
+ if should_pass:
+ self.assertEqual('', self._error_collector.get_errors())
+ elif expected_output:
+ self.assertEqual(expected_output, self._error_collector.get_errors())
+ else:
+ self.assertNotEquals('', self._error_collector.get_errors())
+
+ # Note that a patch might change a line that introduces errors elsewhere, but we
+ # don't want to lint the whole file (it can unfairly punish patches for pre-existing errors).
+ # We rely on a separate lint-webkitpy step on the bots to keep the whole file okay.
+ # FIXME: See https://bugs.webkit.org/show_bug.cgi?id=104712 .
+ self.assertFalse(self._error_collector.turned_off_filtering)
+
+ def test_valid_expectations(self):
+ self.assert_lines_lint(["crbug.com/1234 [ Mac ] passes/text.html [ Pass Failure ]"], should_pass=True)
+
+ def test_invalid_expectations(self):
+ self.assert_lines_lint(["Bug(me) passes/text.html [ Give Up]"], should_pass=False)
+
+ def test_tab(self):
+ self.assert_lines_lint(["\twebkit.org/b/1 passes/text.html [ Pass ]"], should_pass=False, expected_output="Line contains tab character. [whitespace/tab] [5]")
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/text.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/text.py
new file mode 100644
index 0000000..1147658
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/text.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Checks WebKit style for text files."""
+
+from common import TabChecker
+
+class TextChecker(object):
+
+ """Processes text lines for checking style."""
+
+ def __init__(self, file_path, handle_style_error):
+ self.file_path = file_path
+ self.handle_style_error = handle_style_error
+ self._tab_checker = TabChecker(file_path, handle_style_error)
+
+ def check(self, lines):
+ self._tab_checker.check(lines)
+
+
+# FIXME: Remove this function (requires refactoring unit tests).
+def process_file_data(filename, lines, error):
+ checker = TextChecker(filename, error)
+ checker.check(lines)
+
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/text_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/text_unittest.py
new file mode 100644
index 0000000..d4c1aaa
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/text_unittest.py
@@ -0,0 +1,89 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for text_style.py."""
+
+import unittest
+
+import text as text_style
+from text import TextChecker
+
+class TextStyleTestCase(unittest.TestCase):
+ """TestCase for text_style.py"""
+
+ def assertNoError(self, lines):
+ """Asserts that the specified lines has no errors."""
+ self.had_error = False
+
+ def error_for_test(line_number, category, confidence, message):
+ """Records if an error occurs."""
+ self.had_error = True
+
+ text_style.process_file_data('', lines, error_for_test)
+ self.assertFalse(self.had_error, '%s should not have any errors.' % lines)
+
+ def assertError(self, lines, expected_line_number):
+ """Asserts that the specified lines has an error."""
+ self.had_error = False
+
+ def error_for_test(line_number, category, confidence, message):
+ """Checks if the expected error occurs."""
+ self.assertEqual(expected_line_number, line_number)
+ self.assertEqual('whitespace/tab', category)
+ self.had_error = True
+
+ text_style.process_file_data('', lines, error_for_test)
+ self.assertTrue(self.had_error, '%s should have an error [whitespace/tab].' % lines)
+
+
+ def test_no_error(self):
+ """Tests for no error cases."""
+ self.assertNoError([''])
+ self.assertNoError(['abc def', 'ggg'])
+
+
+ def test_error(self):
+ """Tests for error cases."""
+ self.assertError(['2009-12-16\tKent Tamura\t<tkent@chromium.org>'], 1)
+ self.assertError(['2009-12-16 Kent Tamura <tkent@chromium.org>',
+ '',
+ '\tReviewed by NOBODY.'], 3)
+
+
+class TextCheckerTest(unittest.TestCase):
+
+ """Tests TextChecker class."""
+
+ def mock_handle_style_error(self):
+ pass
+
+ def test_init(self):
+ """Test __init__ constructor."""
+ checker = TextChecker("foo.txt", self.mock_handle_style_error)
+ self.assertEqual(checker.file_path, "foo.txt")
+ self.assertEqual(checker.handle_style_error, self.mock_handle_style_error)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/xcodeproj.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/xcodeproj.py
new file mode 100644
index 0000000..3de3d19
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/xcodeproj.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Checks Xcode project files."""
+
+import re
+
+
+class XcodeProjectFileChecker(object):
+
+ """Processes Xcode project file lines for checking style."""
+
+ def __init__(self, file_path, handle_style_error):
+ self.file_path = file_path
+ self.handle_style_error = handle_style_error
+ self.handle_style_error.turn_off_line_filtering()
+ self._development_region_regex = re.compile('developmentRegion = (?P<region>.+);')
+
+ def _check_development_region(self, line_index, line):
+ """Returns True when developmentRegion is detected."""
+ matched = self._development_region_regex.search(line)
+ if not matched:
+ return False
+ if matched.group('region') != 'English':
+ self.handle_style_error(line_index,
+ 'xcodeproj/settings', 5,
+ 'developmentRegion is not English.')
+ return True
+
+ def check(self, lines):
+ development_region_is_detected = False
+ for line_index, line in enumerate(lines):
+ if self._check_development_region(line_index, line):
+ development_region_is_detected = True
+
+ if not development_region_is_detected:
+ self.handle_style_error(len(lines),
+ 'xcodeproj/settings', 5,
+ 'Missing "developmentRegion = English".')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py
new file mode 100644
index 0000000..1497e38
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py
@@ -0,0 +1,69 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for xcodeproj.py."""
+import unittest
+
+import xcodeproj
+
+
+class TestErrorHandler(object):
+ """Error handler for XcodeProjectFileChecker unittests"""
+ def __init__(self, handler):
+ self.handler = handler
+
+ def turn_off_line_filtering(self):
+ pass
+
+ def __call__(self, line_number, category, confidence, message):
+ self.handler(self, line_number, category, confidence, message)
+ return True
+
+
+class XcodeProjectFileCheckerTest(unittest.TestCase):
+ """Tests XcodeProjectFileChecker class."""
+
+ def assert_no_error(self, lines):
+ def handler(error_handler, line_number, category, confidence, message):
+ self.fail('Unexpected error: %d %s %d %s' % (line_number, category, confidence, message))
+
+ error_handler = TestErrorHandler(handler)
+ checker = xcodeproj.XcodeProjectFileChecker('', error_handler)
+ checker.check(lines)
+
+ def assert_error(self, lines, expected_message):
+ self.had_error = False
+
+ def handler(error_handler, line_number, category, confidence, message):
+ self.assertEqual(expected_message, message)
+ self.had_error = True
+ error_handler = TestErrorHandler(handler)
+ checker = xcodeproj.XcodeProjectFileChecker('', error_handler)
+ checker.check(lines)
+ self.assertTrue(self.had_error, '%s should have error: %s.' % (lines, expected_message))
+
+ def test_detect_development_region(self):
+ self.assert_no_error(['developmentRegion = English;'])
+ self.assert_error([''], 'Missing "developmentRegion = English".')
+ self.assert_error(['developmentRegion = Japanese;'],
+ 'developmentRegion is not English.')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/xml.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/xml.py
new file mode 100644
index 0000000..ff4a415
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/xml.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2010 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Checks WebKit style for XML files."""
+
+from __future__ import absolute_import
+
+from xml.parsers import expat
+
+
+class XMLChecker(object):
+ """Processes XML lines for checking style."""
+
+ def __init__(self, file_path, handle_style_error):
+ self._handle_style_error = handle_style_error
+ self._handle_style_error.turn_off_line_filtering()
+
+ def check(self, lines):
+ parser = expat.ParserCreate()
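+        # Feed the parser line by line, restoring the newline that was
+        # stripped when the file was split; the final Parse('', True)
+        # call marks end-of-document so truncated input is reported.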
+ try:
+ for line in lines:
+ parser.Parse(line)
+ parser.Parse('\n')
+ parser.Parse('', True)
+ except expat.ExpatError, error:
+ self._handle_style_error(error.lineno, 'xml/syntax', 5, expat.ErrorString(error.code))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py
new file mode 100644
index 0000000..be141b7
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py
@@ -0,0 +1,86 @@
+# Copyright (C) 2010 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for xml.py."""
+
+import unittest
+import xml
+
+
+class MockErrorHandler(object):
+ def __init__(self, handle_style_error):
+ self.turned_off_filtering = False
+ self._handle_style_error = handle_style_error
+
+ def turn_off_line_filtering(self):
+ self.turned_off_filtering = True
+
+ def __call__(self, line_number, category, confidence, message):
+ self._handle_style_error(self, line_number, category, confidence, message)
+ return True
+
+
+class XMLCheckerTest(unittest.TestCase):
+ """Tests XMLChecker class."""
+
+ def assert_no_error(self, xml_data):
+ def handle_style_error(mock_error_handler, line_number, category, confidence, message):
+ self.fail('Unexpected error: %d %s %d %s' % (line_number, category, confidence, message))
+
+ error_handler = MockErrorHandler(handle_style_error)
+ checker = xml.XMLChecker('foo.xml', error_handler)
+ checker.check(xml_data.split('\n'))
+ self.assertTrue(error_handler.turned_off_filtering)
+
+ def assert_error(self, expected_line_number, expected_category, xml_data):
+ def handle_style_error(mock_error_handler, line_number, category, confidence, message):
+ mock_error_handler.had_error = True
+ self.assertEqual(expected_line_number, line_number)
+ self.assertEqual(expected_category, category)
+
+ error_handler = MockErrorHandler(handle_style_error)
+ error_handler.had_error = False
+
+ checker = xml.XMLChecker('foo.xml', error_handler)
+ checker.check(xml_data.split('\n'))
+ self.assertTrue(error_handler.had_error)
+ self.assertTrue(error_handler.turned_off_filtering)
+
+ def mock_handle_style_error(self):
+ pass
+
+ def test_conflict_marker(self):
+ self.assert_error(1, 'xml/syntax', '<<<<<<< HEAD\n<foo>\n</foo>\n')
+
+ def test_extra_closing_tag(self):
+ self.assert_error(3, 'xml/syntax', '<foo>\n</foo>\n</foo>\n')
+
+ def test_init(self):
+ error_handler = MockErrorHandler(self.mock_handle_style_error)
+ checker = xml.XMLChecker('foo.xml', error_handler)
+ self.assertEqual(checker._handle_style_error, error_handler)
+
+ def test_missing_closing_tag(self):
+ self.assert_error(3, 'xml/syntax', '<foo>\n<bar>\n</foo>\n')
+
+ def test_no_error(self):
+ self.assert_no_error('<foo>\n</foo>')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/error_handlers.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/error_handlers.py
new file mode 100644
index 0000000..99d5cb3
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/error_handlers.py
@@ -0,0 +1,164 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Defines style error handler classes.
+
+A style error handler is a function to call when a style error is
+found. Style error handlers can also have state. A class that represents
+a style error handler should implement the following methods.
+
+Methods:
+
+ __call__(self, line_number, category, confidence, message):
+
+ Handle the occurrence of a style error.
+
+ Check whether the error is reportable. If so, increment the total
+ error count and report the details. Note that error reporting can
+ be suppressed after reaching a certain number of reports.
+
+ Args:
+ line_number: The integer line number of the line containing the error.
+ category: The name of the category of the error, for example
+ "whitespace/newline".
+ confidence: An integer between 1 and 5 inclusive that represents the
+ application's level of confidence in the error. The value
+ 5 means that we are certain of the problem, and the
+ value 1 means that it could be a legitimate construct.
+ message: The error message to report.
+
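+A minimal handler satisfying this protocol might look like the
+following sketch (the "errors" list here is a hypothetical example,
+not part of this module):
+
+    errors = []
+
+    def handle_style_error(line_number, category, confidence, message):
+        errors.append((line_number, category, confidence, message))
+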
+"""
+
+
+import sys
+
+
+class DefaultStyleErrorHandler(object):
+
+ """The default style error handler."""
+
+ def __init__(self, file_path, configuration, increment_error_count,
+ line_numbers=None):
+ """Create a default style error handler.
+
+ Args:
+ file_path: The path to the file containing the error. This
+ is used for reporting to the user.
+ configuration: A StyleProcessorConfiguration instance.
+ increment_error_count: A function that takes no arguments and
+ increments the total count of reportable
+ errors.
+ line_numbers: An array of line numbers of the lines for which
+ style errors should be reported, or None if errors
+ for all lines should be reported. When it is not
+ None, this array normally contains the line numbers
+ corresponding to the modified lines of a patch.
+
+ """
+ if line_numbers is not None:
+ line_numbers = set(line_numbers)
+
+ self._file_path = file_path
+ self._configuration = configuration
+ self._increment_error_count = increment_error_count
+ self._line_numbers = line_numbers
+
+ # A string to integer dictionary cache of the number of reportable
+ # errors per category passed to this instance.
+ self._category_totals = {}
+
+ # Useful for unit testing.
+ def __eq__(self, other):
+ """Return whether this instance is equal to another."""
+ if self._configuration != other._configuration:
+ return False
+ if self._file_path != other._file_path:
+ return False
+ if self._increment_error_count != other._increment_error_count:
+ return False
+ if self._line_numbers != other._line_numbers:
+ return False
+
+ return True
+
+ # Useful for unit testing.
+ def __ne__(self, other):
+ # Python does not automatically deduce __ne__ from __eq__.
+ return not self.__eq__(other)
+
+ def _add_reportable_error(self, category):
+ """Increment the error count and return the new category total."""
+ self._increment_error_count() # Increment the total.
+
+ # Increment the category total.
+        if category not in self._category_totals:
+ self._category_totals[category] = 1
+ else:
+ self._category_totals[category] += 1
+
+ return self._category_totals[category]
+
+ def _max_reports(self, category):
+ """Return the maximum number of errors to report."""
+        if category not in self._configuration.max_reports_per_category:
+ return None
+ return self._configuration.max_reports_per_category[category]
+
+ def should_line_be_checked(self, line_number):
+ "Returns if a particular line should be checked"
+ # Was the line that was modified?
+ return self._line_numbers is None or line_number in self._line_numbers
+
+ def turn_off_line_filtering(self):
+ self._line_numbers = None
+
+ def __call__(self, line_number, category, confidence, message):
+ """Handle the occurrence of a style error.
+
+ See the docstring of this module for more information.
+
+ """
+ if not self.should_line_be_checked(line_number):
+ return False
+
+ if not self._configuration.is_reportable(category=category,
+ confidence_in_error=confidence,
+ file_path=self._file_path):
+ return False
+
+ category_total = self._add_reportable_error(category)
+
+ max_reports = self._max_reports(category)
+
+ if (max_reports is not None) and (category_total > max_reports):
+ # Then suppress displaying the error.
+ return False
+
+ self._configuration.write_style_error(category=category,
+ confidence_in_error=confidence,
+ file_path=self._file_path,
+ line_number=line_number,
+ message=message)
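+        # If this report just reached the per-category maximum, announce
+        # that subsequent reports for the category will be suppressed.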
+ if category_total == max_reports:
+ self._configuration.stderr_write("Suppressing further [%s] reports "
+ "for this file.\n" % category)
+ return True
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/error_handlers_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/error_handlers_unittest.py
new file mode 100644
index 0000000..5620d2a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/error_handlers_unittest.py
@@ -0,0 +1,195 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for error_handlers.py."""
+
+import unittest
+
+from checker import StyleProcessorConfiguration
+from error_handlers import DefaultStyleErrorHandler
+from filter import FilterConfiguration
+
+
+class DefaultStyleErrorHandlerTest(unittest.TestCase):
+
+ """Tests the DefaultStyleErrorHandler class."""
+
+ def setUp(self):
+ self._error_messages = []
+ self._error_count = 0
+
+ _category = "whitespace/tab"
+ """The category name for the tests in this class."""
+
+ _file_path = "foo.h"
+ """The file path for the tests in this class."""
+
+ def _mock_increment_error_count(self):
+ self._error_count += 1
+
+ def _mock_stderr_write(self, message):
+ self._error_messages.append(message)
+
+ def _style_checker_configuration(self):
+ """Return a StyleProcessorConfiguration instance for testing."""
+ base_rules = ["-whitespace", "+whitespace/tab"]
+ filter_configuration = FilterConfiguration(base_rules=base_rules)
+
+ return StyleProcessorConfiguration(
+ filter_configuration=filter_configuration,
+ max_reports_per_category={"whitespace/tab": 2},
+ min_confidence=3,
+ output_format="vs7",
+ stderr_write=self._mock_stderr_write)
+
+ def _error_handler(self, configuration, line_numbers=None):
+ return DefaultStyleErrorHandler(configuration=configuration,
+ file_path=self._file_path,
+ increment_error_count=self._mock_increment_error_count,
+ line_numbers=line_numbers)
+
+ def _check_initialized(self):
+ """Check that count and error messages are initialized."""
+ self.assertEqual(0, self._error_count)
+ self.assertEqual(0, len(self._error_messages))
+
+ def _call_error_handler(self, handle_error, confidence, line_number=100):
+ """Call the given error handler with a test error."""
+ handle_error(line_number=line_number,
+ category=self._category,
+ confidence=confidence,
+ message="message")
+
+ def test_eq__true_return_value(self):
+ """Test the __eq__() method for the return value of True."""
+ handler1 = self._error_handler(configuration=None)
+ handler2 = self._error_handler(configuration=None)
+
+ self.assertTrue(handler1.__eq__(handler2))
+
+ def test_eq__false_return_value(self):
+ """Test the __eq__() method for the return value of False."""
+ def make_handler(configuration=self._style_checker_configuration(),
+ file_path='foo.txt', increment_error_count=lambda: True,
+ line_numbers=[100]):
+ return DefaultStyleErrorHandler(configuration=configuration,
+ file_path=file_path,
+ increment_error_count=increment_error_count,
+ line_numbers=line_numbers)
+
+ handler = make_handler()
+
+ # Establish a baseline for our comparisons below.
+ self.assertTrue(handler.__eq__(make_handler()))
+
+ # Verify that a difference in any argument causes equality to fail.
+ self.assertFalse(handler.__eq__(make_handler(configuration=None)))
+ self.assertFalse(handler.__eq__(make_handler(file_path='bar.txt')))
+ self.assertFalse(handler.__eq__(make_handler(increment_error_count=None)))
+ self.assertFalse(handler.__eq__(make_handler(line_numbers=[50])))
+
+ def test_ne(self):
+ """Test the __ne__() method."""
+ # By default, __ne__ always returns true on different objects.
+ # Thus, check just the distinguishing case to verify that the
+ # code defines __ne__.
+ handler1 = self._error_handler(configuration=None)
+ handler2 = self._error_handler(configuration=None)
+
+ self.assertFalse(handler1.__ne__(handler2))
+
+ def test_non_reportable_error(self):
+ """Test __call__() with a non-reportable error."""
+ self._check_initialized()
+ configuration = self._style_checker_configuration()
+
+ confidence = 1
+ # Confirm the error is not reportable.
+ self.assertFalse(configuration.is_reportable(self._category,
+ confidence,
+ self._file_path))
+ error_handler = self._error_handler(configuration)
+ self._call_error_handler(error_handler, confidence)
+
+ self.assertEqual(0, self._error_count)
+ self.assertEqual([], self._error_messages)
+
+ # Also serves as a reportable error test.
+ def test_max_reports_per_category(self):
+ """Test error report suppression in __call__() method."""
+ self._check_initialized()
+ configuration = self._style_checker_configuration()
+ error_handler = self._error_handler(configuration)
+
+ confidence = 5
+
+ # First call: usual reporting.
+ self._call_error_handler(error_handler, confidence)
+ self.assertEqual(1, self._error_count)
+ self.assertEqual(1, len(self._error_messages))
+ self.assertEqual(self._error_messages,
+ ["foo.h(100): message [whitespace/tab] [5]\n"])
+
+ # Second call: suppression message reported.
+ self._call_error_handler(error_handler, confidence)
+ # The "Suppressing further..." message counts as an additional
+ # message (but not as an addition to the error count).
+ self.assertEqual(2, self._error_count)
+ self.assertEqual(3, len(self._error_messages))
+ self.assertEqual(self._error_messages[-2],
+ "foo.h(100): message [whitespace/tab] [5]\n")
+ self.assertEqual(self._error_messages[-1],
+ "Suppressing further [whitespace/tab] reports "
+ "for this file.\n")
+
+ # Third call: no report.
+ self._call_error_handler(error_handler, confidence)
+ self.assertEqual(3, self._error_count)
+ self.assertEqual(3, len(self._error_messages))
+
+ def test_line_numbers(self):
+ """Test the line_numbers parameter."""
+ self._check_initialized()
+ configuration = self._style_checker_configuration()
+ error_handler = self._error_handler(configuration,
+ line_numbers=[50])
+ confidence = 5
+
+ # Error on non-modified line: no error.
+ self._call_error_handler(error_handler, confidence, line_number=60)
+ self.assertEqual(0, self._error_count)
+ self.assertEqual([], self._error_messages)
+
+ # Error on modified line: error.
+ self._call_error_handler(error_handler, confidence, line_number=50)
+ self.assertEqual(1, self._error_count)
+ self.assertEqual(self._error_messages,
+ ["foo.h(50): message [whitespace/tab] [5]\n"])
+
+ # Error on non-modified line after turning off line filtering: error.
+ error_handler.turn_off_line_filtering()
+ self._call_error_handler(error_handler, confidence, line_number=60)
+ self.assertEqual(2, self._error_count)
+ self.assertEqual(self._error_messages,
+ ['foo.h(50): message [whitespace/tab] [5]\n',
+ 'foo.h(60): message [whitespace/tab] [5]\n',
+ 'Suppressing further [whitespace/tab] reports for this file.\n'])
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/filereader.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/filereader.py
new file mode 100644
index 0000000..1181ad4
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/filereader.py
@@ -0,0 +1,154 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+# Copyright (C) 2010 ProFUSION embedded systems
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Supports reading and processing text files."""
+
+import codecs
+import logging
+import os
+import sys
+
+
+_log = logging.getLogger(__name__)
+
+
+class TextFileReader(object):
+
+ """Supports reading and processing text files.
+
+ Attributes:
+ file_count: The total number of files passed to this instance
+ for processing, including non-text files and files
+ that should be skipped.
+      delete_only_file_count: The total number of files that this
+                              instance did not actually process because
+                              they contain no modified lines, but that
+                              should still be treated as processed.
+
+ """
+
+ def __init__(self, filesystem, processor):
+ """Create an instance.
+
+        Arguments:
+          filesystem: A FileSystem instance.
+          processor: A ProcessorBase instance.
+
+ """
+ # FIXME: Although TextFileReader requires a FileSystem it circumvents it in two places!
+ self.filesystem = filesystem
+ self._processor = processor
+ self.file_count = 0
+ self.delete_only_file_count = 0
+
+ def _read_lines(self, file_path):
+ """Read the file at a path, and return its lines.
+
+ Raises:
+ IOError: If the file does not exist or cannot be read.
+
+ """
+ # Support the UNIX convention of using "-" for stdin.
+ if file_path == '-':
+ file = codecs.StreamReaderWriter(sys.stdin,
+ codecs.getreader('utf8'),
+ codecs.getwriter('utf8'),
+ 'replace')
+ else:
+ # We do not open the file with universal newline support
+ # (codecs does not support it anyway), so the resulting
+ # lines contain trailing "\r" characters if we are reading
+ # a file with CRLF endings.
+ # FIXME: This should use self.filesystem
+ file = codecs.open(file_path, 'r', 'utf8', 'replace')
+
+ try:
+ contents = file.read()
+ finally:
+ file.close()
+
+ lines = contents.split('\n')
+ return lines
+
+ def process_file(self, file_path, **kwargs):
+ """Process the given file by calling the processor's process() method.
+
+ Args:
+ file_path: The path of the file to process.
+ **kwargs: Any additional keyword parameters that should be passed
+ to the processor's process() method. The process()
+ method should support these keyword arguments.
+
+ Raises:
+ SystemExit: If no file at file_path exists.
+
+ """
+ self.file_count += 1
+
+ if not self.filesystem.exists(file_path) and file_path != "-":
+ _log.error("File does not exist: '%s'" % file_path)
+ sys.exit(1) # FIXME: This should throw or return instead of exiting directly.
+
+ if not self._processor.should_process(file_path):
+ _log.debug("Skipping file: '%s'" % file_path)
+ return
+ _log.debug("Processing file: '%s'" % file_path)
+
+ try:
+ lines = self._read_lines(file_path)
+ except IOError, err:
+ message = ("Could not read file. Skipping: '%s'\n %s" % (file_path, err))
+ _log.warn(message)
+ return
+
+ self._processor.process(lines, file_path, **kwargs)
+
+ def _process_directory(self, directory):
+ """Process all files in the given directory, recursively."""
+ # FIXME: We should consider moving to self.filesystem.files_under() (or adding walk() to FileSystem)
+ for dir_path, dir_names, file_names in os.walk(directory):
+ for file_name in file_names:
+ file_path = self.filesystem.join(dir_path, file_name)
+ self.process_file(file_path)
+
+ def process_paths(self, paths):
+ for path in paths:
+ if self.filesystem.isdir(path):
+ self._process_directory(directory=path)
+ else:
+ self.process_file(path)
+
+ def count_delete_only_file(self):
+ """Count up files that contains only deleted lines.
+
+ Files which has no modified or newly-added lines don't need
+ to check style, but should be treated as checked. For that
+ purpose, we just count up the number of such files.
+ """
+ self.delete_only_file_count += 1
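+
+
+# A usage sketch, assuming a hypothetical ProcessorBase subclass named
+# MyProcessor (any processor with should_process()/process() works):
+#
+#   from webkitpy.common.system.filesystem import FileSystem
+#
+#   reader = TextFileReader(FileSystem(), MyProcessor())
+#   reader.process_paths(['Tools/Scripts/webkitpy/style'])
+#   print reader.file_count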
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/filereader_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/filereader_unittest.py
new file mode 100644
index 0000000..d728c46
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/filereader_unittest.py
@@ -0,0 +1,155 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.system.logtesting import LoggingTestCase
+from webkitpy.style.checker import ProcessorBase
+from webkitpy.style.filereader import TextFileReader
+
+
+class TextFileReaderTest(LoggingTestCase):
+
+ class MockProcessor(ProcessorBase):
+
+ """A processor for test purposes.
+
+ This processor simply records the parameters passed to its process()
+ method for later checking by the unittest test methods.
+
+ """
+
+ def __init__(self):
+ self.processed = []
+ """The parameters passed for all calls to the process() method."""
+
+ def should_process(self, file_path):
+ return not file_path.endswith('should_not_process.txt')
+
+ def process(self, lines, file_path, test_kwarg=None):
+ self.processed.append((lines, file_path, test_kwarg))
+
+ def setUp(self):
+ LoggingTestCase.setUp(self)
+ # FIXME: This should be a MockFileSystem once TextFileReader is moved entirely on top of FileSystem.
+ self.filesystem = FileSystem()
+ self._temp_dir = str(self.filesystem.mkdtemp())
+ self._processor = TextFileReaderTest.MockProcessor()
+ self._file_reader = TextFileReader(self.filesystem, self._processor)
+
+ def tearDown(self):
+ LoggingTestCase.tearDown(self)
+ self.filesystem.rmtree(self._temp_dir)
+
+ def _create_file(self, rel_path, text):
+ """Create a file with given text and return the path to the file."""
+ # FIXME: There are better/more secure APIs for creating tmp file paths.
+ file_path = self.filesystem.join(self._temp_dir, rel_path)
+ self.filesystem.write_text_file(file_path, text)
+ return file_path
+
+ def _passed_to_processor(self):
+ """Return the parameters passed to MockProcessor.process()."""
+ return self._processor.processed
+
+ def _assert_file_reader(self, passed_to_processor, file_count):
+ """Assert the state of the file reader."""
+ self.assertEqual(passed_to_processor, self._passed_to_processor())
+ self.assertEqual(file_count, self._file_reader.file_count)
+
+ def test_process_file__does_not_exist(self):
+ try:
+ self._file_reader.process_file('does_not_exist.txt')
+ except SystemExit, err:
+ self.assertEqual(str(err), '1')
+ else:
+ self.fail('No Exception raised.')
+ self._assert_file_reader([], 1)
+ self.assertLog(["ERROR: File does not exist: 'does_not_exist.txt'\n"])
+
+ def test_process_file__is_dir(self):
+ temp_dir = self.filesystem.join(self._temp_dir, 'test_dir')
+ self.filesystem.maybe_make_directory(temp_dir)
+
+ self._file_reader.process_file(temp_dir)
+
+ # Because the log message below contains exception text, it is
+ # possible that the text varies across platforms. For this reason,
+ # we check only the portion of the log message that we control,
+ # namely the text at the beginning.
+ log_messages = self.logMessages()
+ # We remove the message we are looking at to prevent the tearDown()
+ # from raising an exception when it asserts that no log messages
+ # remain.
+ message = log_messages.pop()
+
+ self.assertTrue(message.startswith("WARNING: Could not read file. Skipping: '%s'\n " % temp_dir))
+
+ self._assert_file_reader([], 1)
+
+ def test_process_file__should_not_process(self):
+ file_path = self._create_file('should_not_process.txt', 'contents')
+
+ self._file_reader.process_file(file_path)
+ self._assert_file_reader([], 1)
+
+ def test_process_file__multiple_lines(self):
+ file_path = self._create_file('foo.txt', 'line one\r\nline two\n')
+
+ self._file_reader.process_file(file_path)
+ processed = [(['line one\r', 'line two', ''], file_path, None)]
+ self._assert_file_reader(processed, 1)
+
+ def test_process_file__file_stdin(self):
+ file_path = self._create_file('-', 'file contents')
+
+ self._file_reader.process_file(file_path=file_path, test_kwarg='foo')
+ processed = [(['file contents'], file_path, 'foo')]
+ self._assert_file_reader(processed, 1)
+
+ def test_process_file__with_kwarg(self):
+ file_path = self._create_file('foo.txt', 'file contents')
+
+ self._file_reader.process_file(file_path=file_path, test_kwarg='foo')
+ processed = [(['file contents'], file_path, 'foo')]
+ self._assert_file_reader(processed, 1)
+
+ def test_process_paths(self):
+ # We test a list of paths that contains both a file and a directory.
+ dir = self.filesystem.join(self._temp_dir, 'foo_dir')
+ self.filesystem.maybe_make_directory(dir)
+
+ file_path1 = self._create_file('file1.txt', 'foo')
+
+ rel_path = self.filesystem.join('foo_dir', 'file2.txt')
+ file_path2 = self._create_file(rel_path, 'bar')
+
+ self._file_reader.process_paths([dir, file_path1])
+ processed = [(['bar'], file_path2, None),
+ (['foo'], file_path1, None)]
+ self._assert_file_reader(processed, 2)
+
+ def test_count_delete_only_file(self):
+ self._file_reader.count_delete_only_file()
+ delete_only_file_count = self._file_reader.delete_only_file_count
+ self.assertEqual(delete_only_file_count, 1)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/filter.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/filter.py
new file mode 100644
index 0000000..608a9e6
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/filter.py
@@ -0,0 +1,278 @@
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Contains filter-related code."""
+
+
+def validate_filter_rules(filter_rules, all_categories):
+ """Validate the given filter rules, and raise a ValueError if not valid.
+
+ Args:
+ filter_rules: A list of boolean filter rules, for example--
+ ["-whitespace", "+whitespace/braces"]
+ all_categories: A list of all available category names, for example--
+ ["whitespace/tabs", "whitespace/braces"]
+
+ Raises:
+ ValueError: An error occurs if a filter rule does not begin
+ with "+" or "-" or if a filter rule does not match
+ the beginning of some category name in the list
+ of all available categories.
+
+ """
+ for rule in filter_rules:
+ if not (rule.startswith('+') or rule.startswith('-')):
+ raise ValueError('Invalid filter rule "%s": every rule '
+ "must start with + or -." % rule)
+
+ for category in all_categories:
+ if category.startswith(rule[1:]):
+ break
+ else:
+ raise ValueError('Suspected incorrect filter rule "%s": '
+ "the rule does not match the beginning "
+ "of any category name." % rule)
+
+
+class _CategoryFilter(object):
+
+ """Filters whether to check style categories."""
+
+ def __init__(self, filter_rules=None):
+ """Create a category filter.
+
+ Args:
+ filter_rules: A list of strings that are filter rules, which
+ are strings beginning with the plus or minus
+ symbol (+/-). The list should include any
+ default filter rules at the beginning.
+ Defaults to the empty list.
+
+ Raises:
+ ValueError: Invalid filter rule if a rule does not start with
+ plus ("+") or minus ("-").
+
+ """
+ if filter_rules is None:
+ filter_rules = []
+
+ self._filter_rules = filter_rules
+ self._should_check_category = {} # Cached dictionary of category to True/False
+
+ def __str__(self):
+ return ",".join(self._filter_rules)
+
+ # Useful for unit testing.
+ def __eq__(self, other):
+ """Return whether this CategoryFilter instance is equal to another."""
+ return self._filter_rules == other._filter_rules
+
+ # Useful for unit testing.
+ def __ne__(self, other):
+        # Python does not automatically deduce this from __eq__().
+ return not (self == other)
+
+ def should_check(self, category):
+ """Return whether the category should be checked.
+
+ The rules for determining whether a category should be checked
+ are as follows. By default all categories should be checked.
+ Then apply the filter rules in order from first to last, with
+ later flags taking precedence.
+
+ A filter rule applies to a category if the string after the
+ leading plus/minus (+/-) matches the beginning of the category
+ name. A plus (+) means the category should be checked, while a
+ minus (-) means the category should not be checked.
+
+ """
+ if category in self._should_check_category:
+ return self._should_check_category[category]
+
+ should_check = True # All categories checked by default.
+ for rule in self._filter_rules:
+ if not category.startswith(rule[1:]):
+ continue
+ should_check = rule.startswith('+')
+ self._should_check_category[category] = should_check # Update cache.
+ return should_check
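+
+    # Illustrative use of the rule semantics described above (the rule
+    # lists are hypothetical):
+    #
+    #   filter = _CategoryFilter(["-whitespace", "+whitespace/braces"])
+    #   filter.should_check("whitespace/braces")  # True: the later "+" rule wins
+    #   filter.should_check("whitespace/tabs")    # False: only "-whitespace" matches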
+
+
+class FilterConfiguration(object):
+
+ """Supports filtering with path-specific and user-specified rules."""
+
+ def __init__(self, base_rules=None, path_specific=None, user_rules=None):
+ """Create a FilterConfiguration instance.
+
+ Args:
+ base_rules: The starting list of filter rules to use for
+ processing. The default is the empty list, which
+ by itself would mean that all categories should be
+ checked.
+
+ path_specific: A list of (sub_paths, path_rules) pairs
+ that stores the path-specific filter rules for
+ appending to the base rules.
+ The "sub_paths" value is a list of path
+ substrings. If a file path contains one of the
+ substrings, then the corresponding path rules
+ are appended. The first substring match takes
+ precedence, i.e. only the first match triggers
+ an append.
+ The "path_rules" value is a list of filter
+ rules that can be appended to the base rules.
+
+ user_rules: A list of filter rules that is always appended
+ to the base rules and any path rules. In other
+                        words, the user rules take precedence over
+                        everything else. In practice, the user rules are
+ provided by the user from the command line.
+
+ """
+ if base_rules is None:
+ base_rules = []
+ if path_specific is None:
+ path_specific = []
+ if user_rules is None:
+ user_rules = []
+
+ self._base_rules = base_rules
+ self._path_specific = path_specific
+ self._path_specific_lower = None
+ """The backing store for self._get_path_specific_lower()."""
+
+ self._user_rules = user_rules
+
+ self._path_rules_to_filter = {}
+ """Cached dictionary of path rules to CategoryFilter instance."""
+
+ # The same CategoryFilter instance can be shared across
+ # multiple keys in this dictionary. This allows us to take
+ # greater advantage of the caching done by
+ # CategoryFilter.should_check().
+ self._path_to_filter = {}
+ """Cached dictionary of file path to CategoryFilter instance."""
+
+ # Useful for unit testing.
+ def __eq__(self, other):
+ """Return whether this FilterConfiguration is equal to another."""
+ if self._base_rules != other._base_rules:
+ return False
+ if self._path_specific != other._path_specific:
+ return False
+ if self._user_rules != other._user_rules:
+ return False
+
+ return True
+
+ # Useful for unit testing.
+ def __ne__(self, other):
+ # Python does not automatically deduce this from __eq__().
+ return not self.__eq__(other)
+
+ # We use the prefix "_get" since the name "_path_specific_lower"
+ # is already taken up by the data attribute backing store.
+ def _get_path_specific_lower(self):
+ """Return a copy of self._path_specific with the paths lower-cased."""
+ if self._path_specific_lower is None:
+ self._path_specific_lower = []
+ for (sub_paths, path_rules) in self._path_specific:
+ sub_paths = map(str.lower, sub_paths)
+ self._path_specific_lower.append((sub_paths, path_rules))
+ return self._path_specific_lower
+
+ def _path_rules_from_path(self, path):
+ """Determine the path-specific rules to use, and return as a tuple.
+
+ This method returns a tuple rather than a list so the return
+ value can be passed to _filter_from_path_rules() without change.
+
+ """
+ path = path.lower()
+ for (sub_paths, path_rules) in self._get_path_specific_lower():
+ for sub_path in sub_paths:
+                if sub_path in path:
+ return tuple(path_rules)
+ return () # Default to the empty tuple.
+
+ def _filter_from_path_rules(self, path_rules):
+ """Return the CategoryFilter associated to the given path rules.
+
+ Args:
+ path_rules: A tuple of path rules. We require a tuple rather
+ than a list so the value can be used as a dictionary
+ key in self._path_rules_to_filter.
+
+ """
+ # We reuse the same CategoryFilter where possible to take
+ # advantage of the caching they do.
+ if path_rules not in self._path_rules_to_filter:
+ rules = list(self._base_rules) # Make a copy
+ rules.extend(path_rules)
+ rules.extend(self._user_rules)
+ self._path_rules_to_filter[path_rules] = _CategoryFilter(rules)
+
+ return self._path_rules_to_filter[path_rules]
+
+ def _filter_from_path(self, path):
+ """Return the CategoryFilter associated to a path."""
+ if path not in self._path_to_filter:
+ path_rules = self._path_rules_from_path(path)
+ filter = self._filter_from_path_rules(path_rules)
+ self._path_to_filter[path] = filter
+
+ return self._path_to_filter[path]
+
+ def should_check(self, category, path):
+ """Return whether the given category should be checked.
+
+ This method determines whether a category should be checked
+ by checking the category name against the filter rules for
+ the given path.
+
+ For a given path, the filter rules are the combination of
+ the base rules, the path-specific rules, and the user-provided
+ rules -- in that order. As we will describe below, later rules
+ in the list take precedence. The path-specific rules are the
+ rules corresponding to the first element of the "path_specific"
+ parameter that contains a string case-insensitively matching
+ some substring of the path. If there is no such element,
+ there are no path-specific rules for that path.
+
+ Given a list of filter rules, the logic for determining whether
+ a category should be checked is as follows. By default all
+ categories should be checked. Then apply the filter rules in
+ order from first to last, with later flags taking precedence.
+
+ A filter rule applies to a category if the string after the
+ leading plus/minus (+/-) matches the beginning of the category
+ name. A plus (+) means the category should be checked, while a
+ minus (-) means the category should not be checked.
+
+ Args:
+ category: The category name.
+ path: The path of the file being checked.
+
+ """
+ return self._filter_from_path(path).should_check(category)
+
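+# Putting the pieces together (illustrative; the rules and paths are
+# hypothetical):
+#
+#   config = FilterConfiguration(
+#       base_rules=["-"],
+#       path_specific=[(["mac/"], ["+whitespace"])],
+#       user_rules=["+build"])
+#   config.should_check("whitespace/indent", "mac/foo.cpp")  # True
+#   config.should_check("whitespace/indent", "win/foo.cpp")  # False
+#   config.should_check("build/include", "win/foo.cpp")      # True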
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/filter_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/filter_unittest.py
new file mode 100644
index 0000000..c20d998
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/filter_unittest.py
@@ -0,0 +1,256 @@
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for filter.py."""
+
+import unittest
+
+from filter import _CategoryFilter as CategoryFilter
+from filter import validate_filter_rules
+from filter import FilterConfiguration
+
+# On Testing __eq__() and __ne__():
+#
+# In the tests below, we deliberately do not use assertEqual() or
+# assertNotEqual() to test __eq__() or __ne__(). We do this to be
+# very explicit about what we are testing, especially in the case
+# of assertNotEqual().
+#
+# Part of the reason is that it is not immediately clear what
+# expression the unittest module uses to assert "not equals" -- the
+# negation of __eq__() or __ne__(), which are not necessarily
+# equivalent expressions in Python. For example, from Python's "Data
+# Model" documentation--
+#
+# "There are no implied relationships among the comparison
+# operators. The truth of x==y does not imply that x!=y is
+# false. Accordingly, when defining __eq__(), one should
+# also define __ne__() so that the operators will behave as
+# expected."
+#
+# (from http://docs.python.org/reference/datamodel.html#object.__ne__ )
+
+class ValidateFilterRulesTest(unittest.TestCase):
+
+ """Tests validate_filter_rules() function."""
+
+ def test_validate_filter_rules(self):
+ all_categories = ["tabs", "whitespace", "build/include"]
+
+ bad_rules = [
+ "tabs",
+ "*tabs",
+ " tabs",
+ " +tabs",
+ "+whitespace/newline",
+ "+xxx",
+ ]
+
+ good_rules = [
+ "+tabs",
+ "-tabs",
+ "+build"
+ ]
+
+ for rule in bad_rules:
+ self.assertRaises(ValueError, validate_filter_rules,
+ [rule], all_categories)
+
+ for rule in good_rules:
+ # This works: no error.
+ validate_filter_rules([rule], all_categories)
+
+
+class CategoryFilterTest(unittest.TestCase):
+
+ """Tests CategoryFilter class."""
+
+ def test_init(self):
+ """Test __init__ method."""
+ # Test that the attributes are getting set correctly.
+ filter = CategoryFilter(["+"])
+ self.assertEqual(["+"], filter._filter_rules)
+
+ def test_init_default_arguments(self):
+ """Test __init__ method default arguments."""
+ filter = CategoryFilter()
+ self.assertEqual([], filter._filter_rules)
+
+ def test_str(self):
+ """Test __str__ "to string" operator."""
+ filter = CategoryFilter(["+a", "-b"])
+ self.assertEqual(str(filter), "+a,-b")
+
+ def test_eq(self):
+ """Test __eq__ equality function."""
+ filter1 = CategoryFilter(["+a", "+b"])
+ filter2 = CategoryFilter(["+a", "+b"])
+ filter3 = CategoryFilter(["+b", "+a"])
+
+ # See the notes at the top of this module about testing
+ # __eq__() and __ne__().
+ self.assertTrue(filter1.__eq__(filter2))
+ self.assertFalse(filter1.__eq__(filter3))
+
+ def test_ne(self):
+ """Test __ne__ inequality function."""
+ # By default, __ne__ always returns true on different objects.
+ # Thus, just check the distinguishing case to verify that the
+ # code defines __ne__.
+ #
+ # Also, see the notes at the top of this module about testing
+ # __eq__() and __ne__().
+ self.assertFalse(CategoryFilter().__ne__(CategoryFilter()))
+
+ def test_should_check(self):
+ """Test should_check() method."""
+ filter = CategoryFilter()
+ self.assertTrue(filter.should_check("everything"))
+ # Check a second time to exercise cache.
+ self.assertTrue(filter.should_check("everything"))
+
+ filter = CategoryFilter(["-"])
+ self.assertFalse(filter.should_check("anything"))
+ # Check a second time to exercise cache.
+ self.assertFalse(filter.should_check("anything"))
+
+ filter = CategoryFilter(["-", "+ab"])
+ self.assertTrue(filter.should_check("abc"))
+ self.assertFalse(filter.should_check("a"))
+
+ filter = CategoryFilter(["+", "-ab"])
+ self.assertFalse(filter.should_check("abc"))
+ self.assertTrue(filter.should_check("a"))
+
+
+class FilterConfigurationTest(unittest.TestCase):
+
+ """Tests FilterConfiguration class."""
+
+ def _config(self, base_rules, path_specific, user_rules):
+ """Return a FilterConfiguration instance."""
+ return FilterConfiguration(base_rules=base_rules,
+ path_specific=path_specific,
+ user_rules=user_rules)
+
+ def test_init(self):
+ """Test __init__ method."""
+ # Test that the attributes are getting set correctly.
+ # We use parameter values that are different from the defaults.
+ base_rules = ["-"]
+ path_specific = [(["path"], ["+a"])]
+ user_rules = ["+"]
+
+ config = self._config(base_rules, path_specific, user_rules)
+
+ self.assertEqual(base_rules, config._base_rules)
+ self.assertEqual(path_specific, config._path_specific)
+ self.assertEqual(user_rules, config._user_rules)
+
+ def test_default_arguments(self):
+ # Test that the attributes are getting set correctly to the defaults.
+ config = FilterConfiguration()
+
+ self.assertEqual([], config._base_rules)
+ self.assertEqual([], config._path_specific)
+ self.assertEqual([], config._user_rules)
+
+ def test_eq(self):
+ """Test __eq__ method."""
+ # See the notes at the top of this module about testing
+ # __eq__() and __ne__().
+ self.assertTrue(FilterConfiguration().__eq__(FilterConfiguration()))
+
+ # Verify that a difference in any argument causes equality to fail.
+ config = FilterConfiguration()
+
+ # These parameter values are different from the defaults.
+ base_rules = ["-"]
+ path_specific = [(["path"], ["+a"])]
+ user_rules = ["+"]
+
+ self.assertFalse(config.__eq__(FilterConfiguration(
+ base_rules=base_rules)))
+ self.assertFalse(config.__eq__(FilterConfiguration(
+ path_specific=path_specific)))
+ self.assertFalse(config.__eq__(FilterConfiguration(
+ user_rules=user_rules)))
+
+ def test_ne(self):
+ """Test __ne__ method."""
+ # By default, __ne__ always returns true on different objects.
+ # Thus, just check the distinguishing case to verify that the
+ # code defines __ne__.
+ #
+ # Also, see the notes at the top of this module about testing
+ # __eq__() and __ne__().
+ self.assertFalse(FilterConfiguration().__ne__(FilterConfiguration()))
+
+ def test_base_rules(self):
+ """Test effect of base_rules on should_check()."""
+ base_rules = ["-b"]
+ path_specific = []
+ user_rules = []
+
+ config = self._config(base_rules, path_specific, user_rules)
+
+ self.assertTrue(config.should_check("a", "path"))
+ self.assertFalse(config.should_check("b", "path"))
+
+ def test_path_specific(self):
+ """Test effect of path_rules_specifier on should_check()."""
+ base_rules = ["-"]
+ path_specific = [(["path1"], ["+b"]),
+ (["path2"], ["+c"])]
+ user_rules = []
+
+ config = self._config(base_rules, path_specific, user_rules)
+
+ self.assertFalse(config.should_check("c", "path1"))
+ self.assertTrue(config.should_check("c", "path2"))
+ # Test that first match takes precedence.
+ self.assertFalse(config.should_check("c", "path2/path1"))
+
+ def test_path_with_different_case(self):
+ """Test a path that differs only in case."""
+ base_rules = ["-"]
+ path_specific = [(["Foo/"], ["+whitespace"])]
+ user_rules = []
+
+ config = self._config(base_rules, path_specific, user_rules)
+
+ self.assertFalse(config.should_check("whitespace", "Fooo/bar.txt"))
+ self.assertTrue(config.should_check("whitespace", "Foo/bar.txt"))
+ # Test different case.
+ self.assertTrue(config.should_check("whitespace", "FOO/bar.txt"))
+
+ def test_user_rules(self):
+ """Test effect of user_rules on should_check()."""
+ base_rules = ["-"]
+ path_specific = []
+ user_rules = ["+b"]
+
+ config = self._config(base_rules, path_specific, user_rules)
+
+ self.assertFalse(config.should_check("a", "path"))
+ self.assertTrue(config.should_check("b", "path"))
+
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/main.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/main.py
new file mode 100644
index 0000000..574368a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/main.py
@@ -0,0 +1,162 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import codecs
+import logging
+import sys
+
+import webkitpy.style.checker as checker
+from webkitpy.style.patchreader import PatchReader
+from webkitpy.style.checker import StyleProcessor
+from webkitpy.style.filereader import TextFileReader
+from webkitpy.common.host import Host
+
+
+_log = logging.getLogger(__name__)
+
+
+def change_directory(filesystem, checkout_root, paths):
+ """Change the working directory to the WebKit checkout root, if possible.
+
+ If every path in the paths parameter is below the checkout root (or if
+ the paths parameter is empty or None), this method changes the current
+ working directory to the checkout root and converts the paths parameter
+ as described below.
+ This allows the paths being checked to be displayed relative to the
+ checkout root, and for path-specific style checks to work as expected.
+ Path-specific checks include whether files should be skipped, whether
+ custom style rules should apply to certain files, etc.
+
+ Returns:
+ paths: A copy of the paths parameter -- possibly converted, as follows.
+ If this method changed the current working directory to the
+ checkout root, then the list is the paths parameter converted to
+ normalized paths relative to the checkout root.
+
+    Args:
+        filesystem: The FileSystem instance to use for path operations
+                    and for changing the working directory.
+        checkout_root: The path to the root of the WebKit checkout.
+        paths: A list of paths to the files that should be checked for style.
+               This argument can be None or the empty list if a git commit
+               or all changes under the checkout root should be checked.
+
+ """
+ if paths is not None:
+ paths = list(paths)
+
+ if paths:
+ # Then try converting all of the paths to paths relative to
+ # the checkout root.
+ rel_paths = []
+ for path in paths:
+ rel_path = filesystem.relpath(path, checkout_root)
+ if rel_path.startswith(filesystem.pardir):
+ # Then the path is not below the checkout root. Since all
+ # paths should be interpreted relative to the same root,
+ # do not interpret any of the paths as relative to the
+ # checkout root. Interpret all of them relative to the
+ # current working directory, and do not change the current
+ # working directory.
+ _log.warn(
+"""Path-dependent style checks may not work correctly:
+
+ One of the given paths is outside the WebKit checkout of the current
+ working directory:
+
+ Path: %s
+ Checkout root: %s
+
+ Pass only files below the checkout root to ensure correct results.
+ See the help documentation for more info.
+"""
+ % (path, checkout_root))
+
+ return paths
+ rel_paths.append(rel_path)
+ # If we got here, the conversion was successful.
+ paths = rel_paths
+
+ _log.debug("Changing to checkout root: " + checkout_root)
+ filesystem.chdir(checkout_root)
+
+ return paths
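+
+# Illustrative behavior (paths are hypothetical; see main_unittest.py):
+#
+#   change_directory(fs, '/WebKit', ['/WebKit/a.cpp', '/WebKit/b.cpp'])
+#       -> ['a.cpp', 'b.cpp'], after changing directory to /WebKit
+#   change_directory(fs, '/WebKit', ['/outside/c.cpp'])
+#       -> paths returned unchanged; a warning is logged and the working
+#          directory is left alone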
+
+
+class CheckWebKitStyle(object):
+ def _engage_awesome_stderr_hacks(self):
+ # Change stderr to write with replacement characters so we don't die
+ # if we try to print something containing non-ASCII characters.
+ stderr = codecs.StreamReaderWriter(sys.stderr,
+ codecs.getreader('utf8'),
+ codecs.getwriter('utf8'),
+ 'replace')
+ # Setting an "encoding" attribute on the stream is necessary to
+ # prevent the logging module from raising an error. See
+ # the checker.configure_logging() function for more information.
+ stderr.encoding = "UTF-8"
+
+ # FIXME: Change webkitpy.style so that we do not need to overwrite
+ # the global sys.stderr. This involves updating the code to
+ # accept a stream parameter where necessary, and not calling
+ # sys.stderr explicitly anywhere.
+ sys.stderr = stderr
+ return stderr
+
+ def main(self):
+ args = sys.argv[1:]
+
+ host = Host()
+ host.initialize_scm()
+
+ stderr = self._engage_awesome_stderr_hacks()
+
+ # Checking for the verbose flag before calling check_webkit_style_parser()
+ # lets us enable verbose logging earlier.
+ is_verbose = "-v" in args or "--verbose" in args
+
+ checker.configure_logging(stream=stderr, is_verbose=is_verbose)
+ _log.debug("Verbose logging enabled.")
+
+ parser = checker.check_webkit_style_parser()
+ (paths, options) = parser.parse(args)
+
+ configuration = checker.check_webkit_style_configuration(options)
+
+ paths = change_directory(host.filesystem, checkout_root=host.scm().checkout_root, paths=paths)
+
+ style_processor = StyleProcessor(configuration)
+ file_reader = TextFileReader(host.filesystem, style_processor)
+
+ if paths and not options.diff_files:
+ file_reader.process_paths(paths)
+ else:
+ changed_files = paths if options.diff_files else None
+ patch = host.scm().create_patch(options.git_commit, changed_files=changed_files)
+ patch_checker = PatchReader(file_reader)
+ patch_checker.check(patch)
+
+ error_count = style_processor.error_count
+ file_count = file_reader.file_count
+ delete_only_file_count = file_reader.delete_only_file_count
+
+ _log.info("Total errors found: %d in %d files" % (error_count, file_count))
+ # We fail when style errors are found or there are no checked files.
+ return error_count > 0 or (file_count == 0 and delete_only_file_count == 0)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/main_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/main_unittest.py
new file mode 100644
index 0000000..e019168
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/main_unittest.py
@@ -0,0 +1,75 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from main import change_directory
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.logtesting import LogTesting
+
+
+class ChangeDirectoryTest(unittest.TestCase):
+ _original_directory = "/original"
+ _checkout_root = "/WebKit"
+
+ def setUp(self):
+ self._log = LogTesting.setUp(self)
+ self.filesystem = MockFileSystem(dirs=[self._original_directory, self._checkout_root], cwd=self._original_directory)
+
+ def tearDown(self):
+ self._log.tearDown()
+
+ def _change_directory(self, paths, checkout_root):
+ return change_directory(self.filesystem, paths=paths, checkout_root=checkout_root)
+
+ def _assert_result(self, actual_return_value, expected_return_value,
+ expected_log_messages, expected_current_directory):
+ self.assertEqual(actual_return_value, expected_return_value)
+ self._log.assertMessages(expected_log_messages)
+ self.assertEqual(self.filesystem.getcwd(), expected_current_directory)
+
+ def test_paths_none(self):
+ paths = self._change_directory(checkout_root=self._checkout_root, paths=None)
+ self._assert_result(paths, None, [], self._checkout_root)
+
+ def test_paths_convertible(self):
+ paths = ["/WebKit/foo1.txt", "/WebKit/foo2.txt"]
+ paths = self._change_directory(checkout_root=self._checkout_root, paths=paths)
+ self._assert_result(paths, ["foo1.txt", "foo2.txt"], [], self._checkout_root)
+
+ def test_with_scm_paths_unconvertible(self):
+ paths = ["/WebKit/foo1.txt", "/outside/foo2.txt"]
+ paths = self._change_directory(checkout_root=self._checkout_root, paths=paths)
+ log_messages = [
+"""WARNING: Path-dependent style checks may not work correctly:
+
+ One of the given paths is outside the WebKit checkout of the current
+ working directory:
+
+ Path: /outside/foo2.txt
+ Checkout root: /WebKit
+
+ Pass only files below the checkout root to ensure correct results.
+ See the help documentation for more info.
+
+"""]
+ self._assert_result(paths, paths, log_messages, self._original_directory)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/optparser.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/optparser.py
new file mode 100644
index 0000000..4b638c0
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/optparser.py
@@ -0,0 +1,457 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Supports the parsing of command-line options for check-webkit-style."""
+
+import logging
+from optparse import OptionParser
+import os.path
+import sys
+
+from filter import validate_filter_rules
+# This module should not import anything from checker.py.
+
+_log = logging.getLogger(__name__)
+
+_USAGE = """usage: %prog [--help] [options] [path1] [path2] ...
+
+Overview:
+ Check coding style according to WebKit style guidelines:
+
+ http://webkit.org/coding/coding-style.html
+
+ Path arguments can be files and directories. If neither a git commit nor
+ paths are passed, then all changes in your source control working directory
+ are checked.
+
+Style errors:
+ This script assigns to every style error a confidence score from 1-5 and
+ a category name. A confidence score of 5 means the error is certainly
+ a problem, and 1 means it could be fine.
+
+ Category names appear in error messages in brackets, for example
+ [whitespace/indent]. See the options section below for an option that
+ displays all available categories and which are reported by default.
+
+Filters:
+ Use filters to configure what errors to report. Filters are specified using
+ a comma-separated list of boolean filter rules. The script reports errors
+ in a category if the category passes the filter, as described below.
+
+ All categories start out passing. Boolean filter rules are then evaluated
+ from left to right, with later rules taking precedence. For example, the
+ rule "+foo" passes any category that starts with "foo", and "-foo" fails
+ any such category. The filter input "-whitespace,+whitespace/braces" fails
+ the category "whitespace/tab" and passes "whitespace/braces".
+
+ Examples: --filter=-whitespace,+whitespace/braces
+ --filter=-whitespace,-runtime/printf,+runtime/printf_format
+ --filter=-,+build/include_what_you_use
+
+Paths:
+ Certain style-checking behavior depends on the paths relative to
+ the WebKit source root of the files being checked. For example,
+ certain types of errors may be handled differently for files in
+ WebKit/gtk/webkit/ (e.g. by suppressing "readability/naming" errors
+ for files in this directory).
+
+ Consequently, if the path relative to the source root cannot be
+ determined for a file being checked, then style checking may not
+ work correctly for that file. This can occur, for example, if no
+ WebKit checkout can be found, or if the source root can be detected,
+ but one of the files being checked lies outside the source tree.
+
+ If a WebKit checkout can be detected and all files being checked
+ are in the source tree, then all paths will automatically be
+ converted to paths relative to the source root prior to checking.
+ This is also useful for display purposes.
+
+ Currently, this command can detect the source root only if the
+ command is run from within a WebKit checkout (i.e. if the current
+ working directory is below the root of a checkout). In particular,
+ it is not recommended to run this script from a directory outside
+ a checkout.
+
+ Running this script from a top-level WebKit source directory and
+ checking only files in the source tree will ensure that all style
+ checking behaves correctly -- whether or not a checkout can be
+ detected. This is because all file paths will already be relative
+ to the source root and so will not need to be converted."""
+
+_EPILOG = ("This script can miss errors and does not substitute for "
+ "code review.")
+
+
+# This class should not have knowledge of the flag key names.
+class DefaultCommandOptionValues(object):
+
+ """Stores the default check-webkit-style command-line options.
+
+ Attributes:
+ output_format: A string that is the default output format.
+ min_confidence: An integer that is the default minimum confidence level.
+
+ """
+
+ def __init__(self, min_confidence, output_format):
+ self.min_confidence = min_confidence
+ self.output_format = output_format
+
+
+# This class should not have knowledge of the flag key names.
+class CommandOptionValues(object):
+
+ """Stores the option values passed by the user via the command line.
+
+ Attributes:
+ is_verbose: A boolean value of whether verbose logging is enabled.
+
+ filter_rules: The list of filter rules provided by the user.
+ These rules are appended to the base rules and
+ path-specific rules and so take precedence over
+ the base filter rules, etc.
+
+ git_commit: A string representing the git commit to check.
+ The default is None.
+
+ min_confidence: An integer between 1 and 5 inclusive that is the
+ minimum confidence level of style errors to report.
+ The default is 1, which reports all errors.
+
+ output_format: A string that is the output format. The supported
+ output formats are "emacs" which emacs can parse
+ and "vs7" which Microsoft Visual Studio 7 can parse.
+
+ """
+ def __init__(self,
+ filter_rules=None,
+ git_commit=None,
+ diff_files=None,
+ is_verbose=False,
+ min_confidence=1,
+ output_format="emacs"):
+ if filter_rules is None:
+ filter_rules = []
+
+ if (min_confidence < 1) or (min_confidence > 5):
+ raise ValueError('Invalid "min_confidence" parameter: value '
+ "must be an integer between 1 and 5 inclusive. "
+ 'Value given: "%s".' % min_confidence)
+
+ if output_format not in ("emacs", "vs7"):
+ raise ValueError('Invalid "output_format" parameter: '
+ 'value must be "emacs" or "vs7". '
+ 'Value given: "%s".' % output_format)
+
+ self.filter_rules = filter_rules
+ self.git_commit = git_commit
+ self.diff_files = diff_files
+ self.is_verbose = is_verbose
+ self.min_confidence = min_confidence
+ self.output_format = output_format
+
+ # Useful for unit testing.
+ def __eq__(self, other):
+ """Return whether this instance is equal to another."""
+ if self.filter_rules != other.filter_rules:
+ return False
+ if self.git_commit != other.git_commit:
+ return False
+ if self.diff_files != other.diff_files:
+ return False
+ if self.is_verbose != other.is_verbose:
+ return False
+ if self.min_confidence != other.min_confidence:
+ return False
+ if self.output_format != other.output_format:
+ return False
+
+ return True
+
+ # Useful for unit testing.
+ def __ne__(self, other):
+ # Python does not automatically deduce this from __eq__().
+ return not self.__eq__(other)
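+
+    # Illustrative construction (argument values are hypothetical):
+    #
+    #   CommandOptionValues(min_confidence=3, output_format="vs7")  # valid
+    #   CommandOptionValues(min_confidence=0)     # raises ValueError
+    #   CommandOptionValues(output_format="xml")  # raises ValueError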
+
+
+class ArgumentPrinter(object):
+
+ """Supports the printing of check-webkit-style command arguments."""
+
+ def _flag_pair_to_string(self, flag_key, flag_value):
+        return '--%(key)s=%(val)s' % {'key': flag_key, 'val': flag_value}
+
+ def to_flag_string(self, options):
+ """Return a flag string of the given CommandOptionValues instance.
+
+ This method orders the flag values alphabetically by the flag key.
+
+ Args:
+ options: A CommandOptionValues instance.
+
+ """
+ flags = {}
+ flags['min-confidence'] = options.min_confidence
+ flags['output'] = options.output_format
+ # Only include the filter flag if user-provided rules are present.
+ filter_rules = options.filter_rules
+ if filter_rules:
+ flags['filter'] = ",".join(filter_rules)
+ if options.git_commit:
+ flags['git-commit'] = options.git_commit
+ if options.diff_files:
+ flags['diff_files'] = options.diff_files
+
+ flag_string = ''
+ # Alphabetizing lets us unit test this method.
+ for key in sorted(flags.keys()):
+ flag_string += self._flag_pair_to_string(key, flags[key]) + ' '
+
+ return flag_string.strip()
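+
+    # Illustrative output (keys are alphabetized; the option values are
+    # hypothetical):
+    #
+    #   options = CommandOptionValues(filter_rules=['+foo', '-bar'],
+    #                                 git_commit='abc', min_confidence=5,
+    #                                 output_format='vs7')
+    #   ArgumentPrinter().to_flag_string(options)
+    #   -> '--filter=+foo,-bar --git-commit=abc --min-confidence=5 --output=vs7'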
+
+
+class ArgumentParser(object):
+
+ # FIXME: Move the documentation of the attributes to the __init__
+ # docstring after making the attributes internal.
+ """Supports the parsing of check-webkit-style command arguments.
+
+ Attributes:
+ create_usage: A function that accepts a DefaultCommandOptionValues
+ instance and returns a string of usage instructions.
+ Defaults to the function that generates the usage
+ string for check-webkit-style.
+ default_options: A DefaultCommandOptionValues instance that provides
+ the default values for options not explicitly
+ provided by the user.
+ stderr_write: A function that takes a string as a parameter and
+ serves as stderr.write. Defaults to sys.stderr.write.
+ This parameter should be specified only for unit tests.
+
+ """
+
+ def __init__(self,
+ all_categories,
+ default_options,
+ base_filter_rules=None,
+ mock_stderr=None,
+ usage=None):
+ """Create an ArgumentParser instance.
+
+ Args:
+ all_categories: The set of all available style categories.
+ default_options: See the corresponding attribute in the class
+ docstring.
+ Keyword Args:
+ base_filter_rules: The list of filter rules at the beginning of
+ the list of rules used to check style. This
+                               list has the lowest precedence when checking
+ style and precedes any user-provided rules.
+ The class uses this parameter only for display
+ purposes to the user. Defaults to the empty list.
+            mock_stderr: An object with a write() method to use in place
+                          of sys.stderr. This parameter should be specified
+                          only for unit tests.
+            usage: A usage string to display with --help. Defaults to the
+                          usage string for check-webkit-style.
+
+ """
+ if base_filter_rules is None:
+ base_filter_rules = []
+ stderr = sys.stderr if mock_stderr is None else mock_stderr
+ if usage is None:
+ usage = _USAGE
+
+ self._all_categories = all_categories
+ self._base_filter_rules = base_filter_rules
+
+ # FIXME: Rename these to reflect that they are internal.
+ self.default_options = default_options
+ self.stderr_write = stderr.write
+
+ self._parser = self._create_option_parser(stderr=stderr,
+ usage=usage,
+ default_min_confidence=self.default_options.min_confidence,
+ default_output_format=self.default_options.output_format)
+
+ def _create_option_parser(self, stderr, usage,
+ default_min_confidence, default_output_format):
+ # Since the epilog string is short, it is not necessary to replace
+ # the epilog string with a mock epilog string when testing.
+ # For this reason, we use _EPILOG directly rather than passing it
+ # as an argument like we do for the usage string.
+ parser = OptionParser(usage=usage, epilog=_EPILOG)
+
+ filter_help = ('set a filter to control what categories of style '
+ 'errors to report. Specify a filter using a comma-'
+ 'delimited list of boolean filter rules, for example '
+ '"--filter -whitespace,+whitespace/braces". To display '
+ 'all categories and which are enabled by default, pass '
+ """no value (e.g. '-f ""' or '--filter=').""")
+ parser.add_option("-f", "--filter-rules", metavar="RULES",
+ dest="filter_value", help=filter_help)
+
+ git_commit_help = ("check all changes in the given commit. "
+ "Use 'commit_id..' to check all changes after commmit_id")
+ parser.add_option("-g", "--git-diff", "--git-commit",
+ metavar="COMMIT", dest="git_commit", help=git_commit_help,)
+
+ diff_files_help = "diff the files passed on the command line rather than checking the style of every line"
+ parser.add_option("--diff-files", action="store_true", dest="diff_files", default=False, help=diff_files_help)
+
+ min_confidence_help = ("set the minimum confidence of style errors "
+ "to report. Can be an integer 1-5, with 1 "
+ "displaying all errors. Defaults to %default.")
+ parser.add_option("-m", "--min-confidence", metavar="INT",
+ type="int", dest="min_confidence",
+ default=default_min_confidence,
+ help=min_confidence_help)
+
+ output_format_help = ('set the output format, which can be "emacs" '
+ 'or "vs7" (for Visual Studio). '
+ 'Defaults to "%default".')
+ parser.add_option("-o", "--output-format", metavar="FORMAT",
+ choices=["emacs", "vs7"],
+ dest="output_format", default=default_output_format,
+ help=output_format_help)
+
+ verbose_help = "enable verbose logging."
+ parser.add_option("-v", "--verbose", dest="is_verbose", default=False,
+ action="store_true", help=verbose_help)
+
+ # Override OptionParser's error() method so that option help will
+ # also display when an error occurs. Normally, just the usage
+ # string displays and not option help.
+ parser.error = self._parse_error
+
+ # Override OptionParser's print_help() method so that help output
+ # does not render to the screen while running unit tests.
+ print_help = parser.print_help
+ parser.print_help = lambda file=stderr: print_help(file=file)
+
+ return parser
+
+ def _parse_error(self, error_message):
+ """Print the help string and an error message, and exit."""
+ # The method format_help() includes both the usage string and
+ # the flag options.
+ help = self._parser.format_help()
+ # Separate help from the error message with a single blank line.
+ self.stderr_write(help + "\n")
+ if error_message:
+ _log.error(error_message)
+
+ # Since we are using this method to replace/override the Python
+ # module optparse's OptionParser.error() method, we match its
+ # behavior and exit with status code 2.
+ #
+ # As additional background, Python documentation says--
+ #
+ # "Unix programs generally use 2 for command line syntax errors
+ # and 1 for all other kind of errors."
+ #
+ # (from http://docs.python.org/library/sys.html#sys.exit )
+ sys.exit(2)
+
+ def _exit_with_categories(self):
+ """Exit and print the style categories and default filter rules."""
+ self.stderr_write('\nAll categories:\n')
+ for category in sorted(self._all_categories):
+ self.stderr_write(' ' + category + '\n')
+
+ self.stderr_write('\nDefault filter rules**:\n')
+ for filter_rule in sorted(self._base_filter_rules):
+ self.stderr_write(' ' + filter_rule + '\n')
+        self.stderr_write('\n**The command always evaluates the above rules, '
+                          'before applying any --filter flag.\n\n')
+
+ sys.exit(0)
+
+ def _parse_filter_flag(self, flag_value):
+ """Parse the --filter flag, and return a list of filter rules.
+
+ Args:
+ flag_value: A string of comma-separated filter rules, for
+ example "-whitespace,+whitespace/indent".
+
+ """
+ filters = []
+ for uncleaned_filter in flag_value.split(','):
+ filter = uncleaned_filter.strip()
+ if not filter:
+ continue
+ filters.append(filter)
+ return filters
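+
+    # For example (illustrative):
+    #
+    #   _parse_filter_flag('+build, -whitespace ,')
+    #   -> ['+build', '-whitespace']
+    #
+    # Empty fragments and surrounding whitespace are discarded.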
+
+ def parse(self, args):
+ """Parse the command line arguments to check-webkit-style.
+
+ Args:
+ args: A list of command-line arguments as returned by sys.argv[1:].
+
+ Returns:
+ A tuple of (paths, options)
+
+ paths: The list of paths to check.
+ options: A CommandOptionValues instance.
+
+ """
+ (options, paths) = self._parser.parse_args(args=args)
+
+ filter_value = options.filter_value
+ git_commit = options.git_commit
+ diff_files = options.diff_files
+ is_verbose = options.is_verbose
+ min_confidence = options.min_confidence
+ output_format = options.output_format
+
+ if filter_value is not None and not filter_value:
+ # Then the user explicitly passed no filter, for
+ # example "-f ''" or "--filter=".
+ self._exit_with_categories()
+
+ # Validate user-provided values.
+
+ min_confidence = int(min_confidence)
+ if (min_confidence < 1) or (min_confidence > 5):
+ self._parse_error('option --min-confidence: invalid integer: '
+ '%s: value must be between 1 and 5'
+ % min_confidence)
+
+ if filter_value:
+ filter_rules = self._parse_filter_flag(filter_value)
+ else:
+ filter_rules = []
+
+ try:
+ validate_filter_rules(filter_rules, self._all_categories)
+ except ValueError, err:
+ self._parse_error(err)
+
+ options = CommandOptionValues(filter_rules=filter_rules,
+ git_commit=git_commit,
+ diff_files=diff_files,
+ is_verbose=is_verbose,
+ min_confidence=min_confidence,
+ output_format=output_format)
+
+ return (paths, options)
+
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/optparser_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/optparser_unittest.py
new file mode 100644
index 0000000..d72479f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/optparser_unittest.py
@@ -0,0 +1,258 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for parser.py."""
+
+import unittest
+
+from webkitpy.common.system.logtesting import LoggingTestCase
+from webkitpy.style.optparser import ArgumentParser
+from webkitpy.style.optparser import ArgumentPrinter
+from webkitpy.style.optparser import CommandOptionValues as ProcessorOptions
+from webkitpy.style.optparser import DefaultCommandOptionValues
+
+
+class ArgumentPrinterTest(unittest.TestCase):
+
+ """Tests the ArgumentPrinter class."""
+
+ _printer = ArgumentPrinter()
+
+ def _create_options(self,
+ output_format='emacs',
+ min_confidence=3,
+ filter_rules=[],
+ git_commit=None):
+ return ProcessorOptions(filter_rules=filter_rules,
+ git_commit=git_commit,
+ min_confidence=min_confidence,
+ output_format=output_format)
+
+ def test_to_flag_string(self):
+ options = self._create_options('vs7', 5, ['+foo', '-bar'], 'git')
+ self.assertEqual('--filter=+foo,-bar --git-commit=git '
+ '--min-confidence=5 --output=vs7',
+ self._printer.to_flag_string(options))
+
+ # This is to check that --filter and --git-commit do not
+ # show up when not user-specified.
+ options = self._create_options()
+ self.assertEqual('--min-confidence=3 --output=emacs',
+ self._printer.to_flag_string(options))
+
+
+class ArgumentParserTest(LoggingTestCase):
+
+ """Test the ArgumentParser class."""
+
+ class _MockStdErr(object):
+
+ def write(self, message):
+ # We do not want the usage string or style categories
+ # to print during unit tests, so print nothing.
+ return
+
+ def _parse(self, args):
+ """Call a test parser.parse()."""
+ parser = self._create_parser()
+ return parser.parse(args)
+
+ def _create_defaults(self):
+ """Return a DefaultCommandOptionValues instance for testing."""
+ base_filter_rules = ["-", "+whitespace"]
+ return DefaultCommandOptionValues(min_confidence=3,
+ output_format="vs7")
+
+ def _create_parser(self):
+ """Return an ArgumentParser instance for testing."""
+ default_options = self._create_defaults()
+
+ all_categories = ["build" ,"whitespace"]
+
+ mock_stderr = self._MockStdErr()
+
+ return ArgumentParser(all_categories=all_categories,
+ base_filter_rules=[],
+ default_options=default_options,
+ mock_stderr=mock_stderr,
+ usage="test usage")
+
+ def test_parse_documentation(self):
+ parse = self._parse
+
+ # FIXME: Test both the printing of the usage string and the
+ # filter categories help.
+
+ # Request the usage string.
+ self.assertRaises(SystemExit, parse, ['--help'])
+ # Request default filter rules and available style categories.
+ self.assertRaises(SystemExit, parse, ['--filter='])
+
+ def test_parse_bad_values(self):
+ parse = self._parse
+
+ # Pass an unsupported argument.
+ self.assertRaises(SystemExit, parse, ['--bad'])
+ self.assertLog(['ERROR: no such option: --bad\n'])
+
+ self.assertRaises(SystemExit, parse, ['--min-confidence=bad'])
+ self.assertLog(['ERROR: option --min-confidence: '
+ "invalid integer value: 'bad'\n"])
+ self.assertRaises(SystemExit, parse, ['--min-confidence=0'])
+ self.assertLog(['ERROR: option --min-confidence: invalid integer: 0: '
+ 'value must be between 1 and 5\n'])
+ self.assertRaises(SystemExit, parse, ['--min-confidence=6'])
+ self.assertLog(['ERROR: option --min-confidence: invalid integer: 6: '
+ 'value must be between 1 and 5\n'])
+ parse(['--min-confidence=1']) # works
+ parse(['--min-confidence=5']) # works
+
+ self.assertRaises(SystemExit, parse, ['--output=bad'])
+ self.assertLog(['ERROR: option --output-format: invalid choice: '
+ "'bad' (choose from 'emacs', 'vs7')\n"])
+ parse(['--output=vs7']) # works
+
+ # Pass a filter rule not beginning with + or -.
+ self.assertRaises(SystemExit, parse, ['--filter=build'])
+ self.assertLog(['ERROR: Invalid filter rule "build": '
+ 'every rule must start with + or -.\n'])
+ parse(['--filter=+build']) # works
+
+ def test_parse_default_arguments(self):
+ parse = self._parse
+
+ (files, options) = parse([])
+
+ self.assertEqual(files, [])
+
+ self.assertEqual(options.filter_rules, [])
+ self.assertIsNone(options.git_commit)
+ self.assertFalse(options.diff_files)
+ self.assertFalse(options.is_verbose)
+ self.assertEqual(options.min_confidence, 3)
+ self.assertEqual(options.output_format, 'vs7')
+
+ def test_parse_explicit_arguments(self):
+ parse = self._parse
+
+ # Pass non-default explicit values.
+ (files, options) = parse(['--min-confidence=4'])
+ self.assertEqual(options.min_confidence, 4)
+ (files, options) = parse(['--output=emacs'])
+ self.assertEqual(options.output_format, 'emacs')
+ (files, options) = parse(['-g', 'commit'])
+ self.assertEqual(options.git_commit, 'commit')
+ (files, options) = parse(['--git-commit=commit'])
+ self.assertEqual(options.git_commit, 'commit')
+ (files, options) = parse(['--git-diff=commit'])
+ self.assertEqual(options.git_commit, 'commit')
+ (files, options) = parse(['--verbose'])
+ self.assertTrue(options.is_verbose)
+ (files, options) = parse(['--diff-files', 'file.txt'])
+ self.assertTrue(options.diff_files)
+
+ # Pass user_rules.
+ (files, options) = parse(['--filter=+build,-whitespace'])
+ self.assertEqual(options.filter_rules,
+ ["+build", "-whitespace"])
+
+ # Pass spurious white space in user rules.
+ (files, options) = parse(['--filter=+build, -whitespace'])
+ self.assertEqual(options.filter_rules,
+ ["+build", "-whitespace"])
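+        # A sketch of the splitting/stripping this behavior implies
+        # (hypothetical helper, not the actual optparser implementation):
+        #
+        #   def split_rules(flag_value):
+        #       return [rule.strip() for rule in flag_value.split(',')]
+        #
+        #   split_rules('+build, -whitespace')  # ['+build', '-whitespace']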
+
+ def test_parse_files(self):
+ parse = self._parse
+
+ (files, options) = parse(['foo.cpp'])
+ self.assertEqual(files, ['foo.cpp'])
+
+ # Pass multiple files.
+ (files, options) = parse(['--output=emacs', 'foo.cpp', 'bar.cpp'])
+ self.assertEqual(files, ['foo.cpp', 'bar.cpp'])
+
+
+class CommandOptionValuesTest(unittest.TestCase):
+
+ """Tests CommandOptionValues class."""
+
+ def test_init(self):
+ """Test __init__ constructor."""
+ # Check default parameters.
+ options = ProcessorOptions()
+ self.assertEqual(options.filter_rules, [])
+ self.assertIsNone(options.git_commit)
+ self.assertFalse(options.is_verbose)
+ self.assertEqual(options.min_confidence, 1)
+ self.assertEqual(options.output_format, "emacs")
+
+ # Check argument validation.
+ self.assertRaises(ValueError, ProcessorOptions, output_format="bad")
+ ProcessorOptions(output_format="emacs") # No ValueError: works
+ ProcessorOptions(output_format="vs7") # works
+ self.assertRaises(ValueError, ProcessorOptions, min_confidence=0)
+ self.assertRaises(ValueError, ProcessorOptions, min_confidence=6)
+ ProcessorOptions(min_confidence=1) # works
+ ProcessorOptions(min_confidence=5) # works
+
+ # Check attributes.
+ options = ProcessorOptions(filter_rules=["+"],
+ git_commit="commit",
+ is_verbose=True,
+ min_confidence=3,
+ output_format="vs7")
+ self.assertEqual(options.filter_rules, ["+"])
+ self.assertEqual(options.git_commit, "commit")
+ self.assertTrue(options.is_verbose)
+ self.assertEqual(options.min_confidence, 3)
+ self.assertEqual(options.output_format, "vs7")
+
+ def test_eq(self):
+ """Test __eq__ equality function."""
+ self.assertTrue(ProcessorOptions().__eq__(ProcessorOptions()))
+
+ # Also verify that a difference in any argument causes equality to fail.
+
+ # Explicitly create a ProcessorOptions instance with all default
+ # values. We do this to be sure we are assuming the right default
+ # values in our self.assertFalse() calls below.
+ options = ProcessorOptions(filter_rules=[],
+ git_commit=None,
+ is_verbose=False,
+ min_confidence=1,
+ output_format="emacs")
+ # Verify that we created options correctly.
+ self.assertTrue(options.__eq__(ProcessorOptions()))
+
+ self.assertFalse(options.__eq__(ProcessorOptions(filter_rules=["+"])))
+ self.assertFalse(options.__eq__(ProcessorOptions(git_commit="commit")))
+ self.assertFalse(options.__eq__(ProcessorOptions(is_verbose=True)))
+ self.assertFalse(options.__eq__(ProcessorOptions(min_confidence=2)))
+ self.assertFalse(options.__eq__(ProcessorOptions(output_format="vs7")))
+
+    def test_ne(self):
+        """Test the __ne__ inequality function."""
+        # Python's default __ne__ returns True for any two distinct
+        # objects, so checking that two equal-valued instances are not
+        # unequal is enough to verify that the class defines __ne__.
+        self.assertFalse(ProcessorOptions().__ne__(ProcessorOptions()))
+
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/patchreader.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/patchreader.py
new file mode 100644
index 0000000..8495cd0
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/patchreader.py
@@ -0,0 +1,83 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+# Copyright (C) 2010 ProFUSION embedded systems
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import re
+
+from webkitpy.common.checkout.diff_parser import DiffParser
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.checkout.scm.detection import SCMDetector
+
+
+_log = logging.getLogger(__name__)
+
+
+class PatchReader(object):
+ """Supports checking style in patches."""
+
+ def __init__(self, text_file_reader):
+ """Create a PatchReader instance.
+
+ Args:
+ text_file_reader: A TextFileReader instance.
+
+ """
+ self._text_file_reader = text_file_reader
+
+ def check(self, patch_string, fs=None):
+ """Check style in the given patch."""
+ fs = fs or FileSystem()
+ patch_files = DiffParser(patch_string.splitlines()).files
+
+        # If the user uses git, checking the subversion config file only once is enough.
+ call_only_once = True
+
+ for path, diff_file in patch_files.iteritems():
+ line_numbers = diff_file.added_or_modified_line_numbers()
+ _log.debug('Found %s new or modified lines in: %s' % (len(line_numbers), path))
+
+ if not line_numbers:
+                match = re.search(r"\s*png$", path)
+ if match and fs.exists(path):
+ if call_only_once:
+ self._text_file_reader.process_file(file_path=path, line_numbers=None)
+                        cwd = fs.getcwd()
+ detection = SCMDetector(fs, Executive()).detect_scm_system(cwd)
+ if detection.display_name() == "git":
+ call_only_once = False
+ continue
+                # Don't check files which contain only deleted lines,
+                # as they can never add style errors. However, mark them as
+                # processed so that we count the number of such files.
+ self._text_file_reader.count_delete_only_file()
+ continue
+
+ self._text_file_reader.process_file(file_path=path, line_numbers=line_numbers)
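+
+# A minimal usage sketch, assuming a TextFileReader-like object; the reader
+# class below is hypothetical, for illustration only:
+#
+#   class PrintingReader(object):
+#       def process_file(self, file_path, line_numbers):
+#           print file_path, line_numbers
+#       def count_delete_only_file(self):
+#           pass
+#
+#   PatchReader(PrintingReader()).check(patch_text)  # patch_text: a unified diff string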
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/style/patchreader_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/style/patchreader_unittest.py
new file mode 100644
index 0000000..05d36d9
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/style/patchreader_unittest.py
@@ -0,0 +1,101 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2009 Torch Mobile Inc.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.style.patchreader import PatchReader
+
+
+class PatchReaderTest(unittest.TestCase):
+
+ """Test the PatchReader class."""
+
+ class MockTextFileReader(object):
+
+ def __init__(self):
+ self.passed_to_process_file = []
+ """A list of (file_path, line_numbers) pairs."""
+ self.delete_only_file_count = 0
+ """A number of times count_delete_only_file() called"""
+
+ def process_file(self, file_path, line_numbers):
+ self.passed_to_process_file.append((file_path, line_numbers))
+
+ def count_delete_only_file(self):
+ self.delete_only_file_count += 1
+
+ def setUp(self):
+ file_reader = self.MockTextFileReader()
+ self._file_reader = file_reader
+ self._patch_checker = PatchReader(file_reader)
+
+ def _call_check_patch(self, patch_string):
+ self._patch_checker.check(patch_string)
+
+ def _assert_checked(self, passed_to_process_file, delete_only_file_count):
+ self.assertEqual(self._file_reader.passed_to_process_file,
+ passed_to_process_file)
+ self.assertEqual(self._file_reader.delete_only_file_count,
+ delete_only_file_count)
+
+ def test_check_patch(self):
+ # The modified line_numbers array for this patch is: [2].
+ self._call_check_patch("""diff --git a/__init__.py b/__init__.py
+index ef65bee..e3db70e 100644
+--- a/__init__.py
++++ b/__init__.py
+@@ -1,1 +1,2 @@
+ # Required for Python to search this directory for module files
++# New line
+""")
+ self._assert_checked([("__init__.py", [2])], 0)
+
+ def test_check_patch_with_deletion(self):
+ self._call_check_patch("""Index: __init__.py
+===================================================================
+--- __init__.py (revision 3593)
++++ __init__.py (working copy)
+@@ -1 +0,0 @@
+-foobar
+""")
+        # process_file() should not be called for the deletion patch.
+ self._assert_checked([], 1)
+
+ def test_check_patch_with_png_deletion(self):
+ fs = MockFileSystem()
+ diff_text = """Index: LayoutTests/platform/mac/foo-expected.png
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = image/png
+"""
+ self._patch_checker.check(diff_text, fs)
+ self._assert_checked([], 1)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/BeautifulSoup.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/BeautifulSoup.py
new file mode 100644
index 0000000..4b17b85
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/BeautifulSoup.py
@@ -0,0 +1,2014 @@
+"""Beautiful Soup
+Elixir and Tonic
+"The Screen-Scraper's Friend"
+http://www.crummy.com/software/BeautifulSoup/
+
+Beautiful Soup parses a (possibly invalid) XML or HTML document into a
+tree representation. It provides methods and Pythonic idioms that make
+it easy to navigate, search, and modify the tree.
+
+A well-formed XML/HTML document yields a well-formed data
+structure. An ill-formed XML/HTML document yields a correspondingly
+ill-formed data structure. If your document is only locally
+well-formed, you can use this library to find and process the
+well-formed part of it.
+
+Beautiful Soup works with Python 2.2 and up. It has no external
+dependencies, but you'll have more success at converting data to UTF-8
+if you also install these three packages:
+
+* chardet, for auto-detecting character encodings
+ http://chardet.feedparser.org/
+* cjkcodecs and iconv_codec, which add more encodings to the ones supported
+ by stock Python.
+ http://cjkpython.i18n.org/
+
+Beautiful Soup defines classes for two main parsing strategies:
+
+ * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
+ language that kind of looks like XML.
+
+ * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
+ or invalid. This class has web browser-like heuristics for
+ obtaining a sensible parse tree in the face of common HTML errors.
+
+Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
+the encoding of an HTML or XML document, and converting it to
+Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
+
+For more than you ever wanted to know about Beautiful Soup, see the
+documentation:
+http://www.crummy.com/software/BeautifulSoup/documentation.html
+
+Here, have some legalese:
+
+Copyright (c) 2004-2010, Leonard Richardson
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of the the Beautiful Soup Consortium and All
+ Night Kosher Bakery nor the names of its contributors may be
+ used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
+
+"""
+from __future__ import generators
+
+__author__ = "Leonard Richardson (leonardr@segfault.org)"
+__version__ = "3.2.0"
+__copyright__ = "Copyright (c) 2004-2010 Leonard Richardson"
+__license__ = "New-style BSD"
+
+from sgmllib import SGMLParser, SGMLParseError
+import codecs
+import markupbase
+import types
+import re
+import sgmllib
+try:
+ from htmlentitydefs import name2codepoint
+except ImportError:
+ name2codepoint = {}
+try:
+ set
+except NameError:
+ from sets import Set as set
+
+#These hacks make Beautiful Soup able to parse XML with namespaces
+sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
+markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
+
+DEFAULT_OUTPUT_ENCODING = "utf-8"
+
+def _match_css_class(str):
+ """Build a RE to match the given CSS class."""
+ return re.compile(r"(^|.*\s)%s($|\s)" % str)
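+# For example, _match_css_class("foo").match("foo") and
+# _match_css_class("foo").match("bar foo") both succeed, while "foobar" does
+# not: the class name must be delimited by whitespace or the value's ends.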
+
+# First, the classes that represent markup elements.
+
+class PageElement(object):
+ """Contains the navigational information for some part of the page
+ (either a tag or a piece of text)"""
+
+ def setup(self, parent=None, previous=None):
+ """Sets up the initial relations between this element and
+ other elements."""
+ self.parent = parent
+ self.previous = previous
+ self.next = None
+ self.previousSibling = None
+ self.nextSibling = None
+ if self.parent and self.parent.contents:
+ self.previousSibling = self.parent.contents[-1]
+ self.previousSibling.nextSibling = self
+
+ def replaceWith(self, replaceWith):
+ oldParent = self.parent
+ myIndex = self.parent.index(self)
+ if hasattr(replaceWith, "parent")\
+ and replaceWith.parent is self.parent:
+ # We're replacing this element with one of its siblings.
+ index = replaceWith.parent.index(replaceWith)
+ if index and index < myIndex:
+ # Furthermore, it comes before this element. That
+ # means that when we extract it, the index of this
+ # element will change.
+ myIndex = myIndex - 1
+ self.extract()
+ oldParent.insert(myIndex, replaceWith)
+
+ def replaceWithChildren(self):
+ myParent = self.parent
+ myIndex = self.parent.index(self)
+ self.extract()
+ reversedChildren = list(self.contents)
+ reversedChildren.reverse()
+ for child in reversedChildren:
+ myParent.insert(myIndex, child)
+
+ def extract(self):
+ """Destructively rips this element out of the tree."""
+ if self.parent:
+ try:
+ del self.parent.contents[self.parent.index(self)]
+ except ValueError:
+ pass
+
+ #Find the two elements that would be next to each other if
+ #this element (and any children) hadn't been parsed. Connect
+ #the two.
+ lastChild = self._lastRecursiveChild()
+ nextElement = lastChild.next
+
+ if self.previous:
+ self.previous.next = nextElement
+ if nextElement:
+ nextElement.previous = self.previous
+ self.previous = None
+ lastChild.next = None
+
+ self.parent = None
+ if self.previousSibling:
+ self.previousSibling.nextSibling = self.nextSibling
+ if self.nextSibling:
+ self.nextSibling.previousSibling = self.previousSibling
+ self.previousSibling = self.nextSibling = None
+ return self
+
+ def _lastRecursiveChild(self):
+ "Finds the last element beneath this object to be parsed."
+ lastChild = self
+ while hasattr(lastChild, 'contents') and lastChild.contents:
+ lastChild = lastChild.contents[-1]
+ return lastChild
+
+ def insert(self, position, newChild):
+ if isinstance(newChild, basestring) \
+ and not isinstance(newChild, NavigableString):
+ newChild = NavigableString(newChild)
+
+ position = min(position, len(self.contents))
+ if hasattr(newChild, 'parent') and newChild.parent is not None:
+ # We're 'inserting' an element that's already one
+ # of this object's children.
+ if newChild.parent is self:
+ index = self.index(newChild)
+ if index > position:
+ # Furthermore we're moving it further down the
+ # list of this object's children. That means that
+ # when we extract this element, our target index
+ # will jump down one.
+ position = position - 1
+ newChild.extract()
+
+ newChild.parent = self
+ previousChild = None
+ if position == 0:
+ newChild.previousSibling = None
+ newChild.previous = self
+ else:
+ previousChild = self.contents[position-1]
+ newChild.previousSibling = previousChild
+ newChild.previousSibling.nextSibling = newChild
+ newChild.previous = previousChild._lastRecursiveChild()
+ if newChild.previous:
+ newChild.previous.next = newChild
+
+ newChildsLastElement = newChild._lastRecursiveChild()
+
+ if position >= len(self.contents):
+ newChild.nextSibling = None
+
+ parent = self
+ parentsNextSibling = None
+ while not parentsNextSibling:
+ parentsNextSibling = parent.nextSibling
+ parent = parent.parent
+ if not parent: # This is the last element in the document.
+ break
+ if parentsNextSibling:
+ newChildsLastElement.next = parentsNextSibling
+ else:
+ newChildsLastElement.next = None
+ else:
+ nextChild = self.contents[position]
+ newChild.nextSibling = nextChild
+ if newChild.nextSibling:
+ newChild.nextSibling.previousSibling = newChild
+ newChildsLastElement.next = nextChild
+
+ if newChildsLastElement.next:
+ newChildsLastElement.next.previous = newChildsLastElement
+ self.contents.insert(position, newChild)
+
+ def append(self, tag):
+ """Appends the given tag to the contents of this tag."""
+ self.insert(len(self.contents), tag)
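+        # For example, tag.append('!') adds '!' as the last child; insert()
+        # wraps a plain string in a NavigableString automatically.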
+
+ def findNext(self, name=None, attrs={}, text=None, **kwargs):
+ """Returns the first item that matches the given criteria and
+ appears after this Tag in the document."""
+ return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
+
+ def findAllNext(self, name=None, attrs={}, text=None, limit=None,
+ **kwargs):
+ """Returns all items that match the given criteria and appear
+ after this Tag in the document."""
+ return self._findAll(name, attrs, text, limit, self.nextGenerator,
+ **kwargs)
+
+ def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
+ """Returns the closest sibling to this Tag that matches the
+ given criteria and appears after this Tag in the document."""
+ return self._findOne(self.findNextSiblings, name, attrs, text,
+ **kwargs)
+
+ def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
+ **kwargs):
+ """Returns the siblings of this Tag that match the given
+ criteria and appear after this Tag in the document."""
+ return self._findAll(name, attrs, text, limit,
+ self.nextSiblingGenerator, **kwargs)
+ fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
+
+ def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
+ """Returns the first item that matches the given criteria and
+ appears before this Tag in the document."""
+ return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
+
+ def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
+ **kwargs):
+ """Returns all items that match the given criteria and appear
+ before this Tag in the document."""
+ return self._findAll(name, attrs, text, limit, self.previousGenerator,
+ **kwargs)
+ fetchPrevious = findAllPrevious # Compatibility with pre-3.x
+
+ def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
+ """Returns the closest sibling to this Tag that matches the
+ given criteria and appears before this Tag in the document."""
+ return self._findOne(self.findPreviousSiblings, name, attrs, text,
+ **kwargs)
+
+ def findPreviousSiblings(self, name=None, attrs={}, text=None,
+ limit=None, **kwargs):
+ """Returns the siblings of this Tag that match the given
+ criteria and appear before this Tag in the document."""
+ return self._findAll(name, attrs, text, limit,
+ self.previousSiblingGenerator, **kwargs)
+ fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x
+
+ def findParent(self, name=None, attrs={}, **kwargs):
+ """Returns the closest parent of this Tag that matches the given
+ criteria."""
+ # NOTE: We can't use _findOne because findParents takes a different
+ # set of arguments.
+ r = None
+ l = self.findParents(name, attrs, 1)
+ if l:
+ r = l[0]
+ return r
+
+ def findParents(self, name=None, attrs={}, limit=None, **kwargs):
+ """Returns the parents of this Tag that match the given
+ criteria."""
+
+ return self._findAll(name, attrs, None, limit, self.parentGenerator,
+ **kwargs)
+ fetchParents = findParents # Compatibility with pre-3.x
+
+ #These methods do the real heavy lifting.
+
+ def _findOne(self, method, name, attrs, text, **kwargs):
+ r = None
+ l = method(name, attrs, text, 1, **kwargs)
+ if l:
+ r = l[0]
+ return r
+
+ def _findAll(self, name, attrs, text, limit, generator, **kwargs):
+ "Iterates over a generator looking for things that match."
+
+ if isinstance(name, SoupStrainer):
+ strainer = name
+ # (Possibly) special case some findAll*(...) searches
+ elif text is None and not limit and not attrs and not kwargs:
+ # findAll*(True)
+ if name is True:
+ return [element for element in generator()
+ if isinstance(element, Tag)]
+ # findAll*('tag-name')
+ elif isinstance(name, basestring):
+ return [element for element in generator()
+ if isinstance(element, Tag) and
+ element.name == name]
+ else:
+ strainer = SoupStrainer(name, attrs, text, **kwargs)
+        # Build a SoupStrainer
+ else:
+ strainer = SoupStrainer(name, attrs, text, **kwargs)
+ results = ResultSet(strainer)
+ g = generator()
+ while True:
+ try:
+ i = g.next()
+ except StopIteration:
+ break
+ if i:
+ found = strainer.search(i)
+ if found:
+ results.append(found)
+ if limit and len(results) >= limit:
+ break
+ return results
+
+ #These Generators can be used to navigate starting from both
+ #NavigableStrings and Tags.
+ def nextGenerator(self):
+ i = self
+ while i is not None:
+ i = i.next
+ yield i
+
+ def nextSiblingGenerator(self):
+ i = self
+ while i is not None:
+ i = i.nextSibling
+ yield i
+
+ def previousGenerator(self):
+ i = self
+ while i is not None:
+ i = i.previous
+ yield i
+
+ def previousSiblingGenerator(self):
+ i = self
+ while i is not None:
+ i = i.previousSibling
+ yield i
+
+ def parentGenerator(self):
+ i = self
+ while i is not None:
+ i = i.parent
+ yield i
+
+ # Utility methods
+ def substituteEncoding(self, str, encoding=None):
+ encoding = encoding or "utf-8"
+ return str.replace("%SOUP-ENCODING%", encoding)
+
+ def toEncoding(self, s, encoding=None):
+ """Encodes an object to a string in some encoding, or to Unicode.
+ ."""
+ if isinstance(s, unicode):
+ if encoding:
+ s = s.encode(encoding)
+ elif isinstance(s, str):
+ if encoding:
+ s = s.encode(encoding)
+ else:
+ s = unicode(s)
+ else:
+ if encoding:
+ s = self.toEncoding(str(s), encoding)
+ else:
+ s = unicode(s)
+ return s
+
+class NavigableString(unicode, PageElement):
+
+ def __new__(cls, value):
+ """Create a new NavigableString.
+
+ When unpickling a NavigableString, this method is called with
+ the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
+ passed in to the superclass's __new__ or the superclass won't know
+ how to handle non-ASCII characters.
+ """
+ if isinstance(value, unicode):
+ return unicode.__new__(cls, value)
+ return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
+
+ def __getnewargs__(self):
+ return (NavigableString.__str__(self),)
+
+ def __getattr__(self, attr):
+ """text.string gives you text. This is for backwards
+ compatibility for Navigable*String, but for CData* it lets you
+ get the string without the CData wrapper."""
+ if attr == 'string':
+ return self
+ else:
+ raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
+
+ def __unicode__(self):
+ return str(self).decode(DEFAULT_OUTPUT_ENCODING)
+
+ def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
+ if encoding:
+ return self.encode(encoding)
+ else:
+ return self
+
+class CData(NavigableString):
+
+ def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
+ return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding)
+
+class ProcessingInstruction(NavigableString):
+ def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
+ output = self
+ if "%SOUP-ENCODING%" in output:
+ output = self.substituteEncoding(output, encoding)
+ return "<?%s?>" % self.toEncoding(output, encoding)
+
+class Comment(NavigableString):
+ def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
+ return "<!--%s-->" % NavigableString.__str__(self, encoding)
+
+class Declaration(NavigableString):
+ def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
+ return "<!%s>" % NavigableString.__str__(self, encoding)
+
+class Tag(PageElement):
+
+ """Represents a found HTML tag with its attributes and contents."""
+
+ def _invert(h):
+ "Cheap function to invert a hash."
+ i = {}
+ for k,v in h.items():
+ i[v] = k
+ return i
+
+ XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
+ "quot" : '"',
+ "amp" : "&",
+ "lt" : "<",
+ "gt" : ">" }
+
+ XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
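+    # For example, XML_SPECIAL_CHARS_TO_ENTITIES['&'] == 'amp': the inverse
+    # mapping is used when escaping special characters back into entities.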
+
+ def _convertEntities(self, match):
+ """Used in a call to re.sub to replace HTML, XML, and numeric
+ entities with the appropriate Unicode characters. If HTML
+ entities are being converted, any unrecognized entities are
+ escaped."""
+ x = match.group(1)
+ if self.convertHTMLEntities and x in name2codepoint:
+ return unichr(name2codepoint[x])
+ elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
+ if self.convertXMLEntities:
+ return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
+ else:
+ return u'&%s;' % x
+ elif len(x) > 0 and x[0] == '#':
+ # Handle numeric entities
+ if len(x) > 1 and x[1] == 'x':
+ return unichr(int(x[2:], 16))
+ else:
+ return unichr(int(x[1:]))
+
+ elif self.escapeUnrecognizedEntities:
+ return u'&%s;' % x
+ else:
+ return u'&%s;' % x
+
+ def __init__(self, parser, name, attrs=None, parent=None,
+ previous=None):
+ "Basic constructor."
+
+ # We don't actually store the parser object: that lets extracted
+ # chunks be garbage-collected
+ self.parserClass = parser.__class__
+ self.isSelfClosing = parser.isSelfClosingTag(name)
+ self.name = name
+ if attrs is None:
+ attrs = []
+ elif isinstance(attrs, dict):
+ attrs = attrs.items()
+ self.attrs = attrs
+ self.contents = []
+ self.setup(parent, previous)
+ self.hidden = False
+ self.containsSubstitutions = False
+ self.convertHTMLEntities = parser.convertHTMLEntities
+ self.convertXMLEntities = parser.convertXMLEntities
+ self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
+
+ # Convert any HTML, XML, or numeric entities in the attribute values.
+ convert = lambda(k, val): (k,
+ re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
+ self._convertEntities,
+ val))
+ self.attrs = map(convert, self.attrs)
+
+ def getString(self):
+ if (len(self.contents) == 1
+ and isinstance(self.contents[0], NavigableString)):
+ return self.contents[0]
+
+ def setString(self, string):
+ """Replace the contents of the tag with a string"""
+ self.clear()
+ self.append(string)
+
+ string = property(getString, setString)
+
+ def getText(self, separator=u""):
+ if not len(self.contents):
+ return u""
+ stopNode = self._lastRecursiveChild().next
+ strings = []
+ current = self.contents[0]
+ while current is not stopNode:
+ if isinstance(current, NavigableString):
+ strings.append(current.strip())
+ current = current.next
+ return separator.join(strings)
+
+ text = property(getText)
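+    # For example, BeautifulSoup('<p>a <b>b</b></p>').p.getText(u'-') returns
+    # u'a-b': each string is stripped, then joined with the separator.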
+
+ def get(self, key, default=None):
+ """Returns the value of the 'key' attribute for the tag, or
+ the value given for 'default' if it doesn't have that
+ attribute."""
+ return self._getAttrMap().get(key, default)
+
+ def clear(self):
+ """Extract all children."""
+ for child in self.contents[:]:
+ child.extract()
+
+ def index(self, element):
+ for i, child in enumerate(self.contents):
+ if child is element:
+ return i
+ raise ValueError("Tag.index: element not in tag")
+
+ def has_key(self, key):
+ return self._getAttrMap().has_key(key)
+
+ def __getitem__(self, key):
+ """tag[key] returns the value of the 'key' attribute for the tag,
+ and throws an exception if it's not there."""
+ return self._getAttrMap()[key]
+
+ def __iter__(self):
+ "Iterating over a tag iterates over its contents."
+ return iter(self.contents)
+
+ def __len__(self):
+ "The length of a tag is the length of its list of contents."
+ return len(self.contents)
+
+ def __contains__(self, x):
+ return x in self.contents
+
+ def __nonzero__(self):
+ "A tag is non-None even if it has no contents."
+ return True
+
+ def __setitem__(self, key, value):
+ """Setting tag[key] sets the value of the 'key' attribute for the
+ tag."""
+ self._getAttrMap()
+ self.attrMap[key] = value
+ found = False
+ for i in range(0, len(self.attrs)):
+ if self.attrs[i][0] == key:
+ self.attrs[i] = (key, value)
+ found = True
+ if not found:
+ self.attrs.append((key, value))
+ self._getAttrMap()[key] = value
+
+ def __delitem__(self, key):
+ "Deleting tag[key] deletes all 'key' attributes for the tag."
+ for item in self.attrs:
+ if item[0] == key:
+ self.attrs.remove(item)
+ #We don't break because bad HTML can define the same
+ #attribute multiple times.
+ self._getAttrMap()
+ if self.attrMap.has_key(key):
+ del self.attrMap[key]
+
+ def __call__(self, *args, **kwargs):
+ """Calling a tag like a function is the same as calling its
+        findAll() method. E.g. tag('a') returns a list of all the A tags
+ found within this tag."""
+        return self.findAll(*args, **kwargs)
+
+ def __getattr__(self, tag):
+ #print "Getattr %s.%s" % (self.__class__, tag)
+ if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
+ return self.find(tag[:-3])
+ elif tag.find('__') != 0:
+ return self.find(tag)
+ raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
+
+ def __eq__(self, other):
+ """Returns true iff this tag has the same name, the same attributes,
+ and the same contents (recursively) as the given tag.
+
+ NOTE: right now this will return false if two tags have the
+ same attributes in a different order. Should this be fixed?"""
+ if other is self:
+ return True
+ if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
+ return False
+ for i in range(0, len(self.contents)):
+ if self.contents[i] != other.contents[i]:
+ return False
+ return True
+
+ def __ne__(self, other):
+ """Returns true iff this tag is not identical to the other tag,
+ as defined in __eq__."""
+ return not self == other
+
+ def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
+ """Renders this tag as a string."""
+ return self.__str__(encoding)
+
+ def __unicode__(self):
+ return self.__str__(None)
+
+ BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
+ + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
+ + ")")
+
+ def _sub_entity(self, x):
+ """Used with a regular expression to substitute the
+ appropriate XML entity for an XML special character."""
+ return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
+
+ def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
+ prettyPrint=False, indentLevel=0):
+ """Returns a string or Unicode representation of this tag and
+ its contents. To get Unicode, pass None for encoding.
+
+ NOTE: since Python's HTML parser consumes whitespace, this
+ method is not certain to reproduce the whitespace present in
+ the original string."""
+
+ encodedName = self.toEncoding(self.name, encoding)
+
+ attrs = []
+ if self.attrs:
+ for key, val in self.attrs:
+ fmt = '%s="%s"'
+ if isinstance(val, basestring):
+ if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
+ val = self.substituteEncoding(val, encoding)
+
+ # The attribute value either:
+ #
+ # * Contains no embedded double quotes or single quotes.
+ # No problem: we enclose it in double quotes.
+ # * Contains embedded single quotes. No problem:
+ # double quotes work here too.
+ # * Contains embedded double quotes. No problem:
+ # we enclose it in single quotes.
+ # * Embeds both single _and_ double quotes. This
+ # can't happen naturally, but it can happen if
+ # you modify an attribute value after parsing
+ # the document. Now we have a bit of a
+ # problem. We solve it by enclosing the
+ # attribute in single quotes, and escaping any
+ # embedded single quotes to XML entities.
+ if '"' in val:
+ fmt = "%s='%s'"
+ if "'" in val:
+ # TODO: replace with apos when
+ # appropriate.
+ val = val.replace("'", "&squot;")
+
+ # Now we're okay w/r/t quotes. But the attribute
+ # value might also contain angle brackets, or
+ # ampersands that aren't part of entities. We need
+ # to escape those to XML entities too.
+ val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
+
+ attrs.append(fmt % (self.toEncoding(key, encoding),
+ self.toEncoding(val, encoding)))
+ close = ''
+ closeTag = ''
+ if self.isSelfClosing:
+ close = ' /'
+ else:
+ closeTag = '</%s>' % encodedName
+
+ indentTag, indentContents = 0, 0
+ if prettyPrint:
+ indentTag = indentLevel
+ space = (' ' * (indentTag-1))
+ indentContents = indentTag + 1
+ contents = self.renderContents(encoding, prettyPrint, indentContents)
+ if self.hidden:
+ s = contents
+ else:
+ s = []
+ attributeString = ''
+ if attrs:
+ attributeString = ' ' + ' '.join(attrs)
+ if prettyPrint:
+ s.append(space)
+ s.append('<%s%s%s>' % (encodedName, attributeString, close))
+ if prettyPrint:
+ s.append("\n")
+ s.append(contents)
+ if prettyPrint and contents and contents[-1] != "\n":
+ s.append("\n")
+ if prettyPrint and closeTag:
+ s.append(space)
+ s.append(closeTag)
+ if prettyPrint and closeTag and self.nextSibling:
+ s.append("\n")
+ s = ''.join(s)
+ return s
+
+ def decompose(self):
+ """Recursively destroys the contents of this tree."""
+ self.extract()
+ if len(self.contents) == 0:
+ return
+ current = self.contents[0]
+ while current is not None:
+ next = current.next
+ if isinstance(current, Tag):
+ del current.contents[:]
+ current.parent = None
+ current.previous = None
+ current.previousSibling = None
+ current.next = None
+ current.nextSibling = None
+ current = next
+
+ def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
+ return self.__str__(encoding, True)
+
+ def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
+ prettyPrint=False, indentLevel=0):
+ """Renders the contents of this tag as a string in the given
+        encoding. If encoding is None, returns a Unicode string."""
+        s = []
+ for c in self:
+ text = None
+ if isinstance(c, NavigableString):
+ text = c.__str__(encoding)
+ elif isinstance(c, Tag):
+ s.append(c.__str__(encoding, prettyPrint, indentLevel))
+ if text and prettyPrint:
+ text = text.strip()
+ if text:
+ if prettyPrint:
+ s.append(" " * (indentLevel-1))
+ s.append(text)
+ if prettyPrint:
+ s.append("\n")
+ return ''.join(s)
+
+ #Soup methods
+
+ def find(self, name=None, attrs={}, recursive=True, text=None,
+ **kwargs):
+ """Return only the first child of this Tag matching the given
+ criteria."""
+ r = None
+ l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
+ if l:
+ r = l[0]
+ return r
+ findChild = find
+
+ def findAll(self, name=None, attrs={}, recursive=True, text=None,
+ limit=None, **kwargs):
+ """Extracts a list of Tag objects that match the given
+ criteria. You can specify the name of the Tag and any
+ attributes you want the Tag to have.
+
+ The value of a key-value pair in the 'attrs' map can be a
+ string, a list of strings, a regular expression object, or a
+ callable that takes a string and returns whether or not the
+ string matches for some custom definition of 'matches'. The
+ same is true of the tag name."""
+ generator = self.recursiveChildGenerator
+ if not recursive:
+ generator = self.childGenerator
+ return self._findAll(name, attrs, text, limit, generator, **kwargs)
+ findChildren = findAll
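+    # For example, soup.findAll('a', 'nav') finds <a> tags whose class
+    # attribute contains 'nav': SoupStrainer turns a plain-string attrs
+    # value into a CSS-class match (see _match_css_class above).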
+
+ # Pre-3.x compatibility methods
+ first = find
+ fetch = findAll
+
+ def fetchText(self, text=None, recursive=True, limit=None):
+ return self.findAll(text=text, recursive=recursive, limit=limit)
+
+ def firstText(self, text=None, recursive=True):
+ return self.find(text=text, recursive=recursive)
+
+ #Private methods
+
+ def _getAttrMap(self):
+ """Initializes a map representation of this tag's attributes,
+ if not already initialized."""
+        if not getattr(self, 'attrMap', None):
+ self.attrMap = {}
+ for (key, value) in self.attrs:
+ self.attrMap[key] = value
+ return self.attrMap
+
+ #Generator methods
+ def childGenerator(self):
+ # Just use the iterator from the contents
+ return iter(self.contents)
+
+ def recursiveChildGenerator(self):
+ if not len(self.contents):
+            return
+ stopNode = self._lastRecursiveChild().next
+ current = self.contents[0]
+ while current is not stopNode:
+ yield current
+ current = current.next
+
+
+# Next, a couple classes to represent queries and their results.
+class SoupStrainer:
+ """Encapsulates a number of ways of matching a markup element (tag or
+ text)."""
+
+ def __init__(self, name=None, attrs={}, text=None, **kwargs):
+ self.name = name
+ if isinstance(attrs, basestring):
+ kwargs['class'] = _match_css_class(attrs)
+ attrs = None
+ if kwargs:
+ if attrs:
+ attrs = attrs.copy()
+ attrs.update(kwargs)
+ else:
+ attrs = kwargs
+ self.attrs = attrs
+ self.text = text
+
+ def __str__(self):
+ if self.text:
+ return self.text
+ else:
+ return "%s|%s" % (self.name, self.attrs)
+
+ def searchTag(self, markupName=None, markupAttrs={}):
+ found = None
+ markup = None
+ if isinstance(markupName, Tag):
+ markup = markupName
+ markupAttrs = markup
+ callFunctionWithTagData = callable(self.name) \
+ and not isinstance(markupName, Tag)
+
+ if (not self.name) \
+ or callFunctionWithTagData \
+ or (markup and self._matches(markup, self.name)) \
+ or (not markup and self._matches(markupName, self.name)):
+ if callFunctionWithTagData:
+ match = self.name(markupName, markupAttrs)
+ else:
+ match = True
+ markupAttrMap = None
+ for attr, matchAgainst in self.attrs.items():
+ if not markupAttrMap:
+ if hasattr(markupAttrs, 'get'):
+ markupAttrMap = markupAttrs
+ else:
+ markupAttrMap = {}
+ for k,v in markupAttrs:
+ markupAttrMap[k] = v
+ attrValue = markupAttrMap.get(attr)
+ if not self._matches(attrValue, matchAgainst):
+ match = False
+ break
+ if match:
+ if markup:
+ found = markup
+ else:
+ found = markupName
+ return found
+
+ def search(self, markup):
+ #print 'looking for %s in %s' % (self, markup)
+ found = None
+ # If given a list of items, scan it for a text element that
+ # matches.
+ if hasattr(markup, "__iter__") \
+ and not isinstance(markup, Tag):
+ for element in markup:
+ if isinstance(element, NavigableString) \
+ and self.search(element):
+ found = element
+ break
+ # If it's a Tag, make sure its name or attributes match.
+ # Don't bother with Tags if we're searching for text.
+ elif isinstance(markup, Tag):
+ if not self.text:
+ found = self.searchTag(markup)
+ # If it's text, make sure the text matches.
+ elif isinstance(markup, NavigableString) or \
+ isinstance(markup, basestring):
+ if self._matches(markup, self.text):
+ found = markup
+ else:
+ raise Exception, "I don't know how to match against a %s" \
+ % markup.__class__
+ return found
+
+ def _matches(self, markup, matchAgainst):
+ #print "Matching %s against %s" % (markup, matchAgainst)
+ result = False
+ if matchAgainst is True:
+ result = markup is not None
+ elif callable(matchAgainst):
+ result = matchAgainst(markup)
+ else:
+ #Custom match methods take the tag as an argument, but all
+ #other ways of matching match the tag name as a string.
+ if isinstance(markup, Tag):
+ markup = markup.name
+ if markup and not isinstance(markup, basestring):
+ markup = unicode(markup)
+ #Now we know that chunk is either a string, or None.
+ if hasattr(matchAgainst, 'match'):
+ # It's a regexp object.
+ result = markup and matchAgainst.search(markup)
+ elif hasattr(matchAgainst, '__iter__'): # list-like
+ result = markup in matchAgainst
+ elif hasattr(matchAgainst, 'items'):
+ result = markup.has_key(matchAgainst)
+ elif matchAgainst and isinstance(markup, basestring):
+ if isinstance(markup, unicode):
+ matchAgainst = unicode(matchAgainst)
+ else:
+ matchAgainst = str(matchAgainst)
+
+ if not result:
+ result = matchAgainst == markup
+ return result
+
+class ResultSet(list):
+ """A ResultSet is just a list that keeps track of the SoupStrainer
+ that created it."""
+ def __init__(self, source):
+        list.__init__(self)
+ self.source = source
+
+# Now, some helper functions.
+
+def buildTagMap(default, *args):
+ """Turns a list of maps, lists, or scalars into a single map.
+ Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
+ NESTING_RESET_TAGS maps out of lists and partial maps."""
+ built = {}
+ for portion in args:
+ if hasattr(portion, 'items'):
+ #It's a map. Merge it.
+ for k,v in portion.items():
+ built[k] = v
+ elif hasattr(portion, '__iter__'): # is a list
+ #It's a list. Map each item to the default.
+ for k in portion:
+ built[k] = default
+ else:
+ #It's a scalar. Map it to the default.
+ built[portion] = default
+ return built
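+# For example, following the merging rules above:
+#   buildTagMap(None, {'a': 'b'}, ['c'], 'd') == {'a': 'b', 'c': None, 'd': None}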
+
+# Now, the parser classes.
+
+class BeautifulStoneSoup(Tag, SGMLParser):
+
+ """This class contains the basic parser and search code. It defines
+ a parser that knows nothing about tag behavior except for the
+ following:
+
+ You can't close a tag without closing all the tags it encloses.
+ That is, "<foo><bar></foo>" actually means
+ "<foo><bar></bar></foo>".
+
+ [Another possible explanation is "<foo><bar /></foo>", but since
+ this class defines no SELF_CLOSING_TAGS, it will never use that
+ explanation.]
+
+ This class is useful for parsing XML or made-up markup languages,
+ or when BeautifulSoup makes an assumption counter to what you were
+ expecting."""
+
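+    # For instance, str(BeautifulStoneSoup("<foo><bar></foo>")) renders as
+    # "<foo><bar></bar></foo>", per the nesting rule described above.
+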
+ SELF_CLOSING_TAGS = {}
+ NESTABLE_TAGS = {}
+ RESET_NESTING_TAGS = {}
+ QUOTE_TAGS = {}
+ PRESERVE_WHITESPACE_TAGS = []
+
+ MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
+ lambda x: x.group(1) + ' />'),
+ (re.compile('<!\s+([^<>]*)>'),
+ lambda x: '<!' + x.group(1) + '>')
+ ]
+
+ ROOT_TAG_NAME = u'[document]'
+
+ HTML_ENTITIES = "html"
+ XML_ENTITIES = "xml"
+ XHTML_ENTITIES = "xhtml"
+ # TODO: This only exists for backwards-compatibility
+ ALL_ENTITIES = XHTML_ENTITIES
+
+ # Used when determining whether a text node is all whitespace and
+ # can be replaced with a single space. A text node that contains
+ # fancy Unicode spaces (usually non-breaking) should be left
+ # alone.
+ STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
+
+ def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
+ markupMassage=True, smartQuotesTo=XML_ENTITIES,
+ convertEntities=None, selfClosingTags=None, isHTML=False):
+ """The Soup object is initialized as the 'root tag', and the
+ provided markup (which can be a string or a file-like object)
+ is fed into the underlying parser.
+
+ sgmllib will process most bad HTML, and the BeautifulSoup
+ class has some tricks for dealing with some HTML that kills
+ sgmllib, but Beautiful Soup can nonetheless choke or lose data
+ if your data uses self-closing tags or declarations
+ incorrectly.
+
+ By default, Beautiful Soup uses regexes to sanitize input,
+ avoiding the vast majority of these problems. If the problems
+ don't apply to you, pass in False for markupMassage, and
+ you'll get better performance.
+
+ The default parser massage techniques fix the two most common
+ instances of invalid HTML that choke sgmllib:
+
+ <br/> (No space between name of closing tag and tag close)
+ <! --Comment--> (Extraneous whitespace in declaration)
+
+ You can pass in a custom list of (RE object, replace method)
+ tuples to get Beautiful Soup to scrub your input the way you
+ want."""
+
+ self.parseOnlyThese = parseOnlyThese
+ self.fromEncoding = fromEncoding
+ self.smartQuotesTo = smartQuotesTo
+ self.convertEntities = convertEntities
+ # Set the rules for how we'll deal with the entities we
+ # encounter
+ if self.convertEntities:
+ # It doesn't make sense to convert encoded characters to
+ # entities even while you're converting entities to Unicode.
+ # Just convert it all to Unicode.
+ self.smartQuotesTo = None
+ if convertEntities == self.HTML_ENTITIES:
+ self.convertXMLEntities = False
+ self.convertHTMLEntities = True
+ self.escapeUnrecognizedEntities = True
+ elif convertEntities == self.XHTML_ENTITIES:
+ self.convertXMLEntities = True
+ self.convertHTMLEntities = True
+ self.escapeUnrecognizedEntities = False
+ elif convertEntities == self.XML_ENTITIES:
+ self.convertXMLEntities = True
+ self.convertHTMLEntities = False
+ self.escapeUnrecognizedEntities = False
+ else:
+ self.convertXMLEntities = False
+ self.convertHTMLEntities = False
+ self.escapeUnrecognizedEntities = False
+
+ self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
+ SGMLParser.__init__(self)
+
+ if hasattr(markup, 'read'): # It's a file-type object.
+ markup = markup.read()
+ self.markup = markup
+ self.markupMassage = markupMassage
+ try:
+ self._feed(isHTML=isHTML)
+ except StopParsing:
+ pass
+ self.markup = None # The markup can now be GCed
+
+ def convert_charref(self, name):
+ """This method fixes a bug in Python's SGMLParser."""
+ try:
+ n = int(name)
+ except ValueError:
+ return
+        if not 0 <= n <= 127: # ASCII ends at 127, not 255
+ return
+ return self.convert_codepoint(n)
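+        # For example, convert_charref('65') returns u'A'; out-of-range
+        # values such as '300' return None.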
+
+ def _feed(self, inDocumentEncoding=None, isHTML=False):
+ # Convert the document to Unicode.
+ markup = self.markup
+ if isinstance(markup, unicode):
+ if not hasattr(self, 'originalEncoding'):
+ self.originalEncoding = None
+ else:
+ dammit = UnicodeDammit\
+ (markup, [self.fromEncoding, inDocumentEncoding],
+ smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
+ markup = dammit.unicode
+ self.originalEncoding = dammit.originalEncoding
+ self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
+ if markup:
+ if self.markupMassage:
+ if not hasattr(self.markupMassage, "__iter__"):
+ self.markupMassage = self.MARKUP_MASSAGE
+ for fix, m in self.markupMassage:
+ markup = fix.sub(m, markup)
+ # TODO: We get rid of markupMassage so that the
+ # soup object can be deepcopied later on. Some
+ # Python installations can't copy regexes. If anyone
+ # was relying on the existence of markupMassage, this
+ # might cause problems.
+ del(self.markupMassage)
+ self.reset()
+
+ SGMLParser.feed(self, markup)
+ # Close out any unfinished strings and close all the open tags.
+ self.endData()
+ while self.currentTag.name != self.ROOT_TAG_NAME:
+ self.popTag()
+
+ def __getattr__(self, methodName):
+ """This method routes method call requests to either the SGMLParser
+ superclass or the Tag superclass, depending on the method name."""
+ #print "__getattr__ called on %s.%s" % (self.__class__, methodName)
+
+ if methodName.startswith('start_') or methodName.startswith('end_') \
+ or methodName.startswith('do_'):
+ return SGMLParser.__getattr__(self, methodName)
+ elif not methodName.startswith('__'):
+ return Tag.__getattr__(self, methodName)
+ else:
+ raise AttributeError
+
+ def isSelfClosingTag(self, name):
+ """Returns true iff the given string is the name of a
+ self-closing tag according to this parser."""
+ return self.SELF_CLOSING_TAGS.has_key(name) \
+ or self.instanceSelfClosingTags.has_key(name)
+
+ def reset(self):
+ Tag.__init__(self, self, self.ROOT_TAG_NAME)
+ self.hidden = 1
+ SGMLParser.reset(self)
+ self.currentData = []
+ self.currentTag = None
+ self.tagStack = []
+ self.quoteStack = []
+ self.pushTag(self)
+
+ def popTag(self):
+ tag = self.tagStack.pop()
+
+ #print "Pop", tag.name
+ if self.tagStack:
+ self.currentTag = self.tagStack[-1]
+ return self.currentTag
+
+ def pushTag(self, tag):
+ #print "Push", tag.name
+ if self.currentTag:
+ self.currentTag.contents.append(tag)
+ self.tagStack.append(tag)
+ self.currentTag = self.tagStack[-1]
+
+ def endData(self, containerClass=NavigableString):
+ if self.currentData:
+ currentData = u''.join(self.currentData)
+ if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
+ not set([tag.name for tag in self.tagStack]).intersection(
+ self.PRESERVE_WHITESPACE_TAGS)):
+ if '\n' in currentData:
+ currentData = '\n'
+ else:
+ currentData = ' '
+ self.currentData = []
+ if self.parseOnlyThese and len(self.tagStack) <= 1 and \
+ (not self.parseOnlyThese.text or \
+ not self.parseOnlyThese.search(currentData)):
+ return
+ o = containerClass(currentData)
+ o.setup(self.currentTag, self.previous)
+ if self.previous:
+ self.previous.next = o
+ self.previous = o
+ self.currentTag.contents.append(o)
+
+
+ def _popToTag(self, name, inclusivePop=True):
+ """Pops the tag stack up to and including the most recent
+ instance of the given tag. If inclusivePop is false, pops the tag
+        stack up to but *not* including the most recent instance of
+ the given tag."""
+ #print "Popping to %s" % name
+ if name == self.ROOT_TAG_NAME:
+ return
+
+ numPops = 0
+ mostRecentTag = None
+ for i in range(len(self.tagStack)-1, 0, -1):
+ if name == self.tagStack[i].name:
+ numPops = len(self.tagStack)-i
+ break
+ if not inclusivePop:
+ numPops = numPops - 1
+
+ for i in range(0, numPops):
+ mostRecentTag = self.popTag()
+ return mostRecentTag
+
+ def _smartPop(self, name):
+
+ """We need to pop up to the previous tag of this type, unless
+ one of this tag's nesting reset triggers comes between this
+ tag and the previous tag of this type, OR unless this tag is a
+ generic nesting trigger and another generic nesting trigger
+ comes between this tag and the previous tag of this type.
+
+ Examples:
+ <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
+ <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
+ <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
+
+ <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
+ <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
+ <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
+ """
+
+ nestingResetTriggers = self.NESTABLE_TAGS.get(name)
+        isNestable = nestingResetTriggers is not None
+ isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
+ popTo = None
+ inclusive = True
+ for i in range(len(self.tagStack)-1, 0, -1):
+ p = self.tagStack[i]
+ if (not p or p.name == name) and not isNestable:
+ #Non-nestable tags get popped to the top or to their
+                #last occurrence.
+ popTo = name
+ break
+ if (nestingResetTriggers is not None
+ and p.name in nestingResetTriggers) \
+ or (nestingResetTriggers is None and isResetNesting
+ and self.RESET_NESTING_TAGS.has_key(p.name)):
+
+ #If we encounter one of the nesting reset triggers
+ #peculiar to this tag, or we encounter another tag
+ #that causes nesting to reset, pop up to but not
+ #including that tag.
+ popTo = p.name
+ inclusive = False
+ break
+ p = p.parent
+ if popTo:
+ self._popToTag(popTo, inclusive)
+
+ def unknown_starttag(self, name, attrs, selfClosing=0):
+ #print "Start tag %s: %s" % (name, attrs)
+ if self.quoteStack:
+ #This is not a real tag.
+ #print "<%s> is not real!" % name
+ attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs])
+ self.handle_data('<%s%s>' % (name, attrs))
+ return
+ self.endData()
+
+ if not self.isSelfClosingTag(name) and not selfClosing:
+ self._smartPop(name)
+
+ if self.parseOnlyThese and len(self.tagStack) <= 1 \
+ and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
+ return
+
+ tag = Tag(self, name, attrs, self.currentTag, self.previous)
+ if self.previous:
+ self.previous.next = tag
+ self.previous = tag
+ self.pushTag(tag)
+ if selfClosing or self.isSelfClosingTag(name):
+ self.popTag()
+ if name in self.QUOTE_TAGS:
+ #print "Beginning quote (%s)" % name
+ self.quoteStack.append(name)
+ self.literal = 1
+ return tag
+
+ def unknown_endtag(self, name):
+ #print "End tag %s" % name
+ if self.quoteStack and self.quoteStack[-1] != name:
+ #This is not a real end tag.
+ #print "</%s> is not real!" % name
+ self.handle_data('</%s>' % name)
+ return
+ self.endData()
+ self._popToTag(name)
+ if self.quoteStack and self.quoteStack[-1] == name:
+ self.quoteStack.pop()
+ self.literal = (len(self.quoteStack) > 0)
+
+ def handle_data(self, data):
+ self.currentData.append(data)
+
+ def _toStringSubclass(self, text, subclass):
+ """Adds a certain piece of text to the tree as a NavigableString
+ subclass."""
+ self.endData()
+ self.handle_data(text)
+ self.endData(subclass)
+
+ def handle_pi(self, text):
+ """Handle a processing instruction as a ProcessingInstruction
+ object, possibly one with a %SOUP-ENCODING% slot into which an
+ encoding will be plugged later."""
+ if text[:3] == "xml":
+ text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
+ self._toStringSubclass(text, ProcessingInstruction)
+
+ def handle_comment(self, text):
+ "Handle comments as Comment objects."
+ self._toStringSubclass(text, Comment)
+
+ def handle_charref(self, ref):
+ "Handle character references as data."
+ if self.convertEntities:
+ data = unichr(int(ref))
+ else:
+ data = '&#%s;' % ref
+ self.handle_data(data)
+
+ def handle_entityref(self, ref):
+ """Handle entity references as data, possibly converting known
+ HTML and/or XML entity references to the corresponding Unicode
+ characters."""
+ data = None
+ if self.convertHTMLEntities:
+ try:
+ data = unichr(name2codepoint[ref])
+ except KeyError:
+ pass
+
+ if not data and self.convertXMLEntities:
+ data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
+
+ if not data and self.convertHTMLEntities and \
+ not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
+ # TODO: We've got a problem here. We're told this is
+ # an entity reference, but it's not an XML entity
+ # reference or an HTML entity reference. Nonetheless,
+ # the logical thing to do is to pass it through as an
+ # unrecognized entity reference.
+ #
+ # Except: when the input is "&carol;" this function
+ # will be called with input "carol". When the input is
+ # "AT&T", this function will be called with input
+ # "T". We have no way of knowing whether a semicolon
+ # was present originally, so we don't know whether
+ # this is an unknown entity or just a misplaced
+ # ampersand.
+ #
+ # The more common case is a misplaced ampersand, so I
+ # escape the ampersand and omit the trailing semicolon.
+ data = "&%s" % ref
+ if not data:
+ # This case is different from the one above, because we
+ # haven't already gone through a supposedly comprehensive
+ # mapping of entities to Unicode characters. We might not
+ # have gone through any mapping at all. So the chances are
+ # very high that this is a real entity, and not a
+ # misplaced ampersand.
+ data = "&%s;" % ref
+ self.handle_data(data)
+
+ def handle_decl(self, data):
+ "Handle DOCTYPEs and the like as Declaration objects."
+ self._toStringSubclass(data, Declaration)
+
+ def parse_declaration(self, i):
+ """Treat a bogus SGML declaration as raw data. Treat a CDATA
+ declaration as a CData object."""
+ j = None
+ if self.rawdata[i:i+9] == '<![CDATA[':
+ k = self.rawdata.find(']]>', i)
+ if k == -1:
+ k = len(self.rawdata)
+ data = self.rawdata[i+9:k]
+ j = k+3
+ self._toStringSubclass(data, CData)
+ else:
+ try:
+ j = SGMLParser.parse_declaration(self, i)
+ except SGMLParseError:
+ toHandle = self.rawdata[i:]
+ self.handle_data(toHandle)
+ j = i + len(toHandle)
+ return j
+
+class BeautifulSoup(BeautifulStoneSoup):
+
+ """This parser knows the following facts about HTML:
+
+ * Some tags have no closing tag and should be interpreted as being
+ closed as soon as they are encountered.
+
+    * The text inside some tags (i.e. 'script') may contain tags which
+ are not really part of the document and which should be parsed
+ as text, not tags. If you want to parse the text as tags, you can
+ always fetch it and parse it explicitly.
+
+ * Tag nesting rules:
+
+    Most tags can't be nested at all. For instance, the occurrence of
+ a <p> tag should implicitly close the previous <p> tag.
+
+ <p>Para1<p>Para2
+ should be transformed into:
+ <p>Para1</p><p>Para2
+
+    Some tags can be nested arbitrarily. For instance, the occurrence
+ of a <blockquote> tag should _not_ implicitly close the previous
+ <blockquote> tag.
+
+ Alice said: <blockquote>Bob said: <blockquote>Blah
+ should NOT be transformed into:
+ Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
+
+ Some tags can be nested, but the nesting is reset by the
+ interposition of other tags. For instance, a <tr> tag should
+ implicitly close the previous <tr> tag within the same <table>,
+ but not close a <tr> tag in another table.
+
+ <table><tr>Blah<tr>Blah
+ should be transformed into:
+ <table><tr>Blah</tr><tr>Blah
+ but,
+ <tr>Blah<table><tr>Blah
+ should NOT be transformed into
+ <tr>Blah<table></tr><tr>Blah
+
+ Differing assumptions about tag nesting rules are a major source
+ of problems with the BeautifulSoup class. If BeautifulSoup is not
+ treating as nestable a tag your page author treats as nestable,
+ try ICantBelieveItsBeautifulSoup, MinimalSoup, or
+ BeautifulStoneSoup before writing your own subclass."""
+
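+    # Illustrative sketch, not part of the vendored source: the nesting
+    # rules above are observable directly, assuming this module is
+    # importable as BeautifulSoup:
+    #
+    #   str(BeautifulSoup('<p>Para1<p>Para2'))
+    #   # -> '<p>Para1</p><p>Para2</p>'
+    #   str(BeautifulSoup('Alice said: <blockquote>Bob said: <blockquote>Blah'))
+    #   # keeps the second blockquote nested inside the first
+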
+ def __init__(self, *args, **kwargs):
+ if not kwargs.has_key('smartQuotesTo'):
+ kwargs['smartQuotesTo'] = self.HTML_ENTITIES
+ kwargs['isHTML'] = True
+ BeautifulStoneSoup.__init__(self, *args, **kwargs)
+
+ SELF_CLOSING_TAGS = buildTagMap(None,
+ ('br' , 'hr', 'input', 'img', 'meta',
+ 'spacer', 'link', 'frame', 'base', 'col'))
+
+ PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])
+
+ QUOTE_TAGS = {'script' : None, 'textarea' : None}
+
+ #According to the HTML standard, each of these inline tags can
+ #contain another tag of the same type. Furthermore, it's common
+ #to actually use these tags this way.
+ NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
+ 'center')
+
+ #According to the HTML standard, these block tags can contain
+ #another tag of the same type. Furthermore, it's common
+ #to actually use these tags this way.
+ NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del')
+
+ #Lists can contain other lists, but there are restrictions.
+ NESTABLE_LIST_TAGS = { 'ol' : [],
+ 'ul' : [],
+ 'li' : ['ul', 'ol'],
+ 'dl' : [],
+ 'dd' : ['dl'],
+ 'dt' : ['dl'] }
+
+ #Tables can contain other tables, but there are restrictions.
+ NESTABLE_TABLE_TAGS = {'table' : [],
+ 'tr' : ['table', 'tbody', 'tfoot', 'thead'],
+ 'td' : ['tr'],
+ 'th' : ['tr'],
+ 'thead' : ['table'],
+ 'tbody' : ['table'],
+ 'tfoot' : ['table'],
+ }
+
+ NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')
+
+ #If one of these tags is encountered, all tags up to the next tag of
+ #this type are popped.
+ RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
+ NON_NESTABLE_BLOCK_TAGS,
+ NESTABLE_LIST_TAGS,
+ NESTABLE_TABLE_TAGS)
+
+ NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
+ NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
+
+ # Used to detect the charset in a META tag; see start_meta
+ CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
+
+ def start_meta(self, attrs):
+ """Beautiful Soup can detect a charset included in a META tag,
+ try to convert the document to that charset, and re-parse the
+ document from the beginning."""
+ httpEquiv = None
+ contentType = None
+ contentTypeIndex = None
+ tagNeedsEncodingSubstitution = False
+
+ for i in range(0, len(attrs)):
+ key, value = attrs[i]
+ key = key.lower()
+ if key == 'http-equiv':
+ httpEquiv = value
+ elif key == 'content':
+ contentType = value
+ contentTypeIndex = i
+
+ if httpEquiv and contentType: # It's an interesting meta tag.
+ match = self.CHARSET_RE.search(contentType)
+ if match:
+ if (self.declaredHTMLEncoding is not None or
+ self.originalEncoding == self.fromEncoding):
+ # An HTML encoding was sniffed while converting
+ # the document to Unicode, or an HTML encoding was
+ # sniffed during a previous pass through the
+ # document, or an encoding was specified
+ # explicitly and it worked. Rewrite the meta tag.
+ def rewrite(match):
+ return match.group(1) + "%SOUP-ENCODING%"
+ newAttr = self.CHARSET_RE.sub(rewrite, contentType)
+ attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
+ newAttr)
+ tagNeedsEncodingSubstitution = True
+ else:
+ # This is our first pass through the document.
+ # Go through it again with the encoding information.
+ newCharset = match.group(3)
+ if newCharset and newCharset != self.originalEncoding:
+ self.declaredHTMLEncoding = newCharset
+ self._feed(self.declaredHTMLEncoding)
+ raise StopParsing
+ pass
+ tag = self.unknown_starttag("meta", attrs)
+ if tag and tagNeedsEncodingSubstitution:
+ tag.containsSubstitutions = True
+
+class StopParsing(Exception):
+ pass
+
+class ICantBelieveItsBeautifulSoup(BeautifulSoup):
+
+ """The BeautifulSoup class is oriented towards skipping over
+ common HTML errors like unclosed tags. However, sometimes it makes
+ errors of its own. For instance, consider this fragment:
+
+ <b>Foo<b>Bar</b></b>
+
+ This is perfectly valid (if bizarre) HTML. However, the
+ BeautifulSoup class will implicitly close the first b tag when it
+ encounters the second 'b'. It will think the author wrote
+ "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
+ there's no real-world reason to bold something that's already
+ bold. When it encounters '</b></b>' it will close two more 'b'
+ tags, for a grand total of three tags closed instead of two. This
+ can throw off the rest of your document structure. The same is
+ true of a number of other tags, listed below.
+
+ It's much more common for someone to forget to close a 'b' tag
+ than to actually use nested 'b' tags, and the BeautifulSoup class
+    handles the common case. This class handles the not-so-common
+ case: where you can't believe someone wrote what they did, but
+ it's valid HTML and BeautifulSoup screwed up by assuming it
+ wouldn't be."""
+
+ I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
+ ('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
+ 'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
+ 'big')
+
+ I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',)
+
+ NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
+ I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
+ I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
+
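+# Illustrative sketch, not part of the vendored source: the two parsers
+# differ precisely on markup like the docstring's example:
+#
+#   str(BeautifulSoup('<b>Foo<b>Bar</b></b>'))
+#   # -> '<b>Foo</b><b>Bar</b>' (second <b> implicitly closes the first)
+#   str(ICantBelieveItsBeautifulSoup('<b>Foo<b>Bar</b></b>'))
+#   # -> '<b>Foo<b>Bar</b></b>' (nesting preserved as written)
+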
+class MinimalSoup(BeautifulSoup):
+ """The MinimalSoup class is for parsing HTML that contains
+ pathologically bad markup. It makes no assumptions about tag
+ nesting, but it does know which tags are self-closing, that
+    <script> tags contain JavaScript and should not be parsed, that
+ META tags may contain encoding information, and so on.
+
+ This also makes it better for subclassing than BeautifulStoneSoup
+ or BeautifulSoup."""
+
+ RESET_NESTING_TAGS = buildTagMap('noscript')
+ NESTABLE_TAGS = {}
+
+class BeautifulSOAP(BeautifulStoneSoup):
+ """This class will push a tag with only a single string child into
+ the tag's parent as an attribute. The attribute's name is the tag
+ name, and the value is the string child. An example should give
+ the flavor of the change:
+
+ <foo><bar>baz</bar></foo>
+ =>
+ <foo bar="baz"><bar>baz</bar></foo>
+
+ You can then access fooTag['bar'] instead of fooTag.barTag.string.
+
+ This is, of course, useful for scraping structures that tend to
+ use subelements instead of attributes, such as SOAP messages. Note
+ that it modifies its input, so don't print the modified version
+ out.
+
+ I'm not sure how many people really want to use this class; let me
+ know if you do. Mainly I like the name."""
+
+ def popTag(self):
+ if len(self.tagStack) > 1:
+ tag = self.tagStack[-1]
+ parent = self.tagStack[-2]
+ parent._getAttrMap()
+ if (isinstance(tag, Tag) and len(tag.contents) == 1 and
+ isinstance(tag.contents[0], NavigableString) and
+ not parent.attrMap.has_key(tag.name)):
+ parent[tag.name] = tag.contents[0]
+ BeautifulStoneSoup.popTag(self)
+
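+# Illustrative sketch, not part of the vendored source: with the class
+# above,
+#
+#   soup = BeautifulSOAP('<foo><bar>baz</bar></foo>')
+#   soup.fooTag['bar']   # -> u'baz', in addition to soup.fooTag.barTag.string
+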
+#Enterprise class names! It has come to our attention that some people
+#think the names of the Beautiful Soup parser classes are too silly
+#and "unprofessional" for use in enterprise screen-scraping. We feel
+#your pain! For such-minded folk, the Beautiful Soup Consortium And
+#All-Night Kosher Bakery recommends renaming this file to
+#"RobustParser.py" (or, in cases of extreme enterprisiness,
+#"RobustParserBeanInterface.class") and using the following
+#enterprise-friendly class aliases:
+class RobustXMLParser(BeautifulStoneSoup):
+ pass
+class RobustHTMLParser(BeautifulSoup):
+ pass
+class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
+ pass
+class RobustInsanelyWackAssHTMLParser(MinimalSoup):
+ pass
+class SimplifyingSOAPParser(BeautifulSOAP):
+ pass
+
+######################################################
+#
+# Bonus library: Unicode, Dammit
+#
+# This class forces XML data into a standard format (usually to UTF-8
+# or Unicode). It is heavily based on code from Mark Pilgrim's
+# Universal Feed Parser. It does not rewrite the XML or HTML to
+# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
+# (XML) and BeautifulSoup.start_meta (HTML).
+
+# Autodetects character encodings.
+# Download from http://chardet.feedparser.org/
+try:
+ import chardet
+# import chardet.constants
+# chardet.constants._debug = 1
+except ImportError:
+ chardet = None
+
+# cjkcodecs and iconv_codec make Python know about more character encodings.
+# Both are available from http://cjkpython.i18n.org/
+# They're built in if you use Python 2.4.
+try:
+ import cjkcodecs.aliases
+except ImportError:
+ pass
+try:
+ import iconv_codec
+except ImportError:
+ pass
+
+class UnicodeDammit:
+ """A class for detecting the encoding of a *ML document and
+ converting it to a Unicode string. If the source encoding is
+    windows-1252, it can replace MS smart quotes with their HTML or XML
+ equivalents."""
+
+ # This dictionary maps commonly seen values for "charset" in HTML
+ # meta tags to the corresponding Python codec names. It only covers
+ # values that aren't in Python's aliases and can't be determined
+ # by the heuristics in find_codec.
+ CHARSET_ALIASES = { "macintosh" : "mac-roman",
+ "x-sjis" : "shift-jis" }
+
+ def __init__(self, markup, overrideEncodings=[],
+ smartQuotesTo='xml', isHTML=False):
+ self.declaredHTMLEncoding = None
+ self.markup, documentEncoding, sniffedEncoding = \
+ self._detectEncoding(markup, isHTML)
+ self.smartQuotesTo = smartQuotesTo
+ self.triedEncodings = []
+ if markup == '' or isinstance(markup, unicode):
+ self.originalEncoding = None
+ self.unicode = unicode(markup)
+ return
+
+ u = None
+ for proposedEncoding in overrideEncodings:
+ u = self._convertFrom(proposedEncoding)
+ if u: break
+ if not u:
+ for proposedEncoding in (documentEncoding, sniffedEncoding):
+ u = self._convertFrom(proposedEncoding)
+ if u: break
+
+ # If no luck and we have auto-detection library, try that:
+ if not u and chardet and not isinstance(self.markup, unicode):
+ u = self._convertFrom(chardet.detect(self.markup)['encoding'])
+
+ # As a last resort, try utf-8 and windows-1252:
+ if not u:
+ for proposed_encoding in ("utf-8", "windows-1252"):
+ u = self._convertFrom(proposed_encoding)
+ if u: break
+
+ self.unicode = u
+ if not u: self.originalEncoding = None
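+
+    # Illustrative sketch, not part of the vendored source: typical use:
+    #
+    #   dammit = UnicodeDammit('Sacr\xe9 bleu!')
+    #   dammit.unicode            # -> u'Sacr\xe9 bleu!'
+    #   dammit.originalEncoding   # 'windows-1252' here (the lone \xe9 is
+    #                             # not valid UTF-8), or whatever codec
+    #                             # succeeded first, e.g. via chardet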
+
+ def _subMSChar(self, orig):
+ """Changes a MS smart quote character to an XML or HTML
+ entity."""
+ sub = self.MS_CHARS.get(orig)
+ if isinstance(sub, tuple):
+ if self.smartQuotesTo == 'xml':
+ sub = '&#x%s;' % sub[1]
+ else:
+ sub = '&%s;' % sub[0]
+ return sub
+
+ def _convertFrom(self, proposed):
+ proposed = self.find_codec(proposed)
+ if not proposed or proposed in self.triedEncodings:
+ return None
+ self.triedEncodings.append(proposed)
+ markup = self.markup
+
+ # Convert smart quotes to HTML if coming from an encoding
+ # that might have them.
+ if self.smartQuotesTo and proposed.lower() in("windows-1252",
+ "iso-8859-1",
+ "iso-8859-2"):
+ markup = re.compile("([\x80-\x9f])").sub \
+ (lambda(x): self._subMSChar(x.group(1)),
+ markup)
+
+ try:
+ # print "Trying to convert document to %s" % proposed
+ u = self._toUnicode(markup, proposed)
+ self.markup = u
+ self.originalEncoding = proposed
+ except Exception, e:
+ # print "That didn't work!"
+ # print e
+ return None
+ #print "Correct encoding: %s" % proposed
+ return self.markup
+
+ def _toUnicode(self, data, encoding):
+ '''Given a string and its encoding, decodes the string into Unicode.
+ %encoding is a string recognized by encodings.aliases'''
+
+ # strip Byte Order Mark (if present)
+ if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
+ and (data[2:4] != '\x00\x00'):
+ encoding = 'utf-16be'
+ data = data[2:]
+ elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
+ and (data[2:4] != '\x00\x00'):
+ encoding = 'utf-16le'
+ data = data[2:]
+ elif data[:3] == '\xef\xbb\xbf':
+ encoding = 'utf-8'
+ data = data[3:]
+ elif data[:4] == '\x00\x00\xfe\xff':
+ encoding = 'utf-32be'
+ data = data[4:]
+ elif data[:4] == '\xff\xfe\x00\x00':
+ encoding = 'utf-32le'
+ data = data[4:]
+ newdata = unicode(data, encoding)
+ return newdata
+
+ def _detectEncoding(self, xml_data, isHTML=False):
+ """Given a document, tries to detect its XML encoding."""
+ xml_encoding = sniffed_xml_encoding = None
+ try:
+ if xml_data[:4] == '\x4c\x6f\xa7\x94':
+ # EBCDIC
+ xml_data = self._ebcdic_to_ascii(xml_data)
+ elif xml_data[:4] == '\x00\x3c\x00\x3f':
+ # UTF-16BE
+ sniffed_xml_encoding = 'utf-16be'
+ xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
+ elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
+ and (xml_data[2:4] != '\x00\x00'):
+ # UTF-16BE with BOM
+ sniffed_xml_encoding = 'utf-16be'
+ xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
+ elif xml_data[:4] == '\x3c\x00\x3f\x00':
+ # UTF-16LE
+ sniffed_xml_encoding = 'utf-16le'
+ xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
+ elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
+ (xml_data[2:4] != '\x00\x00'):
+ # UTF-16LE with BOM
+ sniffed_xml_encoding = 'utf-16le'
+ xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
+ elif xml_data[:4] == '\x00\x00\x00\x3c':
+ # UTF-32BE
+ sniffed_xml_encoding = 'utf-32be'
+ xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
+ elif xml_data[:4] == '\x3c\x00\x00\x00':
+ # UTF-32LE
+ sniffed_xml_encoding = 'utf-32le'
+ xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
+ elif xml_data[:4] == '\x00\x00\xfe\xff':
+ # UTF-32BE with BOM
+ sniffed_xml_encoding = 'utf-32be'
+ xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
+ elif xml_data[:4] == '\xff\xfe\x00\x00':
+ # UTF-32LE with BOM
+ sniffed_xml_encoding = 'utf-32le'
+ xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
+ elif xml_data[:3] == '\xef\xbb\xbf':
+ # UTF-8 with BOM
+ sniffed_xml_encoding = 'utf-8'
+ xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
+ else:
+ sniffed_xml_encoding = 'ascii'
+ pass
+ except:
+ xml_encoding_match = None
+ xml_encoding_match = re.compile(
+ '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
+ if not xml_encoding_match and isHTML:
+ regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
+ xml_encoding_match = regexp.search(xml_data)
+ if xml_encoding_match is not None:
+ xml_encoding = xml_encoding_match.groups()[0].lower()
+ if isHTML:
+ self.declaredHTMLEncoding = xml_encoding
+ if sniffed_xml_encoding and \
+ (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
+ 'iso-10646-ucs-4', 'ucs-4', 'csucs4',
+ 'utf-16', 'utf-32', 'utf_16', 'utf_32',
+ 'utf16', 'u16')):
+ xml_encoding = sniffed_xml_encoding
+ return xml_data, xml_encoding, sniffed_xml_encoding
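+
+    # Illustrative sketch, not part of the vendored source: for a
+    # BOM-prefixed UTF-16LE input,
+    #   self._detectEncoding('\xff\xfeh\x00i\x00')
+    # returns ('hi', None, 'utf-16le'): the data re-encoded as UTF-8, no
+    # declared encoding found, and the sniffed encoding.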
+
+
+ def find_codec(self, charset):
+ return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
+ or (charset and self._codec(charset.replace("-", ""))) \
+ or (charset and self._codec(charset.replace("-", "_"))) \
+ or charset
+
+ def _codec(self, charset):
+ if not charset: return charset
+ codec = None
+ try:
+ codecs.lookup(charset)
+ codec = charset
+ except (LookupError, ValueError):
+ pass
+ return codec
+
+ EBCDIC_TO_ASCII_MAP = None
+ def _ebcdic_to_ascii(self, s):
+ c = self.__class__
+ if not c.EBCDIC_TO_ASCII_MAP:
+ emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
+ 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
+ 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
+ 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
+ 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
+ 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
+ 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
+ 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
+ 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
+ 201,202,106,107,108,109,110,111,112,113,114,203,204,205,
+ 206,207,208,209,126,115,116,117,118,119,120,121,122,210,
+ 211,212,213,214,215,216,217,218,219,220,221,222,223,224,
+ 225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
+ 73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
+ 82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
+ 90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
+ 250,251,252,253,254,255)
+ import string
+ c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
+ ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
+ return s.translate(c.EBCDIC_TO_ASCII_MAP)
+
+ MS_CHARS = { '\x80' : ('euro', '20AC'),
+ '\x81' : ' ',
+ '\x82' : ('sbquo', '201A'),
+ '\x83' : ('fnof', '192'),
+ '\x84' : ('bdquo', '201E'),
+ '\x85' : ('hellip', '2026'),
+ '\x86' : ('dagger', '2020'),
+ '\x87' : ('Dagger', '2021'),
+ '\x88' : ('circ', '2C6'),
+ '\x89' : ('permil', '2030'),
+ '\x8A' : ('Scaron', '160'),
+ '\x8B' : ('lsaquo', '2039'),
+ '\x8C' : ('OElig', '152'),
+ '\x8D' : '?',
+ '\x8E' : ('#x17D', '17D'),
+ '\x8F' : '?',
+ '\x90' : '?',
+ '\x91' : ('lsquo', '2018'),
+ '\x92' : ('rsquo', '2019'),
+ '\x93' : ('ldquo', '201C'),
+ '\x94' : ('rdquo', '201D'),
+ '\x95' : ('bull', '2022'),
+ '\x96' : ('ndash', '2013'),
+ '\x97' : ('mdash', '2014'),
+ '\x98' : ('tilde', '2DC'),
+ '\x99' : ('trade', '2122'),
+ '\x9a' : ('scaron', '161'),
+ '\x9b' : ('rsaquo', '203A'),
+ '\x9c' : ('oelig', '153'),
+ '\x9d' : '?',
+ '\x9e' : ('#x17E', '17E'),
+ '\x9f' : ('Yuml', ''),}
+
+#######################################################################
+
+
+#By default, act as an HTML pretty-printer.
+if __name__ == '__main__':
+ import sys
+ soup = BeautifulSoup(sys.stdin)
+ print soup.prettify()
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/README.chromium b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/README.chromium
new file mode 100644
index 0000000..77c7bb4
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/README.chromium
@@ -0,0 +1,69 @@
+This directory contains sources from other projects.
+
+Code in this directory must document the license under which the source is being
+used. If the source itself does not include a license header or file, create
+an entry in this file that refers to reliable documentation of the project's
+license terms on the web (and add a note pointing here in the README file in
+that directory).
+
+Name: autopep8
+Short Name: autopep8
+URL: https://pypi.python.org/packages/source/a/autopep8/autopep8-1.0.3.tar.gz#md5=7c16d385cf9ad7c1d7fbcfcea2588a56
+Version: 1.0.3
+License: MIT
+License File: NOT_SHIPPED
+Security Critical: no
+Description: Used to reformat python code via format-webkitpy
+Local Modifications: None
+
+Name: BeautifulSoup - HTML parser
+Short Name: BeautifulSoup
+URL: http://www.crummy.com/software/BeautifulSoup/download/3.x/BeautifulSoup-3.2.1.tar.gz (?)
+Version: 3.2
+License: MIT
+License File: NOT_SHIPPED
+Security Critical: no
+Description: Used during the W3C import, among other places
+Local Modifications: None
+
+Name: coverage - code coverage metrics for python
+Short Name: coverage
+URL: http://pypi.python.org/packages/source/c/coverage/coverage-3.5.1.tar.gz#md5=410d4c8155a4dab222f2bc51212d4a24
+Version: 3.5.1
+License: BSD
+License File: NOT_SHIPPED
+Security Critical: no
+Description: Code coverage metrics gathering for test-webkitpy.
+Local Modifications: None
+
+Name: ircbot
+Short Name: ircbot
+URL: http://downloads.sourceforge.net/project/python-irclib/python-irclib/0.4.8/python-irclib-0.4.8.zip
+Version: 0.4.8
+License: LGPLv2
+License File: NOT_SHIPPED
+Security Critical: no
+Description: Used to implement a simple IRC bot to check for updates
+Local Modifications: None
+
+Name: mod_pywebsocket
+Short Name: mod_pywebsocket
+URL: https://code.google.com/p/pywebsocket/
+Version: 0.7.9 (?)
+License: BSD
+License File: NOT_SHIPPED
+Security Critical: no
+Description: Used by run-webkit-tests and run-webkit-httpd to test web sockets
+Local Modifications: None
+
+Name: pep8 - A Python style guide checker
+Short Name: pep8
+URL: https://pypi.python.org/packages/source/p/pep8/pep8-1.5.7.tar.gz#md5=f6adbdd69365ecca20513c709f9b7c93
+Version: 1.5.7
+License: MIT
+License File: NOT_SHIPPED
+Security Critical: no
+Description: Used during presubmit checks and via lint-webkitpy and format-webkitpy. There is
+ overlap between pep8 and pylint, but pep8 catches a bunch of stylistic
+ issues that pylint doesn't (e.g., warning about blank lines, various whitespace issues, etc.).
+Local Modifications: None
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/__init__.py
@@ -0,0 +1 @@
+
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/autopep8.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/autopep8.py
new file mode 100644
index 0000000..c54ebea
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/autopep8.py
@@ -0,0 +1,3664 @@
+# Copyright (C) 2010-2011 Hideo Hattori
+# Copyright (C) 2011-2013 Hideo Hattori, Steven Myint
+# Copyright (C) 2013-2014 Hideo Hattori, Steven Myint, Bill Wendling
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""Automatically formats Python code to conform to the PEP 8 style guide.
+
+Fixes that only need be done once can be added by adding a function of the form
+"fix_<code>(source)" to this module. They should return the fixed source code.
+These fixes are picked up by apply_global_fixes().
+
+Fixes that depend on pep8 should be added as methods to FixPEP8. See the class
+documentation for more information.
+
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import bisect
+import codecs
+import collections
+import copy
+import difflib
+import fnmatch
+import inspect
+import io
+import itertools
+import keyword
+import locale
+import os
+import re
+import signal
+import sys
+import token
+import tokenize
+
+import pep8
+
+
+try:
+ unicode
+except NameError:
+ unicode = str
+
+
+__version__ = '1.0.3'
+
+
+CR = '\r'
+LF = '\n'
+CRLF = '\r\n'
+
+
+PYTHON_SHEBANG_REGEX = re.compile(r'^#!.*\bpython[23]?\b\s*$')
+
+
+# For generating line shortening candidates.
+SHORTEN_OPERATOR_GROUPS = frozenset([
+ frozenset([',']),
+ frozenset(['%']),
+ frozenset([',', '(', '[', '{']),
+ frozenset(['%', '(', '[', '{']),
+ frozenset([',', '(', '[', '{', '%', '+', '-', '*', '/', '//']),
+ frozenset(['%', '+', '-', '*', '/', '//']),
+])
+
+
+DEFAULT_IGNORE = 'E24'
+DEFAULT_INDENT_SIZE = 4
+
+
+# W602 is handled separately due to the need to avoid "with_traceback".
+CODE_TO_2TO3 = {
+ 'E721': ['idioms'],
+ 'W601': ['has_key'],
+ 'W603': ['ne'],
+ 'W604': ['repr'],
+ 'W690': ['apply',
+ 'except',
+ 'exitfunc',
+ 'import',
+ 'numliterals',
+ 'operator',
+ 'paren',
+ 'reduce',
+ 'renames',
+ 'standarderror',
+ 'sys_exc',
+ 'throw',
+ 'tuple_params',
+ 'xreadlines']}
+
+
+def open_with_encoding(filename, encoding=None, mode='r'):
+ """Return opened file with a specific encoding."""
+ if not encoding:
+ encoding = detect_encoding(filename)
+
+ return io.open(filename, mode=mode, encoding=encoding,
+ newline='') # Preserve line endings
+
+
+def detect_encoding(filename):
+ """Return file encoding."""
+ try:
+ with open(filename, 'rb') as input_file:
+ from lib2to3.pgen2 import tokenize as lib2to3_tokenize
+ encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0]
+
+ # Check for correctness of encoding
+ with open_with_encoding(filename, encoding) as test_file:
+ test_file.read()
+
+ return encoding
+ except (LookupError, SyntaxError, UnicodeDecodeError):
+ return 'latin-1'
+
+
+def readlines_from_file(filename):
+ """Return contents of file."""
+ with open_with_encoding(filename) as input_file:
+ return input_file.readlines()
+
+
+def extended_blank_lines(logical_line,
+ blank_lines,
+ indent_level,
+ previous_logical):
+ """Check for missing blank lines after class declaration."""
+ if previous_logical.startswith('class '):
+ if (
+ logical_line.startswith(('def ', 'class ', '@')) or
+ pep8.DOCSTRING_REGEX.match(logical_line)
+ ):
+ if indent_level and not blank_lines:
+ yield (0, 'E309 expected 1 blank line after class declaration')
+ elif previous_logical.startswith('def '):
+ if blank_lines and pep8.DOCSTRING_REGEX.match(logical_line):
+ yield (0, 'E303 too many blank lines ({0})'.format(blank_lines))
+ elif pep8.DOCSTRING_REGEX.match(previous_logical):
+ # Missing blank line between class docstring and method declaration.
+ if (
+ indent_level and
+ not blank_lines and
+ logical_line.startswith(('def ')) and
+ '(self' in logical_line
+ ):
+ yield (0, 'E301 expected 1 blank line, found 0')
+pep8.register_check(extended_blank_lines)
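+
+# Illustrative sketch, not part of the vendored source: the E309 check
+# registered above fires on, e.g.,
+#
+#   class Foo(object):
+#       def bar(self):    # E309 expected 1 blank line after class declaration
+#           pass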
+
+
+def continued_indentation(logical_line, tokens, indent_level, indent_char,
+ noqa):
+ """Override pep8's function to provide indentation information."""
+ first_row = tokens[0][2][0]
+ nrows = 1 + tokens[-1][2][0] - first_row
+ if noqa or nrows == 1:
+ return
+
+ # indent_next tells us whether the next block is indented. Assuming
+ # that it is indented by 4 spaces, then we should not allow 4-space
+ # indents on the final continuation line. In turn, some other
+ # indents are allowed to have an extra 4 spaces.
+ indent_next = logical_line.endswith(':')
+
+ row = depth = 0
+ valid_hangs = (
+ (DEFAULT_INDENT_SIZE,)
+ if indent_char != '\t' else (DEFAULT_INDENT_SIZE,
+ 2 * DEFAULT_INDENT_SIZE)
+ )
+
+ # Remember how many brackets were opened on each line.
+ parens = [0] * nrows
+
+ # Relative indents of physical lines.
+ rel_indent = [0] * nrows
+
+ # For each depth, collect a list of opening rows.
+ open_rows = [[0]]
+ # For each depth, memorize the hanging indentation.
+ hangs = [None]
+
+ # Visual indents.
+ indent_chances = {}
+ last_indent = tokens[0][2]
+ indent = [last_indent[1]]
+
+ last_token_multiline = None
+ line = None
+ last_line = ''
+ last_line_begins_with_multiline = False
+ for token_type, text, start, end, line in tokens:
+
+ newline = row < start[0] - first_row
+ if newline:
+ row = start[0] - first_row
+ newline = (not last_token_multiline and
+ token_type not in (tokenize.NL, tokenize.NEWLINE))
+ last_line_begins_with_multiline = last_token_multiline
+
+ if newline:
+ # This is the beginning of a continuation line.
+ last_indent = start
+
+ # Record the initial indent.
+ rel_indent[row] = pep8.expand_indent(line) - indent_level
+
+ # Identify closing bracket.
+ close_bracket = (token_type == tokenize.OP and text in ']})')
+
+ # Is the indent relative to an opening bracket line?
+ for open_row in reversed(open_rows[depth]):
+ hang = rel_indent[row] - rel_indent[open_row]
+ hanging_indent = hang in valid_hangs
+ if hanging_indent:
+ break
+ if hangs[depth]:
+ hanging_indent = (hang == hangs[depth])
+
+ visual_indent = (not close_bracket and hang > 0 and
+ indent_chances.get(start[1]))
+
+ if close_bracket and indent[depth]:
+ # Closing bracket for visual indent.
+ if start[1] != indent[depth]:
+ yield (start, 'E124 {0}'.format(indent[depth]))
+ elif close_bracket and not hang:
+ pass
+ elif indent[depth] and start[1] < indent[depth]:
+ # Visual indent is broken.
+ yield (start, 'E128 {0}'.format(indent[depth]))
+ elif (hanging_indent or
+ (indent_next and
+ rel_indent[row] == 2 * DEFAULT_INDENT_SIZE)):
+ # Hanging indent is verified.
+ if close_bracket:
+ yield (start, 'E123 {0}'.format(indent_level +
+ rel_indent[open_row]))
+ hangs[depth] = hang
+ elif visual_indent is True:
+ # Visual indent is verified.
+ indent[depth] = start[1]
+ elif visual_indent in (text, unicode):
+ # Ignore token lined up with matching one from a previous line.
+ pass
+ else:
+ one_indented = (indent_level + rel_indent[open_row] +
+ DEFAULT_INDENT_SIZE)
+ # Indent is broken.
+ if hang <= 0:
+ error = ('E122', one_indented)
+ elif indent[depth]:
+ error = ('E127', indent[depth])
+ elif hang > DEFAULT_INDENT_SIZE:
+ error = ('E126', one_indented)
+ else:
+ hangs[depth] = hang
+ error = ('E121', one_indented)
+
+ yield (start, '{0} {1}'.format(*error))
+
+ # Look for visual indenting.
+ if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT)
+ and not indent[depth]):
+ indent[depth] = start[1]
+ indent_chances[start[1]] = True
+ # Deal with implicit string concatenation.
+ elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
+ text in ('u', 'ur', 'b', 'br')):
+ indent_chances[start[1]] = unicode
+ # Special case for the "if" statement because len("if (") is equal to
+ # 4.
+ elif not indent_chances and not row and not depth and text == 'if':
+ indent_chances[end[1] + 1] = True
+ elif text == ':' and line[end[1]:].isspace():
+ open_rows[depth].append(row)
+
+ # Keep track of bracket depth.
+ if token_type == tokenize.OP:
+ if text in '([{':
+ depth += 1
+ indent.append(0)
+ hangs.append(None)
+ if len(open_rows) == depth:
+ open_rows.append([])
+ open_rows[depth].append(row)
+ parens[row] += 1
+ elif text in ')]}' and depth > 0:
+ # Parent indents should not be more than this one.
+ prev_indent = indent.pop() or last_indent[1]
+ hangs.pop()
+ for d in range(depth):
+ if indent[d] > prev_indent:
+ indent[d] = 0
+ for ind in list(indent_chances):
+ if ind >= prev_indent:
+ del indent_chances[ind]
+ del open_rows[depth + 1:]
+ depth -= 1
+ if depth:
+ indent_chances[indent[depth]] = True
+ for idx in range(row, -1, -1):
+ if parens[idx]:
+ parens[idx] -= 1
+ break
+ assert len(indent) == depth + 1
+ if (
+ start[1] not in indent_chances and
+ # This is for purposes of speeding up E121 (GitHub #90).
+ not last_line.rstrip().endswith(',')
+ ):
+ # Allow to line up tokens.
+ indent_chances[start[1]] = text
+
+ last_token_multiline = (start[0] != end[0])
+ if last_token_multiline:
+ rel_indent[end[0] - first_row] = rel_indent[row]
+
+ last_line = line
+
+ if (
+ indent_next and
+ not last_line_begins_with_multiline and
+ pep8.expand_indent(line) == indent_level + DEFAULT_INDENT_SIZE
+ ):
+ pos = (start[0], indent[0] + 4)
+ yield (pos, 'E125 {0}'.format(indent_level +
+ 2 * DEFAULT_INDENT_SIZE))
+del pep8._checks['logical_line'][pep8.continued_indentation]
+pep8.register_check(continued_indentation)
+
+
+class FixPEP8(object):
+
+ """Fix invalid code.
+
+ Fixer methods are prefixed "fix_". The _fix_source() method looks for these
+ automatically.
+
+ The fixer method can take either one or two arguments (in addition to
+ self). The first argument is "result", which is the error information from
+ pep8. The second argument, "logical", is required only for logical-line
+ fixes.
+
+ The fixer method can return the list of modified lines or None. An empty
+ list would mean that no changes were made. None would mean that only the
+ line reported in the pep8 error was modified. Note that the modified line
+ numbers that are returned are indexed at 1. This typically would correspond
+ with the line number reported in the pep8 error information.
+
+ [fixed method list]
+ - e121,e122,e123,e124,e125,e126,e127,e128,e129
+ - e201,e202,e203
+ - e211
+ - e221,e222,e223,e224,e225
+ - e231
+ - e251
+ - e261,e262
+ - e271,e272,e273,e274
+ - e301,e302,e303
+ - e401
+ - e502
+ - e701,e702
+ - e711
+ - w291
+
+ """
+
+ def __init__(self, filename,
+ options,
+ contents=None,
+ long_line_ignore_cache=None):
+ self.filename = filename
+ if contents is None:
+ self.source = readlines_from_file(filename)
+ else:
+ sio = io.StringIO(contents)
+ self.source = sio.readlines()
+ self.options = options
+ self.indent_word = _get_indentword(''.join(self.source))
+
+ self.long_line_ignore_cache = (
+ set() if long_line_ignore_cache is None
+ else long_line_ignore_cache)
+
+ # Many fixers are the same even though pep8 categorizes them
+ # differently.
+ self.fix_e115 = self.fix_e112
+ self.fix_e116 = self.fix_e113
+ self.fix_e121 = self._fix_reindent
+ self.fix_e122 = self._fix_reindent
+ self.fix_e123 = self._fix_reindent
+ self.fix_e124 = self._fix_reindent
+ self.fix_e126 = self._fix_reindent
+ self.fix_e127 = self._fix_reindent
+ self.fix_e128 = self._fix_reindent
+ self.fix_e129 = self._fix_reindent
+ self.fix_e202 = self.fix_e201
+ self.fix_e203 = self.fix_e201
+ self.fix_e211 = self.fix_e201
+ self.fix_e221 = self.fix_e271
+ self.fix_e222 = self.fix_e271
+ self.fix_e223 = self.fix_e271
+ self.fix_e226 = self.fix_e225
+ self.fix_e227 = self.fix_e225
+ self.fix_e228 = self.fix_e225
+ self.fix_e241 = self.fix_e271
+ self.fix_e242 = self.fix_e224
+ self.fix_e261 = self.fix_e262
+ self.fix_e272 = self.fix_e271
+ self.fix_e273 = self.fix_e271
+ self.fix_e274 = self.fix_e271
+ self.fix_e309 = self.fix_e301
+ self.fix_e501 = (
+ self.fix_long_line_logically if
+ options and (options.aggressive >= 2 or options.experimental) else
+ self.fix_long_line_physically)
+ self.fix_e703 = self.fix_e702
+
+ self._ws_comma_done = False
+
+ def _fix_source(self, results):
+ try:
+ (logical_start, logical_end) = _find_logical(self.source)
+ logical_support = True
+ except (SyntaxError, tokenize.TokenError): # pragma: no cover
+ logical_support = False
+
+ completed_lines = set()
+ for result in sorted(results, key=_priority_key):
+ if result['line'] in completed_lines:
+ continue
+
+ fixed_methodname = 'fix_' + result['id'].lower()
+ if hasattr(self, fixed_methodname):
+ fix = getattr(self, fixed_methodname)
+
+ line_index = result['line'] - 1
+ original_line = self.source[line_index]
+
+ is_logical_fix = len(inspect.getargspec(fix).args) > 2
+ if is_logical_fix:
+ logical = None
+ if logical_support:
+ logical = _get_logical(self.source,
+ result,
+ logical_start,
+ logical_end)
+ if logical and set(range(
+ logical[0][0] + 1,
+ logical[1][0] + 1)).intersection(
+ completed_lines):
+ continue
+
+ modified_lines = fix(result, logical)
+ else:
+ modified_lines = fix(result)
+
+ if modified_lines is None:
+ # Force logical fixes to report what they modified.
+ assert not is_logical_fix
+
+ if self.source[line_index] == original_line:
+ modified_lines = []
+
+ if modified_lines:
+ completed_lines.update(modified_lines)
+ elif modified_lines == []: # Empty list means no fix
+ if self.options.verbose >= 2:
+ print(
+ '---> Not fixing {f} on line {l}'.format(
+ f=result['id'], l=result['line']),
+ file=sys.stderr)
+ else: # We assume one-line fix when None.
+ completed_lines.add(result['line'])
+ else:
+ if self.options.verbose >= 3:
+ print(
+ "---> '{0}' is not defined.".format(fixed_methodname),
+ file=sys.stderr)
+
+ info = result['info'].strip()
+ print('---> {0}:{1}:{2}:{3}'.format(self.filename,
+ result['line'],
+ result['column'],
+ info),
+ file=sys.stderr)
+
+ def fix(self):
+ """Return a version of the source code with PEP 8 violations fixed."""
+ pep8_options = {
+ 'ignore': self.options.ignore,
+ 'select': self.options.select,
+ 'max_line_length': self.options.max_line_length,
+ }
+ results = _execute_pep8(pep8_options, self.source)
+
+ if self.options.verbose:
+ progress = {}
+ for r in results:
+ if r['id'] not in progress:
+ progress[r['id']] = set()
+ progress[r['id']].add(r['line'])
+ print('---> {n} issue(s) to fix {progress}'.format(
+ n=len(results), progress=progress), file=sys.stderr)
+
+ if self.options.line_range:
+ start, end = self.options.line_range
+ results = [r for r in results
+ if start <= r['line'] <= end]
+
+ self._fix_source(filter_results(source=''.join(self.source),
+ results=results,
+ aggressive=self.options.aggressive))
+
+ if self.options.line_range:
+ # If number of lines has changed then change line_range.
+ count = sum(sline.count('\n')
+ for sline in self.source[start - 1:end])
+ self.options.line_range[1] = start + count - 1
+
+ return ''.join(self.source)
+
+ def _fix_reindent(self, result):
+ """Fix a badly indented line.
+
+ This is done by adding or removing from its initial indent only.
+
+ """
+ num_indent_spaces = int(result['info'].split()[1])
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+
+ self.source[line_index] = ' ' * num_indent_spaces + target.lstrip()
+
+ def fix_e112(self, result):
+ """Fix under-indented comments."""
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+
+ if not target.lstrip().startswith('#'):
+ # Don't screw with invalid syntax.
+ return []
+
+ self.source[line_index] = self.indent_word + target
+
+ def fix_e113(self, result):
+ """Fix over-indented comments."""
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+
+ indent = _get_indentation(target)
+ stripped = target.lstrip()
+
+ if not stripped.startswith('#'):
+ # Don't screw with invalid syntax.
+ return []
+
+ self.source[line_index] = indent[1:] + stripped
+
+ def fix_e125(self, result):
+ """Fix indentation undistinguish from the next logical line."""
+ num_indent_spaces = int(result['info'].split()[1])
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+
+ spaces_to_add = num_indent_spaces - len(_get_indentation(target))
+ indent = len(_get_indentation(target))
+ modified_lines = []
+
+ while len(_get_indentation(self.source[line_index])) >= indent:
+ self.source[line_index] = (' ' * spaces_to_add +
+ self.source[line_index])
+ modified_lines.append(1 + line_index) # Line indexed at 1.
+ line_index -= 1
+
+ return modified_lines
+
+ def fix_e201(self, result):
+ """Remove extraneous whitespace."""
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+ offset = result['column'] - 1
+
+ if is_probably_part_of_multiline(target):
+ return []
+
+ fixed = fix_whitespace(target,
+ offset=offset,
+ replacement='')
+
+ self.source[line_index] = fixed
+
+ def fix_e224(self, result):
+ """Remove extraneous whitespace around operator."""
+ target = self.source[result['line'] - 1]
+ offset = result['column'] - 1
+ fixed = target[:offset] + target[offset:].replace('\t', ' ')
+ self.source[result['line'] - 1] = fixed
+
+ def fix_e225(self, result):
+ """Fix missing whitespace around operator."""
+ target = self.source[result['line'] - 1]
+ offset = result['column'] - 1
+ fixed = target[:offset] + ' ' + target[offset:]
+
+ # Only proceed if non-whitespace characters match.
+ # And make sure we don't break the indentation.
+ if (
+ fixed.replace(' ', '') == target.replace(' ', '') and
+ _get_indentation(fixed) == _get_indentation(target)
+ ):
+ self.source[result['line'] - 1] = fixed
+ else:
+ return []
+
+ def fix_e231(self, result):
+ """Add missing whitespace."""
+ # Optimize for comma case. This will fix all commas in the full source
+ # code in one pass. Don't do this more than once. If it fails the first
+ # time, there is no point in trying again.
+ if ',' in result['info'] and not self._ws_comma_done:
+ self._ws_comma_done = True
+ original = ''.join(self.source)
+ new = refactor(original, ['ws_comma'])
+ if original.strip() != new.strip():
+ self.source = [new]
+ return range(1, 1 + len(original))
+
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+ offset = result['column']
+ fixed = target[:offset] + ' ' + target[offset:]
+ self.source[line_index] = fixed
+
+ def fix_e251(self, result):
+ """Remove whitespace around parameter '=' sign."""
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+
+        # This is necessary since pep8 sometimes reports columns that go
+        # past the end of the physical line. This happens in cases like:
+        # foo(bar\n=None)
+ c = min(result['column'] - 1,
+ len(target) - 1)
+
+ if target[c].strip():
+ fixed = target
+ else:
+ fixed = target[:c].rstrip() + target[c:].lstrip()
+
+ # There could be an escaped newline
+ #
+ # def foo(a=\
+ # 1)
+ if fixed.endswith(('=\\\n', '=\\\r\n', '=\\\r')):
+ self.source[line_index] = fixed.rstrip('\n\r \t\\')
+ self.source[line_index + 1] = self.source[line_index + 1].lstrip()
+ return [line_index + 1, line_index + 2] # Line indexed at 1
+
+ self.source[result['line'] - 1] = fixed
+
+ def fix_e262(self, result):
+ """Fix spacing after comment hash."""
+ target = self.source[result['line'] - 1]
+ offset = result['column']
+
+ code = target[:offset].rstrip(' \t#')
+ comment = target[offset:].lstrip(' \t#')
+
+ fixed = code + (' # ' + comment if comment.strip() else '\n')
+
+ self.source[result['line'] - 1] = fixed
+
+ def fix_e271(self, result):
+ """Fix extraneous whitespace around keywords."""
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+ offset = result['column'] - 1
+
+ if is_probably_part_of_multiline(target):
+ return []
+
+ fixed = fix_whitespace(target,
+ offset=offset,
+ replacement=' ')
+
+ if fixed == target:
+ return []
+ else:
+ self.source[line_index] = fixed
+
+ def fix_e301(self, result):
+ """Add missing blank line."""
+ cr = '\n'
+ self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]
+
+ def fix_e302(self, result):
+ """Add missing 2 blank lines."""
+ add_linenum = 2 - int(result['info'].split()[-1])
+ cr = '\n' * add_linenum
+ self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]
+
+ def fix_e303(self, result):
+ """Remove extra blank lines."""
+ delete_linenum = int(result['info'].split('(')[1].split(')')[0]) - 2
+ delete_linenum = max(1, delete_linenum)
+
+ # We need to count because pep8 reports an offset line number if there
+ # are comments.
+ cnt = 0
+ line = result['line'] - 2
+ modified_lines = []
+ while cnt < delete_linenum and line >= 0:
+ if not self.source[line].strip():
+ self.source[line] = ''
+ modified_lines.append(1 + line) # Line indexed at 1
+ cnt += 1
+ line -= 1
+
+ return modified_lines
+
+ def fix_e304(self, result):
+ """Remove blank line following function decorator."""
+ line = result['line'] - 2
+ if not self.source[line].strip():
+ self.source[line] = ''
+
+ def fix_e401(self, result):
+ """Put imports on separate lines."""
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+ offset = result['column'] - 1
+
+ if not target.lstrip().startswith('import'):
+ return []
+
+ indentation = re.split(pattern=r'\bimport\b',
+ string=target, maxsplit=1)[0]
+ fixed = (target[:offset].rstrip('\t ,') + '\n' +
+ indentation + 'import ' + target[offset:].lstrip('\t ,'))
+ self.source[line_index] = fixed
+
+ def fix_long_line_logically(self, result, logical):
+ """Try to make lines fit within --max-line-length characters."""
+ if (
+ not logical or
+ len(logical[2]) == 1 or
+ self.source[result['line'] - 1].lstrip().startswith('#')
+ ):
+ return self.fix_long_line_physically(result)
+
+ start_line_index = logical[0][0]
+ end_line_index = logical[1][0]
+ logical_lines = logical[2]
+
+ previous_line = get_item(self.source, start_line_index - 1, default='')
+ next_line = get_item(self.source, end_line_index + 1, default='')
+
+ single_line = join_logical_line(''.join(logical_lines))
+
+ try:
+ fixed = self.fix_long_line(
+ target=single_line,
+ previous_line=previous_line,
+ next_line=next_line,
+ original=''.join(logical_lines))
+ except (SyntaxError, tokenize.TokenError):
+ return self.fix_long_line_physically(result)
+
+ if fixed:
+ for line_index in range(start_line_index, end_line_index + 1):
+ self.source[line_index] = ''
+ self.source[start_line_index] = fixed
+ return range(start_line_index + 1, end_line_index + 1)
+ else:
+ return []
+
+ def fix_long_line_physically(self, result):
+ """Try to make lines fit within --max-line-length characters."""
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+
+ previous_line = get_item(self.source, line_index - 1, default='')
+ next_line = get_item(self.source, line_index + 1, default='')
+
+ try:
+ fixed = self.fix_long_line(
+ target=target,
+ previous_line=previous_line,
+ next_line=next_line,
+ original=target)
+ except (SyntaxError, tokenize.TokenError):
+ return []
+
+ if fixed:
+ self.source[line_index] = fixed
+ return [line_index + 1]
+ else:
+ return []
+
+ def fix_long_line(self, target, previous_line,
+ next_line, original):
+ cache_entry = (target, previous_line, next_line)
+ if cache_entry in self.long_line_ignore_cache:
+ return []
+
+ if target.lstrip().startswith('#'):
+ # Wrap commented lines.
+ return shorten_comment(
+ line=target,
+ max_line_length=self.options.max_line_length,
+ last_comment=not next_line.lstrip().startswith('#'))
+
+ fixed = get_fixed_long_line(
+ target=target,
+ previous_line=previous_line,
+ original=original,
+ indent_word=self.indent_word,
+ max_line_length=self.options.max_line_length,
+ aggressive=self.options.aggressive,
+ experimental=self.options.experimental,
+ verbose=self.options.verbose)
+ if fixed and not code_almost_equal(original, fixed):
+ return fixed
+ else:
+ self.long_line_ignore_cache.add(cache_entry)
+ return None
+
+ def fix_e502(self, result):
+ """Remove extraneous escape of newline."""
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+ self.source[line_index] = target.rstrip('\n\r \t\\') + '\n'
+
+ def fix_e701(self, result):
+ """Put colon-separated compound statement on separate lines."""
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+ c = result['column']
+
+ fixed_source = (target[:c] + '\n' +
+ _get_indentation(target) + self.indent_word +
+ target[c:].lstrip('\n\r \t\\'))
+ self.source[result['line'] - 1] = fixed_source
+ return [result['line'], result['line'] + 1]
+
+ def fix_e702(self, result, logical):
+ """Put semicolon-separated compound statement on separate lines."""
+ if not logical:
+ return [] # pragma: no cover
+ logical_lines = logical[2]
+
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+
+ if target.rstrip().endswith('\\'):
+ # Normalize '1; \\\n2' into '1; 2'.
+ self.source[line_index] = target.rstrip('\n \r\t\\')
+ self.source[line_index + 1] = self.source[line_index + 1].lstrip()
+ return [line_index + 1, line_index + 2]
+
+ if target.rstrip().endswith(';'):
+ self.source[line_index] = target.rstrip('\n \r\t;') + '\n'
+ return [line_index + 1]
+
+ offset = result['column'] - 1
+ first = target[:offset].rstrip(';').rstrip()
+ second = (_get_indentation(logical_lines[0]) +
+ target[offset:].lstrip(';').lstrip())
+
+ self.source[line_index] = first + '\n' + second
+ return [line_index + 1]
+
+ def fix_e711(self, result):
+ """Fix comparison with None."""
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+ offset = result['column'] - 1
+
+ right_offset = offset + 2
+ if right_offset >= len(target):
+ return []
+
+ left = target[:offset].rstrip()
+ center = target[offset:right_offset]
+ right = target[right_offset:].lstrip()
+
+ if not right.startswith('None'):
+ return []
+
+ if center.strip() == '==':
+ new_center = 'is'
+ elif center.strip() == '!=':
+ new_center = 'is not'
+ else:
+ return []
+
+ self.source[line_index] = ' '.join([left, new_center, right])
+
+ def fix_e712(self, result):
+ """Fix comparison with boolean."""
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+ offset = result['column'] - 1
+
+ # Handle very easy "not" special cases.
+ if re.match(r'^\s*if \w+ == False:$', target):
+ self.source[line_index] = re.sub(r'if (\w+) == False:',
+ r'if not \1:', target, count=1)
+ elif re.match(r'^\s*if \w+ != True:$', target):
+ self.source[line_index] = re.sub(r'if (\w+) != True:',
+ r'if not \1:', target, count=1)
+ else:
+ right_offset = offset + 2
+ if right_offset >= len(target):
+ return []
+
+ left = target[:offset].rstrip()
+ center = target[offset:right_offset]
+ right = target[right_offset:].lstrip()
+
+ # Handle simple cases only.
+ new_right = None
+ if center.strip() == '==':
+ if re.match(r'\bTrue\b', right):
+ new_right = re.sub(r'\bTrue\b *', '', right, count=1)
+ elif center.strip() == '!=':
+ if re.match(r'\bFalse\b', right):
+ new_right = re.sub(r'\bFalse\b *', '', right, count=1)
+
+ if new_right is None:
+ return []
+
+ if new_right[0].isalnum():
+ new_right = ' ' + new_right
+
+ self.source[line_index] = left + new_right
+
+ def fix_e713(self, result):
+ """Fix non-membership check."""
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+
+ # Handle very easy case only.
+ if re.match(r'^\s*if not \w+ in \w+:$', target):
+ self.source[line_index] = re.sub(r'if not (\w+) in (\w+):',
+ r'if \1 not in \2:',
+ target,
+ count=1)
+
+ def fix_w291(self, result):
+ """Remove trailing whitespace."""
+ fixed_line = self.source[result['line'] - 1].rstrip()
+ self.source[result['line'] - 1] = fixed_line + '\n'
+
+
+def get_fixed_long_line(target, previous_line, original,
+ indent_word=' ', max_line_length=79,
+ aggressive=False, experimental=False, verbose=False):
+ """Break up long line and return result.
+
+ Do this by generating multiple reformatted candidates and then
+ ranking the candidates to heuristically select the best option.
+
+ """
+ indent = _get_indentation(target)
+ source = target[len(indent):]
+ assert source.lstrip() == source
+
+ # Check for partial multiline.
+ tokens = list(generate_tokens(source))
+
+ candidates = shorten_line(
+ tokens, source, indent,
+ indent_word,
+ max_line_length,
+ aggressive=aggressive,
+ experimental=experimental,
+ previous_line=previous_line)
+
+ # Also sort alphabetically as a tie breaker (for determinism).
+ candidates = sorted(
+ sorted(set(candidates).union([target, original])),
+ key=lambda x: line_shortening_rank(x,
+ indent_word,
+ max_line_length,
+ experimental))
+
+ if verbose >= 4:
+ print(('-' * 79 + '\n').join([''] + candidates + ['']),
+ file=codecs.getwriter('utf-8')(sys.stderr.buffer
+ if hasattr(sys.stderr,
+ 'buffer')
+ else sys.stderr))
+
+ if candidates:
+ return candidates[0]
+
+
+def join_logical_line(logical_line):
+ """Return single line based on logical line input."""
+ indentation = _get_indentation(logical_line)
+
+ return indentation + untokenize_without_newlines(
+ generate_tokens(logical_line.lstrip())) + '\n'
+
+
+def untokenize_without_newlines(tokens):
+ """Return source code based on tokens."""
+ text = ''
+ last_row = 0
+ last_column = -1
+
+ for t in tokens:
+ token_string = t[1]
+ (start_row, start_column) = t[2]
+ (end_row, end_column) = t[3]
+
+ if start_row > last_row:
+ last_column = 0
+ if (
+ (start_column > last_column or token_string == '\n') and
+ not text.endswith(' ')
+ ):
+ text += ' '
+
+ if token_string != '\n':
+ text += token_string
+
+ last_row = end_row
+ last_column = end_column
+
+ return text
+
+
+def _find_logical(source_lines):
+ # Make a variable which is the index of all the starts of lines.
+ logical_start = []
+ logical_end = []
+ last_newline = True
+ parens = 0
+ for t in generate_tokens(''.join(source_lines)):
+ if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
+ tokenize.INDENT, tokenize.NL,
+ tokenize.ENDMARKER]:
+ continue
+ if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
+ last_newline = True
+ logical_end.append((t[3][0] - 1, t[2][1]))
+ continue
+ if last_newline and not parens:
+ logical_start.append((t[2][0] - 1, t[2][1]))
+ last_newline = False
+ if t[0] == tokenize.OP:
+ if t[1] in '([{':
+ parens += 1
+ elif t[1] in '}])':
+ parens -= 1
+ return (logical_start, logical_end)
+
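+# Illustrative sketch, not part of the vendored source: for the two-line
+# source "x = (1 +\n      2)\n", _find_logical() above reports a single
+# logical line spanning both physical lines, because the open parenthesis
+# suppresses the NEWLINE token until the bracket closes.
+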
+
+def _get_logical(source_lines, result, logical_start, logical_end):
+ """Return the logical line corresponding to the result.
+
+ Assumes input is already E702-clean.
+
+ """
+ row = result['line'] - 1
+ col = result['column'] - 1
+ ls = None
+ le = None
+ for i in range(0, len(logical_start), 1):
+ assert logical_end
+ x = logical_end[i]
+ if x[0] > row or (x[0] == row and x[1] > col):
+ le = x
+ ls = logical_start[i]
+ break
+ if ls is None:
+ return None
+ original = source_lines[ls[0]:le[0] + 1]
+ return ls, le, original
+
+
+def get_item(items, index, default=None):
+ if 0 <= index < len(items):
+ return items[index]
+ else:
+ return default
+
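+# A hand-checked sketch: out-of-range indexes fall back to the default
+# instead of raising IndexError.
+#
+# >>> get_item([1, 2, 3], 1)
+# 2
+# >>> get_item([1, 2, 3], 5, default=0)
+# 0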
+
+def reindent(source, indent_size):
+ """Reindent all lines."""
+ reindenter = Reindenter(source)
+ return reindenter.run(indent_size)
+
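+# A hand-checked sketch: two-space indentation is rewritten to the
+# requested indent size.
+#
+# >>> reindent('if True:\n  x = 1\n', indent_size=4)
+# 'if True:\n    x = 1\n'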
+
+def code_almost_equal(a, b):
+ """Return True if code is similar.
+
+ Ignore whitespace when comparing individual lines.
+
+ """
+ split_a = split_and_strip_non_empty_lines(a)
+ split_b = split_and_strip_non_empty_lines(b)
+
+ if len(split_a) != len(split_b):
+ return False
+
+ for index in range(len(split_a)):
+ if ''.join(split_a[index].split()) != ''.join(split_b[index].split()):
+ return False
+
+ return True
+
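+# A hand-checked sketch: intra-line whitespace differences are ignored,
+# but the number of non-empty lines must match.
+#
+# >>> code_almost_equal('x = [1, 2]\n', 'x = [1,2]\n')
+# True
+# >>> code_almost_equal('x = 1\n', 'x = 1\ny = 2\n')
+# False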
+
+def split_and_strip_non_empty_lines(text):
+ """Return lines split by newline.
+
+ Ignore empty lines.
+
+ """
+ return [line.strip() for line in text.splitlines() if line.strip()]
+
+
+def fix_e265(source, aggressive=False): # pylint: disable=unused-argument
+ """Format block comments."""
+ if '#' not in source:
+ # Optimization.
+ return source
+
+ ignored_line_numbers = multiline_string_lines(
+ source,
+ include_docstrings=True) | set(commented_out_code_lines(source))
+
+ fixed_lines = []
+ sio = io.StringIO(source)
+ for (line_number, line) in enumerate(sio.readlines(), start=1):
+ if (
+ line.lstrip().startswith('#') and
+ line_number not in ignored_line_numbers
+ ):
+ indentation = _get_indentation(line)
+ line = line.lstrip()
+
+ # Normalize beginning if not a shebang.
+ if len(line) > 1:
+ if (
+ # Leave multiple spaces like '# ' alone.
+ (line.count('#') > 1 or line[1].isalnum())
+ # Leave stylistic outlined blocks alone.
+ and not line.rstrip().endswith('#')
+ ):
+ line = '# ' + line.lstrip('# \t')
+
+ fixed_lines.append(indentation + line)
+ else:
+ fixed_lines.append(line)
+
+ return ''.join(fixed_lines)
+
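+# A hand-checked sketch: block comments get a space after the '#'.
+#
+# >>> fix_e265('#comment\n')
+# '# comment\n'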
+
+def refactor(source, fixer_names, ignore=None):
+ """Return refactored code using lib2to3.
+
+ Skip the fix if the ignore string shows up in the refactored code
+ but not in the original.
+
+ """
+ from lib2to3 import pgen2
+ try:
+ new_text = refactor_with_2to3(source,
+ fixer_names=fixer_names)
+ except (pgen2.parse.ParseError,
+ SyntaxError,
+ UnicodeDecodeError,
+ UnicodeEncodeError):
+ return source
+
+ if ignore:
+ if ignore in new_text and ignore not in source:
+ return source
+
+ return new_text
+
+
+def code_to_2to3(select, ignore):
+ fixes = set()
+ for code, fix in CODE_TO_2TO3.items():
+ if code_match(code, select=select, ignore=ignore):
+ fixes |= set(fix)
+ return fixes
+
+
+def fix_2to3(source, aggressive=True, select=None, ignore=None):
+ """Fix various deprecated code (via lib2to3)."""
+ if not aggressive:
+ return source
+
+ select = select or []
+ ignore = ignore or []
+
+ return refactor(source,
+ code_to_2to3(select=select,
+ ignore=ignore))
+
+
+def fix_w602(source, aggressive=True):
+ """Fix deprecated form of raising exception."""
+ if not aggressive:
+ return source
+
+ return refactor(source, ['raise'],
+ ignore='with_traceback')
+
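+# A sketch of the expected lib2to3 'raise' fixer behavior (assuming
+# lib2to3 is available and parses the snippet):
+#
+# >>> fix_w602("raise ValueError, 'oops'\n")
+# "raise ValueError('oops')\n"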
+
+def find_newline(source):
+ """Return type of newline used in source.
+
+ Input is a list of lines.
+
+ """
+ assert not isinstance(source, unicode)
+
+ counter = collections.defaultdict(int)
+ for line in source:
+ if line.endswith(CRLF):
+ counter[CRLF] += 1
+ elif line.endswith(CR):
+ counter[CR] += 1
+ elif line.endswith(LF):
+ counter[LF] += 1
+
+ return (sorted(counter, key=counter.get, reverse=True) or [LF])[0]
+
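+# A hand-checked sketch: the most common line ending wins.
+#
+# >>> find_newline(['a\r\n', 'b\r\n', 'c\n'])
+# '\r\n'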
+
+def _get_indentword(source):
+ """Return indentation type."""
+ indent_word = ' ' # Default in case source has no indentation
+ try:
+ for t in generate_tokens(source):
+ if t[0] == token.INDENT:
+ indent_word = t[1]
+ break
+ except (SyntaxError, tokenize.TokenError):
+ pass
+ return indent_word
+
+
+def _get_indentation(line):
+ """Return leading whitespace."""
+ if line.strip():
+ non_whitespace_index = len(line) - len(line.lstrip())
+ return line[:non_whitespace_index]
+ else:
+ return ''
+
+
+def get_diff_text(old, new, filename):
+ """Return text of unified diff between old and new."""
+ newline = '\n'
+ diff = difflib.unified_diff(
+ old, new,
+ 'original/' + filename,
+ 'fixed/' + filename,
+ lineterm=newline)
+
+ text = ''
+ for line in diff:
+ text += line
+
+ # Work around missing newline (http://bugs.python.org/issue2142).
+ if text and not line.endswith(newline):
+ text += newline + r'\ No newline at end of file' + newline
+
+ return text
+
+
+def _priority_key(pep8_result):
+ """Key for sorting PEP8 results.
+
+ Global fixes should be done first. This is important for things like
+ indentation.
+
+ """
+ priority = [
+ # Fix multiline colon-based statements before semicolon-based ones.
+ 'e701',
+ # Break multiline statements early.
+ 'e702',
+ # Things that make lines longer.
+ 'e225', 'e231',
+ # Remove extraneous whitespace before breaking lines.
+ 'e201',
+ # Shorten whitespace in comment before resorting to wrapping.
+ 'e262'
+ ]
+ middle_index = 10000
+ lowest_priority = [
+ # We need to shorten lines last since the logical fixer can get in a
+ # loop, which causes us to exit early.
+ 'e501'
+ ]
+ key = pep8_result['id'].lower()
+ try:
+ return priority.index(key)
+ except ValueError:
+ try:
+ return middle_index + lowest_priority.index(key) + 1
+ except ValueError:
+ return middle_index
+
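+# A hand-checked sketch: listed fixes sort first, line shortening (E501)
+# sorts last, and everything else lands on middle_index.
+#
+# >>> _priority_key({'id': 'E701'})
+# 0
+# >>> _priority_key({'id': 'E999'})
+# 10000
+# >>> _priority_key({'id': 'E501'})
+# 10001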
+
+def shorten_line(tokens, source, indentation, indent_word, max_line_length,
+ aggressive=False, experimental=False, previous_line=''):
+ """Separate line at OPERATOR.
+
+ Multiple candidates will be yielded.
+
+ """
+ for candidate in _shorten_line(tokens=tokens,
+ source=source,
+ indentation=indentation,
+ indent_word=indent_word,
+ aggressive=aggressive,
+ previous_line=previous_line):
+ yield candidate
+
+ if aggressive:
+ for key_token_strings in SHORTEN_OPERATOR_GROUPS:
+ shortened = _shorten_line_at_tokens(
+ tokens=tokens,
+ source=source,
+ indentation=indentation,
+ indent_word=indent_word,
+ key_token_strings=key_token_strings,
+ aggressive=aggressive)
+
+ if shortened is not None and shortened != source:
+ yield shortened
+
+ if experimental:
+ for shortened in _shorten_line_at_tokens_new(
+ tokens=tokens,
+ source=source,
+ indentation=indentation,
+ max_line_length=max_line_length):
+
+ yield shortened
+
+
+def _shorten_line(tokens, source, indentation, indent_word,
+ aggressive=False, previous_line=''):
+ """Separate line at OPERATOR.
+
+ The input is expected to be free of newlines except for inside multiline
+ strings and at the end.
+
+ Multiple candidates will be yielded.
+
+ """
+ for (token_type,
+ token_string,
+ start_offset,
+ end_offset) in token_offsets(tokens):
+
+ if (
+ token_type == tokenize.COMMENT and
+ not is_probably_part_of_multiline(previous_line) and
+ not is_probably_part_of_multiline(source) and
+ not source[start_offset + 1:].strip().lower().startswith(
+ ('noqa', 'pragma:', 'pylint:'))
+ ):
+ # Move inline comments to previous line.
+ first = source[:start_offset]
+ second = source[start_offset:]
+ yield (indentation + second.strip() + '\n' +
+ indentation + first.strip() + '\n')
+ elif token_type == token.OP and token_string != '=':
+ # Don't break on '=' after keyword as this violates PEP 8.
+
+ assert token_type != token.INDENT
+
+ first = source[:end_offset]
+
+ second_indent = indentation
+ if first.rstrip().endswith('('):
+ second_indent += indent_word
+ elif '(' in first:
+ second_indent += ' ' * (1 + first.find('('))
+ else:
+ second_indent += indent_word
+
+ second = (second_indent + source[end_offset:].lstrip())
+ if (
+ not second.strip() or
+ second.lstrip().startswith('#')
+ ):
+ continue
+
+ # Do not begin a line with a comma
+ if second.lstrip().startswith(','):
+ continue
+ # Do not end a line with a dot.
+ if first.rstrip().endswith('.'):
+ continue
+ if token_string in '+-*/':
+ fixed = first + ' \\' + '\n' + second
+ else:
+ fixed = first + '\n' + second
+
+ # Only fix if syntax is okay.
+ if check_syntax(normalize_multiline(fixed)
+ if aggressive else fixed):
+ yield indentation + fixed
+
+
+# A convenient way to handle tokens.
+Token = collections.namedtuple('Token', ['token_type', 'token_string',
+ 'spos', 'epos', 'line'])
+
+
+class ReformattedLines(object):
+
+ """The reflowed lines of atoms.
+
+ Each part of the line is represented as an "atom." They can be moved
+ around when need be to get the optimal formatting.
+
+ """
+
+ ###########################################################################
+ # Private Classes
+
+ class _Indent(object):
+
+ """Represent an indentation in the atom stream."""
+
+ def __init__(self, indent_amt):
+ self._indent_amt = indent_amt
+
+ def emit(self):
+ return ' ' * self._indent_amt
+
+ @property
+ def size(self):
+ return self._indent_amt
+
+ class _Space(object):
+
+ """Represent a space in the atom stream."""
+
+ def emit(self):
+ return ' '
+
+ @property
+ def size(self):
+ return 1
+
+ class _LineBreak(object):
+
+ """Represent a line break in the atom stream."""
+
+ def emit(self):
+ return '\n'
+
+ @property
+ def size(self):
+ return 0
+
+ def __init__(self, max_line_length):
+ self._max_line_length = max_line_length
+ self._lines = []
+ self._bracket_depth = 0
+ self._prev_item = None
+ self._prev_prev_item = None
+
+ def __repr__(self):
+ return self.emit()
+
+ ###########################################################################
+ # Public Methods
+
+ def add(self, obj, indent_amt, break_after_open_bracket):
+ if isinstance(obj, Atom):
+ self._add_item(obj, indent_amt)
+ return
+
+ self._add_container(obj, indent_amt, break_after_open_bracket)
+
+ def add_comment(self, item):
+ num_spaces = 2
+ if len(self._lines) > 1:
+ if isinstance(self._lines[-1], self._Space):
+ num_spaces -= 1
+ if len(self._lines) > 2:
+ if isinstance(self._lines[-2], self._Space):
+ num_spaces -= 1
+
+ while num_spaces > 0:
+ self._lines.append(self._Space())
+ num_spaces -= 1
+ self._lines.append(item)
+
+ def add_indent(self, indent_amt):
+ self._lines.append(self._Indent(indent_amt))
+
+ def add_line_break(self, indent):
+ self._lines.append(self._LineBreak())
+ self.add_indent(len(indent))
+
+ def add_line_break_at(self, index, indent_amt):
+ self._lines.insert(index, self._LineBreak())
+ self._lines.insert(index + 1, self._Indent(indent_amt))
+
+ def add_space_if_needed(self, curr_text, equal=False):
+ if (
+ not self._lines or isinstance(
+ self._lines[-1], (self._LineBreak, self._Indent, self._Space))
+ ):
+ return
+
+ prev_text = unicode(self._prev_item)
+ prev_prev_text = (
+ unicode(self._prev_prev_item) if self._prev_prev_item else '')
+
+ if (
+ # The previous item was a keyword or identifier and the current
+ # item isn't an operator that doesn't require a space.
+ ((self._prev_item.is_keyword or self._prev_item.is_string or
+ self._prev_item.is_name or self._prev_item.is_number) and
+ (curr_text[0] not in '([{.,:}])' or
+ (curr_text[0] == '=' and equal))) or
+
+ # Don't place spaces around a '.', unless it's in an 'import'
+ # statement.
+ ((prev_prev_text != 'from' and prev_text[-1] != '.' and
+ curr_text != 'import') and
+
+ # Don't place a space before a colon.
+ curr_text[0] != ':' and
+
+ # Don't split up ending brackets by spaces.
+ ((prev_text[-1] in '}])' and curr_text[0] not in '.,}])') or
+
+ # Put a space after a colon or comma.
+ prev_text[-1] in ':,' or
+
+ # Put space around '=' if asked to.
+ (equal and prev_text == '=') or
+
+ # Put spaces around non-unary arithmetic operators.
+ ((self._prev_prev_item and
+ (prev_text not in '+-' and
+ (self._prev_prev_item.is_name or
+ self._prev_prev_item.is_number or
+ self._prev_prev_item.is_string)) and
+ prev_text in ('+', '-', '%', '*', '/', '//', '**')))))
+ ):
+ self._lines.append(self._Space())
+
+ def previous_item(self):
+ """Return the previous non-whitespace item."""
+ return self._prev_item
+
+ def fits_on_current_line(self, item_extent):
+ return self.current_size() + item_extent <= self._max_line_length
+
+ def current_size(self):
+ """The size of the current line minus the indentation."""
+ size = 0
+ for item in reversed(self._lines):
+ size += item.size
+ if isinstance(item, self._LineBreak):
+ break
+
+ return size
+
+ def line_empty(self):
+ return (self._lines and
+ isinstance(self._lines[-1],
+ (self._LineBreak, self._Indent)))
+
+ def emit(self):
+ string = ''
+ for item in self._lines:
+ if isinstance(item, self._LineBreak):
+ string = string.rstrip()
+ string += item.emit()
+
+ return string.rstrip() + '\n'
+
+ ###########################################################################
+ # Private Methods
+
+ def _add_item(self, item, indent_amt):
+ """Add an item to the line.
+
+ Reflow the line to get the best formatting after the item is
+ inserted. The bracket depth indicates if the item is being
+ inserted inside of a container or not.
+
+ """
+ if self._prev_item and self._prev_item.is_string and item.is_string:
+ # Place consecutive string literals on separate lines.
+ self._lines.append(self._LineBreak())
+ self._lines.append(self._Indent(indent_amt))
+
+ item_text = unicode(item)
+ if self._lines and self._bracket_depth:
+ # Adding the item into a container.
+ self._prevent_default_initializer_splitting(item, indent_amt)
+
+ if item_text in '.,)]}':
+ self._split_after_delimiter(item, indent_amt)
+
+ elif self._lines and not self.line_empty():
+ # Adding the item outside of a container.
+ if self.fits_on_current_line(len(item_text)):
+ self._enforce_space(item)
+
+ else:
+ # Line break for the new item.
+ self._lines.append(self._LineBreak())
+ self._lines.append(self._Indent(indent_amt))
+
+ self._lines.append(item)
+ self._prev_item, self._prev_prev_item = item, self._prev_item
+
+ if item_text in '([{':
+ self._bracket_depth += 1
+
+ elif item_text in '}])':
+ self._bracket_depth -= 1
+ assert self._bracket_depth >= 0
+
+ def _add_container(self, container, indent_amt, break_after_open_bracket):
+ actual_indent = indent_amt + 1
+
+ if (
+ unicode(self._prev_item) != '=' and
+ not self.line_empty() and
+ not self.fits_on_current_line(
+ container.size + self._bracket_depth + 2)
+ ):
+
+ if unicode(container)[0] == '(' and self._prev_item.is_name:
+ # Don't split before the opening bracket of a call.
+ break_after_open_bracket = True
+ actual_indent = indent_amt + 4
+ elif (
+ break_after_open_bracket or
+ unicode(self._prev_item) not in '([{'
+ ):
+ # If the container doesn't fit on the current line and the
+ # current line isn't empty, place the container on the next
+ # line.
+ self._lines.append(self._LineBreak())
+ self._lines.append(self._Indent(indent_amt))
+ break_after_open_bracket = False
+ else:
+ actual_indent = self.current_size() + 1
+ break_after_open_bracket = False
+
+ if isinstance(container, (ListComprehension, IfExpression)):
+ actual_indent = indent_amt
+
+ # Increase the continued indentation only if recursing on a
+ # container.
+ container.reflow(self, ' ' * actual_indent,
+ break_after_open_bracket=break_after_open_bracket)
+
+ def _prevent_default_initializer_splitting(self, item, indent_amt):
+ """Prevent splitting between a default initializer.
+
+ When there is a default initializer, it's best to keep it all on
+ the same line. It's nicer and more readable, even if it goes
+ over the maximum allowable line length. This goes back along the
+ current line to determine if we have a default initializer, and,
+ if so, to remove extraneous whitespace and add a line
+ break/indent before it if needed.
+
+ """
+ if unicode(item) == '=':
+ # This is the assignment in the initializer. Just remove spaces for
+ # now.
+ self._delete_whitespace()
+ return
+
+ if (not self._prev_item or not self._prev_prev_item or
+ unicode(self._prev_item) != '='):
+ return
+
+ self._delete_whitespace()
+ prev_prev_index = self._lines.index(self._prev_prev_item)
+
+ if (
+ isinstance(self._lines[prev_prev_index - 1], self._Indent) or
+ self.fits_on_current_line(item.size + 1)
+ ):
+ # The default initializer is already the only item on this line.
+ # Don't insert a newline here.
+ return
+
+ # Replace the space with a newline/indent combo.
+ if isinstance(self._lines[prev_prev_index - 1], self._Space):
+ del self._lines[prev_prev_index - 1]
+
+ self.add_line_break_at(self._lines.index(self._prev_prev_item),
+ indent_amt)
+
+ def _split_after_delimiter(self, item, indent_amt):
+ """Split the line only after a delimiter."""
+ self._delete_whitespace()
+
+ if self.fits_on_current_line(item.size):
+ return
+
+ last_space = None
+ for item in reversed(self._lines):
+ if (
+ last_space and
+ (not isinstance(item, Atom) or not item.is_colon)
+ ):
+ break
+ else:
+ last_space = None
+ if isinstance(item, self._Space):
+ last_space = item
+ if isinstance(item, (self._LineBreak, self._Indent)):
+ return
+
+ if not last_space:
+ return
+
+ self.add_line_break_at(self._lines.index(last_space), indent_amt)
+
+ def _enforce_space(self, item):
+ """Enforce a space in certain situations.
+
+ There are cases where we will want a space where normally we
+ wouldn't put one. This just enforces the addition of a space.
+
+ """
+ if isinstance(self._lines[-1],
+ (self._Space, self._LineBreak, self._Indent)):
+ return
+
+ if not self._prev_item:
+ return
+
+ item_text = unicode(item)
+ prev_text = unicode(self._prev_item)
+
+ # Prefer a space around a '.' in an import statement, and between the
+ # 'import' and '('.
+ if (
+ (item_text == '.' and prev_text == 'from') or
+ (item_text == 'import' and prev_text == '.') or
+ (item_text == '(' and prev_text == 'import')
+ ):
+ self._lines.append(self._Space())
+
+ def _delete_whitespace(self):
+ """Delete all whitespace from the end of the line."""
+ while isinstance(self._lines[-1], (self._Space, self._LineBreak,
+ self._Indent)):
+ del self._lines[-1]
+
+
+class Atom(object):
+
+ """The smallest unbreakable unit that can be reflowed."""
+
+ def __init__(self, atom):
+ self._atom = atom
+
+ def __repr__(self):
+ return self._atom.token_string
+
+ def __len__(self):
+ return self.size
+
+ def reflow(
+ self, reflowed_lines, continued_indent, extent,
+ break_after_open_bracket=False,
+ is_list_comp_or_if_expr=False,
+ next_is_dot=False
+ ):
+ if self._atom.token_type == tokenize.COMMENT:
+ reflowed_lines.add_comment(self)
+ return
+
+ total_size = extent if extent else self.size
+
+ if self._atom.token_string not in ',:([{}])':
+ # Some atoms will need an extra 1-sized space token after them.
+ total_size += 1
+
+ prev_item = reflowed_lines.previous_item()
+ if (
+ not is_list_comp_or_if_expr and
+ not reflowed_lines.fits_on_current_line(total_size) and
+ not (next_is_dot and
+ reflowed_lines.fits_on_current_line(self.size + 1)) and
+ not reflowed_lines.line_empty() and
+ not self.is_colon and
+ not (prev_item and prev_item.is_name and
+ unicode(self) == '(')
+ ):
+ # Start a new line if there is already something on the line and
+ # adding this atom would make it go over the max line length.
+ reflowed_lines.add_line_break(continued_indent)
+ else:
+ reflowed_lines.add_space_if_needed(unicode(self))
+
+ reflowed_lines.add(self, len(continued_indent),
+ break_after_open_bracket)
+
+ def emit(self):
+ return self.__repr__()
+
+ @property
+ def is_keyword(self):
+ return keyword.iskeyword(self._atom.token_string)
+
+ @property
+ def is_string(self):
+ return self._atom.token_type == tokenize.STRING
+
+ @property
+ def is_name(self):
+ return self._atom.token_type == tokenize.NAME
+
+ @property
+ def is_number(self):
+ return self._atom.token_type == tokenize.NUMBER
+
+ @property
+ def is_comma(self):
+ return self._atom.token_string == ','
+
+ @property
+ def is_colon(self):
+ return self._atom.token_string == ':'
+
+ @property
+ def size(self):
+ return len(self._atom.token_string)
+
+
+class Container(object):
+
+ """Base class for all container types."""
+
+ def __init__(self, items):
+ self._items = items
+
+ def __repr__(self):
+ string = ''
+ last_was_keyword = False
+
+ for item in self._items:
+ if item.is_comma:
+ string += ', '
+ elif item.is_colon:
+ string += ': '
+ else:
+ item_string = unicode(item)
+ if (
+ string and
+ (last_was_keyword or
+ (not string.endswith(tuple('([{,.:}]) ')) and
+ not item_string.startswith(tuple('([{,.:}])'))))
+ ):
+ string += ' '
+ string += item_string
+
+ last_was_keyword = item.is_keyword
+ return string
+
+ def __iter__(self):
+ for element in self._items:
+ yield element
+
+ def __getitem__(self, idx):
+ return self._items[idx]
+
+ def reflow(self, reflowed_lines, continued_indent,
+ break_after_open_bracket=False):
+ last_was_container = False
+ for (index, item) in enumerate(self._items):
+ next_item = get_item(self._items, index + 1)
+
+ if isinstance(item, Atom):
+ is_list_comp_or_if_expr = (
+ isinstance(self, (ListComprehension, IfExpression)))
+ item.reflow(reflowed_lines, continued_indent,
+ self._get_extent(index),
+ is_list_comp_or_if_expr=is_list_comp_or_if_expr,
+ next_is_dot=(next_item and
+ unicode(next_item) == '.'))
+ if last_was_container and item.is_comma:
+ reflowed_lines.add_line_break(continued_indent)
+ last_was_container = False
+ else: # isinstance(item, Container)
+ reflowed_lines.add(item, len(continued_indent),
+ break_after_open_bracket)
+ last_was_container = not isinstance(item, (ListComprehension,
+ IfExpression))
+
+ if (
+ break_after_open_bracket and index == 0 and
+ # Prefer to keep empty containers together instead of
+ # separating them.
+ unicode(item) == self.open_bracket and
+ (not next_item or unicode(next_item) != self.close_bracket) and
+ (len(self._items) != 3 or not isinstance(next_item, Atom))
+ ):
+ reflowed_lines.add_line_break(continued_indent)
+ break_after_open_bracket = False
+ else:
+ next_next_item = get_item(self._items, index + 2)
+ if (
+ unicode(item) not in ['.', '%', 'in'] and
+ next_item and not isinstance(next_item, Container) and
+ unicode(next_item) != ':' and
+ next_next_item and (not isinstance(next_next_item, Atom) or
+ unicode(next_item) == 'not') and
+ not reflowed_lines.line_empty() and
+ not reflowed_lines.fits_on_current_line(
+ self._get_extent(index + 1) + 2)
+ ):
+ reflowed_lines.add_line_break(continued_indent)
+
+ def _get_extent(self, index):
+ """The extent of the full element.
+
+ E.g., the length of a function call or keyword.
+
+ """
+ extent = 0
+ prev_item = get_item(self._items, index - 1)
+ seen_dot = prev_item and unicode(prev_item) == '.'
+ while index < len(self._items):
+ item = get_item(self._items, index)
+ index += 1
+
+ if isinstance(item, (ListComprehension, IfExpression)):
+ break
+
+ if isinstance(item, Container):
+ if prev_item and prev_item.is_name:
+ if seen_dot:
+ extent += 1
+ else:
+ extent += item.size
+
+ prev_item = item
+ continue
+ elif (unicode(item) not in ['.', '=', ':', 'not'] and
+ not item.is_name and not item.is_string):
+ break
+
+ if unicode(item) == '.':
+ seen_dot = True
+
+ extent += item.size
+ prev_item = item
+
+ return extent
+
+ @property
+ def is_string(self):
+ return False
+
+ @property
+ def size(self):
+ return len(self.__repr__())
+
+ @property
+ def is_keyword(self):
+ return False
+
+ @property
+ def is_name(self):
+ return False
+
+ @property
+ def is_comma(self):
+ return False
+
+ @property
+ def is_colon(self):
+ return False
+
+ @property
+ def open_bracket(self):
+ return None
+
+ @property
+ def close_bracket(self):
+ return None
+
+
+class Tuple(Container):
+
+ """A high-level representation of a tuple."""
+
+ @property
+ def open_bracket(self):
+ return '('
+
+ @property
+ def close_bracket(self):
+ return ')'
+
+
+class List(Container):
+
+ """A high-level representation of a list."""
+
+ @property
+ def open_bracket(self):
+ return '['
+
+ @property
+ def close_bracket(self):
+ return ']'
+
+
+class DictOrSet(Container):
+
+ """A high-level representation of a dictionary or set."""
+
+ @property
+ def open_bracket(self):
+ return '{'
+
+ @property
+ def close_bracket(self):
+ return '}'
+
+
+class ListComprehension(Container):
+
+ """A high-level representation of a list comprehension."""
+
+ @property
+ def size(self):
+ length = 0
+ for item in self._items:
+ if isinstance(item, IfExpression):
+ break
+ length += item.size
+ return length
+
+
+class IfExpression(Container):
+
+ """A high-level representation of an if-expression."""
+
+
+def _parse_container(tokens, index, for_or_if=None):
+ """Parse a high-level container, such as a list, tuple, etc."""
+
+ # Store the opening bracket.
+ items = [Atom(Token(*tokens[index]))]
+ index += 1
+
+ num_tokens = len(tokens)
+ while index < num_tokens:
+ tok = Token(*tokens[index])
+
+ if tok.token_string in ',)]}':
+ # First check if we're at the end of a list comprehension or
+ # if-expression. Don't add the ending token as part of the list
+ # comprehension or if-expression, because they aren't part of those
+ # constructs.
+ if for_or_if == 'for':
+ return (ListComprehension(items), index - 1)
+
+ elif for_or_if == 'if':
+ return (IfExpression(items), index - 1)
+
+ # We've reached the end of a container.
+ items.append(Atom(tok))
+
+ # Determine which kind of container the closing bracket ends.
+ if tok.token_string == ')':
+ # The end of a tuple.
+ return (Tuple(items), index)
+
+ elif tok.token_string == ']':
+ # The end of a list.
+ return (List(items), index)
+
+ elif tok.token_string == '}':
+ # The end of a dictionary or set.
+ return (DictOrSet(items), index)
+
+ elif tok.token_string in '([{':
+ # A sub-container is being defined.
+ (container, index) = _parse_container(tokens, index)
+ items.append(container)
+
+ elif tok.token_string == 'for':
+ (container, index) = _parse_container(tokens, index, 'for')
+ items.append(container)
+
+ elif tok.token_string == 'if':
+ (container, index) = _parse_container(tokens, index, 'if')
+ items.append(container)
+
+ else:
+ items.append(Atom(tok))
+
+ index += 1
+
+ return (None, None)
+
+
+def _parse_tokens(tokens):
+ """Parse the tokens.
+
+ This converts the tokens into a form where we can manipulate them
+ more easily.
+
+ """
+
+ index = 0
+ parsed_tokens = []
+
+ num_tokens = len(tokens)
+ while index < num_tokens:
+ tok = Token(*tokens[index])
+
+ assert tok.token_type != token.INDENT
+ if tok.token_type == tokenize.NEWLINE:
+ # There's only one newline and it's at the end.
+ break
+
+ if tok.token_string in '([{':
+ (container, index) = _parse_container(tokens, index)
+ if not container:
+ return None
+ parsed_tokens.append(container)
+ else:
+ parsed_tokens.append(Atom(tok))
+
+ index += 1
+
+ return parsed_tokens
+
+
+def _reflow_lines(parsed_tokens, indentation, max_line_length,
+ start_on_prefix_line):
+ """Reflow the lines so that it looks nice."""
+
+ if unicode(parsed_tokens[0]) == 'def':
+ # A function definition gets indented a bit more.
+ continued_indent = indentation + ' ' * 2 * DEFAULT_INDENT_SIZE
+ else:
+ continued_indent = indentation + ' ' * DEFAULT_INDENT_SIZE
+
+ break_after_open_bracket = not start_on_prefix_line
+
+ lines = ReformattedLines(max_line_length)
+ lines.add_indent(len(indentation.lstrip('\r\n')))
+
+ if not start_on_prefix_line:
+ # If splitting after the opening bracket will cause the first element
+ # to be aligned weirdly, don't try it.
+ first_token = get_item(parsed_tokens, 0)
+ second_token = get_item(parsed_tokens, 1)
+
+ if (
+ first_token and second_token and
+ unicode(second_token)[0] == '(' and
+ len(indentation) + len(first_token) + 1 == len(continued_indent)
+ ):
+ return None
+
+ for item in parsed_tokens:
+ lines.add_space_if_needed(unicode(item), equal=True)
+
+ save_continued_indent = continued_indent
+ if start_on_prefix_line and isinstance(item, Container):
+ start_on_prefix_line = False
+ continued_indent = ' ' * (lines.current_size() + 1)
+
+ item.reflow(lines, continued_indent, break_after_open_bracket)
+ continued_indent = save_continued_indent
+
+ return lines.emit()
+
+
+def _shorten_line_at_tokens_new(tokens, source, indentation,
+ max_line_length):
+ """Shorten the line taking its length into account.
+
+ The input is expected to be free of newlines except for inside
+ multiline strings and at the end.
+
+ """
+ # Yield the original source so we can see if it's a better choice than the
+ # shortened candidate lines we generate here.
+ yield indentation + source
+
+ parsed_tokens = _parse_tokens(tokens)
+
+ if parsed_tokens:
+ # Perform two reflows. The first one starts on the same line as the
+ # prefix. The second starts on the line after the prefix.
+ fixed = _reflow_lines(parsed_tokens, indentation, max_line_length,
+ start_on_prefix_line=True)
+ if fixed and check_syntax(normalize_multiline(fixed.lstrip())):
+ yield fixed
+
+ fixed = _reflow_lines(parsed_tokens, indentation, max_line_length,
+ start_on_prefix_line=False)
+ if fixed and check_syntax(normalize_multiline(fixed.lstrip())):
+ yield fixed
+
+
+def _shorten_line_at_tokens(tokens, source, indentation, indent_word,
+ key_token_strings, aggressive):
+ """Separate line by breaking at tokens in key_token_strings.
+
+ The input is expected to be free of newlines except for inside
+ multiline strings and at the end.
+
+ """
+ offsets = []
+ for (index, _t) in enumerate(token_offsets(tokens)):
+ (token_type,
+ token_string,
+ start_offset,
+ end_offset) = _t
+
+ assert token_type != token.INDENT
+
+ if token_string in key_token_strings:
+ # Do not break in containers with zero or one items.
+ unwanted_next_token = {
+ '(': ')',
+ '[': ']',
+ '{': '}'}.get(token_string)
+ if unwanted_next_token:
+ if (
+ get_item(tokens,
+ index + 1,
+ default=[None, None])[1] == unwanted_next_token or
+ get_item(tokens,
+ index + 2,
+ default=[None, None])[1] == unwanted_next_token
+ ):
+ continue
+
+ if (
+ index > 2 and token_string == '(' and
+ tokens[index - 1][1] in ',(%['
+ ):
+ # Don't split after a tuple start, or before a tuple start if
+ # the tuple is in a list.
+ continue
+
+ if end_offset < len(source) - 1:
+ # Don't split right before newline.
+ offsets.append(end_offset)
+ else:
+ # Break at adjacent strings. These were probably meant to be on
+ # separate lines in the first place.
+ previous_token = get_item(tokens, index - 1)
+ if (
+ token_type == tokenize.STRING and
+ previous_token and previous_token[0] == tokenize.STRING
+ ):
+ offsets.append(start_offset)
+
+ current_indent = None
+ fixed = None
+ for line in split_at_offsets(source, offsets):
+ if fixed:
+ fixed += '\n' + current_indent + line
+
+ for symbol in '([{':
+ if line.endswith(symbol):
+ current_indent += indent_word
+ else:
+ # First line.
+ fixed = line
+ assert not current_indent
+ current_indent = indent_word
+
+ assert fixed is not None
+
+ if check_syntax(normalize_multiline(fixed)
+ if aggressive > 1 else fixed):
+ return indentation + fixed
+ else:
+ return None
+
+
+def token_offsets(tokens):
+ """Yield tokens and offsets."""
+ end_offset = 0
+ previous_end_row = 0
+ previous_end_column = 0
+ for t in tokens:
+ token_type = t[0]
+ token_string = t[1]
+ (start_row, start_column) = t[2]
+ (end_row, end_column) = t[3]
+
+ # Account for the whitespace between tokens.
+ end_offset += start_column
+ if previous_end_row == start_row:
+ end_offset -= previous_end_column
+
+ # Record the start offset of the token.
+ start_offset = end_offset
+
+ # Account for the length of the token itself.
+ end_offset += len(token_string)
+
+ yield (token_type,
+ token_string,
+ start_offset,
+ end_offset)
+
+ previous_end_row = end_row
+ previous_end_column = end_column
+
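+# A hand-traced sketch: the yielded offsets are plain string indexes into
+# the source line, with inter-token whitespace accounted for.
+#
+# >>> [(s, start, end) for (_, s, start, end)
+# ...  in token_offsets(generate_tokens('x = 1\n'))][:3]
+# [('x', 0, 1), ('=', 2, 3), ('1', 4, 5)]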
+
+def normalize_multiline(line):
+ """Normalize multiline-related code that will cause syntax error.
+
+ This is for purposes of checking syntax.
+
+ """
+ if line.startswith('def ') and line.rstrip().endswith(':'):
+ return line + ' pass'
+ elif line.startswith('return '):
+ return 'def _(): ' + line
+ elif line.startswith('@'):
+ return line + 'def _(): pass'
+ elif line.startswith('class '):
+ return line + ' pass'
+ elif line.startswith('if '):
+ return line + ' pass'
+ else:
+ return line
+
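+# A hand-checked sketch: fragments are padded into complete statements so
+# that check_syntax() can compile them.
+#
+# >>> normalize_multiline('def foo():')
+# 'def foo(): pass'
+# >>> normalize_multiline('return x')
+# 'def _(): return x'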
+
+def fix_whitespace(line, offset, replacement):
+ """Replace whitespace at offset and return fixed line."""
+ # Replace escaped newlines too
+ left = line[:offset].rstrip('\n\r \t\\')
+ right = line[offset:].lstrip('\n\r \t\\')
+ if right.startswith('#'):
+ return line
+ else:
+ return left + replacement + right
+
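+# A hand-checked sketch: the whitespace run around the offset is replaced,
+# unless the text to the right is a comment.
+#
+# >>> fix_whitespace('x\t= 1', offset=1, replacement=' ')
+# 'x = 1'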
+
+def _execute_pep8(pep8_options, source):
+ """Execute pep8 via python method calls."""
+ class QuietReport(pep8.BaseReport):
+
+ """Version of checker that does not print."""
+
+ def __init__(self, options):
+ super(QuietReport, self).__init__(options)
+ self.__full_error_results = []
+
+ def error(self, line_number, offset, text, _):
+ """Collect errors."""
+ code = super(QuietReport, self).error(line_number, offset, text, _)
+ if code:
+ self.__full_error_results.append(
+ {'id': code,
+ 'line': line_number,
+ 'column': offset + 1,
+ 'info': text})
+
+ def full_error_results(self):
+ """Return error results in detail.
+
+ Results are in the form of a list of dictionaries. Each
+ dictionary contains 'id', 'line', 'column', and 'info'.
+
+ """
+ return self.__full_error_results
+
+ checker = pep8.Checker('', lines=source,
+ reporter=QuietReport, **pep8_options)
+ checker.check_all()
+ return checker.report.full_error_results()
+
+
+def _remove_leading_and_normalize(line):
+ return line.lstrip().rstrip(CR + LF) + '\n'
+
+
+class Reindenter(object):
+
+ """Reindents badly-indented code to uniformly use four-space indentation.
+
+ Released to the public domain, by Tim Peters, 03 October 2000.
+
+ """
+
+ def __init__(self, input_text):
+ sio = io.StringIO(input_text)
+ source_lines = sio.readlines()
+
+ self.string_content_line_numbers = multiline_string_lines(input_text)
+
+ # File lines, rstripped & tab-expanded. Dummy at start is so
+ # that we can use tokenize's 1-based line numbering easily.
+ # Note that a line is all-blank iff it is a newline.
+ self.lines = []
+ for line_number, line in enumerate(source_lines, start=1):
+ # Do not modify if inside a multiline string.
+ if line_number in self.string_content_line_numbers:
+ self.lines.append(line)
+ else:
+ # Only expand leading tabs.
+ self.lines.append(_get_indentation(line).expandtabs() +
+ _remove_leading_and_normalize(line))
+
+ self.lines.insert(0, None)
+ self.index = 1 # index into self.lines of next line
+ self.input_text = input_text
+
+ def run(self, indent_size=DEFAULT_INDENT_SIZE):
+ """Fix indentation and return modified line numbers.
+
+ Line numbers are indexed at 1.
+
+ """
+ if indent_size < 1:
+ return self.input_text
+
+ try:
+ stats = _reindent_stats(tokenize.generate_tokens(self.getline))
+ except (SyntaxError, tokenize.TokenError):
+ return self.input_text
+ # Remove trailing empty lines.
+ lines = self.lines
+ while lines and lines[-1] == '\n':
+ lines.pop()
+ # Sentinel.
+ stats.append((len(lines), 0))
+ # Map each observed count of leading spaces to the count we want.
+ have2want = {}
+ # Program after transformation.
+ after = []
+ # Copy over initial empty lines -- there's nothing to do until
+ # we see a line with *something* on it.
+ i = stats[0][0]
+ after.extend(lines[1:i])
+ for i in range(len(stats) - 1):
+ thisstmt, thislevel = stats[i]
+ nextstmt = stats[i + 1][0]
+ have = _leading_space_count(lines[thisstmt])
+ want = thislevel * indent_size
+ if want < 0:
+ # A comment line.
+ if have:
+ # An indented comment line. If we saw the same
+ # indentation before, reuse what it most recently
+ # mapped to.
+ want = have2want.get(have, -1)
+ if want < 0:
+ # Then it probably belongs to the next real stmt.
+ for j in range(i + 1, len(stats) - 1):
+ jline, jlevel = stats[j]
+ if jlevel >= 0:
+ if have == _leading_space_count(lines[jline]):
+ want = jlevel * indent_size
+ break
+ if want < 0: # Maybe it's a hanging
+ # comment like this one,
+ # in which case we should shift it like its base
+ # line got shifted.
+ for j in range(i - 1, -1, -1):
+ jline, jlevel = stats[j]
+ if jlevel >= 0:
+ want = (have + _leading_space_count(
+ after[jline - 1]) -
+ _leading_space_count(lines[jline]))
+ break
+ if want < 0:
+ # Still no luck -- leave it alone.
+ want = have
+ else:
+ want = 0
+ assert want >= 0
+ have2want[have] = want
+ diff = want - have
+ if diff == 0 or have == 0:
+ after.extend(lines[thisstmt:nextstmt])
+ else:
+ for line_number, line in enumerate(lines[thisstmt:nextstmt],
+ start=thisstmt):
+ if line_number in self.string_content_line_numbers:
+ after.append(line)
+ elif diff > 0:
+ if line == '\n':
+ after.append(line)
+ else:
+ after.append(' ' * diff + line)
+ else:
+ remove = min(_leading_space_count(line), -diff)
+ after.append(line[remove:])
+
+ return ''.join(after)
+
+ def getline(self):
+ """Line-getter for tokenize."""
+ if self.index >= len(self.lines):
+ line = ''
+ else:
+ line = self.lines[self.index]
+ self.index += 1
+ return line
+
+
+def _reindent_stats(tokens):
+ """Return list of (lineno, indentlevel) pairs.
+
+ One for each stmt and comment line. indentlevel is -1 for comment lines, as
+ a signal that tokenize doesn't know what to do about them; indeed, they're
+ our headache!
+
+ """
+ find_stmt = 1 # Next token begins a fresh stmt?
+ level = 0 # Current indent level.
+ stats = []
+
+ for t in tokens:
+ token_type = t[0]
+ sline = t[2][0]
+ line = t[4]
+
+ if token_type == tokenize.NEWLINE:
+ # A program statement, or ENDMARKER, will eventually follow,
+ # after some (possibly empty) run of tokens of the form
+ # (NL | COMMENT)* (INDENT | DEDENT+)?
+ find_stmt = 1
+
+ elif token_type == tokenize.INDENT:
+ find_stmt = 1
+ level += 1
+
+ elif token_type == tokenize.DEDENT:
+ find_stmt = 1
+ level -= 1
+
+ elif token_type == tokenize.COMMENT:
+ if find_stmt:
+ stats.append((sline, -1))
+ # But we're still looking for a new stmt, so leave
+ # find_stmt alone.
+
+ elif token_type == tokenize.NL:
+ pass
+
+ elif find_stmt:
+ # This is the first "real token" following a NEWLINE, so it
+ # must be the first token of the next program statement, or an
+ # ENDMARKER.
+ find_stmt = 0
+ if line: # Not endmarker.
+ stats.append((sline, level))
+
+ return stats
+
+
+def _leading_space_count(line):
+ """Return number of leading spaces in line."""
+ i = 0
+ while i < len(line) and line[i] == ' ':
+ i += 1
+ return i
+
+
+def refactor_with_2to3(source_text, fixer_names):
+ """Use lib2to3 to refactor the source.
+
+ Return the refactored source code.
+
+ """
+ from lib2to3.refactor import RefactoringTool
+ fixers = ['lib2to3.fixes.fix_' + name for name in fixer_names]
+ tool = RefactoringTool(fixer_names=fixers, explicit=fixers)
+
+ from lib2to3.pgen2 import tokenize as lib2to3_tokenize
+ try:
+ return unicode(tool.refactor_string(source_text, name=''))
+ except lib2to3_tokenize.TokenError:
+ return source_text
+
+
+def check_syntax(code):
+ """Return True if syntax is okay."""
+ try:
+ return compile(code, '<string>', 'exec')
+ except (SyntaxError, TypeError, UnicodeDecodeError):
+ return False
+
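+# A hand-checked sketch: returns a truthy code object on success and
+# False on failure, so it reads naturally in boolean context.
+#
+# >>> bool(check_syntax('x = 1\n'))
+# True
+# >>> check_syntax('x ===\n')
+# False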
+
+def filter_results(source, results, aggressive):
+ """Filter out spurious reports from pep8.
+
+ If aggressive is True, we allow possibly unsafe fixes (E711, E712).
+
+ """
+ non_docstring_string_line_numbers = multiline_string_lines(
+ source, include_docstrings=False)
+ all_string_line_numbers = multiline_string_lines(
+ source, include_docstrings=True)
+
+ commented_out_code_line_numbers = commented_out_code_lines(source)
+
+ for r in results:
+ issue_id = r['id'].lower()
+
+ if r['line'] in non_docstring_string_line_numbers:
+ if issue_id.startswith(('e1', 'e501', 'w191')):
+ continue
+
+ if r['line'] in all_string_line_numbers:
+ if issue_id in ['e501']:
+ continue
+
+ # We must offset by 1 for lines that contain the trailing contents of
+ # multiline strings.
+ if not aggressive and (r['line'] + 1) in all_string_line_numbers:
+ # Do not modify multiline strings in non-aggressive mode. Removing
+ # trailing whitespace could break doctests.
+ if issue_id.startswith(('w29', 'w39')):
+ continue
+
+ if aggressive <= 0:
+ if issue_id.startswith(('e711', 'w6')):
+ continue
+
+ if aggressive <= 1:
+ if issue_id.startswith(('e712', 'e713')):
+ continue
+
+ if r['line'] in commented_out_code_line_numbers:
+ if issue_id.startswith(('e26', 'e501')):
+ continue
+
+ yield r
+
+
+def multiline_string_lines(source, include_docstrings=False):
+ """Return line numbers that are within multiline strings.
+
+ The line numbers are indexed at 1.
+
+ Docstrings are ignored unless include_docstrings is True.
+
+ """
+ line_numbers = set()
+ previous_token_type = ''
+ try:
+ for t in generate_tokens(source):
+ token_type = t[0]
+ start_row = t[2][0]
+ end_row = t[3][0]
+
+ if token_type == tokenize.STRING and start_row != end_row:
+ if (
+ include_docstrings or
+ previous_token_type != tokenize.INDENT
+ ):
+ # We increment by one since we want the contents of the
+ # string.
+ line_numbers |= set(range(1 + start_row, 1 + end_row))
+
+ previous_token_type = token_type
+ except (SyntaxError, tokenize.TokenError):
+ pass
+
+ return line_numbers
+
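+# A hand-traced sketch: the reported numbers are the 1-indexed
+# continuation lines of the string (the line where it opens is excluded).
+#
+# >>> sorted(multiline_string_lines('x = """a\nb\nc"""\n'))
+# [2, 3]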
+
+def commented_out_code_lines(source):
+ """Return line numbers of comments that are likely code.
+
+ Commented-out code is bad practice, but modifying it just adds even more
+ clutter.
+
+ """
+ line_numbers = []
+ try:
+ for t in generate_tokens(source):
+ token_type = t[0]
+ token_string = t[1]
+ start_row = t[2][0]
+ line = t[4]
+
+ # Ignore inline comments.
+ if not line.lstrip().startswith('#'):
+ continue
+
+ if token_type == tokenize.COMMENT:
+ stripped_line = token_string.lstrip('#').strip()
+ if (
+ ' ' in stripped_line and
+ '#' not in stripped_line and
+ check_syntax(stripped_line)
+ ):
+ line_numbers.append(start_row)
+ except (SyntaxError, tokenize.TokenError):
+ pass
+
+ return line_numbers
+
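+# A hand-checked sketch: a comment counts as "likely code" when its body
+# compiles on its own.
+#
+# >>> commented_out_code_lines('# x = 1\n# hello world\n')
+# [1]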
+
+def shorten_comment(line, max_line_length, last_comment=False):
+ """Return trimmed or split long comment line.
+
+ If there are no comments immediately following it, do a text wrap.
+ Doing this wrapping on all comments in general would lead to jagged
+ comment text.
+
+ """
+ assert len(line) > max_line_length
+ line = line.rstrip()
+
+ # PEP 8 recommends 72 characters for comment text.
+ indentation = _get_indentation(line) + '# '
+ max_line_length = min(max_line_length,
+ len(indentation) + 72)
+
+ MIN_CHARACTER_REPEAT = 5
+ if (
+ len(line) - len(line.rstrip(line[-1])) >= MIN_CHARACTER_REPEAT and
+ not line[-1].isalnum()
+ ):
+ # Trim comments that end with things like ---------
+ return line[:max_line_length] + '\n'
+ elif last_comment and re.match(r'\s*#+\s*\w+', line):
+ import textwrap
+ split_lines = textwrap.wrap(line.lstrip(' \t#'),
+ initial_indent=indentation,
+ subsequent_indent=indentation,
+ width=max_line_length,
+ break_long_words=False,
+ break_on_hyphens=False)
+ return '\n'.join(split_lines) + '\n'
+ else:
+ return line + '\n'
+
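+# A hand-checked sketch: banner-style comments (a run of five or more
+# trailing non-alphanumeric characters) are trimmed rather than wrapped;
+# here the cap is min(79, len('# ') + 72) == 74 characters.
+#
+# >>> shorten_comment('#' + '-' * 100, max_line_length=79) == '#' + '-' * 73 + '\n'
+# True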
+
+def normalize_line_endings(lines, newline):
+ """Return fixed line endings.
+
+ All lines will be modified to use the most common line ending.
+
+ """
+ return [line.rstrip('\n\r') + newline for line in lines]
+
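+# A hand-checked sketch:
+#
+# >>> normalize_line_endings(['a\r\n', 'b\n'], '\r\n')
+# ['a\r\n', 'b\r\n']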
+
+def mutual_startswith(a, b):
+ return b.startswith(a) or a.startswith(b)
+
+
+def code_match(code, select, ignore):
+ if ignore:
+ assert not isinstance(ignore, unicode)
+ for ignored_code in [c.strip() for c in ignore]:
+ if mutual_startswith(code.lower(), ignored_code.lower()):
+ return False
+
+ if select:
+ assert not isinstance(select, unicode)
+ for selected_code in [c.strip() for c in select]:
+ if mutual_startswith(code.lower(), selected_code.lower()):
+ return True
+ return False
+
+ return True
+
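+# A hand-checked sketch: ignore is checked first (and wins), and matching
+# is by mutual prefix in either direction.
+#
+# >>> code_match('E501', select=['E5'], ignore=[])
+# True
+# >>> code_match('E501', select=['E5'], ignore=['E501'])
+# False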
+
+def fix_code(source, options=None):
+ """Return fixed source code."""
+ if not options:
+ options = parse_args([''])
+
+ if not isinstance(source, unicode):
+ source = source.decode(locale.getpreferredencoding())
+
+ sio = io.StringIO(source)
+ return fix_lines(sio.readlines(), options=options)
+
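+# A sketch of the library-level entry point; with default options this
+# should fix, for example, E225 (missing whitespace around operator):
+#
+# >>> fix_code('x=1\n')
+# 'x = 1\n'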
+
+def fix_lines(source_lines, options, filename=''):
+ """Return fixed source code."""
+ # Transform everything to line feeds, then change them back to the
+ # original newline before returning the fixed source code.
+ original_newline = find_newline(source_lines)
+ tmp_source = ''.join(normalize_line_endings(source_lines, '\n'))
+
+ # Keep a history to break out of cycles.
+ previous_hashes = set()
+
+ if options.line_range:
+ fixed_source = apply_local_fixes(tmp_source, options)
+ else:
+ # Apply global fixes only once (for efficiency).
+ fixed_source = apply_global_fixes(tmp_source, options)
+
+ passes = 0
+ long_line_ignore_cache = set()
+ while hash(fixed_source) not in previous_hashes:
+ if options.pep8_passes >= 0 and passes > options.pep8_passes:
+ break
+ passes += 1
+
+ previous_hashes.add(hash(fixed_source))
+
+ tmp_source = copy.copy(fixed_source)
+
+ fix = FixPEP8(
+ filename,
+ options,
+ contents=tmp_source,
+ long_line_ignore_cache=long_line_ignore_cache)
+
+ fixed_source = fix.fix()
+
+ sio = io.StringIO(fixed_source)
+ return ''.join(normalize_line_endings(sio.readlines(), original_newline))
+
+
+def fix_file(filename, options=None, output=None):
+ if not options:
+ options = parse_args([filename])
+
+ original_source = readlines_from_file(filename)
+
+ fixed_source = original_source
+
+ if options.in_place or output:
+ encoding = detect_encoding(filename)
+
+ if output:
+ output = codecs.getwriter(encoding)(output.buffer
+ if hasattr(output, 'buffer')
+ else output)
+
+ output = LineEndingWrapper(output)
+
+ fixed_source = fix_lines(fixed_source, options, filename=filename)
+
+ if options.diff:
+ new = io.StringIO(fixed_source)
+ new = new.readlines()
+ diff = get_diff_text(original_source, new, filename)
+ if output:
+ output.write(diff)
+ output.flush()
+ else:
+ return diff
+ elif options.in_place:
+ fp = open_with_encoding(filename, encoding=encoding,
+ mode='w')
+ fp.write(fixed_source)
+ fp.close()
+ else:
+ if output:
+ output.write(fixed_source)
+ output.flush()
+ else:
+ return fixed_source
+
+
+def global_fixes():
+ """Yield multiple (code, function) tuples."""
+ for function in globals().values():
+ if inspect.isfunction(function):
+ arguments = inspect.getargspec(function)[0]
+ if arguments[:1] != ['source']:
+ continue
+
+ code = extract_code_from_function(function)
+ if code:
+ yield (code, function)
+
+
+def apply_global_fixes(source, options, where='global'):
+ """Run global fixes on source code.
+
+ These are fixes that only need to be done once (unlike those in
+ FixPEP8, which are dependent on pep8).
+
+ """
+ if code_match('E101', select=options.select, ignore=options.ignore):
+ source = reindent(source,
+ indent_size=options.indent_size)
+
+ for (code, function) in global_fixes():
+ if code_match(code, select=options.select, ignore=options.ignore):
+ if options.verbose:
+ print('---> Applying {0} fix for {1}'.format(where,
+ code.upper()),
+ file=sys.stderr)
+ source = function(source,
+ aggressive=options.aggressive)
+
+ source = fix_2to3(source,
+ aggressive=options.aggressive,
+ select=options.select,
+ ignore=options.ignore)
+
+ return source
+
+
+def apply_local_fixes(source, options):
+ """Ananologus to apply_global_fixes, but runs only those which makes sense
+ for the given line_range.
+
+ Do as much as we can without breaking code.
+
+ """
+ def find_ge(a, x):
+ """Find leftmost item greater than or equal to x."""
+ i = bisect.bisect_left(a, x)
+ if i != len(a):
+ return i, a[i]
+ return len(a) - 1, a[-1]
+
+ def find_le(a, x):
+ """Find rightmost value less than or equal to x."""
+ i = bisect.bisect_right(a, x)
+ if i:
+ return i - 1, a[i - 1]
+ return 0, a[0]
+
+ def local_fix(source, start_log, end_log,
+ start_lines, end_lines, indents, last_line):
+ """apply_global_fixes to the source between start_log and end_log.
+
+ The subsource must be the correct syntax of a complete python program
+ (but all lines may share an indentation). The subsource's shared indent
+ is removed, fixes are applied and the indent prepended back. Taking
+ care to not reindent strings.
+
+ last_line is the strict cut off (options.line_range[1]), so that
+ lines after last_line are not modified.
+
+ """
+ if end_log < start_log:
+ return source
+
+ ind = indents[start_log]
+ indent = _get_indentation(source[start_lines[start_log]])
+
+ sl = slice(start_lines[start_log], end_lines[end_log] + 1)
+
+ subsource = source[sl]
+ # Remove indent from subsource.
+ if ind:
+ for line_no in start_lines[start_log:end_log + 1]:
+ pos = line_no - start_lines[start_log]
+ subsource[pos] = subsource[pos][ind:]
+
+ # Apply the global fixes to the subsource.
+ fixed_subsource = apply_global_fixes(''.join(subsource),
+ options,
+ where='local')
+ fixed_subsource = fixed_subsource.splitlines(True)
+
+ # Add the indent back for lines not inside multiline strings.
+ msl = multiline_string_lines(''.join(fixed_subsource),
+ include_docstrings=False)
+ for i, line in enumerate(fixed_subsource):
+ if i + 1 not in msl:
+ fixed_subsource[i] = indent + line if line != '\n' else line
+
+ # Special-case the final line: if it is a multiline statement *and*
+ # the cut-off falls somewhere inside it, take the fixed subset only
+ # up until last_line. This assumes that the number of lines in the
+ # multiline statement does not change.
+ changed_lines = len(fixed_subsource)
+ if (start_lines[end_log] != end_lines[end_log]
+ and end_lines[end_log] > last_line):
+ after_end = end_lines[end_log] - last_line
+ fixed_subsource = (fixed_subsource[:-after_end] +
+ source[sl][-after_end:])
+ changed_lines -= after_end
+
+ options.line_range[1] = (options.line_range[0] +
+ changed_lines - 1)
+
+ return (source[:start_lines[start_log]] +
+ fixed_subsource +
+ source[end_lines[end_log] + 1:])
+
+ def is_continued_stmt(line,
+ continued_stmts=frozenset(['else', 'elif',
+ 'finally', 'except'])):
+ return re.split('[ :]', line.strip(), 1)[0] in continued_stmts
+
+ assert options.line_range
+ start, end = options.line_range
+ start -= 1
+ end -= 1
+ last_line = end # We shouldn't modify lines after this cut-off.
+
+ try:
+ logical = _find_logical(source)
+ except (SyntaxError, tokenize.TokenError):
+ return ''.join(source)
+
+ if not logical[0]:
+ # Just blank lines, which should imply the result will become '\n'.
+ return apply_global_fixes(source, options)
+
+ start_lines, indents = zip(*logical[0])
+ end_lines, _ = zip(*logical[1])
+
+ source = source.splitlines(True)
+
+ start_log, start = find_ge(start_lines, start)
+ end_log, end = find_le(start_lines, end)
+
+ # Look back one line; if it is indented less than the current
+ # indent, then we can move to that previous line, knowing that its
+ # indentation level will not be changed.
+ if (start_log > 0
+ and indents[start_log - 1] < indents[start_log]
+ and not is_continued_stmt(source[start_log - 1])):
+ start_log -= 1
+ start = start_lines[start_log]
+
+ while start < end:
+
+ if is_continued_stmt(source[start]):
+ start_log += 1
+ start = start_lines[start_log]
+ continue
+
+ ind = indents[start_log]
+ for t in itertools.takewhile(lambda t: t[1][1] >= ind,
+ enumerate(logical[0][start_log:])):
+ n_log, n = start_log + t[0], t[1][0]
+ # start shares indent up to n.
+
+ if n <= end:
+ source = local_fix(source, start_log, n_log,
+ start_lines, end_lines,
+ indents, last_line)
+ start_log = n_log if n == end else n_log + 1
+ start = start_lines[start_log]
+ continue
+
+ else:
+ # Look at the line after end and see if it allows us to reindent.
+ after_end_log, after_end = find_ge(start_lines, end + 1)
+
+ if indents[after_end_log] > indents[start_log]:
+ start_log, start = find_ge(start_lines, start + 1)
+ continue
+
+ if (indents[after_end_log] == indents[start_log]
+ and is_continued_stmt(source[after_end])):
+ # Find n, the beginning of the last continued statement, and
+ # apply the fix to the previous block if there is one.
+ only_block = True
+ for n, n_ind in logical[0][start_log:end_log + 1][::-1]:
+ if n_ind == ind and not is_continued_stmt(source[n]):
+ n_log = start_lines.index(n)
+ source = local_fix(source, start_log, n_log - 1,
+ start_lines, end_lines,
+ indents, last_line)
+ start_log = n_log + 1
+ start = start_lines[start_log]
+ only_block = False
+ break
+ if only_block:
+ end_log, end = find_le(start_lines, end - 1)
+ continue
+
+ source = local_fix(source, start_log, end_log,
+ start_lines, end_lines,
+ indents, last_line)
+ break
+
+ return ''.join(source)
+
+
+def extract_code_from_function(function):
+ """Return code handled by function."""
+ if not function.__name__.startswith('fix_'):
+ return None
+
+ code = re.sub('^fix_', '', function.__name__)
+ if not code:
+ return None
+
+ try:
+ int(code[1:])
+ except ValueError:
+ return None
+
+ return code
+
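+# A hand-checked sketch: only functions named like fix_<letter><digits>
+# map to a code; everything else yields None.
+#
+# >>> extract_code_from_function(fix_w602)
+# 'w602'
+# >>> extract_code_from_function(get_item) is None
+# True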
+
+def create_parser():
+ """Return command-line parser."""
+ # Do import locally to be friendly to those who use autopep8 as a library
+ # and are supporting Python 2.6.
+ import argparse
+
+ parser = argparse.ArgumentParser(description=docstring_summary(__doc__),
+ prog='autopep8')
+ parser.add_argument('--version', action='version',
+ version='%(prog)s ' + __version__)
+ parser.add_argument('-v', '--verbose', action='count', dest='verbose',
+ default=0,
+ help='print verbose messages; '
+ 'multiple -v result in more verbose messages')
+ parser.add_argument('-d', '--diff', action='store_true', dest='diff',
+ help='print the diff for the fixed source')
+ parser.add_argument('-i', '--in-place', action='store_true',
+ help='make changes to files in place')
+ parser.add_argument('-r', '--recursive', action='store_true',
+ help='run recursively over directories; '
+ 'must be used with --in-place or --diff')
+ parser.add_argument('-j', '--jobs', type=int, metavar='n', default=1,
+ help='number of parallel jobs; '
+ 'match CPU count if value is less than 1')
+ parser.add_argument('-p', '--pep8-passes', metavar='n',
+ default=-1, type=int,
+ help='maximum number of additional pep8 passes '
+ '(default: infinite)')
+ parser.add_argument('-a', '--aggressive', action='count', default=0,
+ help='enable non-whitespace changes; '
+ 'multiple -a result in more aggressive changes')
+ parser.add_argument('--experimental', action='store_true',
+ help='enable experimental fixes')
+ parser.add_argument('--exclude', metavar='globs',
+ help='exclude file/directory names that match these '
+ 'comma-separated globs')
+ parser.add_argument('--list-fixes', action='store_true',
+ help='list codes for fixes; '
+ 'used by --ignore and --select')
+ parser.add_argument('--ignore', metavar='errors', default='',
+ help='do not fix these errors/warnings '
+ '(default: {0})'.format(DEFAULT_IGNORE))
+ parser.add_argument('--select', metavar='errors', default='',
+ help='fix only these errors/warnings (e.g. E4,W)')
+ parser.add_argument('--max-line-length', metavar='n', default=79, type=int,
+ help='set maximum allowed line length '
+ '(default: %(default)s)')
+ parser.add_argument('--range', metavar='line', dest='line_range',
+ default=None, type=int, nargs=2,
+ help='only fix errors found within this inclusive '
+ 'range of line numbers (e.g. 1 99); '
+ 'line numbers are indexed at 1')
+ parser.add_argument('--indent-size', default=DEFAULT_INDENT_SIZE,
+ type=int, metavar='n',
+ help='number of spaces per indent level '
+ '(default %(default)s)')
+ parser.add_argument('files', nargs='*',
+ help="files to format or '-' for standard in")
+
+ return parser
+
+
+def parse_args(arguments):
+ """Parse command-line options."""
+ parser = create_parser()
+ args = parser.parse_args(arguments)
+
+ if not args.files and not args.list_fixes:
+ parser.error('incorrect number of arguments')
+
+ args.files = [decode_filename(name) for name in args.files]
+
+ if '-' in args.files:
+ if len(args.files) > 1:
+ parser.error('cannot mix stdin and regular files')
+
+ if args.diff:
+ parser.error('--diff cannot be used with standard input')
+
+ if args.in_place:
+ parser.error('--in-place cannot be used with standard input')
+
+ if args.recursive:
+ parser.error('--recursive cannot be used with standard input')
+
+ if len(args.files) > 1 and not (args.in_place or args.diff):
+ parser.error('autopep8 only takes one filename as argument '
+ 'unless the "--in-place" or "--diff" args are '
+ 'used')
+
+ if args.recursive and not (args.in_place or args.diff):
+ parser.error('--recursive must be used with --in-place or --diff')
+
+ if args.exclude and not args.recursive:
+ parser.error('--exclude is only relevant when used with --recursive')
+
+ if args.in_place and args.diff:
+ parser.error('--in-place and --diff are mutually exclusive')
+
+ if args.max_line_length <= 0:
+ parser.error('--max-line-length must be greater than 0')
+
+ if args.select:
+ args.select = args.select.split(',')
+
+ if args.ignore:
+ args.ignore = args.ignore.split(',')
+ elif not args.select:
+ if args.aggressive:
+ # Enable everything by default if aggressive.
+ args.select = ['E', 'W']
+ else:
+ args.ignore = DEFAULT_IGNORE.split(',')
+
+ if args.exclude:
+ args.exclude = args.exclude.split(',')
+ else:
+ args.exclude = []
+
+ if args.jobs < 1:
+ # Do not import multiprocessing globally in case it is not supported
+ # on the platform.
+ import multiprocessing
+ args.jobs = multiprocessing.cpu_count()
+
+ if args.jobs > 1 and not args.in_place:
+ parser.error('parallel jobs requires --in-place')
+
+ if args.line_range:
+ if args.line_range[0] <= 0:
+ parser.error('--range must be positive numbers')
+ if args.line_range[0] > args.line_range[1]:
+ parser.error('First value of --range should be less than or equal '
+ 'to the second')
+
+ return args
+
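+# A minimal illustration of parse_args (editor's sketch, not part of
+# autopep8; 'example.py' is a hypothetical filename):
+#
+#     args = parse_args(['--in-place', '--aggressive', 'example.py'])
+#     assert args.in_place and args.aggressive == 1
+#     assert args.max_line_length == 79  # the default from create_parser()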
+
+def decode_filename(filename):
+ """Return Unicode filename."""
+ if isinstance(filename, unicode):
+ return filename
+ else:
+ return filename.decode(sys.getfilesystemencoding())
+
+
+def supported_fixes():
+ """Yield pep8 error codes that autopep8 fixes.
+
+ Each item we yield is a tuple of the code followed by its
+ description.
+
+ """
+ yield ('E101', docstring_summary(reindent.__doc__))
+
+ instance = FixPEP8(filename=None, options=None, contents='')
+ for attribute in dir(instance):
+ code = re.match('fix_([ew][0-9][0-9][0-9])', attribute)
+ if code:
+ yield (
+ code.group(1).upper(),
+ re.sub(r'\s+', ' ',
+ docstring_summary(getattr(instance, attribute).__doc__))
+ )
+
+ for (code, function) in sorted(global_fixes()):
+ yield (code.upper() + (4 - len(code)) * ' ',
+ re.sub(r'\s+', ' ', docstring_summary(function.__doc__)))
+
+ for code in sorted(CODE_TO_2TO3):
+ yield (code.upper() + (4 - len(code)) * ' ',
+ re.sub(r'\s+', ' ', docstring_summary(fix_2to3.__doc__)))
+
+
+def docstring_summary(docstring):
+ """Return summary of docstring."""
+ return docstring.split('\n')[0]
+
+
+def line_shortening_rank(candidate, indent_word, max_line_length,
+ experimental=False):
+ """Return rank of candidate.
+
+ This is for sorting candidates.
+
+ """
+ if not candidate.strip():
+ return 0
+
+ rank = 0
+ lines = candidate.split('\n')
+
+ offset = 0
+ if (
+ not lines[0].lstrip().startswith('#') and
+ lines[0].rstrip()[-1] not in '([{'
+ ):
+ for (opening, closing) in ('()', '[]', '{}'):
+            # Don't penalize empty containers that aren't split up; things
+            # like "foo(\n )" aren't particularly good.
+ opening_loc = lines[0].find(opening)
+ closing_loc = lines[0].find(closing)
+ if opening_loc >= 0:
+ if closing_loc < 0 or closing_loc != opening_loc + 1:
+ offset = max(offset, 1 + opening_loc)
+
+ current_longest = max(offset + len(x.strip()) for x in lines)
+
+ rank += 4 * max(0, current_longest - max_line_length)
+
+ rank += len(lines)
+
+ # Too much variation in line length is ugly.
+ rank += 2 * standard_deviation(len(line) for line in lines)
+
+    bad_starting_symbol = {
+        '(': ')',
+        '[': ']',
+        '{': '}'}.get(lines[0][-1])
+
+ if len(lines) > 1:
+ if (
+            bad_starting_symbol and
+            lines[1].lstrip().startswith(bad_starting_symbol)
+ ):
+ rank += 20
+
+ for lineno, current_line in enumerate(lines):
+ current_line = current_line.strip()
+
+ if current_line.startswith('#'):
+ continue
+
+ for bad_start in ['.', '%', '+', '-', '/']:
+ if current_line.startswith(bad_start):
+ rank += 100
+
+ # Do not tolerate operators on their own line.
+ if current_line == bad_start:
+ rank += 1000
+
+ if current_line.endswith(('(', '[', '{', '.')):
+            # Avoid lonely openings. They result in longer lines.
+ if len(current_line) <= len(indent_word):
+ rank += 100
+
+ # Avoid the ugliness of ", (\n".
+ if (
+ current_line.endswith('(') and
+ current_line[:-1].rstrip().endswith(',')
+ ):
+ rank += 100
+
+ # Also avoid the ugliness of "foo.\nbar"
+ if current_line.endswith('.'):
+ rank += 100
+
+ if has_arithmetic_operator(current_line):
+ rank += 100
+
+ if current_line.endswith(('%', '(', '[', '{')):
+ rank -= 20
+
+ # Try to break list comprehensions at the "for".
+ if current_line.startswith('for '):
+ rank -= 50
+
+ if current_line.endswith('\\'):
+ # If a line ends in \-newline, it may be part of a
+ # multiline string. In that case, we would like to know
+ # how long that line is without the \-newline. If it's
+ # longer than the maximum, or has comments, then we assume
+ # that the \-newline is an okay candidate and only
+ # penalize it a bit.
+ total_len = len(current_line)
+ lineno += 1
+ while lineno < len(lines):
+ total_len += len(lines[lineno])
+
+ if lines[lineno].lstrip().startswith('#'):
+ total_len = max_line_length
+ break
+
+ if not lines[lineno].endswith('\\'):
+ break
+
+ lineno += 1
+
+ if total_len < max_line_length:
+ rank += 10
+ else:
+ rank += 100 if experimental else 1
+
+ # Prefer breaking at commas rather than colon.
+ if ',' in current_line and current_line.endswith(':'):
+ rank += 10
+
+ rank += 10 * count_unbalanced_brackets(current_line)
+
+ return max(0, rank)
+
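+# Lower rank means a better reflow candidate, so callers can choose the
+# best layout with min(candidates, key=rank). A rough illustration
+# (editor's sketch, not part of autopep8):
+#
+#     candidates = ['foo(bar,\n    baz)', 'foo(\n    bar, baz)']
+#     best = min(candidates,
+#                key=lambda c: line_shortening_rank(c, '    ', 79))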
+
+def standard_deviation(numbers):
+ """Return standard devation."""
+ numbers = list(numbers)
+ if not numbers:
+ return 0
+ mean = sum(numbers) / len(numbers)
+ return (sum((n - mean) ** 2 for n in numbers) /
+ len(numbers)) ** .5
+
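+# For instance (editor's note), this computes the population standard
+# deviation:
+#
+#     standard_deviation([2, 4, 4, 4, 5, 5, 7, 9])  # -> 2.0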
+
+def has_arithmetic_operator(line):
+ """Return True if line contains any arithmetic operators."""
+ for operator in pep8.ARITHMETIC_OP:
+ if operator in line:
+ return True
+
+ return False
+
+
+def count_unbalanced_brackets(line):
+ """Return number of unmatched open/close brackets."""
+ count = 0
+ for opening, closing in ['()', '[]', '{}']:
+ count += abs(line.count(opening) - line.count(closing))
+
+ return count
+
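+# For instance (editor's note): count_unbalanced_brackets('foo(bar[0]')
+# returns 1, since the parenthesis is unmatched while the brackets pair up.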
+
+def split_at_offsets(line, offsets):
+ """Split line at offsets.
+
+ Return list of strings.
+
+ """
+ result = []
+
+ previous_offset = 0
+ current_offset = 0
+ for current_offset in sorted(offsets):
+ if current_offset < len(line) and previous_offset != current_offset:
+ result.append(line[previous_offset:current_offset].strip())
+ previous_offset = current_offset
+
+ result.append(line[current_offset:])
+
+ return result
+
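+# For instance (editor's note):
+#
+#     split_at_offsets('abcdef', [2, 4])  # -> ['ab', 'cd', 'ef']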
+
+class LineEndingWrapper(object):
+
+ r"""Replace line endings to work with sys.stdout.
+
+ It seems that sys.stdout expects only '\n' as the line ending, no matter
+ the platform. Otherwise, we get repeated line endings.
+
+ """
+
+ def __init__(self, output):
+ self.__output = output
+
+ def write(self, s):
+ self.__output.write(s.replace('\r\n', '\n').replace('\r', '\n'))
+
+ def flush(self):
+ self.__output.flush()
+
+
+def match_file(filename, exclude):
+ """Return True if file is okay for modifying/recursing."""
+ base_name = os.path.basename(filename)
+
+ if base_name.startswith('.'):
+ return False
+
+ for pattern in exclude:
+ if fnmatch.fnmatch(base_name, pattern):
+ return False
+
+ if not os.path.isdir(filename) and not is_python_file(filename):
+ return False
+
+ return True
+
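+# For instance (editor's note): match_file('.hidden.py', []) is False, and
+# exclude patterns are matched against the base name with fnmatch, so a
+# pattern like 'test_*' excludes 'test_foo.py' anywhere in the tree.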
+
+def find_files(filenames, recursive, exclude):
+ """Yield filenames."""
+ while filenames:
+ name = filenames.pop(0)
+ if recursive and os.path.isdir(name):
+ for root, directories, children in os.walk(name):
+ filenames += [os.path.join(root, f) for f in children
+ if match_file(os.path.join(root, f),
+ exclude)]
+ directories[:] = [d for d in directories
+ if match_file(os.path.join(root, d),
+ exclude)]
+ else:
+ yield name
+
+
+def _fix_file(parameters):
+ """Helper function for optionally running fix_file() in parallel."""
+ if parameters[1].verbose:
+ print('[file:{0}]'.format(parameters[0]), file=sys.stderr)
+ try:
+ fix_file(*parameters)
+ except IOError as error:
+ print(unicode(error), file=sys.stderr)
+
+
+def fix_multiple_files(filenames, options, output=None):
+ """Fix list of files.
+
+ Optionally fix files recursively.
+
+ """
+ filenames = find_files(filenames, options.recursive, options.exclude)
+ if options.jobs > 1:
+ import multiprocessing
+ pool = multiprocessing.Pool(options.jobs)
+ pool.map(_fix_file,
+ [(name, options) for name in filenames])
+ else:
+ for name in filenames:
+ _fix_file((name, options, output))
+
+
+def is_python_file(filename):
+ """Return True if filename is Python file."""
+ if filename.endswith('.py'):
+ return True
+
+ try:
+ with open_with_encoding(filename) as f:
+ first_line = f.readlines(1)[0]
+ except (IOError, IndexError):
+ return False
+
+ if not PYTHON_SHEBANG_REGEX.match(first_line):
+ return False
+
+ return True
+
+
+def is_probably_part_of_multiline(line):
+ """Return True if line is likely part of a multiline string.
+
+ When multiline strings are involved, pep8 reports the error as being
+ at the start of the multiline string, which doesn't work for us.
+
+ """
+ return (
+ '"""' in line or
+ "'''" in line or
+ line.rstrip().endswith('\\')
+ )
+
+
+def main():
+ """Tool main."""
+ try:
+ # Exit on broken pipe.
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+ except AttributeError: # pragma: no cover
+ # SIGPIPE is not available on Windows.
+ pass
+
+ try:
+ args = parse_args(sys.argv[1:])
+
+ if args.list_fixes:
+ for code, description in sorted(supported_fixes()):
+ print('{code} - {description}'.format(
+ code=code, description=description))
+ return 0
+
+ if args.files == ['-']:
+ assert not args.in_place
+
+ # LineEndingWrapper is unnecessary here due to the symmetry between
+ # standard in and standard out.
+ sys.stdout.write(fix_code(sys.stdin.read(), args))
+ else:
+ if args.in_place or args.diff:
+ args.files = list(set(args.files))
+ else:
+ assert len(args.files) == 1
+ assert not args.recursive
+
+ fix_multiple_files(args.files, args, sys.stdout)
+ except KeyboardInterrupt:
+ return 1 # pragma: no cover
+
+
+class CachedTokenizer(object):
+
+ """A one-element cache around tokenize.generate_tokens().
+
+ Original code written by Ned Batchelder, in coverage.py.
+
+ """
+
+ def __init__(self):
+ self.last_text = None
+ self.last_tokens = None
+
+ def generate_tokens(self, text):
+ """A stand-in for tokenize.generate_tokens()."""
+ if text != self.last_text:
+ string_io = io.StringIO(text)
+ self.last_tokens = list(
+ tokenize.generate_tokens(string_io.readline)
+ )
+ self.last_text = text
+ return self.last_tokens
+
+_cached_tokenizer = CachedTokenizer()
+generate_tokens = _cached_tokenizer.generate_tokens
+
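+# The module-level generate_tokens() above is a drop-in replacement for
+# tokenize.generate_tokens() that re-serves the previous token list when
+# asked for identical text (editor's sketch):
+#
+#     first = generate_tokens(u'x = 1\n')
+#     second = generate_tokens(u'x = 1\n')
+#     assert first is second  # served from the one-element cache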
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/__init__.py
new file mode 100644
index 0000000..c6a2cf8
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/__init__.py
@@ -0,0 +1,88 @@
+"""Code coverage measurement for Python.
+
+Ned Batchelder
+http://nedbatchelder.com/code/coverage
+
+"""
+
+__version__ = "3.5.1" # see detailed history in CHANGES.txt
+
+__url__ = "http://nedbatchelder.com/code/coverage"
+if max(__version__).isalpha():
+    # max() of the version string is a letter only for pre-releases such
+    # as "3.5.1b2"; use a version-specific URL for those.
+ __url__ += "/" + __version__
+
+from coverage.control import coverage, process_startup
+from coverage.data import CoverageData
+from coverage.cmdline import main, CoverageScript
+from coverage.misc import CoverageException
+
+
+# Module-level functions. The original API to this module was based on
+# functions defined directly in the module, with a singleton of the coverage()
+# class. That design hampered programmability, so the current API uses
+# explicitly-created coverage objects. But for backward compatibility, here we
+# define the top-level functions to create the singleton when they are first
+# called.
+
+# Singleton object for use with module-level functions. The singleton is
+# created as needed when one of the module-level functions is called.
+_the_coverage = None
+
+def _singleton_method(name):
+ """Return a function to the `name` method on a singleton `coverage` object.
+
+ The singleton object is created the first time one of these functions is
+ called.
+
+ """
+ def wrapper(*args, **kwargs):
+ """Singleton wrapper around a coverage method."""
+ global _the_coverage
+ if not _the_coverage:
+ _the_coverage = coverage(auto_data=True)
+ return getattr(_the_coverage, name)(*args, **kwargs)
+ return wrapper
+
+
+# Define the module-level functions.
+use_cache = _singleton_method('use_cache')
+start = _singleton_method('start')
+stop = _singleton_method('stop')
+erase = _singleton_method('erase')
+exclude = _singleton_method('exclude')
+analysis = _singleton_method('analysis')
+analysis2 = _singleton_method('analysis2')
+report = _singleton_method('report')
+annotate = _singleton_method('annotate')
+
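+# The resulting backward-compatible API can be used as (editor's sketch;
+# do_work() is a hypothetical function under measurement):
+#
+#     import coverage
+#     coverage.start()
+#     do_work()
+#     coverage.stop()
+#     coverage.report()  # all calls share the one implicit singleton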
+
+# COPYRIGHT AND LICENSE
+#
+# Copyright 2001 Gareth Rees. All rights reserved.
+# Copyright 2004-2010 Ned Batchelder. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/__main__.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/__main__.py
new file mode 100644
index 0000000..af5fa9f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/__main__.py
@@ -0,0 +1,3 @@
+"""Coverage.py's main entrypoint."""
+from coverage.cmdline import main
+main()
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/annotate.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/annotate.py
new file mode 100644
index 0000000..a556d85
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/annotate.py
@@ -0,0 +1,101 @@
+"""Source file annotation for Coverage."""
+
+import os, re
+
+from coverage.report import Reporter
+
+class AnnotateReporter(Reporter):
+ """Generate annotated source files showing line coverage.
+
+ This reporter creates annotated copies of the measured source files. Each
+ .py file is copied as a .py,cover file, with a left-hand margin annotating
+ each line::
+
+ > def h(x):
+ - if 0: #pragma: no cover
+ - pass
+ > if x == 1:
+ ! a = 1
+ > else:
+ > a = 2
+
+ > h(2)
+
+ Executed lines use '>', lines not executed use '!', lines excluded from
+ consideration use '-'.
+
+ """
+
+ def __init__(self, coverage, ignore_errors=False):
+ super(AnnotateReporter, self).__init__(coverage, ignore_errors)
+ self.directory = None
+
+ blank_re = re.compile(r"\s*(#|$)")
+ else_re = re.compile(r"\s*else\s*:\s*(#|$)")
+
+ def report(self, morfs, config, directory=None):
+ """Run the report.
+
+ See `coverage.report()` for arguments.
+
+ """
+ self.report_files(self.annotate_file, morfs, config, directory)
+
+ def annotate_file(self, cu, analysis):
+ """Annotate a single file.
+
+ `cu` is the CodeUnit for the file to annotate.
+
+ """
+ if not cu.relative:
+ return
+
+ filename = cu.filename
+ source = cu.source_file()
+ if self.directory:
+ dest_file = os.path.join(self.directory, cu.flat_rootname())
+ dest_file += ".py,cover"
+ else:
+ dest_file = filename + ",cover"
+ dest = open(dest_file, 'w')
+
+ statements = analysis.statements
+ missing = analysis.missing
+ excluded = analysis.excluded
+
+ lineno = 0
+ i = 0
+ j = 0
+ covered = True
+ while True:
+ line = source.readline()
+ if line == '':
+ break
+ lineno += 1
+ while i < len(statements) and statements[i] < lineno:
+ i += 1
+ while j < len(missing) and missing[j] < lineno:
+ j += 1
+ if i < len(statements) and statements[i] == lineno:
+ covered = j >= len(missing) or missing[j] > lineno
+ if self.blank_re.match(line):
+ dest.write(' ')
+ elif self.else_re.match(line):
+ # Special logic for lines containing only 'else:'.
+ if i >= len(statements) and j >= len(missing):
+ dest.write('! ')
+ elif i >= len(statements) or j >= len(missing):
+ dest.write('> ')
+ elif statements[i] == missing[j]:
+ dest.write('! ')
+ else:
+ dest.write('> ')
+ elif lineno in excluded:
+ dest.write('- ')
+ elif covered:
+ dest.write('> ')
+ else:
+ dest.write('! ')
+ dest.write(line)
+ source.close()
+ dest.close()
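+
+# AnnotateReporter is normally driven through coverage.annotate(); a direct
+# use might look like (editor's sketch; `cov` is a coverage() instance):
+#
+#     reporter = AnnotateReporter(cov)
+#     reporter.report(morfs=None, config=cov.config, directory='annotated')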
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/backward.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/backward.py
new file mode 100644
index 0000000..93cb793
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/backward.py
@@ -0,0 +1,147 @@
+"""Add things to old Pythons so I can pretend they are newer."""
+
+# This file does lots of tricky stuff, so disable a bunch of lintisms.
+# pylint: disable=F0401,W0611,W0622
+# F0401: Unable to import blah
+# W0611: Unused import blah
+# W0622: Redefining built-in blah
+
+import os, sys
+
+# Python 2.3 doesn't have `set`
+try:
+ set = set # new in 2.4
+except NameError:
+ from sets import Set as set
+
+# Python 2.3 doesn't have `sorted`.
+try:
+ sorted = sorted
+except NameError:
+ def sorted(iterable):
+ """A 2.3-compatible implementation of `sorted`."""
+ lst = list(iterable)
+ lst.sort()
+ return lst
+
+# Pythons 2 and 3 differ on where to get StringIO
+try:
+ from cStringIO import StringIO
+ BytesIO = StringIO
+except ImportError:
+ from io import StringIO, BytesIO
+
+# What's a string called?
+try:
+ string_class = basestring
+except NameError:
+ string_class = str
+
+# Where do pickles come from?
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+# range or xrange?
+try:
+ range = xrange
+except NameError:
+ range = range
+
+# Exec is a statement in Py2, a function in Py3
+if sys.version_info >= (3, 0):
+ def exec_code_object(code, global_map):
+ """A wrapper around exec()."""
+ exec(code, global_map)
+else:
+ # OK, this is pretty gross. In Py2, exec was a statement, but that will
+ # be a syntax error if we try to put it in a Py3 file, even if it is never
+ # executed. So hide it inside an evaluated string literal instead.
+ eval(
+ compile(
+ "def exec_code_object(code, global_map):\n"
+ " exec code in global_map\n",
+ "<exec_function>", "exec"
+ )
+ )
+
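+# Either branch yields the same callable (editor's sketch):
+#
+#     g = {}
+#     exec_code_object(compile("x = 40 + 2", "<example>", "exec"), g)
+#     assert g['x'] == 42
+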
+# ConfigParser was renamed to the more-standard configparser
+try:
+ import configparser
+except ImportError:
+ import ConfigParser as configparser
+
+# Python 3.2 provides `tokenize.open`, the best way to open source files.
+import tokenize
+try:
+ open_source = tokenize.open # pylint: disable=E1101
+except AttributeError:
+ try:
+ detect_encoding = tokenize.detect_encoding # pylint: disable=E1101
+ except AttributeError:
+ def open_source(fname):
+ """Open a source file the best way."""
+ return open(fname, "rU")
+ else:
+ from io import TextIOWrapper
+ # Copied from the 3.2 stdlib:
+ def open_source(fname):
+ """Open a file in read only mode using the encoding detected by
+ detect_encoding().
+ """
+ buffer = open(fname, 'rb')
+ encoding, _ = detect_encoding(buffer.readline)
+ buffer.seek(0)
+ text = TextIOWrapper(buffer, encoding, line_buffering=True)
+ text.mode = 'r'
+ return text
+
+# Python 3.x is picky about bytes and strings, so provide methods to
+# get them right, and make them no-ops in 2.x
+if sys.version_info >= (3, 0):
+ def to_bytes(s):
+ """Convert string `s` to bytes."""
+ return s.encode('utf8')
+
+ def to_string(b):
+ """Convert bytes `b` to a string."""
+ return b.decode('utf8')
+
+else:
+ def to_bytes(s):
+ """Convert string `s` to bytes (no-op in 2.x)."""
+ return s
+
+ def to_string(b):
+ """Convert bytes `b` to a string (no-op in 2.x)."""
+ return b
+
+# A few details about writing encoded text are different in 2.x and 3.x.
+if sys.version_info >= (3, 0):
+ def write_encoded(fname, text, encoding='utf8', errors='strict'):
+        '''Write string `text` to the file named `fname`, with encoding.'''
+ # Don't use "with", so that this file is still good for old 2.x.
+ f = open(fname, 'w', encoding=encoding, errors=errors)
+ try:
+ f.write(text)
+ finally:
+ f.close()
+else:
+ # It's not clear that using utf8 strings in 2.x is the right thing to do.
+ def write_encoded(fname, text, encoding='utf8', errors='strict'):
+        '''Write utf8 string `text` to the file named `fname`, with encoding.'''
+ import codecs
+ f = codecs.open(fname, 'w', encoding=encoding, errors=errors)
+ try:
+ f.write(text.decode('utf8'))
+ finally:
+ f.close()
+
+# Md5 is available in different places.
+try:
+ import hashlib
+ md5 = hashlib.md5
+except ImportError:
+ import md5
+ md5 = md5.new
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/bytecode.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/bytecode.py
new file mode 100644
index 0000000..61c311e
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/bytecode.py
@@ -0,0 +1,90 @@
+"""Bytecode manipulation for coverage.py"""
+
+import opcode, sys, types
+
+class ByteCode(object):
+ """A single bytecode."""
+ def __init__(self):
+ # The offset of this bytecode in the code object.
+ self.offset = -1
+
+ # The opcode, defined in the `opcode` module.
+ self.op = -1
+
+ # The argument, a small integer, whose meaning depends on the opcode.
+ self.arg = -1
+
+ # The offset in the code object of the next bytecode.
+ self.next_offset = -1
+
+ # The offset to jump to.
+ self.jump_to = -1
+
+
+class ByteCodes(object):
+ """Iterator over byte codes in `code`.
+
+ Returns `ByteCode` objects.
+
+ """
+ def __init__(self, code):
+ self.code = code
+ self.offset = 0
+
+ if sys.version_info >= (3, 0):
+ def __getitem__(self, i):
+ return self.code[i]
+ else:
+ def __getitem__(self, i):
+ return ord(self.code[i])
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.offset >= len(self.code):
+ raise StopIteration
+
+ bc = ByteCode()
+ bc.op = self[self.offset]
+ bc.offset = self.offset
+
+ next_offset = self.offset+1
+ if bc.op >= opcode.HAVE_ARGUMENT:
+ bc.arg = self[self.offset+1] + 256*self[self.offset+2]
+ next_offset += 2
+
+ label = -1
+ if bc.op in opcode.hasjrel:
+ label = next_offset + bc.arg
+ elif bc.op in opcode.hasjabs:
+ label = bc.arg
+ bc.jump_to = label
+
+ bc.next_offset = self.offset = next_offset
+ return bc
+
+ next = __next__ # Py2k uses an old-style non-dunder name.
+
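+# Iterating raw bytecode (editor's sketch; assumes CPython's pre-3.6
+# layout of one-byte opcodes with optional two-byte arguments):
+#
+#     def f(x): return x + 1
+#     for bc in ByteCodes(f.__code__.co_code):
+#         print(bc.offset, opcode.opname[bc.op], bc.arg, bc.jump_to)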
+
+class CodeObjects(object):
+ """Iterate over all the code objects in `code`."""
+ def __init__(self, code):
+ self.stack = [code]
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.stack:
+ # We're going to return the code object on the stack, but first
+ # push its children for later returning.
+ code = self.stack.pop()
+ for c in code.co_consts:
+ if isinstance(c, types.CodeType):
+ self.stack.append(c)
+ return code
+
+ raise StopIteration
+
+ next = __next__
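+
+# CodeObjects yields a code object and everything nested in its co_consts
+# (editor's sketch):
+#
+#     code = compile("def f():\n    def g(): pass\n", "<s>", "exec")
+#     names = [c.co_name for c in CodeObjects(code)]
+#     # names contains '<module>', 'f', and 'g'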
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/cmdline.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/cmdline.py
new file mode 100644
index 0000000..1ce5e0f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/cmdline.py
@@ -0,0 +1,677 @@
+"""Command-line support for Coverage."""
+
+import optparse, re, sys, traceback
+
+from coverage.backward import sorted # pylint: disable=W0622
+from coverage.execfile import run_python_file, run_python_module
+from coverage.misc import CoverageException, ExceptionDuringRun, NoSource
+
+
+class Opts(object):
+ """A namespace class for individual options we'll build parsers from."""
+
+ append = optparse.make_option(
+ '-a', '--append', action='store_false', dest="erase_first",
+ help="Append coverage data to .coverage, otherwise it is started "
+ "clean with each run."
+ )
+ branch = optparse.make_option(
+ '', '--branch', action='store_true',
+ help="Measure branch coverage in addition to statement coverage."
+ )
+ directory = optparse.make_option(
+ '-d', '--directory', action='store',
+ metavar="DIR",
+ help="Write the output files to DIR."
+ )
+ help = optparse.make_option(
+ '-h', '--help', action='store_true',
+ help="Get help on this command."
+ )
+ ignore_errors = optparse.make_option(
+ '-i', '--ignore-errors', action='store_true',
+ help="Ignore errors while reading source files."
+ )
+ include = optparse.make_option(
+ '', '--include', action='store',
+ metavar="PAT1,PAT2,...",
+ help="Include files only when their filename path matches one of "
+ "these patterns. Usually needs quoting on the command line."
+ )
+ pylib = optparse.make_option(
+ '-L', '--pylib', action='store_true',
+ help="Measure coverage even inside the Python installed library, "
+ "which isn't done by default."
+ )
+ show_missing = optparse.make_option(
+ '-m', '--show-missing', action='store_true',
+ help="Show line numbers of statements in each module that weren't "
+ "executed."
+ )
+ old_omit = optparse.make_option(
+ '-o', '--omit', action='store',
+ metavar="PAT1,PAT2,...",
+ help="Omit files when their filename matches one of these patterns. "
+ "Usually needs quoting on the command line."
+ )
+ omit = optparse.make_option(
+ '', '--omit', action='store',
+ metavar="PAT1,PAT2,...",
+ help="Omit files when their filename matches one of these patterns. "
+ "Usually needs quoting on the command line."
+ )
+ output_xml = optparse.make_option(
+ '-o', '', action='store', dest="outfile",
+ metavar="OUTFILE",
+ help="Write the XML report to this file. Defaults to 'coverage.xml'"
+ )
+ parallel_mode = optparse.make_option(
+ '-p', '--parallel-mode', action='store_true',
+ help="Append the machine name, process id and random number to the "
+ ".coverage data file name to simplify collecting data from "
+ "many processes."
+ )
+ module = optparse.make_option(
+ '-m', '--module', action='store_true',
+ help="<pyfile> is an importable Python module, not a script path, "
+ "to be run as 'python -m' would run it."
+ )
+ rcfile = optparse.make_option(
+ '', '--rcfile', action='store',
+ help="Specify configuration file. Defaults to '.coveragerc'"
+ )
+ source = optparse.make_option(
+ '', '--source', action='store', metavar="SRC1,SRC2,...",
+ help="A list of packages or directories of code to be measured."
+ )
+ timid = optparse.make_option(
+ '', '--timid', action='store_true',
+ help="Use a simpler but slower trace method. Try this if you get "
+ "seemingly impossible results!"
+ )
+ version = optparse.make_option(
+ '', '--version', action='store_true',
+ help="Display version information and exit."
+ )
+
+
+class CoverageOptionParser(optparse.OptionParser, object):
+ """Base OptionParser for coverage.
+
+ Problems don't exit the program.
+ Defaults are initialized for all options.
+
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(CoverageOptionParser, self).__init__(
+ add_help_option=False, *args, **kwargs
+ )
+ self.set_defaults(
+ actions=[],
+ branch=None,
+ directory=None,
+ help=None,
+ ignore_errors=None,
+ include=None,
+ omit=None,
+ parallel_mode=None,
+ module=None,
+ pylib=None,
+ rcfile=True,
+ show_missing=None,
+ source=None,
+ timid=None,
+ erase_first=None,
+ version=None,
+ )
+
+ self.disable_interspersed_args()
+ self.help_fn = self.help_noop
+
+ def help_noop(self, error=None, topic=None, parser=None):
+ """No-op help function."""
+ pass
+
+ class OptionParserError(Exception):
+ """Used to stop the optparse error handler ending the process."""
+ pass
+
+ def parse_args(self, args=None, options=None):
+ """Call optparse.parse_args, but return a triple:
+
+ (ok, options, args)
+
+ """
+ try:
+ options, args = \
+ super(CoverageOptionParser, self).parse_args(args, options)
+ except self.OptionParserError:
+ return False, None, None
+ return True, options, args
+
+ def error(self, msg):
+ """Override optparse.error so sys.exit doesn't get called."""
+ self.help_fn(msg)
+ raise self.OptionParserError
+
+
+class ClassicOptionParser(CoverageOptionParser):
+ """Command-line parser for coverage.py classic arguments."""
+
+ def __init__(self):
+ super(ClassicOptionParser, self).__init__()
+
+ self.add_action('-a', '--annotate', 'annotate')
+ self.add_action('-b', '--html', 'html')
+ self.add_action('-c', '--combine', 'combine')
+ self.add_action('-e', '--erase', 'erase')
+ self.add_action('-r', '--report', 'report')
+ self.add_action('-x', '--execute', 'execute')
+
+ self.add_options([
+ Opts.directory,
+ Opts.help,
+ Opts.ignore_errors,
+ Opts.pylib,
+ Opts.show_missing,
+ Opts.old_omit,
+ Opts.parallel_mode,
+ Opts.timid,
+ Opts.version,
+ ])
+
+ def add_action(self, dash, dashdash, action_code):
+ """Add a specialized option that is the action to execute."""
+ option = self.add_option(dash, dashdash, action='callback',
+ callback=self._append_action
+ )
+ option.action_code = action_code
+
+ def _append_action(self, option, opt_unused, value_unused, parser):
+ """Callback for an option that adds to the `actions` list."""
+ parser.values.actions.append(option.action_code)
+
+
+class CmdOptionParser(CoverageOptionParser):
+ """Parse one of the new-style commands for coverage.py."""
+
+ def __init__(self, action, options=None, defaults=None, usage=None,
+ cmd=None, description=None
+ ):
+ """Create an OptionParser for a coverage command.
+
+ `action` is the slug to put into `options.actions`.
+ `options` is a list of Option's for the command.
+ `defaults` is a dict of default value for options.
+ `usage` is the usage string to display in help.
+ `cmd` is the command name, if different than `action`.
+ `description` is the description of the command, for the help text.
+
+ """
+ if usage:
+ usage = "%prog " + usage
+ super(CmdOptionParser, self).__init__(
+ prog="coverage %s" % (cmd or action),
+ usage=usage,
+ description=description,
+ )
+ self.set_defaults(actions=[action], **(defaults or {}))
+ if options:
+ self.add_options(options)
+ self.cmd = cmd or action
+
+ def __eq__(self, other):
+ # A convenience equality, so that I can put strings in unit test
+ # results, and they will compare equal to objects.
+ return (other == "<CmdOptionParser:%s>" % self.cmd)
+
+GLOBAL_ARGS = [
+ Opts.rcfile,
+ Opts.help,
+ ]
+
+CMDS = {
+ 'annotate': CmdOptionParser("annotate",
+ [
+ Opts.directory,
+ Opts.ignore_errors,
+ Opts.omit,
+ Opts.include,
+ ] + GLOBAL_ARGS,
+ usage = "[options] [modules]",
+ description = "Make annotated copies of the given files, marking "
+ "statements that are executed with > and statements that are "
+ "missed with !."
+ ),
+
+ 'combine': CmdOptionParser("combine", GLOBAL_ARGS,
+ usage = " ",
+ description = "Combine data from multiple coverage files collected "
+ "with 'run -p'. The combined results are written to a single "
+ "file representing the union of the data."
+ ),
+
+ 'debug': CmdOptionParser("debug", GLOBAL_ARGS,
+ usage = "<topic>",
+ description = "Display information on the internals of coverage.py, "
+ "for diagnosing problems. "
+ "Topics are 'data' to show a summary of the collected data, "
+ "or 'sys' to show installation information."
+ ),
+
+ 'erase': CmdOptionParser("erase", GLOBAL_ARGS,
+ usage = " ",
+ description = "Erase previously collected coverage data."
+ ),
+
+ 'help': CmdOptionParser("help", GLOBAL_ARGS,
+ usage = "[command]",
+ description = "Describe how to use coverage.py"
+ ),
+
+ 'html': CmdOptionParser("html",
+ [
+ Opts.directory,
+ Opts.ignore_errors,
+ Opts.omit,
+ Opts.include,
+ ] + GLOBAL_ARGS,
+ usage = "[options] [modules]",
+ description = "Create an HTML report of the coverage of the files. "
+ "Each file gets its own page, with the source decorated to show "
+ "executed, excluded, and missed lines."
+ ),
+
+ 'report': CmdOptionParser("report",
+ [
+ Opts.ignore_errors,
+ Opts.omit,
+ Opts.include,
+ Opts.show_missing,
+ ] + GLOBAL_ARGS,
+ usage = "[options] [modules]",
+ description = "Report coverage statistics on modules."
+ ),
+
+ 'run': CmdOptionParser("execute",
+ [
+ Opts.append,
+ Opts.branch,
+ Opts.pylib,
+ Opts.parallel_mode,
+ Opts.module,
+ Opts.timid,
+ Opts.source,
+ Opts.omit,
+ Opts.include,
+ ] + GLOBAL_ARGS,
+ defaults = {'erase_first': True},
+ cmd = "run",
+ usage = "[options] <pyfile> [program options]",
+ description = "Run a Python program, measuring code execution."
+ ),
+
+ 'xml': CmdOptionParser("xml",
+ [
+ Opts.ignore_errors,
+ Opts.omit,
+ Opts.include,
+ Opts.output_xml,
+ ] + GLOBAL_ARGS,
+ cmd = "xml",
+ defaults = {'outfile': 'coverage.xml'},
+ usage = "[options] [modules]",
+ description = "Generate an XML report of coverage results."
+ ),
+ }
+
+
+OK, ERR = 0, 1
+
+
+class CoverageScript(object):
+ """The command-line interface to Coverage."""
+
+ def __init__(self, _covpkg=None, _run_python_file=None,
+ _run_python_module=None, _help_fn=None):
+ # _covpkg is for dependency injection, so we can test this code.
+ if _covpkg:
+ self.covpkg = _covpkg
+ else:
+ import coverage
+ self.covpkg = coverage
+
+ # For dependency injection:
+ self.run_python_file = _run_python_file or run_python_file
+ self.run_python_module = _run_python_module or run_python_module
+ self.help_fn = _help_fn or self.help
+
+ self.coverage = None
+
+ def help(self, error=None, topic=None, parser=None):
+ """Display an error message, or the named topic."""
+ assert error or topic or parser
+ if error:
+ print(error)
+ print("Use 'coverage help' for help.")
+ elif parser:
+ print(parser.format_help().strip())
+ else:
+ # Parse out the topic we want from HELP_TOPICS
+            topic_list = re.split(r"(?m)^=+ (\w+) =+$", HELP_TOPICS)
+ topics = dict(zip(topic_list[1::2], topic_list[2::2]))
+ help_msg = topics.get(topic, '').strip()
+ if help_msg:
+ print(help_msg % self.covpkg.__dict__)
+ else:
+ print("Don't know topic %r" % topic)
+
+ def command_line(self, argv):
+ """The bulk of the command line interface to Coverage.
+
+ `argv` is the argument list to process.
+
+ Returns 0 if all is well, 1 if something went wrong.
+
+ """
+ # Collect the command-line options.
+
+ if not argv:
+ self.help_fn(topic='minimum_help')
+ return OK
+
+ # The command syntax we parse depends on the first argument. Classic
+ # syntax always starts with an option.
+ classic = argv[0].startswith('-')
+ if classic:
+ parser = ClassicOptionParser()
+ else:
+ parser = CMDS.get(argv[0])
+ if not parser:
+ self.help_fn("Unknown command: '%s'" % argv[0])
+ return ERR
+ argv = argv[1:]
+
+ parser.help_fn = self.help_fn
+ ok, options, args = parser.parse_args(argv)
+ if not ok:
+ return ERR
+
+ # Handle help.
+ if options.help:
+ if classic:
+ self.help_fn(topic='help')
+ else:
+ self.help_fn(parser=parser)
+ return OK
+
+ if "help" in options.actions:
+ if args:
+ for a in args:
+ parser = CMDS.get(a)
+ if parser:
+ self.help_fn(parser=parser)
+ else:
+ self.help_fn(topic=a)
+ else:
+ self.help_fn(topic='help')
+ return OK
+
+ # Handle version.
+ if options.version:
+ self.help_fn(topic='version')
+ return OK
+
+ # Check for conflicts and problems in the options.
+ for i in ['erase', 'execute']:
+ for j in ['annotate', 'html', 'report', 'combine']:
+ if (i in options.actions) and (j in options.actions):
+ self.help_fn("You can't specify the '%s' and '%s' "
+ "options at the same time." % (i, j))
+ return ERR
+
+ if not options.actions:
+ self.help_fn(
+ "You must specify at least one of -e, -x, -c, -r, -a, or -b."
+ )
+ return ERR
+ args_allowed = (
+ 'execute' in options.actions or
+ 'annotate' in options.actions or
+ 'html' in options.actions or
+ 'debug' in options.actions or
+ 'report' in options.actions or
+ 'xml' in options.actions
+ )
+ if not args_allowed and args:
+ self.help_fn("Unexpected arguments: %s" % " ".join(args))
+ return ERR
+
+ if 'execute' in options.actions and not args:
+ self.help_fn("Nothing to do.")
+ return ERR
+
+ # Listify the list options.
+ source = unshell_list(options.source)
+ omit = unshell_list(options.omit)
+ include = unshell_list(options.include)
+
+ # Do something.
+ self.coverage = self.covpkg.coverage(
+ data_suffix = options.parallel_mode,
+ cover_pylib = options.pylib,
+ timid = options.timid,
+ branch = options.branch,
+ config_file = options.rcfile,
+ source = source,
+ omit = omit,
+ include = include,
+ )
+
+ if 'debug' in options.actions:
+ if not args:
+ self.help_fn("What information would you like: data, sys?")
+ return ERR
+ for info in args:
+ if info == 'sys':
+ print("-- sys ----------------------------------------")
+ for label, info in self.coverage.sysinfo():
+ if info == []:
+ info = "-none-"
+ if isinstance(info, list):
+ print("%15s:" % label)
+ for e in info:
+ print("%15s %s" % ("", e))
+ else:
+ print("%15s: %s" % (label, info))
+ elif info == 'data':
+ print("-- data ---------------------------------------")
+ self.coverage.load()
+ print("path: %s" % self.coverage.data.filename)
+ print("has_arcs: %r" % self.coverage.data.has_arcs())
+ summary = self.coverage.data.summary(fullpath=True)
+ if summary:
+ filenames = sorted(summary.keys())
+ print("\n%d files:" % len(filenames))
+ for f in filenames:
+ print("%s: %d lines" % (f, summary[f]))
+ else:
+ print("No data collected")
+ else:
+ self.help_fn("Don't know what you mean by %r" % info)
+ return ERR
+ return OK
+
+ if 'erase' in options.actions or options.erase_first:
+ self.coverage.erase()
+ else:
+ self.coverage.load()
+
+ if 'execute' in options.actions:
+ # Run the script.
+ self.coverage.start()
+ code_ran = True
+ try:
+ try:
+ if options.module:
+ self.run_python_module(args[0], args)
+ else:
+ self.run_python_file(args[0], args)
+ except NoSource:
+ code_ran = False
+ raise
+ finally:
+ if code_ran:
+ self.coverage.stop()
+ self.coverage.save()
+
+ if 'combine' in options.actions:
+ self.coverage.combine()
+ self.coverage.save()
+
+ # Remaining actions are reporting, with some common options.
+ report_args = dict(
+ morfs = args,
+ ignore_errors = options.ignore_errors,
+ omit = omit,
+ include = include,
+ )
+
+ if 'report' in options.actions:
+ self.coverage.report(
+ show_missing=options.show_missing, **report_args)
+ if 'annotate' in options.actions:
+ self.coverage.annotate(
+ directory=options.directory, **report_args)
+ if 'html' in options.actions:
+ self.coverage.html_report(
+ directory=options.directory, **report_args)
+ if 'xml' in options.actions:
+ outfile = options.outfile
+ self.coverage.xml_report(outfile=outfile, **report_args)
+
+ return OK
+
+
+def unshell_list(s):
+ """Turn a command-line argument into a list."""
+ if not s:
+ return None
+ if sys.platform == 'win32':
+ # When running coverage as coverage.exe, some of the behavior
+ # of the shell is emulated: wildcards are expanded into a list of
+ # filenames. So you have to single-quote patterns on the command
+ # line, but (not) helpfully, the single quotes are included in the
+ # argument, so we have to strip them off here.
+ s = s.strip("'")
+ return s.split(',')
+
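+# For instance (editor's note): unshell_list('a.py,b.py') returns
+# ['a.py', 'b.py'], and on win32 unshell_list("'*.py'") strips the quotes
+# before splitting.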
+
+HELP_TOPICS = r"""
+
+== classic ====================================================================
+Coverage.py version %(__version__)s
+Measure, collect, and report on code coverage in Python programs.
+
+Usage:
+
+coverage -x [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...]
+ Execute the module, passing the given command-line arguments, collecting
+ coverage data. With the -p option, include the machine name and process
+ id in the .coverage file name. With -L, measure coverage even inside the
+ Python installed library, which isn't done by default. With --timid, use a
+ simpler but slower trace method.
+
+coverage -e
+ Erase collected coverage data.
+
+coverage -c
+ Combine data from multiple coverage files (as created by -p option above)
+ and store it into a single file representing the union of the coverage.
+
+coverage -r [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...]
+ Report on the statement coverage for the given files. With the -m
+ option, show line numbers of the statements that weren't executed.
+
+coverage -b -d DIR [-i] [-o DIR,...] [FILE1 FILE2 ...]
+ Create an HTML report of the coverage of the given files. Each file gets
+ its own page, with the file listing decorated to show executed, excluded,
+ and missed lines.
+
+coverage -a [-d DIR] [-i] [-o DIR,...] [FILE1 FILE2 ...]
+ Make annotated copies of the given files, marking statements that
+ are executed with > and statements that are missed with !.
+
+-d DIR
+ Write output files for -b or -a to this directory.
+
+-i Ignore errors while reporting or annotating.
+
+-o DIR,...
+ Omit reporting or annotating files when their filename path starts with
+ a directory listed in the omit list.
+ e.g. coverage -i -r -o c:\python25,lib\enthought\traits
+
+Coverage data is saved in the file .coverage by default. Set the
+COVERAGE_FILE environment variable to save it somewhere else.
+
+== help =======================================================================
+Coverage.py, version %(__version__)s
+Measure, collect, and report on code coverage in Python programs.
+
+usage: coverage <command> [options] [args]
+
+Commands:
+ annotate Annotate source files with execution information.
+ combine Combine a number of data files.
+ erase Erase previously collected coverage data.
+ help Get help on using coverage.py.
+ html Create an HTML report.
+ report Report coverage stats on modules.
+ run Run a Python program and measure code execution.
+ xml Create an XML report of coverage results.
+
+Use "coverage help <command>" for detailed help on any command.
+Use "coverage help classic" for help on older command syntax.
+For more information, see %(__url__)s
+
+== minimum_help ===============================================================
+Code coverage for Python. Use 'coverage help' for help.
+
+== version ====================================================================
+Coverage.py, version %(__version__)s. %(__url__)s
+
+"""
+
+
+def main(argv=None):
+ """The main entrypoint to Coverage.
+
+ This is installed as the script entrypoint.
+
+ """
+ if argv is None:
+ argv = sys.argv[1:]
+ try:
+ status = CoverageScript().command_line(argv)
+ except ExceptionDuringRun:
+ # An exception was caught while running the product code. The
+ # sys.exc_info() return tuple is packed into an ExceptionDuringRun
+ # exception.
+ _, err, _ = sys.exc_info()
+ traceback.print_exception(*err.args)
+ status = ERR
+ except CoverageException:
+ # A controlled error inside coverage.py: print the message to the user.
+ _, err, _ = sys.exc_info()
+ print(err)
+ status = ERR
+ except SystemExit:
+ # The user called `sys.exit()`. Exit with their argument, if any.
+ _, err, _ = sys.exc_info()
+ if err.args:
+ status = err.args[0]
+ else:
+ status = None
+ return status
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/codeunit.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/codeunit.py
new file mode 100644
index 0000000..55f44a2
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/codeunit.py
@@ -0,0 +1,117 @@
+"""Code unit (module) handling for Coverage."""
+
+import glob, os
+
+from coverage.backward import open_source, string_class, StringIO
+from coverage.misc import CoverageException
+
+
+def code_unit_factory(morfs, file_locator):
+ """Construct a list of CodeUnits from polymorphic inputs.
+
+ `morfs` is a module or a filename, or a list of same.
+
+ `file_locator` is a FileLocator that can help resolve filenames.
+
+ Returns a list of CodeUnit objects.
+
+ """
+ # Be sure we have a list.
+ if not isinstance(morfs, (list, tuple)):
+ morfs = [morfs]
+
+ # On Windows, the shell doesn't expand wildcards. Do it here.
+ globbed = []
+ for morf in morfs:
+ if isinstance(morf, string_class) and ('?' in morf or '*' in morf):
+ globbed.extend(glob.glob(morf))
+ else:
+ globbed.append(morf)
+ morfs = globbed
+
+ code_units = [CodeUnit(morf, file_locator) for morf in morfs]
+
+ return code_units
+
+
+class CodeUnit(object):
+ """Code unit: a filename or module.
+
+ Instance attributes:
+
+ `name` is a human-readable name for this code unit.
+ `filename` is the os path from which we can read the source.
+ `relative` is a boolean.
+
+ """
+ def __init__(self, morf, file_locator):
+ self.file_locator = file_locator
+
+ if hasattr(morf, '__file__'):
+ f = morf.__file__
+ else:
+ f = morf
+ # .pyc files should always refer to a .py instead.
+ if f.endswith('.pyc'):
+ f = f[:-1]
+ self.filename = self.file_locator.canonical_filename(f)
+
+ if hasattr(morf, '__name__'):
+ n = modname = morf.__name__
+ self.relative = True
+ else:
+ n = os.path.splitext(morf)[0]
+ rel = self.file_locator.relative_filename(n)
+ if os.path.isabs(n):
+ self.relative = (rel != n)
+ else:
+ self.relative = True
+ n = rel
+ modname = None
+ self.name = n
+ self.modname = modname
+
+ def __repr__(self):
+ return "<CodeUnit name=%r filename=%r>" % (self.name, self.filename)
+
+ # Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all
+ # of them defined.
+
+ def __lt__(self, other): return self.name < other.name
+ def __le__(self, other): return self.name <= other.name
+ def __eq__(self, other): return self.name == other.name
+ def __ne__(self, other): return self.name != other.name
+ def __gt__(self, other): return self.name > other.name
+ def __ge__(self, other): return self.name >= other.name
+
+ def flat_rootname(self):
+ """A base for a flat filename to correspond to this code unit.
+
+ Useful for writing files about the code where you want all the files in
+ the same directory, but need to differentiate same-named files from
+ different directories.
+
+ For example, the file a/b/c.py might return 'a_b_c'
+
+ """
+ if self.modname:
+ return self.modname.replace('.', '_')
+ else:
+ root = os.path.splitdrive(self.name)[1]
+ return root.replace('\\', '_').replace('/', '_').replace('.', '_')
+
+ def source_file(self):
+ """Return an open file for reading the source of the code unit."""
+ if os.path.exists(self.filename):
+ # A regular text file: open it.
+ return open_source(self.filename)
+
+ # Maybe it's in a zip file?
+ source = self.file_locator.get_zip_data(self.filename)
+ if source is not None:
+ return StringIO(source)
+
+ # Couldn't find source.
+ raise CoverageException(
+ "No source for code %r." % self.filename
+ )
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/collector.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/collector.py
new file mode 100644
index 0000000..3fdedaa
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/collector.py
@@ -0,0 +1,321 @@
+"""Raw data collector for Coverage."""
+
+import sys, threading
+
+try:
+ # Use the C extension code when we can, for speed.
+ from coverage.tracer import CTracer
+except ImportError:
+ # Couldn't import the C extension, maybe it isn't built.
+ CTracer = None
+
+
+class PyTracer(object):
+ """Python implementation of the raw data tracer."""
+
+ # Because of poor implementations of trace-function-manipulating tools,
+ # the Python trace function must be kept very simple. In particular, there
+ # must be only one function ever set as the trace function, both through
+ # sys.settrace, and as the return value from the trace function. Put
+ # another way, the trace function must always return itself. It cannot
+ # swap in other functions, or return None to avoid tracing a particular
+ # frame.
+ #
+ # The trace manipulator that introduced this restriction is DecoratorTools,
+ # which sets a trace function, and then later restores the pre-existing one
+ # by calling sys.settrace with a function it found in the current frame.
+ #
+ # Systems that use DecoratorTools (or similar trace manipulations) must use
+ # PyTracer to get accurate results. The command-line --timid argument is
+ # used to force the use of this tracer.
+
+ def __init__(self):
+ self.data = None
+ self.should_trace = None
+ self.should_trace_cache = None
+ self.warn = None
+ self.cur_file_data = None
+ self.last_line = 0
+ self.data_stack = []
+ self.last_exc_back = None
+ self.last_exc_firstlineno = 0
+ self.arcs = False
+
+ def _trace(self, frame, event, arg_unused):
+ """The trace function passed to sys.settrace."""
+
+ #print("trace event: %s %r @%d" % (
+ # event, frame.f_code.co_filename, frame.f_lineno))
+
+ if self.last_exc_back:
+ if frame == self.last_exc_back:
+ # Someone forgot a return event.
+ if self.arcs and self.cur_file_data:
+ pair = (self.last_line, -self.last_exc_firstlineno)
+ self.cur_file_data[pair] = None
+ self.cur_file_data, self.last_line = self.data_stack.pop()
+ self.last_exc_back = None
+
+ if event == 'call':
+ # Entering a new function context. Decide if we should trace
+ # in this file.
+ self.data_stack.append((self.cur_file_data, self.last_line))
+ filename = frame.f_code.co_filename
+ tracename = self.should_trace_cache.get(filename)
+ if tracename is None:
+ tracename = self.should_trace(filename, frame)
+ self.should_trace_cache[filename] = tracename
+ #print("called, stack is %d deep, tracename is %r" % (
+ # len(self.data_stack), tracename))
+ if tracename:
+ if tracename not in self.data:
+ self.data[tracename] = {}
+ self.cur_file_data = self.data[tracename]
+ else:
+ self.cur_file_data = None
+ # Set the last_line to -1 because the next arc will be entering a
+ # code block, indicated by (-1, n).
+ self.last_line = -1
+ elif event == 'line':
+ # Record an executed line.
+ if self.cur_file_data is not None:
+ if self.arcs:
+ #print("lin", self.last_line, frame.f_lineno)
+ self.cur_file_data[(self.last_line, frame.f_lineno)] = None
+ else:
+ #print("lin", frame.f_lineno)
+ self.cur_file_data[frame.f_lineno] = None
+ self.last_line = frame.f_lineno
+ elif event == 'return':
+ if self.arcs and self.cur_file_data:
+ first = frame.f_code.co_firstlineno
+ self.cur_file_data[(self.last_line, -first)] = None
+ # Leaving this function, pop the filename stack.
+ self.cur_file_data, self.last_line = self.data_stack.pop()
+ #print("returned, stack is %d deep" % (len(self.data_stack)))
+ elif event == 'exception':
+ #print("exc", self.last_line, frame.f_lineno)
+ self.last_exc_back = frame.f_back
+ self.last_exc_firstlineno = frame.f_code.co_firstlineno
+ return self._trace
+
+ def start(self):
+ """Start this Tracer.
+
+ Return a Python function suitable for use with sys.settrace().
+
+ """
+ sys.settrace(self._trace)
+ return self._trace
+
+ def stop(self):
+ """Stop this Tracer."""
+ if hasattr(sys, "gettrace") and self.warn:
+ if sys.gettrace() != self._trace:
+ msg = "Trace function changed, measurement is likely wrong: %r"
+ self.warn(msg % sys.gettrace())
+ sys.settrace(None)
+
+ def get_stats(self):
+ """Return a dictionary of statistics, or None."""
+ return None
+
+
+class Collector(object):
+ """Collects trace data.
+
+ Creates a Tracer object for each thread, since they track stack
+ information. Each Tracer points to the same shared data, contributing
+ traced data points.
+
+ When the Collector is started, it creates a Tracer for the current thread,
+ and installs a function to create Tracers for each new thread started.
+ When the Collector is stopped, all active Tracers are stopped.
+
+ Threads started while the Collector is stopped will never have Tracers
+ associated with them.
+
+ """
+
+ # The stack of active Collectors. Collectors are added here when started,
+ # and popped when stopped. Collectors on the stack are paused when not
+ # the top, and resumed when they become the top again.
+ _collectors = []
+
+ def __init__(self, should_trace, timid, branch, warn):
+ """Create a collector.
+
+        `should_trace` is a function taking a filename and returning a
+        canonicalized filename if the file should be traced, or False if
+        not.
+
+        If `timid` is true, then a slower, simpler trace function will be
+        used. This is important for some environments where manipulation of
+        tracing functions makes the faster, more sophisticated trace function
+        operate incorrectly.
+
+ If `branch` is true, then branches will be measured. This involves
+ collecting data on which statements followed each other (arcs). Use
+ `get_arc_data` to get the arc data.
+
+ `warn` is a warning function, taking a single string message argument,
+ to be used if a warning needs to be issued.
+
+ """
+ self.should_trace = should_trace
+ self.warn = warn
+ self.branch = branch
+ self.reset()
+
+ if timid:
+ # Being timid: use the simple Python trace function.
+ self._trace_class = PyTracer
+ else:
+ # Being fast: use the C Tracer if it is available, else the Python
+ # trace function.
+ self._trace_class = CTracer or PyTracer
+
+ def __repr__(self):
+ return "<Collector at 0x%x>" % id(self)
+
+ def tracer_name(self):
+ """Return the class name of the tracer we're using."""
+ return self._trace_class.__name__
+
+ def reset(self):
+ """Clear collected data, and prepare to collect more."""
+ # A dictionary mapping filenames to dicts with linenumber keys,
+ # or mapping filenames to dicts with linenumber pairs as keys.
+ self.data = {}
+
+ # A cache of the results from should_trace, the decision about whether
+ # to trace execution in a file. A dict of filename to (filename or
+ # False).
+ self.should_trace_cache = {}
+
+ # Our active Tracers.
+ self.tracers = []
+
+ def _start_tracer(self):
+ """Start a new Tracer object, and store it in self.tracers."""
+ tracer = self._trace_class()
+ tracer.data = self.data
+ tracer.arcs = self.branch
+ tracer.should_trace = self.should_trace
+ tracer.should_trace_cache = self.should_trace_cache
+ tracer.warn = self.warn
+ fn = tracer.start()
+ self.tracers.append(tracer)
+ return fn
+
+ # The trace function has to be set individually on each thread before
+ # execution begins. Ironically, the only support the threading module has
+ # for running code before the thread main is the tracing function. So we
+ # install this as a trace function, and the first time it's called, it does
+ # the real trace installation.
+
+ def _installation_trace(self, frame_unused, event_unused, arg_unused):
+ """Called on new threads, installs the real tracer."""
+ # Remove ourselves as the trace function
+ sys.settrace(None)
+ # Install the real tracer.
+ fn = self._start_tracer()
+ # Invoke the real trace function with the current event, to be sure
+ # not to lose an event.
+ if fn:
+ fn = fn(frame_unused, event_unused, arg_unused)
+ # Return the new trace function to continue tracing in this scope.
+ return fn
+
+ def start(self):
+ """Start collecting trace information."""
+ if self._collectors:
+ self._collectors[-1].pause()
+ self._collectors.append(self)
+ #print >>sys.stderr, "Started: %r" % self._collectors
+
+ # Check to see whether we had a fullcoverage tracer installed.
+ traces0 = None
+ if hasattr(sys, "gettrace"):
+ fn0 = sys.gettrace()
+ if fn0:
+ tracer0 = getattr(fn0, '__self__', None)
+ if tracer0:
+ traces0 = getattr(tracer0, 'traces', None)
+
+ # Install the tracer on this thread.
+ fn = self._start_tracer()
+
+ if traces0:
+ for args in traces0:
+ (frame, event, arg), lineno = args
+ fn(frame, event, arg, lineno=lineno)
+
+ # Install our installation tracer in threading, to jump start other
+ # threads.
+ threading.settrace(self._installation_trace)
+
+ def stop(self):
+ """Stop collecting trace information."""
+ #print >>sys.stderr, "Stopping: %r" % self._collectors
+ assert self._collectors
+ assert self._collectors[-1] is self
+
+ self.pause()
+ self.tracers = []
+
+ # Remove this Collector from the stack, and resume the one underneath
+ # (if any).
+ self._collectors.pop()
+ if self._collectors:
+ self._collectors[-1].resume()
+
+ def pause(self):
+ """Pause tracing, but be prepared to `resume`."""
+ for tracer in self.tracers:
+ tracer.stop()
+ stats = tracer.get_stats()
+ if stats:
+ print("\nCoverage.py tracer stats:")
+ for k in sorted(stats.keys()):
+ print("%16s: %s" % (k, stats[k]))
+ threading.settrace(None)
+
+ def resume(self):
+ """Resume tracing after a `pause`."""
+ for tracer in self.tracers:
+ tracer.start()
+ threading.settrace(self._installation_trace)
+
+ def get_line_data(self):
+ """Return the line data collected.
+
+ Data is { filename: { lineno: None, ...}, ...}
+
+ """
+ if self.branch:
+ # If we were measuring branches, then we have to re-build the dict
+ # to show line data.
+ line_data = {}
+ for f, arcs in self.data.items():
+ line_data[f] = ldf = {}
+ for l1, _ in list(arcs.keys()):
+ if l1:
+ ldf[l1] = None
+ return line_data
+ else:
+ return self.data
+
+ def get_arc_data(self):
+ """Return the arc data collected.
+
+ Data is { filename: { (l1, l2): None, ...}, ...}
+
+ Note that no data is collected or returned if the Collector wasn't
+ created with `branch` true.
+
+ """
+ if self.branch:
+ return self.data
+ else:
+ return {}
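+
+# Minimal usage sketch (editor's illustration; run_suite() is hypothetical,
+# and this should_trace traces every file by echoing its name):
+#
+#     collector = Collector(should_trace=lambda name, frame: name,
+#                           timid=True, branch=False, warn=print)
+#     collector.start()
+#     run_suite()
+#     collector.stop()
+#     line_data = collector.get_line_data()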
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/config.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/config.py
new file mode 100644
index 0000000..e72a728
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/config.py
@@ -0,0 +1,164 @@
+"""Config file for coverage.py"""
+
+import os
+from coverage.backward import configparser # pylint: disable=W0622
+
+# The default line exclusion regexes
+DEFAULT_EXCLUDE = [
+ '(?i)# *pragma[: ]*no *cover',
+ ]
+
+# The default partial branch regexes, to be modified by the user.
+DEFAULT_PARTIAL = [
+ '(?i)# *pragma[: ]*no *branch',
+ ]
+
+# The default partial branch regexes, based on Python semantics.
+# These are any Python branching constructs that can't actually execute all
+# their branches.
+DEFAULT_PARTIAL_ALWAYS = [
+ 'while (True|1|False|0):',
+ 'if (True|1|False|0):',
+ ]
+
+
+class CoverageConfig(object):
+ """Coverage.py configuration.
+
+ The attributes of this class are the various settings that control the
+ operation of coverage.py.
+
+ """
+
+ def __init__(self):
+ """Initialize the configuration attributes to their defaults."""
+ # Defaults for [run]
+ self.branch = False
+ self.cover_pylib = False
+ self.data_file = ".coverage"
+ self.parallel = False
+ self.timid = False
+ self.source = None
+
+ # Defaults for [report]
+ self.exclude_list = DEFAULT_EXCLUDE[:]
+ self.ignore_errors = False
+ self.include = None
+ self.omit = None
+ self.partial_list = DEFAULT_PARTIAL[:]
+ self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
+ self.precision = 0
+
+ # Defaults for [html]
+ self.html_dir = "htmlcov"
+
+ # Defaults for [xml]
+ self.xml_output = "coverage.xml"
+
+ # Defaults for [paths]
+ self.paths = {}
+
+ def from_environment(self, env_var):
+ """Read configuration from the `env_var` environment variable."""
+ # Timidity: for nose users, read an environment variable. This is a
+ # cheap hack, since the rest of the command line arguments aren't
+ # recognized, but it solves some users' problems.
+ env = os.environ.get(env_var, '')
+ if env:
+ self.timid = ('--timid' in env)
+
+ def from_args(self, **kwargs):
+ """Read config values from `kwargs`."""
+ for k, v in kwargs.items():
+ if v is not None:
+ setattr(self, k, v)
+
+ def from_file(self, *files):
+ """Read configuration from .rc files.
+
+ Each argument in `files` is a file name to read.
+
+ """
+ cp = configparser.RawConfigParser()
+ cp.read(files)
+
+ # [run]
+ if cp.has_option('run', 'branch'):
+ self.branch = cp.getboolean('run', 'branch')
+ if cp.has_option('run', 'cover_pylib'):
+ self.cover_pylib = cp.getboolean('run', 'cover_pylib')
+ if cp.has_option('run', 'data_file'):
+ self.data_file = cp.get('run', 'data_file')
+ if cp.has_option('run', 'include'):
+ self.include = self.get_list(cp, 'run', 'include')
+ if cp.has_option('run', 'omit'):
+ self.omit = self.get_list(cp, 'run', 'omit')
+ if cp.has_option('run', 'parallel'):
+ self.parallel = cp.getboolean('run', 'parallel')
+ if cp.has_option('run', 'source'):
+ self.source = self.get_list(cp, 'run', 'source')
+ if cp.has_option('run', 'timid'):
+ self.timid = cp.getboolean('run', 'timid')
+
+ # [report]
+ if cp.has_option('report', 'exclude_lines'):
+ self.exclude_list = \
+ self.get_line_list(cp, 'report', 'exclude_lines')
+ if cp.has_option('report', 'ignore_errors'):
+ self.ignore_errors = cp.getboolean('report', 'ignore_errors')
+ if cp.has_option('report', 'include'):
+ self.include = self.get_list(cp, 'report', 'include')
+ if cp.has_option('report', 'omit'):
+ self.omit = self.get_list(cp, 'report', 'omit')
+ if cp.has_option('report', 'partial_branches'):
+ self.partial_list = \
+ self.get_line_list(cp, 'report', 'partial_branches')
+ if cp.has_option('report', 'partial_branches_always'):
+ self.partial_always_list = \
+ self.get_line_list(cp, 'report', 'partial_branches_always')
+ if cp.has_option('report', 'precision'):
+ self.precision = cp.getint('report', 'precision')
+
+ # [html]
+ if cp.has_option('html', 'directory'):
+ self.html_dir = cp.get('html', 'directory')
+
+ # [xml]
+ if cp.has_option('xml', 'output'):
+ self.xml_output = cp.get('xml', 'output')
+
+ # [paths]
+ if cp.has_section('paths'):
+ for option in cp.options('paths'):
+ self.paths[option] = self.get_list(cp, 'paths', option)
+
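+ # Editor's illustration: a .coveragerc that from_file() above would
+ # accept (all values hypothetical):
+ #
+ #     [run]
+ #     branch = True
+ #     omit =
+ #         */thirdparty/*
+ #         */test/*
+ #
+ #     [report]
+ #     precision = 2
+ #     exclude_lines =
+ #         pragma: no cover
+ #         raise NotImplementedError
+ #
+ #     [html]
+ #     directory = htmlcov
+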
+ def get_list(self, cp, section, option):
+ """Read a list of strings from the ConfigParser `cp`.
+
+ The value of `section` and `option` is treated as a comma- and newline-
+ separated list of strings. Each value is stripped of whitespace.
+
+ Returns the list of strings.
+
+ """
+ value_list = cp.get(section, option)
+ values = []
+ for value_line in value_list.split('\n'):
+ for value in value_line.split(','):
+ value = value.strip()
+ if value:
+ values.append(value)
+ return values
+
+ def get_line_list(self, cp, section, option):
+ """Read a list of full-line strings from the ConfigParser `cp`.
+
+ The value of `section` and `option` is treated as a newline-separated
+ list of strings; empty lines are dropped.
+
+ Returns the list of strings.
+
+ """
+ value_list = cp.get(section, option)
+ return list(filter(None, value_list.split('\n')))
+
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/control.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/control.py
new file mode 100644
index 0000000..b617f48
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/control.py
@@ -0,0 +1,688 @@
+"""Core control stuff for Coverage."""
+
+import atexit, os, random, socket, sys
+
+from coverage.annotate import AnnotateReporter
+from coverage.backward import string_class
+from coverage.codeunit import code_unit_factory, CodeUnit
+from coverage.collector import Collector
+from coverage.config import CoverageConfig
+from coverage.data import CoverageData
+from coverage.files import FileLocator, TreeMatcher, FnmatchMatcher
+from coverage.files import PathAliases, find_python_files
+from coverage.html import HtmlReporter
+from coverage.misc import CoverageException, bool_or_none, join_regex
+from coverage.results import Analysis, Numbers
+from coverage.summary import SummaryReporter
+from coverage.xmlreport import XmlReporter
+
+class coverage(object):
+ """Programmatic access to Coverage.
+
+ To use::
+
+ from coverage import coverage
+
+ cov = coverage()
+ cov.start()
+ #.. blah blah (run your code) blah blah ..
+ cov.stop()
+ cov.html_report(directory='covhtml')
+
+ """
+ def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
+ auto_data=False, timid=None, branch=None, config_file=True,
+ source=None, omit=None, include=None):
+ """
+ `data_file` is the base name of the data file to use, defaulting to
+ ".coverage". `data_suffix` is appended (with a dot) to `data_file` to
+ create the final file name. If `data_suffix` is simply True, then a
+ suffix is created with the machine and process identity included.
+
+ `cover_pylib` is a boolean determining whether Python code installed
+ with the Python interpreter is measured. This includes the Python
+ standard library and any packages installed with the interpreter.
+
+ If `auto_data` is true, then any existing data file will be read when
+ coverage measurement starts, and data will be saved automatically when
+ measurement stops.
+
+ If `timid` is true, then a slower and simpler trace function will be
+ used. This is important for some environments where manipulation of
+ tracing functions breaks the faster trace function.
+
+ If `branch` is true, then branch coverage will be measured in addition
+ to the usual statement coverage.
+
+ `config_file` determines what config file to read. If it is a string,
+ it is the name of the config file to read. If it is True, then a
+ standard file is read (".coveragerc"). If it is False, then no file is
+ read.
+
+ `source` is a list of file paths or package names. Only code located
+ in the trees indicated by the file paths or package names will be
+ measured.
+
+ `include` and `omit` are lists of filename patterns. Files that match
+ `include` will be measured, files that match `omit` will not. Each
+ will also accept a single string argument.
+
+ """
+ from coverage import __version__
+
+ # A record of all the warnings that have been issued.
+ self._warnings = []
+
+ # Build our configuration from a number of sources:
+ # 1: defaults:
+ self.config = CoverageConfig()
+
+ # 2: from the coveragerc file:
+ if config_file:
+ if config_file is True:
+ config_file = ".coveragerc"
+ try:
+ self.config.from_file(config_file)
+ except ValueError:
+ _, err, _ = sys.exc_info()
+ raise CoverageException(
+ "Couldn't read config file %s: %s" % (config_file, err)
+ )
+
+ # 3: from environment variables:
+ self.config.from_environment('COVERAGE_OPTIONS')
+ env_data_file = os.environ.get('COVERAGE_FILE')
+ if env_data_file:
+ self.config.data_file = env_data_file
+
+ # 4: from constructor arguments:
+ if isinstance(omit, string_class):
+ omit = [omit]
+ if isinstance(include, string_class):
+ include = [include]
+ self.config.from_args(
+ data_file=data_file, cover_pylib=cover_pylib, timid=timid,
+ branch=branch, parallel=bool_or_none(data_suffix),
+ source=source, omit=omit, include=include
+ )
+
+ self.auto_data = auto_data
+ self.atexit_registered = False
+
+ # _exclude_re is a dict mapping exclusion list names to compiled
+ # regexes.
+ self._exclude_re = {}
+ self._exclude_regex_stale()
+
+ self.file_locator = FileLocator()
+
+ # The source argument can be directories or package names.
+ self.source = []
+ self.source_pkgs = []
+ for src in self.config.source or []:
+ if os.path.exists(src):
+ self.source.append(self.file_locator.canonical_filename(src))
+ else:
+ self.source_pkgs.append(src)
+
+ self.omit = self._prep_patterns(self.config.omit)
+ self.include = self._prep_patterns(self.config.include)
+
+ self.collector = Collector(
+ self._should_trace, timid=self.config.timid,
+ branch=self.config.branch, warn=self._warn
+ )
+
+ # Suffixes are a bit tricky. We want to use the data suffix only when
+ # collecting data, not when combining data. So we save it as
+ # `self.run_suffix` now, and promote it to `self.data_suffix` if we
+ # find that we are collecting data later.
+ if data_suffix or self.config.parallel:
+ if not isinstance(data_suffix, string_class):
+ # if data_suffix=True, use .machinename.pid.random
+ data_suffix = True
+ else:
+ data_suffix = None
+ self.data_suffix = None
+ self.run_suffix = data_suffix
+
+ # Create the data file. We do this at construction time so that the
+ # data file will be written into the directory where the process
+ # started rather than wherever the process eventually chdir'd to.
+ self.data = CoverageData(
+ basename=self.config.data_file,
+ collector="coverage v%s" % __version__
+ )
+
+ # The dirs for files considered "installed with the interpreter".
+ self.pylib_dirs = []
+ if not self.config.cover_pylib:
+ # Look at where some standard modules are located. That's the
+ # indication for "installed with the interpreter". In some
+ # environments (virtualenv, for example), these modules may be
+ # spread across a few locations. Look at all the candidate modules
+ # we've imported, and take all the different ones.
+ for m in (atexit, os, random, socket):
+ if hasattr(m, "__file__"):
+ m_dir = self._canonical_dir(m.__file__)
+ if m_dir not in self.pylib_dirs:
+ self.pylib_dirs.append(m_dir)
+
+ # To avoid tracing the coverage code itself, we skip anything located
+ # where we are.
+ self.cover_dir = self._canonical_dir(__file__)
+
+ # The matchers for _should_trace, created when tracing starts.
+ self.source_match = None
+ self.pylib_match = self.cover_match = None
+ self.include_match = self.omit_match = None
+
+ # Only _harvest_data once per measurement cycle.
+ self._harvested = False
+
+ # Set the reporting precision.
+ Numbers.set_precision(self.config.precision)
+
+ # When tearing down the coverage object, modules can become None.
+ # Saving the modules as object attributes avoids problems, but it is
+ # quite ad-hoc which modules need to be saved and which references
+ # need to use the object attributes.
+ self.socket = socket
+ self.os = os
+ self.random = random
+
+ def _canonical_dir(self, f):
+ """Return the canonical directory of the file `f`."""
+ return os.path.split(self.file_locator.canonical_filename(f))[0]
+
+ def _source_for_file(self, filename):
+ """Return the source file for `filename`."""
+ if not filename.endswith(".py"):
+ if filename[-4:-1] == ".py":
+ filename = filename[:-1]
+ return filename
+
+ def _should_trace(self, filename, frame):
+ """Decide whether to trace execution in `filename`
+
+ This function is called from the trace function. As each new file name
+ is encountered, this function determines whether it is traced or not.
+
+ Returns a canonicalized filename if it should be traced, False if it
+ should not.
+
+ """
+ if os is None:
+ return False
+
+ if filename.startswith('<'):
+ # Lots of non-file execution is represented with artificial
+ # filenames like "<string>", "<doctest readme.txt[0]>", or
+ # "<exec_function>". Don't ever trace these executions, since we
+ # can't do anything with the data later anyway.
+ return False
+
+ if filename.endswith(".html"):
+ # Jinja and maybe other templating systems compile templates into
+ # Python code, but use the template filename as the filename in
+ # the compiled code. Of course, those filenames are useless later
+ # so don't bother collecting. TODO: How should we really separate
+ # out good file extensions from bad?
+ return False
+
+ self._check_for_packages()
+
+ # Compiled Python files have two filenames: frame.f_code.co_filename is
+ # the filename at the time the .pyc was compiled. The second name is
+ # __file__, which is where the .pyc was actually loaded from. Since
+ # .pyc files can be moved after compilation (for example, by being
+ # installed), we look for __file__ in the frame and prefer it to the
+ # co_filename value.
+ dunder_file = frame.f_globals.get('__file__')
+ if dunder_file:
+ filename = self._source_for_file(dunder_file)
+
+ # Jython reports the .class file to the tracer, use the source file.
+ if filename.endswith("$py.class"):
+ filename = filename[:-9] + ".py"
+
+ canonical = self.file_locator.canonical_filename(filename)
+
+ # If the user specified source or include, then that's authoritative
+ # about the outer bound of what to measure and we don't have to apply
+ # any canned exclusions. If they didn't, then we have to exclude the
+ # stdlib and coverage.py directories.
+ if self.source_match:
+ if not self.source_match.match(canonical):
+ return False
+ elif self.include_match:
+ if not self.include_match.match(canonical):
+ return False
+ else:
+ # If we aren't supposed to trace installed code, then check if this
+ # is near the Python standard library and skip it if so.
+ if self.pylib_match and self.pylib_match.match(canonical):
+ return False
+
+ # We exclude the coverage code itself, since a little of it will be
+ # measured otherwise.
+ if self.cover_match and self.cover_match.match(canonical):
+ return False
+
+ # Check the file against the omit pattern.
+ if self.omit_match and self.omit_match.match(canonical):
+ return False
+
+ return canonical
+
+ # To log what should_trace returns, change this to "if 1:"
+ if 0:
+ _real_should_trace = _should_trace
+ def _should_trace(self, filename, frame): # pylint: disable=E0102
+ """A logging decorator around the real _should_trace function."""
+ ret = self._real_should_trace(filename, frame)
+ print("should_trace: %r -> %r" % (filename, ret))
+ return ret
+
+ def _warn(self, msg):
+ """Use `msg` as a warning."""
+ self._warnings.append(msg)
+ sys.stderr.write("Coverage.py warning: %s\n" % msg)
+
+ def _prep_patterns(self, patterns):
+ """Prepare the file patterns for use in a `FnmatchMatcher`.
+
+ If a pattern starts with a wildcard, it is used as a pattern
+ as-is. If it does not start with a wildcard, then it is made
+ absolute with the current directory.
+
+ If `patterns` is None, an empty list is returned.
+
+ """
+ patterns = patterns or []
+ prepped = []
+ for p in patterns:
+ if p.startswith("*") or p.startswith("?"):
+ prepped.append(p)
+ else:
+ prepped.append(self.file_locator.abs_file(p))
+ return prepped
+
+ def _check_for_packages(self):
+ """Update the source_match matcher with latest imported packages."""
+ # Our self.source_pkgs attribute is a list of package names we want to
+ # measure. Each time through here, we see if we've imported any of
+ # them yet. If so, we add its file to source_match, and we don't have
+ # to look for that package any more.
+ if self.source_pkgs:
+ found = []
+ for pkg in self.source_pkgs:
+ try:
+ mod = sys.modules[pkg]
+ except KeyError:
+ continue
+
+ found.append(pkg)
+
+ try:
+ pkg_file = mod.__file__
+ except AttributeError:
+ self._warn("Module %s has no Python source." % pkg)
+ else:
+ d, f = os.path.split(pkg_file)
+ if f.startswith('__init__.'):
+ # This is actually a package, return the directory.
+ pkg_file = d
+ else:
+ pkg_file = self._source_for_file(pkg_file)
+ pkg_file = self.file_locator.canonical_filename(pkg_file)
+ self.source.append(pkg_file)
+ self.source_match.add(pkg_file)
+
+ for pkg in found:
+ self.source_pkgs.remove(pkg)
+
+ def use_cache(self, usecache):
+ """Control the use of a data file (incorrectly called a cache).
+
+ `usecache` is true or false, whether to read and write data on disk.
+
+ """
+ self.data.usefile(usecache)
+
+ def load(self):
+ """Load previously-collected coverage data from the data file."""
+ self.collector.reset()
+ self.data.read()
+
+ def start(self):
+ """Start measuring code coverage."""
+ if self.run_suffix:
+ # Calling start() means we're running code, so use the run_suffix
+ # as the data_suffix when we eventually save the data.
+ self.data_suffix = self.run_suffix
+ if self.auto_data:
+ self.load()
+ # Save coverage data when Python exits.
+ if not self.atexit_registered:
+ atexit.register(self.save)
+ self.atexit_registered = True
+
+ # Create the matchers we need for _should_trace
+ if self.source or self.source_pkgs:
+ self.source_match = TreeMatcher(self.source)
+ else:
+ if self.cover_dir:
+ self.cover_match = TreeMatcher([self.cover_dir])
+ if self.pylib_dirs:
+ self.pylib_match = TreeMatcher(self.pylib_dirs)
+ if self.include:
+ self.include_match = FnmatchMatcher(self.include)
+ if self.omit:
+ self.omit_match = FnmatchMatcher(self.omit)
+
+ self._harvested = False
+ self.collector.start()
+
+ def stop(self):
+ """Stop measuring code coverage."""
+ self.collector.stop()
+ self._harvest_data()
+
+ def erase(self):
+ """Erase previously-collected coverage data.
+
+ This removes the in-memory data collected in this session as well as
+ discarding the data file.
+
+ """
+ self.collector.reset()
+ self.data.erase()
+
+ def clear_exclude(self, which='exclude'):
+ """Clear the exclude list."""
+ setattr(self.config, which + "_list", [])
+ self._exclude_regex_stale()
+
+ def exclude(self, regex, which='exclude'):
+ """Exclude source lines from execution consideration.
+
+ A number of lists of regular expressions are maintained. Each list
+ selects lines that are treated differently during reporting.
+
+ `which` determines which list is modified. The "exclude" list selects
+ lines that are not considered executable at all. The "partial" list
+ indicates lines with branches that are not taken.
+
+ `regex` is a regular expression. The regex is added to the specified
+ list. If any of the regexes in the list is found in a line, the line
+ is marked for special treatment during reporting.
+
+ """
+ excl_list = getattr(self.config, which + "_list")
+ excl_list.append(regex)
+ self._exclude_regex_stale()
+
+ def _exclude_regex_stale(self):
+ """Drop all the compiled exclusion regexes, a list was modified."""
+ self._exclude_re.clear()
+
+ def _exclude_regex(self, which):
+ """Return a compiled regex for the given exclusion list."""
+ if which not in self._exclude_re:
+ excl_list = getattr(self.config, which + "_list")
+ self._exclude_re[which] = join_regex(excl_list)
+ return self._exclude_re[which]
+
+ def get_exclude_list(self, which='exclude'):
+ """Return a list of excluded regex patterns.
+
+ `which` indicates which list is desired. See `exclude` for the lists
+ that are available, and their meaning.
+
+ """
+ return getattr(self.config, which + "_list")
+
+ def save(self):
+ """Save the collected coverage data to the data file."""
+ data_suffix = self.data_suffix
+ if data_suffix is True:
+ # If data_suffix was a simple true value, then make a suffix with
+ # plenty of distinguishing information. We do this here in
+ # `save()` at the last minute so that the pid will be correct even
+ # if the process forks.
+ data_suffix = "%s.%s.%06d" % (
+ self.socket.gethostname(), self.os.getpid(),
+ self.random.randint(0, 99999)
+ )
+
+ self._harvest_data()
+ self.data.write(suffix=data_suffix)
+
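+ # Editor's note: with data_suffix=True, save() above writes a file such
+ # as ".coverage.myhost.12345.067421" (hostname.pid.random; these
+ # concrete values are hypothetical).
+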
+ def combine(self):
+ """Combine together a number of similarly-named coverage data files.
+
+ All coverage data files whose name starts with `data_file` (from the
+ coverage() constructor) will be read, and combined together into the
+ current measurements.
+
+ """
+ aliases = None
+ if self.config.paths:
+ aliases = PathAliases(self.file_locator)
+ for paths in self.config.paths.values():
+ result = paths[0]
+ for pattern in paths[1:]:
+ aliases.add(pattern, result)
+ self.data.combine_parallel_data(aliases=aliases)
+
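+ # Editor's illustration: given a config section such as
+ #     [paths]
+ #     source =
+ #         src/
+ #         /jenkins/build/*/src
+ # combine() maps data recorded under /jenkins/build/1234/src/mod.py
+ # back to src/mod.py before merging (the paths are hypothetical).
+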
+ def _harvest_data(self):
+ """Get the collected data and reset the collector.
+
+ Also warn about various problems collecting data.
+
+ """
+ if not self._harvested:
+ self.data.add_line_data(self.collector.get_line_data())
+ self.data.add_arc_data(self.collector.get_arc_data())
+ self.collector.reset()
+
+ # If there are still entries in the source_pkgs list, then we never
+ # encountered those packages.
+ for pkg in self.source_pkgs:
+ self._warn("Module %s was never imported." % pkg)
+
+ # Find out if we got any data.
+ summary = self.data.summary()
+ if not summary:
+ self._warn("No data was collected.")
+
+ # Find files that were never executed at all.
+ for src in self.source:
+ for py_file in find_python_files(src):
+ self.data.touch_file(py_file)
+
+ self._harvested = True
+
+ # Backward compatibility with version 1.
+ def analysis(self, morf):
+ """Like `analysis2` but doesn't return excluded line numbers."""
+ f, s, _, m, mf = self.analysis2(morf)
+ return f, s, m, mf
+
+ def analysis2(self, morf):
+ """Analyze a module.
+
+ `morf` is a module or a filename. It will be analyzed to determine
+ its coverage statistics. The return value is a 5-tuple:
+
+ * The filename for the module.
+ * A list of line numbers of executable statements.
+ * A list of line numbers of excluded statements.
+ * A list of line numbers of statements not run (missing from
+ execution).
+ * A readable formatted string of the missing line numbers.
+
+ The analysis uses the source file itself and the current measured
+ coverage data.
+
+ """
+ analysis = self._analyze(morf)
+ return (
+ analysis.filename, analysis.statements, analysis.excluded,
+ analysis.missing, analysis.missing_formatted()
+ )
+
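+ # Editor's sketch of calling analysis2() (module name hypothetical):
+ #     fname, stmts, excluded, missing, fmt = cov.analysis2("mymod.py")
+ #     # fmt is a readable range string, e.g. "3-5, 9"
+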
+ def _analyze(self, it):
+ """Analyze a single morf or code unit.
+
+ Returns an `Analysis` object.
+
+ """
+ if not isinstance(it, CodeUnit):
+ it = code_unit_factory(it, self.file_locator)[0]
+
+ return Analysis(self, it)
+
+ def report(self, morfs=None, show_missing=True, ignore_errors=None,
+ file=None, # pylint: disable=W0622
+ omit=None, include=None
+ ):
+ """Write a summary report to `file`.
+
+ Each module in `morfs` is listed, with counts of statements, executed
+ statements, missing statements, and a list of lines missed.
+
+ `include` is a list of filename patterns. Modules whose filenames
+ match those patterns will be included in the report. Modules matching
+ `omit` will not be included in the report.
+
+ """
+ self.config.from_args(
+ ignore_errors=ignore_errors, omit=omit, include=include
+ )
+ reporter = SummaryReporter(
+ self, show_missing, self.config.ignore_errors
+ )
+ reporter.report(morfs, outfile=file, config=self.config)
+
+ def annotate(self, morfs=None, directory=None, ignore_errors=None,
+ omit=None, include=None):
+ """Annotate a list of modules.
+
+ Each module in `morfs` is annotated. The source is written to a new
+ file, named with a ",cover" suffix, with each line prefixed with a
+ marker to indicate the coverage of the line. Covered lines have ">",
+ excluded lines have "-", and missing lines have "!".
+
+ See `coverage.report()` for other arguments.
+
+ """
+ self.config.from_args(
+ ignore_errors=ignore_errors, omit=omit, include=include
+ )
+ reporter = AnnotateReporter(self, self.config.ignore_errors)
+ reporter.report(morfs, config=self.config, directory=directory)
+
+ def html_report(self, morfs=None, directory=None, ignore_errors=None,
+ omit=None, include=None):
+ """Generate an HTML report.
+
+ See `coverage.report()` for other arguments.
+
+ """
+ self.config.from_args(
+ ignore_errors=ignore_errors, omit=omit, include=include,
+ html_dir=directory,
+ )
+ reporter = HtmlReporter(self, self.config.ignore_errors)
+ reporter.report(morfs, config=self.config)
+
+ def xml_report(self, morfs=None, outfile=None, ignore_errors=None,
+ omit=None, include=None):
+ """Generate an XML report of coverage results.
+
+ The report is compatible with Cobertura reports.
+
+ Each module in `morfs` is included in the report. `outfile` is the
+ path to write the file to, "-" will write to stdout.
+
+ See `coverage.report()` for other arguments.
+
+ """
+ self.config.from_args(
+ ignore_errors=ignore_errors, omit=omit, include=include,
+ xml_output=outfile,
+ )
+ file_to_close = None
+ if self.config.xml_output:
+ if self.config.xml_output == '-':
+ outfile = sys.stdout
+ else:
+ outfile = open(self.config.xml_output, "w")
+ file_to_close = outfile
+ try:
+ reporter = XmlReporter(self, self.config.ignore_errors)
+ reporter.report(morfs, outfile=outfile, config=self.config)
+ finally:
+ if file_to_close:
+ file_to_close.close()
+
+ def sysinfo(self):
+ """Return a list of (key, value) pairs showing internal information."""
+
+ import coverage as covmod
+ import platform, re
+
+ try:
+ implementation = platform.python_implementation()
+ except AttributeError:
+ implementation = "unknown"
+
+ info = [
+ ('version', covmod.__version__),
+ ('coverage', covmod.__file__),
+ ('cover_dir', self.cover_dir),
+ ('pylib_dirs', self.pylib_dirs),
+ ('tracer', self.collector.tracer_name()),
+ ('data_path', self.data.filename),
+ ('python', sys.version.replace('\n', '')),
+ ('platform', platform.platform()),
+ ('implementation', implementation),
+ ('cwd', os.getcwd()),
+ ('path', sys.path),
+ ('environment', [
+ ("%s = %s" % (k, v)) for k, v in os.environ.items()
+ if re.search("^COV|^PY", k)
+ ]),
+ ]
+ return info
+
+
+def process_startup():
+ """Call this at Python startup to perhaps measure coverage.
+
+ If the environment variable COVERAGE_PROCESS_START is defined, coverage
+ measurement is started. The value of the variable is the config file
+ to use.
+
+ There are two ways to configure your Python installation to invoke this
+ function when Python starts:
+
+ #. Create or append to sitecustomize.py to add these lines::
+
+ import coverage
+ coverage.process_startup()
+
+ #. Create a .pth file in your Python installation containing::
+
+ import coverage; coverage.process_startup()
+
+ """
+ cps = os.environ.get("COVERAGE_PROCESS_START")
+ if cps:
+ cov = coverage(config_file=cps, auto_data=True)
+ if os.environ.get("COVERAGE_COVERAGE"):
+ # Measuring coverage within coverage.py takes yet more trickery.
+ cov.cover_dir = "Please measure coverage.py!"
+ cov.start()
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/data.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/data.py
new file mode 100644
index 0000000..7a8d656
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/data.py
@@ -0,0 +1,273 @@
+"""Coverage data for Coverage."""
+
+import os
+
+from coverage.backward import pickle, sorted # pylint: disable=W0622
+from coverage.files import PathAliases
+
+
+class CoverageData(object):
+ """Manages collected coverage data, including file storage.
+
+ The data file format is a pickled dict, with these keys:
+
+ * collector: a string identifying the collecting software
+
+ * lines: a dict mapping filenames to sorted lists of line numbers
+ executed:
+ { 'file1': [17,23,45], 'file2': [1,2,3], ... }
+
+ * arcs: a dict mapping filenames to sorted lists of line number pairs:
+ { 'file1': [(17,23), (17,25), (25,26)], ... }
+
+ """
+
+ def __init__(self, basename=None, collector=None):
+ """Create a CoverageData.
+
+ `basename` is the name of the file to use for storing data.
+
+ `collector` is a string describing the coverage measurement software.
+
+ """
+ self.collector = collector or 'unknown'
+
+ self.use_file = True
+
+ # Construct the filename that will be used for data file storage, if we
+ # ever do any file storage.
+ self.filename = basename or ".coverage"
+ self.filename = os.path.abspath(self.filename)
+
+ # A map from canonical Python source file name to a dictionary in
+ # which there's an entry for each line number that has been
+ # executed:
+ #
+ # {
+ # 'filename1.py': { 12: None, 47: None, ... },
+ # ...
+ # }
+ #
+ self.lines = {}
+
+ # A map from canonical Python source file name to a dictionary with an
+ # entry for each pair of line numbers forming an arc:
+ #
+ # {
+ # 'filename1.py': { (12,14): None, (47,48): None, ... },
+ # ...
+ # }
+ #
+ self.arcs = {}
+
+ self.os = os
+ self.sorted = sorted
+ self.pickle = pickle
+
+ def usefile(self, use_file=True):
+ """Set whether or not to use a disk file for data."""
+ self.use_file = use_file
+
+ def read(self):
+ """Read coverage data from the coverage data file (if it exists)."""
+ if self.use_file:
+ self.lines, self.arcs = self._read_file(self.filename)
+ else:
+ self.lines, self.arcs = {}, {}
+
+ def write(self, suffix=None):
+ """Write the collected coverage data to a file.
+
+ `suffix` is a suffix to append to the base file name. This can be used
+ for multiple or parallel execution, so that many coverage data files
+ can exist simultaneously. A dot will be used to join the base name and
+ the suffix.
+
+ """
+ if self.use_file:
+ filename = self.filename
+ if suffix:
+ filename += "." + suffix
+ self.write_file(filename)
+
+ def erase(self):
+ """Erase the data, both in this object, and from its file storage."""
+ if self.use_file:
+ if self.filename and os.path.exists(self.filename):
+ os.remove(self.filename)
+ self.lines = {}
+ self.arcs = {}
+
+ def line_data(self):
+ """Return the map from filenames to lists of line numbers executed."""
+ return dict(
+ [(f, self.sorted(lmap.keys())) for f, lmap in self.lines.items()]
+ )
+
+ def arc_data(self):
+ """Return the map from filenames to lists of line number pairs."""
+ return dict(
+ [(f, self.sorted(amap.keys())) for f, amap in self.arcs.items()]
+ )
+
+ def write_file(self, filename):
+ """Write the coverage data to `filename`."""
+
+ # Create the file data.
+ data = {}
+
+ data['lines'] = self.line_data()
+ arcs = self.arc_data()
+ if arcs:
+ data['arcs'] = arcs
+
+ if self.collector:
+ data['collector'] = self.collector
+
+ # Write the pickle to the file.
+ fdata = open(filename, 'wb')
+ try:
+ self.pickle.dump(data, fdata, 2)
+ finally:
+ fdata.close()
+
+ def read_file(self, filename):
+ """Read the coverage data from `filename`."""
+ self.lines, self.arcs = self._read_file(filename)
+
+ def raw_data(self, filename):
+ """Return the raw pickled data from `filename`."""
+ fdata = open(filename, 'rb')
+ try:
+ data = pickle.load(fdata)
+ finally:
+ fdata.close()
+ return data
+
+ def _read_file(self, filename):
+ """Return the stored coverage data from the given file.
+
+ Returns two values, suitable for assigning to `self.lines` and
+ `self.arcs`.
+
+ """
+ lines = {}
+ arcs = {}
+ try:
+ data = self.raw_data(filename)
+ if isinstance(data, dict):
+ # Unpack the 'lines' item.
+ lines = dict([
+ (f, dict.fromkeys(linenos, None))
+ for f, linenos in data.get('lines', {}).items()
+ ])
+ # Unpack the 'arcs' item.
+ arcs = dict([
+ (f, dict.fromkeys(arcpairs, None))
+ for f, arcpairs in data.get('arcs', {}).items()
+ ])
+ except Exception:
+ pass
+ return lines, arcs
+
+ def combine_parallel_data(self, aliases=None):
+ """Combine a number of data files together.
+
+ Treat `self.filename` as a file prefix, and combine the data from all
+ of the data files starting with that prefix plus a dot.
+
+ If `aliases` is provided, it's a `PathAliases` object that is used to
+ re-map paths to match the local machine's.
+
+ """
+ aliases = aliases or PathAliases()
+ data_dir, local = os.path.split(self.filename)
+ localdot = local + '.'
+ for f in os.listdir(data_dir or '.'):
+ if f.startswith(localdot):
+ full_path = os.path.join(data_dir, f)
+ new_lines, new_arcs = self._read_file(full_path)
+ for filename, file_data in new_lines.items():
+ filename = aliases.map(filename)
+ self.lines.setdefault(filename, {}).update(file_data)
+ for filename, file_data in new_arcs.items():
+ filename = aliases.map(filename)
+ self.arcs.setdefault(filename, {}).update(file_data)
+ if f != local:
+ os.remove(full_path)
+
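+ # Editor's sketch of typical parallel use (file names hypothetical):
+ #     data = CoverageData(basename=".coverage")
+ #     # ... after runs have written .coverage.hostA.1234.000001 etc.
+ #     data.combine_parallel_data()
+ #     data.write()
+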
+ def add_line_data(self, line_data):
+ """Add executed line data.
+
+ `line_data` is { filename: { lineno: None, ... }, ...}
+
+ """
+ for filename, linenos in line_data.items():
+ self.lines.setdefault(filename, {}).update(linenos)
+
+ def add_arc_data(self, arc_data):
+ """Add measured arc data.
+
+ `arc_data` is { filename: { (l1,l2): None, ... }, ...}
+
+ """
+ for filename, arcs in arc_data.items():
+ self.arcs.setdefault(filename, {}).update(arcs)
+
+ def touch_file(self, filename):
+ """Ensure that `filename` appears in the data, empty if needed."""
+ self.lines.setdefault(filename, {})
+
+ def measured_files(self):
+ """A list of all files that had been measured."""
+ return list(self.lines.keys())
+
+ def executed_lines(self, filename):
+ """A map containing all the line numbers executed in `filename`.
+
+ If `filename` hasn't been collected at all (because it wasn't executed)
+ then return an empty map.
+
+ """
+ return self.lines.get(filename) or {}
+
+ def executed_arcs(self, filename):
+ """A map containing all the arcs executed in `filename`."""
+ return self.arcs.get(filename) or {}
+
+ def add_to_hash(self, filename, hasher):
+ """Contribute `filename`'s data to the Md5Hash `hasher`."""
+ hasher.update(self.executed_lines(filename))
+ hasher.update(self.executed_arcs(filename))
+
+ def summary(self, fullpath=False):
+ """Return a dict summarizing the coverage data.
+
+ Keys are based on the filenames, and values are the number of executed
+ lines. If `fullpath` is true, then the keys are the full pathnames of
+ the files, otherwise they are the basenames of the files.
+
+ """
+ summ = {}
+ if fullpath:
+ filename_fn = lambda f: f
+ else:
+ filename_fn = self.os.path.basename
+ for filename, lines in self.lines.items():
+ summ[filename_fn(filename)] = len(lines)
+ return summ
+
+ def has_arcs(self):
+ """Does this data have arcs?"""
+ return bool(self.arcs)
+
+
+if __name__ == '__main__':
+ # Ad-hoc: show the raw data in a data file.
+ import pprint, sys
+ covdata = CoverageData()
+ if sys.argv[1:]:
+ fname = sys.argv[1]
+ else:
+ fname = covdata.filename
+ pprint.pprint(covdata.raw_data(fname))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/execfile.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/execfile.py
new file mode 100644
index 0000000..71227b7
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/execfile.py
@@ -0,0 +1,133 @@
+"""Execute files of Python code."""
+
+import imp, os, sys
+
+from coverage.backward import exec_code_object, open_source
+from coverage.misc import NoSource, ExceptionDuringRun
+
+
+try:
+ # In Py 2.x, the builtins were in __builtin__
+ BUILTINS = sys.modules['__builtin__']
+except KeyError:
+ # In Py 3.x, they're in builtins
+ BUILTINS = sys.modules['builtins']
+
+
+def rsplit1(s, sep):
+ """The same as s.rsplit(sep, 1), but works in 2.3"""
+ parts = s.split(sep)
+ return sep.join(parts[:-1]), parts[-1]
+
+
+def run_python_module(modulename, args):
+ """Run a python module, as though with ``python -m name args...``.
+
+ `modulename` is the name of the module, possibly a dot-separated name.
+ `args` is the argument array to present as sys.argv, including the first
+ element naming the module being executed.
+
+ """
+ openfile = None
+ glo, loc = globals(), locals()
+ try:
+ try:
+ # Search for the module - inside its parent package, if any - using
+ # standard import mechanics.
+ if '.' in modulename:
+ packagename, name = rsplit1(modulename, '.')
+ package = __import__(packagename, glo, loc, ['__path__'])
+ searchpath = package.__path__
+ else:
+ packagename, name = None, modulename
+ searchpath = None # "top-level search" in imp.find_module()
+ openfile, pathname, _ = imp.find_module(name, searchpath)
+
+ # Complain if this is a magic non-file module.
+ if openfile is None and pathname is None:
+ raise NoSource(
+ "module does not live in a file: %r" % modulename
+ )
+
+ # If `modulename` is actually a package, not a mere module, then we
+ # pretend to be Python 2.7 and try running its __main__.py script.
+ if openfile is None:
+ packagename = modulename
+ name = '__main__'
+ package = __import__(packagename, glo, loc, ['__path__'])
+ searchpath = package.__path__
+ openfile, pathname, _ = imp.find_module(name, searchpath)
+ except ImportError:
+ _, err, _ = sys.exc_info()
+ raise NoSource(str(err))
+ finally:
+ if openfile:
+ openfile.close()
+
+ # Finally, hand the file off to run_python_file for execution.
+ run_python_file(pathname, args, package=packagename)
+
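+# Editor's illustration (module name hypothetical):
+#     run_python_module('mypkg.main', ['mypkg.main', '--verbose'])
+# behaves like "python -m mypkg.main --verbose".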
+
+def run_python_file(filename, args, package=None):
+ """Run a python file as if it were the main program on the command line.
+
+ `filename` is the path to the file to execute, it need not be a .py file.
+ `args` is the argument array to present as sys.argv, including the first
+ element naming the file being executed. `package` is the name of the
+ enclosing package, if any.
+
+ """
+ # Create a module to serve as __main__
+ old_main_mod = sys.modules['__main__']
+ main_mod = imp.new_module('__main__')
+ sys.modules['__main__'] = main_mod
+ main_mod.__file__ = filename
+ main_mod.__package__ = package
+ main_mod.__builtins__ = BUILTINS
+
+ # Set sys.argv and the first path element properly.
+ old_argv = sys.argv
+ old_path0 = sys.path[0]
+ sys.argv = args
+ sys.path[0] = os.path.abspath(os.path.dirname(filename))
+
+ try:
+ # Open the source file.
+ try:
+ source_file = open_source(filename)
+ except IOError:
+ raise NoSource("No file to run: %r" % filename)
+
+ try:
+ source = source_file.read()
+ finally:
+ source_file.close()
+
+ # We have the source. `compile` still needs the last line to be clean,
+ # so make sure it is, then compile a code object from it.
+ if not source or source[-1] != '\n':
+ source += '\n'
+ code = compile(source, filename, "exec")
+
+ # Execute the source file.
+ try:
+ exec_code_object(code, main_mod.__dict__)
+ except SystemExit:
+ # The user called sys.exit(). Just pass it along to the upper
+ # layers, where it will be handled.
+ raise
+ except:
+ # Something went wrong while executing the user code.
+ # Get the exc_info, and pack them into an exception that we can
+ # throw up to the outer loop. We peel two layers off the traceback
+ # so that the coverage.py code doesn't appear in the final printed
+ # traceback.
+ typ, err, tb = sys.exc_info()
+ raise ExceptionDuringRun(typ, err, tb.tb_next.tb_next)
+ finally:
+ # Restore the old __main__
+ sys.modules['__main__'] = old_main_mod
+
+ # Restore the old argv and path
+ sys.argv = old_argv
+ sys.path[0] = old_path0
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/files.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/files.py
new file mode 100644
index 0000000..81ec196
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/files.py
@@ -0,0 +1,219 @@
+"""File wrangling."""
+
+from coverage.backward import to_string
+from coverage.misc import CoverageException
+import fnmatch, os, re, sys
+
+class FileLocator(object):
+ """Understand how filenames work."""
+
+ def __init__(self):
+ # The absolute path to our current directory.
+ self.relative_dir = self.abs_file(os.curdir) + os.sep
+
+ # Cache of results of calling the canonical_filename() method, to
+ # avoid duplicating work.
+ self.canonical_filename_cache = {}
+
+ def abs_file(self, filename):
+ """Return the absolute normalized form of `filename`."""
+ return os.path.normcase(os.path.abspath(os.path.realpath(filename)))
+
+ def relative_filename(self, filename):
+ """Return the relative form of `filename`.
+
+ The filename will be relative to the current directory when the
+ `FileLocator` was constructed.
+
+ """
+ if filename.startswith(self.relative_dir):
+ filename = filename.replace(self.relative_dir, "")
+ return filename
+
+ def canonical_filename(self, filename):
+ """Return a canonical filename for `filename`.
+
+ An absolute path with no redundant components and normalized case.
+
+ """
+ if filename not in self.canonical_filename_cache:
+ f = filename
+ if os.path.isabs(f) and not os.path.exists(f):
+ if self.get_zip_data(f) is None:
+ f = os.path.basename(f)
+ if not os.path.isabs(f):
+ for path in [os.curdir] + sys.path:
+ if path is None:
+ continue
+ g = os.path.join(path, f)
+ if os.path.exists(g):
+ f = g
+ break
+ cf = self.abs_file(f)
+ self.canonical_filename_cache[filename] = cf
+ return self.canonical_filename_cache[filename]
+
+ def get_zip_data(self, filename):
+ """Get data from `filename` if it is a zip file path.
+
+ Returns the string data read from the zip file, or None if no zip file
+ could be found or `filename` isn't in it. The data returned will be
+ an empty string if the file is empty.
+
+ """
+ import zipimport
+ markers = ['.zip'+os.sep, '.egg'+os.sep]
+ for marker in markers:
+ if marker in filename:
+ parts = filename.split(marker)
+ try:
+ zi = zipimport.zipimporter(parts[0]+marker[:-1])
+ except zipimport.ZipImportError:
+ continue
+ try:
+ data = zi.get_data(parts[1])
+ except IOError:
+ continue
+ return to_string(data)
+ return None
+
+
+class TreeMatcher(object):
+ """A matcher for files in a tree."""
+ def __init__(self, directories):
+ self.dirs = directories[:]
+
+ def __repr__(self):
+ return "<TreeMatcher %r>" % self.dirs
+
+ def add(self, directory):
+ """Add another directory to the list we match for."""
+ self.dirs.append(directory)
+
+ def match(self, fpath):
+ """Does `fpath` indicate a file in one of our trees?"""
+ for d in self.dirs:
+ if fpath.startswith(d):
+ if fpath == d:
+ # This is the same file!
+ return True
+ if fpath[len(d)] == os.sep:
+ # This is a file in the directory
+ return True
+ return False
+
+
+class FnmatchMatcher(object):
+ """A matcher for files by filename pattern."""
+ def __init__(self, pats):
+ self.pats = pats[:]
+
+ def __repr__(self):
+ return "<FnmatchMatcher %r>" % self.pats
+
+ def match(self, fpath):
+ """Does `fpath` match one of our filename patterns?"""
+ for pat in self.pats:
+ if fnmatch.fnmatch(fpath, pat):
+ return True
+ return False
+
+
+def sep(s):
+ """Find the path separator used in this string, or os.sep if none."""
+ sep_match = re.search(r"[\\/]", s)
+ if sep_match:
+ the_sep = sep_match.group(0)
+ else:
+ the_sep = os.sep
+ return the_sep
+
+
+class PathAliases(object):
+ """A collection of aliases for paths.
+
+ When combining data files from remote machines, often the paths to source
+ code are different, for example, due to OS differences, or because of
+ serialized checkouts on continuous integration machines.
+
+ A `PathAliases` object tracks a list of pattern/result pairs, and can
+ map a path through those aliases to produce a unified path.
+
+ `locator` is a FileLocator that is used to canonicalize the results.
+
+ """
+ def __init__(self, locator=None):
+ self.aliases = []
+ self.locator = locator
+
+ def add(self, pattern, result):
+ """Add the `pattern`/`result` pair to the list of aliases.
+
+ `pattern` is an `fnmatch`-style pattern. `result` is a simple
+ string. When mapping paths, if a path starts with a match against
+ `pattern`, then that match is replaced with `result`. This models
+ isomorphic source trees being rooted at different places on two
+ different machines.
+
+ `pattern` can't end with a wildcard component, since that would
+ match an entire tree, and not just its root.
+
+ """
+ # The pattern can't end with a wildcard component.
+ pattern = pattern.rstrip(r"\/")
+ if pattern.endswith("*"):
+ raise CoverageException("Pattern must not end with wildcards.")
+ pattern_sep = sep(pattern)
+ pattern += pattern_sep
+
+ # Make a regex from the pattern. fnmatch always adds a \Z or $ to
+ # match the whole string, which we don't want.
+ regex_pat = fnmatch.translate(pattern).replace(r'\Z(', '(')
+ if regex_pat.endswith("$"):
+ regex_pat = regex_pat[:-1]
+ # We want */a/b.py to match on Windows too, so change slash to match
+ # either separator.
+ regex_pat = regex_pat.replace(r"\/", r"[\\/]")
+ # We want case-insensitive matching, so add that flag.
+ regex = re.compile("(?i)" + regex_pat)
+
+ # Normalize the result: it must end with a path separator.
+ result_sep = sep(result)
+ result = result.rstrip(r"\/") + result_sep
+ self.aliases.append((regex, result, pattern_sep, result_sep))
+
+ def map(self, path):
+ """Map `path` through the aliases.
+
+ `path` is checked against all of the patterns. The first pattern to
+ match is used to replace the root of the path with the result root.
+ Only one pattern is ever used. If no patterns match, `path` is
+ returned unchanged.
+
+ The separator style in the result is made to match that of the result
+ in the alias.
+
+ """
+ for regex, result, pattern_sep, result_sep in self.aliases:
+ m = regex.match(path)
+ if m:
+ new = path.replace(m.group(0), result)
+ if pattern_sep != result_sep:
+ new = new.replace(pattern_sep, result_sep)
+ if self.locator:
+ new = self.locator.canonical_filename(new)
+ return new
+ return path
+
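+# Editor's sketch of PathAliases in isolation (paths hypothetical):
+#     aliases = PathAliases()
+#     aliases.add('/jenkins/build/*/src', './src')
+#     aliases.map('/jenkins/build/1234/src/pkg/mod.py')
+#     # -> './src/pkg/mod.py'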
+
+def find_python_files(dirname):
+ """Yield all of the importable Python files in `dirname`, recursively."""
+ for dirpath, dirnames, filenames in os.walk(dirname, topdown=True):
+ if '__init__.py' not in filenames:
+ # If a directory doesn't have __init__.py, then it isn't
+ # importable and neither are its files
+ del dirnames[:]
+ continue
+ for filename in filenames:
+ if fnmatch.fnmatch(filename, "*.py"):
+ yield os.path.join(dirpath, filename)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/fullcoverage/encodings.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/fullcoverage/encodings.py
new file mode 100644
index 0000000..9409b7d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/fullcoverage/encodings.py
@@ -0,0 +1,50 @@
+"""Imposter encodings module that installs a coverage-style tracer.
+
+This is NOT the encodings module; it is an imposter that sets up tracing
+instrumentation and then replaces itself with the real encodings module.
+
+If the directory that holds this file is placed first in the PYTHONPATH when
+using "coverage" to run Python's tests, then this file will become the very
+first module imported by the internals of Python 3. It installs a
+coverage-compatible trace function that can watch Standard Library modules
+execute from the very earliest stages of Python's own boot process. This fixes
+a problem with coverage - that it starts too late to trace the coverage of many
+of the most fundamental modules in the Standard Library.
+
+"""
+
+import sys
+
+class FullCoverageTracer(object):
+ def __init__(self):
+ # `traces` is a list of trace events. Frames are tricky: the same
+ # frame object is used for a whole scope, with new line numbers
+ # written into it. So in one scope, all the frame objects are the
+ # same object, and will eventually all point to the last line
+ # executed. So we keep the line numbers alongside the frames.
+ # The list looks like:
+ #
+ # traces = [
+ # ((frame, event, arg), lineno), ...
+ # ]
+ #
+ self.traces = []
+
+ def fullcoverage_trace(self, *args):
+ frame, event, arg = args
+ self.traces.append((args, frame.f_lineno))
+ return self.fullcoverage_trace
+
+sys.settrace(FullCoverageTracer().fullcoverage_trace)
+
+# Finally, remove our own directory from sys.path; remove ourselves from
+# sys.modules; and re-import "encodings", which will be the real package
+# this time. Note that the delete from sys.modules dictionary has to
+# happen last, since all of the symbols in this module will become None
+# at that exact moment, including "sys".
+
+parentdirs = [ d for d in sys.path if __file__.startswith(d) ]
+parentdirs.sort(key=len)
+sys.path.remove(parentdirs[-1])
+del sys.modules['encodings']
+import encodings
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/html.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/html.py
new file mode 100644
index 0000000..4814c94
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/html.py
@@ -0,0 +1,328 @@
+"""HTML reporting for Coverage."""
+
+import os, re, shutil
+
+import coverage
+from coverage.backward import pickle, write_encoded
+from coverage.misc import CoverageException, Hasher
+from coverage.phystokens import source_token_lines
+from coverage.report import Reporter
+from coverage.templite import Templite
+
+# Disable pylint msg W0612, because a bunch of variables look unused, but
+# they're accessed in a Templite context via locals().
+# pylint: disable=W0612
+
+def data_filename(fname):
+ """Return the path to a data file of ours."""
+ return os.path.join(os.path.split(__file__)[0], fname)
+
+def data(fname):
+ """Return the contents of a data file of ours."""
+ data_file = open(data_filename(fname))
+ try:
+ return data_file.read()
+ finally:
+ data_file.close()
+
+
+class HtmlReporter(Reporter):
+ """HTML reporting."""
+
+ # These files will be copied from the htmlfiles dir to the output dir.
+ STATIC_FILES = [
+ "style.css",
+ "jquery-1.4.3.min.js",
+ "jquery.hotkeys.js",
+ "jquery.isonscreen.js",
+ "jquery.tablesorter.min.js",
+ "coverage_html.js",
+ "keybd_closed.png",
+ "keybd_open.png",
+ ]
+
+ def __init__(self, cov, ignore_errors=False):
+ super(HtmlReporter, self).__init__(cov, ignore_errors)
+ self.directory = None
+ self.template_globals = {
+ 'escape': escape,
+ '__url__': coverage.__url__,
+ '__version__': coverage.__version__,
+ }
+ self.source_tmpl = Templite(
+ data("htmlfiles/pyfile.html"), self.template_globals
+ )
+
+ self.coverage = cov
+
+ self.files = []
+ self.arcs = self.coverage.data.has_arcs()
+ self.status = HtmlStatus()
+
+ def report(self, morfs, config=None):
+ """Generate an HTML report for `morfs`.
+
+ `morfs` is a list of modules or filenames. `config` is a
+ CoverageConfig instance.
+
+ """
+ assert config.html_dir, "must provide a directory for html reporting"
+
+ # Read the status data.
+ self.status.read(config.html_dir)
+
+ # Check that this run used the same settings as the last run.
+ m = Hasher()
+ m.update(config)
+ these_settings = m.digest()
+ if self.status.settings_hash() != these_settings:
+ self.status.reset()
+ self.status.set_settings_hash(these_settings)
+
+ # Process all the files.
+ self.report_files(self.html_file, morfs, config, config.html_dir)
+
+ if not self.files:
+ raise CoverageException("No data to report.")
+
+ # Write the index file.
+ self.index_file()
+
+ self.make_local_static_report_files()
+
+ def make_local_static_report_files(self):
+ """Make local instances of static files for HTML report."""
+ for static in self.STATIC_FILES:
+ shutil.copyfile(
+ data_filename("htmlfiles/" + static),
+ os.path.join(self.directory, static)
+ )
+
+ def write_html(self, fname, html):
+ """Write `html` to `fname`, properly encoded."""
+ write_encoded(fname, html, 'ascii', 'xmlcharrefreplace')
+
+ def file_hash(self, source, cu):
+ """Compute a hash that changes if the file needs to be re-reported."""
+ m = Hasher()
+ m.update(source)
+ self.coverage.data.add_to_hash(cu.filename, m)
+ return m.digest()
+
+ def html_file(self, cu, analysis):
+ """Generate an HTML file for one source file."""
+ source_file = cu.source_file()
+ try:
+ source = source_file.read()
+ finally:
+ source_file.close()
+
+ # Find out if the file on disk is already correct.
+ flat_rootname = cu.flat_rootname()
+ this_hash = self.file_hash(source, cu)
+ that_hash = self.status.file_hash(flat_rootname)
+ if this_hash == that_hash:
+ # Nothing has changed to require the file to be reported again.
+ self.files.append(self.status.index_info(flat_rootname))
+ return
+
+ self.status.set_file_hash(flat_rootname, this_hash)
+
+ nums = analysis.numbers
+
+ missing_branch_arcs = analysis.missing_branch_arcs()
+ n_par = 0 # accumulated below.
+ arcs = self.arcs
+
+ # These classes determine which lines are highlighted by default.
+ c_run = "run hide_run"
+ c_exc = "exc"
+ c_mis = "mis"
+ c_par = "par " + c_run
+
+ lines = []
+
+ for lineno, line in enumerate(source_token_lines(source)):
+ lineno += 1 # 1-based line numbers.
+ # Figure out how to mark this line.
+ line_class = []
+ annotate_html = ""
+ annotate_title = ""
+ if lineno in analysis.statements:
+ line_class.append("stm")
+ if lineno in analysis.excluded:
+ line_class.append(c_exc)
+ elif lineno in analysis.missing:
+ line_class.append(c_mis)
+ elif self.arcs and lineno in missing_branch_arcs:
+ line_class.append(c_par)
+ n_par += 1
+ annlines = []
+ for b in missing_branch_arcs[lineno]:
+ if b < 0:
+ annlines.append("exit")
+ else:
+ annlines.append(str(b))
+ annotate_html = " ".join(annlines)
+ if len(annlines) > 1:
+ annotate_title = "no jumps to these line numbers"
+ elif len(annlines) == 1:
+ annotate_title = "no jump to this line number"
+ elif lineno in analysis.statements:
+ line_class.append(c_run)
+
+ # Build the HTML for the line
+ html = []
+ for tok_type, tok_text in line:
+ if tok_type == "ws":
+ html.append(escape(tok_text))
+ else:
+ tok_html = escape(tok_text) or '&nbsp;'
+ html.append(
+ "<span class='%s'>%s</span>" % (tok_type, tok_html)
+ )
+
+ lines.append({
+ 'html': ''.join(html),
+ 'number': lineno,
+ 'class': ' '.join(line_class) or "pln",
+ 'annotate': annotate_html,
+ 'annotate_title': annotate_title,
+ })
+
+ # Write the HTML page for this file.
+ html_filename = flat_rootname + ".html"
+ html_path = os.path.join(self.directory, html_filename)
+
+ html = spaceless(self.source_tmpl.render(locals()))
+ self.write_html(html_path, html)
+
+ # Save this file's information for the index file.
+ index_info = {
+ 'nums': nums,
+ 'par': n_par,
+ 'html_filename': html_filename,
+ 'name': cu.name,
+ }
+ self.files.append(index_info)
+ self.status.set_index_info(flat_rootname, index_info)
+
+ def index_file(self):
+ """Write the index.html file for this report."""
+ index_tmpl = Templite(
+ data("htmlfiles/index.html"), self.template_globals
+ )
+
+ files = self.files
+ arcs = self.arcs
+
+ totals = sum([f['nums'] for f in files])
+
+ self.write_html(
+ os.path.join(self.directory, "index.html"),
+ index_tmpl.render(locals())
+ )
+
+ # Write the latest hashes for next time.
+ self.status.write(self.directory)
+
+
+class HtmlStatus(object):
+ """The status information we keep to support incremental reporting."""
+
+ STATUS_FILE = "status.dat"
+ STATUS_FORMAT = 1
+
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ """Initialize to empty."""
+ self.settings = ''
+ self.files = {}
+
+ def read(self, directory):
+ """Read the last status in `directory`."""
+ usable = False
+ try:
+ status_file = os.path.join(directory, self.STATUS_FILE)
+ status = pickle.load(open(status_file, "rb"))
+ except IOError:
+ usable = False
+ else:
+ usable = True
+ if status['format'] != self.STATUS_FORMAT:
+ usable = False
+ elif status['version'] != coverage.__version__:
+ usable = False
+
+ if usable:
+ self.files = status['files']
+ self.settings = status['settings']
+ else:
+ self.reset()
+
+ def write(self, directory):
+ """Write the current status to `directory`."""
+ status_file = os.path.join(directory, self.STATUS_FILE)
+ status = {
+ 'format': self.STATUS_FORMAT,
+ 'version': coverage.__version__,
+ 'settings': self.settings,
+ 'files': self.files,
+ }
+ fout = open(status_file, "wb")
+ try:
+ pickle.dump(status, fout)
+ finally:
+ fout.close()
+
+ def settings_hash(self):
+ """Get the hash of the coverage.py settings."""
+ return self.settings
+
+ def set_settings_hash(self, settings):
+ """Set the hash of the coverage.py settings."""
+ self.settings = settings
+
+ def file_hash(self, fname):
+ """Get the hash of `fname`'s contents."""
+ return self.files.get(fname, {}).get('hash', '')
+
+ def set_file_hash(self, fname, val):
+ """Set the hash of `fname`'s contents."""
+ self.files.setdefault(fname, {})['hash'] = val
+
+ def index_info(self, fname):
+ """Get the information for index.html for `fname`."""
+ return self.files.get(fname, {}).get('index', {})
+
+ def set_index_info(self, fname, info):
+ """Set the information for index.html for `fname`."""
+ self.files.setdefault(fname, {})['index'] = info
+
+
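
(A minimal usage sketch, not part of the patch: write() pickles the
format/version/settings/files dict, and read() only trusts a status file when
both the format number and the coverage version match. The temp directory and
hash strings below are invented for illustration.)

    import tempfile

    status = HtmlStatus()
    status.set_settings_hash("settings-hash")          # made-up value
    status.set_file_hash("mod_py", "contents-hash")    # made-up value

    tmpdir = tempfile.mkdtemp()
    status.write(tmpdir)        # pickles the status dict to status.dat

    fresh = HtmlStatus()
    fresh.read(tmpdir)          # usable: format and version both match
    assert fresh.file_hash("mod_py") == "contents-hash"
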
+# Helpers for templates and generating HTML
+
+def escape(t):
+ """HTML-escape the text in `t`."""
+ return (t
+ # Convert HTML special chars into HTML entities.
+ .replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
+ .replace("'", "&#39;").replace('"', "&quot;")
+ # Convert runs of spaces: "......" -> "&nbsp;.&nbsp;.&nbsp;."
+ .replace("  ", "&nbsp; ")
+ # To deal with odd-length runs, convert the final pair of spaces
+ # so that "....." -> "&nbsp;.&nbsp;&nbsp;."
+ .replace("  ", " &nbsp;")
+ )
+
+def spaceless(html):
+ """Squeeze out some annoying extra space from an HTML string.
+
+ Nicely-formatted templates mean lots of extra space in the result.
+ Get rid of some.
+
+ """
+ html = re.sub(r">\s+<p ", ">\n<p ", html)
+ return html
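
(For illustration only: why escape() replaces two-space pairs twice. The first
pass converts pairs left to right, which leaves a trailing pair behind on
odd-length runs; the second pass converts that leftover, so every other space
in the run ends up non-breaking and the browser preserves the run's width.)

    def convert_spaces(t):
        t = t.replace("  ", "&nbsp; ")   # pass 1: pairs, scanning left to right
        t = t.replace("  ", " &nbsp;")   # pass 2: the pair an odd run leaves over
        return t

    print(convert_spaces(" " * 6))   # '&nbsp; &nbsp; &nbsp; '
    print(convert_spaces(" " * 5))   # '&nbsp; &nbsp; &nbsp;'
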
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/coverage_html.js b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/coverage_html.js
new file mode 100644
index 0000000..da3e22c
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/coverage_html.js
@@ -0,0 +1,372 @@
+// Coverage.py HTML report browser code.
+/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */
+/*global coverage: true, document, window, $ */
+
+coverage = {};
+
+// Find all the elements with shortkey_* class, and use them to assign a shortcut key.
+coverage.assign_shortkeys = function () {
+ $("*[class*='shortkey_']").each(function (i, e) {
+ $.each($(e).attr("class").split(" "), function (i, c) {
+ if (/^shortkey_/.test(c)) {
+ $(document).bind('keydown', c.substr(9), function () {
+ $(e).click();
+ });
+ }
+ });
+ });
+};
+
+// Create the events for the help panel.
+coverage.wire_up_help_panel = function () {
+ $("#keyboard_icon").click(function () {
+ // Show the help panel, and position it so the keyboard icon in the
+ // panel is in the same place as the keyboard icon in the header.
+ $(".help_panel").show();
+ var koff = $("#keyboard_icon").offset();
+ var poff = $("#panel_icon").position();
+ $(".help_panel").offset({
+ top: koff.top-poff.top,
+ left: koff.left-poff.left
+ });
+ });
+ $("#panel_icon").click(function () {
+ $(".help_panel").hide();
+ });
+};
+
+// Loaded on index.html
+coverage.index_ready = function ($) {
+ // Look for a cookie containing previous sort settings:
+ var sort_list = [];
+ var cookie_name = "COVERAGE_INDEX_SORT";
+ var i;
+
+ // This almost makes it worth installing the jQuery cookie plugin:
+ if (document.cookie.indexOf(cookie_name) > -1) {
+ var cookies = document.cookie.split(";");
+ for (i = 0; i < cookies.length; i++) {
+ var parts = cookies[i].split("=");
+
+ if ($.trim(parts[0]) === cookie_name && parts[1]) {
+ sort_list = eval("[[" + parts[1] + "]]");
+ break;
+ }
+ }
+ }
+
+ // Create a new widget which exists only to save and restore
+ // the sort order:
+ $.tablesorter.addWidget({
+ id: "persistentSort",
+
+ // Format is called by the widget before displaying:
+ format: function (table) {
+ if (table.config.sortList.length === 0 && sort_list.length > 0) {
+ // This table hasn't been sorted before - we'll use
+ // our stored settings:
+ $(table).trigger('sorton', [sort_list]);
+ }
+ else {
+ // This is not the first load - something has
+ // already defined sorting so we'll just update
+ // our stored value to match:
+ sort_list = table.config.sortList;
+ }
+ }
+ });
+
+ // Configure our tablesorter to handle the variable number of
+ // columns produced depending on report options:
+ var headers = [];
+ var col_count = $("table.index > thead > tr > th").length;
+
+ headers[0] = { sorter: 'text' };
+ for (i = 1; i < col_count-1; i++) {
+ headers[i] = { sorter: 'digit' };
+ }
+ headers[col_count-1] = { sorter: 'percent' };
+
+ // Enable the table sorter:
+ $("table.index").tablesorter({
+ widgets: ['persistentSort'],
+ headers: headers
+ });
+
+ coverage.assign_shortkeys();
+ coverage.wire_up_help_panel();
+
+ // Watch for page unload events so we can save the final sort settings:
+ $(window).unload(function () {
+ document.cookie = cookie_name + "=" + sort_list.toString() + "; path=/";
+ });
+};
+
+// -- pyfile stuff --
+
+coverage.pyfile_ready = function ($) {
+ // If we're directed to a particular line number, highlight the line.
+ var frag = location.hash;
+ if (frag.length > 2 && frag[1] === 'n') {
+ $(frag).addClass('highlight');
+ coverage.set_sel(parseInt(frag.substr(2), 10));
+ }
+ else {
+ coverage.set_sel(0);
+ }
+
+ $(document)
+ .bind('keydown', 'j', coverage.to_next_chunk_nicely)
+ .bind('keydown', 'k', coverage.to_prev_chunk_nicely)
+ .bind('keydown', '0', coverage.to_top)
+ .bind('keydown', '1', coverage.to_first_chunk)
+ ;
+
+ coverage.assign_shortkeys();
+ coverage.wire_up_help_panel();
+};
+
+coverage.toggle_lines = function (btn, cls) {
+ btn = $(btn);
+ var hide = "hide_"+cls;
+ if (btn.hasClass(hide)) {
+ $("#source ."+cls).removeClass(hide);
+ btn.removeClass(hide);
+ }
+ else {
+ $("#source ."+cls).addClass(hide);
+ btn.addClass(hide);
+ }
+};
+
+// Return the nth line div.
+coverage.line_elt = function (n) {
+ return $("#t" + n);
+};
+
+// Return the nth line number div.
+coverage.num_elt = function (n) {
+ return $("#n" + n);
+};
+
+// Return the container of all the code.
+coverage.code_container = function () {
+ return $(".linenos");
+};
+
+// Set the selection. b and e are line numbers.
+coverage.set_sel = function (b, e) {
+ // The first line selected.
+ coverage.sel_begin = b;
+ // The next line not selected.
+ coverage.sel_end = (e === undefined) ? b+1 : e;
+};
+
+coverage.to_top = function () {
+ coverage.set_sel(0, 1);
+ coverage.scroll_window(0);
+};
+
+coverage.to_first_chunk = function () {
+ coverage.set_sel(0, 1);
+ coverage.to_next_chunk();
+};
+
+coverage.is_transparent = function (color) {
+ // Different browsers return different colors for "none".
+ return color === "transparent" || color === "rgba(0, 0, 0, 0)";
+};
+
+coverage.to_next_chunk = function () {
+ var c = coverage;
+
+ // Find the start of the next colored chunk.
+ var probe = c.sel_end;
+ while (true) {
+ var probe_line = c.line_elt(probe);
+ if (probe_line.length === 0) {
+ return;
+ }
+ var color = probe_line.css("background-color");
+ if (!c.is_transparent(color)) {
+ break;
+ }
+ probe++;
+ }
+
+ // There's a next chunk, `probe` points to it.
+ var begin = probe;
+
+ // Find the end of this chunk.
+ var next_color = color;
+ while (next_color === color) {
+ probe++;
+ probe_line = c.line_elt(probe);
+ next_color = probe_line.css("background-color");
+ }
+ c.set_sel(begin, probe);
+ c.show_selection();
+};
+
+coverage.to_prev_chunk = function () {
+ var c = coverage;
+
+ // Find the end of the prev colored chunk.
+ var probe = c.sel_begin-1;
+ var probe_line = c.line_elt(probe);
+ if (probe_line.length === 0) {
+ return;
+ }
+ var color = probe_line.css("background-color");
+ while (probe > 0 && c.is_transparent(color)) {
+ probe--;
+ probe_line = c.line_elt(probe);
+ if (probe_line.length === 0) {
+ return;
+ }
+ color = probe_line.css("background-color");
+ }
+
+ // There's a prev chunk, `probe` points to its last line.
+ var end = probe+1;
+
+ // Find the beginning of this chunk.
+ var prev_color = color;
+ while (prev_color === color) {
+ probe--;
+ probe_line = c.line_elt(probe);
+ prev_color = probe_line.css("background-color");
+ }
+ c.set_sel(probe+1, end);
+ c.show_selection();
+};
+
+// Return the line number of the line nearest pixel position pos
+coverage.line_at_pos = function (pos) {
+ var l1 = coverage.line_elt(1),
+ l2 = coverage.line_elt(2),
+ result;
+ if (l1.length && l2.length) {
+ var l1_top = l1.offset().top,
+ line_height = l2.offset().top - l1_top,
+ nlines = (pos - l1_top) / line_height;
+ if (nlines < 1) {
+ result = 1;
+ }
+ else {
+ result = Math.ceil(nlines);
+ }
+ }
+ else {
+ result = 1;
+ }
+ return result;
+};
+
+// Returns 0, 1, or 2: how many of the two ends of the selection are on
+// the screen right now?
+coverage.selection_ends_on_screen = function () {
+ if (coverage.sel_begin === 0) {
+ return 0;
+ }
+
+ var top = coverage.line_elt(coverage.sel_begin);
+ var next = coverage.line_elt(coverage.sel_end-1);
+
+ return (
+ (top.isOnScreen() ? 1 : 0) +
+ (next.isOnScreen() ? 1 : 0)
+ );
+};
+
+coverage.to_next_chunk_nicely = function () {
+ coverage.finish_scrolling();
+ if (coverage.selection_ends_on_screen() === 0) {
+ // The selection is entirely off the screen: select the top line on
+ // the screen.
+ var win = $(window);
+ coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop()));
+ }
+ coverage.to_next_chunk();
+};
+
+coverage.to_prev_chunk_nicely = function () {
+ coverage.finish_scrolling();
+ if (coverage.selection_ends_on_screen() === 0) {
+ var win = $(window);
+ coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop() + win.height()));
+ }
+ coverage.to_prev_chunk();
+};
+
+// Select line number lineno, or if it is in a colored chunk, select the
+// entire chunk
+coverage.select_line_or_chunk = function (lineno) {
+ var c = coverage;
+ var probe_line = c.line_elt(lineno);
+ if (probe_line.length === 0) {
+ return;
+ }
+ var the_color = probe_line.css("background-color");
+ if (!c.is_transparent(the_color)) {
+ // The line is in a highlighted chunk.
+ // Search backward for the first line.
+ var probe = lineno;
+ var color = the_color;
+ while (probe > 0 && color === the_color) {
+ probe--;
+ probe_line = c.line_elt(probe);
+ if (probe_line.length === 0) {
+ break;
+ }
+ color = probe_line.css("background-color");
+ }
+ var begin = probe + 1;
+
+ // Search forward for the last line.
+ probe = lineno;
+ color = the_color;
+ while (color === the_color) {
+ probe++;
+ probe_line = c.line_elt(probe);
+ color = probe_line.css("background-color");
+ }
+
+ coverage.set_sel(begin, probe);
+ }
+ else {
+ coverage.set_sel(lineno);
+ }
+};
+
+coverage.show_selection = function () {
+ var c = coverage;
+
+ // Highlight the lines in the chunk
+ c.code_container().find(".highlight").removeClass("highlight");
+ for (var probe = c.sel_begin; probe > 0 && probe < c.sel_end; probe++) {
+ c.num_elt(probe).addClass("highlight");
+ }
+
+ c.scroll_to_selection();
+};
+
+coverage.scroll_to_selection = function () {
+ // Scroll the page if the chunk isn't fully visible.
+ if (coverage.selection_ends_on_screen() < 2) {
+ // Need to move the page. The html,body trick makes it scroll in all
+ // browsers, got it from http://stackoverflow.com/questions/3042651
+ var top = coverage.line_elt(coverage.sel_begin);
+ var top_pos = parseInt(top.offset().top, 10);
+ coverage.scroll_window(top_pos - 30);
+ }
+};
+
+coverage.scroll_window = function (to_pos) {
+ $("html,body").animate({scrollTop: to_pos}, 200);
+};
+
+coverage.finish_scrolling = function () {
+ $("html,body").stop(true, true);
+};
+
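
(A rough Python rendition, for illustration only, of the chunk scan that
to_next_chunk() performs against the DOM: walk forward from the current
selection until a non-transparent background is found, then extend through the
run of lines with that same color. The `colors` list is a hypothetical
stand-in for the per-line CSS background colors, 1-based like the #tN divs,
with "" playing the role of transparent.)

    def next_chunk(colors, sel_end):
        """Return (begin, end) of the next colored chunk at or after sel_end."""
        probe = sel_end
        # Find the start of the next colored run.
        while probe <= len(colors) and not colors[probe - 1]:
            probe += 1
        if probe > len(colors):
            return None                      # ran off the end: no next chunk
        begin, color = probe, colors[probe - 1]
        # Extend through the run of lines sharing that color.
        while probe <= len(colors) and colors[probe - 1] == color:
            probe += 1
        return begin, probe                  # end is exclusive, like sel_end

    colors = ["", "", "red", "red", "", "green"]
    print(next_chunk(colors, 1))   # (3, 5)
    print(next_chunk(colors, 5))   # (6, 7)
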
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/index.html b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/index.html
new file mode 100644
index 0000000..04b314a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/index.html
@@ -0,0 +1,101 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
+ <title>Coverage report</title>
+ <link rel='stylesheet' href='style.css' type='text/css'>
+ <script type='text/javascript' src='jquery-1.4.3.min.js'></script>
+ <script type='text/javascript' src='jquery.tablesorter.min.js'></script>
+ <script type='text/javascript' src='jquery.hotkeys.js'></script>
+ <script type='text/javascript' src='coverage_html.js'></script>
+ <script type='text/javascript' charset='utf-8'>
+ jQuery(document).ready(coverage.index_ready);
+ </script>
+</head>
+<body id='indexfile'>
+
+<div id='header'>
+ <div class='content'>
+ <h1>Coverage report:
+ <span class='pc_cov'>{{totals.pc_covered_str}}%</span>
+ </h1>
+ <img id='keyboard_icon' src='keybd_closed.png'>
+ </div>
+</div>
+
+<div class='help_panel'>
+ <img id='panel_icon' src='keybd_open.png'>
+ <p class='legend'>Hot-keys on this page</p>
+ <div>
+ <p class='keyhelp'>
+ <span class='key'>n</span>
+ <span class='key'>s</span>
+ <span class='key'>m</span>
+ <span class='key'>x</span>
+ {% if arcs %}
+ <span class='key'>b</span>
+ <span class='key'>p</span>
+ {% endif %}
+ <span class='key'>c</span> change column sorting
+ </p>
+ </div>
+</div>
+
+<div id='index'>
+ <table class='index'>
+ <thead>
+ {# The title='' attr doesn't work in Safari. #}
+ <tr class='tablehead' title='Click to sort'>
+ <th class='name left headerSortDown shortkey_n'>Module</th>
+ <th class='shortkey_s'>statements</th>
+ <th class='shortkey_m'>missing</th>
+ <th class='shortkey_x'>excluded</th>
+ {% if arcs %}
+ <th class='shortkey_b'>branches</th>
+ <th class='shortkey_p'>partial</th>
+ {% endif %}
+ <th class='right shortkey_c'>coverage</th>
+ </tr>
+ </thead>
+ {# HTML syntax requires thead, tfoot, tbody #}
+ <tfoot>
+ <tr class='total'>
+ <td class='name left'>Total</td>
+ <td>{{totals.n_statements}}</td>
+ <td>{{totals.n_missing}}</td>
+ <td>{{totals.n_excluded}}</td>
+ {% if arcs %}
+ <td>{{totals.n_branches}}</td>
+ <td>{{totals.n_missing_branches}}</td>
+ {% endif %}
+ <td class='right'>{{totals.pc_covered_str}}%</td>
+ </tr>
+ </tfoot>
+ <tbody>
+ {% for file in files %}
+ <tr class='file'>
+ <td class='name left'><a href='{{file.html_filename}}'>{{file.name}}</a></td>
+ <td>{{file.nums.n_statements}}</td>
+ <td>{{file.nums.n_missing}}</td>
+ <td>{{file.nums.n_excluded}}</td>
+ {% if arcs %}
+ <td>{{file.nums.n_branches}}</td>
+ <td>{{file.nums.n_missing_branches}}</td>
+ {% endif %}
+ <td class='right'>{{file.nums.pc_covered_str}}%</td>
+ </tr>
+ {% endfor %}
+ </tbody>
+ </table>
+</div>
+
+<div id='footer'>
+ <div class='content'>
+ <p>
+ <a class='nav' href='{{__url__}}'>coverage.py v{{__version__}}</a>
+ </p>
+ </div>
+</div>
+
+</body>
+</html>
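
(For orientation, a sketch of the context this template is rendered against:
index_file() above passes locals(), plus the template globals, so `totals`,
`files`, `arcs`, `__url__` and `__version__` are the names that matter. All
values below are invented for illustration; dotted names in the template such
as totals.pc_covered_str are resolved against the context object.)

    class Totals(object):          # stand-in for coverage's totals object
        n_statements = 120
        n_missing = 30
        n_excluded = 2
        pc_covered_str = "75"

    context = {
        'totals': Totals(),
        'arcs': False,             # False hides the branches/partial columns
        'files': [],               # one index_info dict per reported module
        '__url__': '...',          # filled in from the coverage distribution
        '__version__': '...',
    }
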
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/jquery-1.4.3.min.js b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/jquery-1.4.3.min.js
new file mode 100644
index 0000000..c941a5f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/jquery-1.4.3.min.js
@@ -0,0 +1,166 @@
+/*!
+ * jQuery JavaScript Library v1.4.3
+ * http://jquery.com/
+ *
+ * Copyright 2010, John Resig
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ * http://jquery.org/license
+ *
+ * Includes Sizzle.js
+ * http://sizzlejs.com/
+ * Copyright 2010, The Dojo Foundation
+ * Released under the MIT, BSD, and GPL Licenses.
+ *
+ * Date: Thu Oct 14 23:10:06 2010 -0400
+ */
+(function(E,A){function U(){return false}function ba(){return true}function ja(a,b,d){d[0].type=a;return c.event.handle.apply(b,d)}function Ga(a){var b,d,e=[],f=[],h,k,l,n,s,v,B,D;k=c.data(this,this.nodeType?"events":"__events__");if(typeof k==="function")k=k.events;if(!(a.liveFired===this||!k||!k.live||a.button&&a.type==="click")){if(a.namespace)D=RegExp("(^|\\.)"+a.namespace.split(".").join("\\.(?:.*\\.)?")+"(\\.|$)");a.liveFired=this;var H=k.live.slice(0);for(n=0;n<H.length;n++){k=H[n];k.origType.replace(X,
+"")===a.type?f.push(k.selector):H.splice(n--,1)}f=c(a.target).closest(f,a.currentTarget);s=0;for(v=f.length;s<v;s++){B=f[s];for(n=0;n<H.length;n++){k=H[n];if(B.selector===k.selector&&(!D||D.test(k.namespace))){l=B.elem;h=null;if(k.preType==="mouseenter"||k.preType==="mouseleave"){a.type=k.preType;h=c(a.relatedTarget).closest(k.selector)[0]}if(!h||h!==l)e.push({elem:l,handleObj:k,level:B.level})}}}s=0;for(v=e.length;s<v;s++){f=e[s];if(d&&f.level>d)break;a.currentTarget=f.elem;a.data=f.handleObj.data;
+a.handleObj=f.handleObj;D=f.handleObj.origHandler.apply(f.elem,arguments);if(D===false||a.isPropagationStopped()){d=f.level;if(D===false)b=false}}return b}}function Y(a,b){return(a&&a!=="*"?a+".":"")+b.replace(Ha,"`").replace(Ia,"&")}function ka(a,b,d){if(c.isFunction(b))return c.grep(a,function(f,h){return!!b.call(f,h,f)===d});else if(b.nodeType)return c.grep(a,function(f){return f===b===d});else if(typeof b==="string"){var e=c.grep(a,function(f){return f.nodeType===1});if(Ja.test(b))return c.filter(b,
+e,!d);else b=c.filter(b,e)}return c.grep(a,function(f){return c.inArray(f,b)>=0===d})}function la(a,b){var d=0;b.each(function(){if(this.nodeName===(a[d]&&a[d].nodeName)){var e=c.data(a[d++]),f=c.data(this,e);if(e=e&&e.events){delete f.handle;f.events={};for(var h in e)for(var k in e[h])c.event.add(this,h,e[h][k],e[h][k].data)}}})}function Ka(a,b){b.src?c.ajax({url:b.src,async:false,dataType:"script"}):c.globalEval(b.text||b.textContent||b.innerHTML||"");b.parentNode&&b.parentNode.removeChild(b)}
+function ma(a,b,d){var e=b==="width"?a.offsetWidth:a.offsetHeight;if(d==="border")return e;c.each(b==="width"?La:Ma,function(){d||(e-=parseFloat(c.css(a,"padding"+this))||0);if(d==="margin")e+=parseFloat(c.css(a,"margin"+this))||0;else e-=parseFloat(c.css(a,"border"+this+"Width"))||0});return e}function ca(a,b,d,e){if(c.isArray(b)&&b.length)c.each(b,function(f,h){d||Na.test(a)?e(a,h):ca(a+"["+(typeof h==="object"||c.isArray(h)?f:"")+"]",h,d,e)});else if(!d&&b!=null&&typeof b==="object")c.isEmptyObject(b)?
+e(a,""):c.each(b,function(f,h){ca(a+"["+f+"]",h,d,e)});else e(a,b)}function S(a,b){var d={};c.each(na.concat.apply([],na.slice(0,b)),function(){d[this]=a});return d}function oa(a){if(!da[a]){var b=c("<"+a+">").appendTo("body"),d=b.css("display");b.remove();if(d==="none"||d==="")d="block";da[a]=d}return da[a]}function ea(a){return c.isWindow(a)?a:a.nodeType===9?a.defaultView||a.parentWindow:false}var u=E.document,c=function(){function a(){if(!b.isReady){try{u.documentElement.doScroll("left")}catch(i){setTimeout(a,
+1);return}b.ready()}}var b=function(i,r){return new b.fn.init(i,r)},d=E.jQuery,e=E.$,f,h=/^(?:[^<]*(<[\w\W]+>)[^>]*$|#([\w\-]+)$)/,k=/\S/,l=/^\s+/,n=/\s+$/,s=/\W/,v=/\d/,B=/^<(\w+)\s*\/?>(?:<\/\1>)?$/,D=/^[\],:{}\s]*$/,H=/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,w=/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,G=/(?:^|:|,)(?:\s*\[)+/g,M=/(webkit)[ \/]([\w.]+)/,g=/(opera)(?:.*version)?[ \/]([\w.]+)/,j=/(msie) ([\w.]+)/,o=/(mozilla)(?:.*? rv:([\w.]+))?/,m=navigator.userAgent,p=false,
+q=[],t,x=Object.prototype.toString,C=Object.prototype.hasOwnProperty,P=Array.prototype.push,N=Array.prototype.slice,R=String.prototype.trim,Q=Array.prototype.indexOf,L={};b.fn=b.prototype={init:function(i,r){var y,z,F;if(!i)return this;if(i.nodeType){this.context=this[0]=i;this.length=1;return this}if(i==="body"&&!r&&u.body){this.context=u;this[0]=u.body;this.selector="body";this.length=1;return this}if(typeof i==="string")if((y=h.exec(i))&&(y[1]||!r))if(y[1]){F=r?r.ownerDocument||r:u;if(z=B.exec(i))if(b.isPlainObject(r)){i=
+[u.createElement(z[1])];b.fn.attr.call(i,r,true)}else i=[F.createElement(z[1])];else{z=b.buildFragment([y[1]],[F]);i=(z.cacheable?z.fragment.cloneNode(true):z.fragment).childNodes}return b.merge(this,i)}else{if((z=u.getElementById(y[2]))&&z.parentNode){if(z.id!==y[2])return f.find(i);this.length=1;this[0]=z}this.context=u;this.selector=i;return this}else if(!r&&!s.test(i)){this.selector=i;this.context=u;i=u.getElementsByTagName(i);return b.merge(this,i)}else return!r||r.jquery?(r||f).find(i):b(r).find(i);
+else if(b.isFunction(i))return f.ready(i);if(i.selector!==A){this.selector=i.selector;this.context=i.context}return b.makeArray(i,this)},selector:"",jquery:"1.4.3",length:0,size:function(){return this.length},toArray:function(){return N.call(this,0)},get:function(i){return i==null?this.toArray():i<0?this.slice(i)[0]:this[i]},pushStack:function(i,r,y){var z=b();b.isArray(i)?P.apply(z,i):b.merge(z,i);z.prevObject=this;z.context=this.context;if(r==="find")z.selector=this.selector+(this.selector?" ":
+"")+y;else if(r)z.selector=this.selector+"."+r+"("+y+")";return z},each:function(i,r){return b.each(this,i,r)},ready:function(i){b.bindReady();if(b.isReady)i.call(u,b);else q&&q.push(i);return this},eq:function(i){return i===-1?this.slice(i):this.slice(i,+i+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(N.apply(this,arguments),"slice",N.call(arguments).join(","))},map:function(i){return this.pushStack(b.map(this,function(r,y){return i.call(r,
+y,r)}))},end:function(){return this.prevObject||b(null)},push:P,sort:[].sort,splice:[].splice};b.fn.init.prototype=b.fn;b.extend=b.fn.extend=function(){var i=arguments[0]||{},r=1,y=arguments.length,z=false,F,I,K,J,fa;if(typeof i==="boolean"){z=i;i=arguments[1]||{};r=2}if(typeof i!=="object"&&!b.isFunction(i))i={};if(y===r){i=this;--r}for(;r<y;r++)if((F=arguments[r])!=null)for(I in F){K=i[I];J=F[I];if(i!==J)if(z&&J&&(b.isPlainObject(J)||(fa=b.isArray(J)))){if(fa){fa=false;clone=K&&b.isArray(K)?K:[]}else clone=
+K&&b.isPlainObject(K)?K:{};i[I]=b.extend(z,clone,J)}else if(J!==A)i[I]=J}return i};b.extend({noConflict:function(i){E.$=e;if(i)E.jQuery=d;return b},isReady:false,readyWait:1,ready:function(i){i===true&&b.readyWait--;if(!b.readyWait||i!==true&&!b.isReady){if(!u.body)return setTimeout(b.ready,1);b.isReady=true;if(!(i!==true&&--b.readyWait>0)){if(q){for(var r=0;i=q[r++];)i.call(u,b);q=null}b.fn.triggerHandler&&b(u).triggerHandler("ready")}}},bindReady:function(){if(!p){p=true;if(u.readyState==="complete")return setTimeout(b.ready,
+1);if(u.addEventListener){u.addEventListener("DOMContentLoaded",t,false);E.addEventListener("load",b.ready,false)}else if(u.attachEvent){u.attachEvent("onreadystatechange",t);E.attachEvent("onload",b.ready);var i=false;try{i=E.frameElement==null}catch(r){}u.documentElement.doScroll&&i&&a()}}},isFunction:function(i){return b.type(i)==="function"},isArray:Array.isArray||function(i){return b.type(i)==="array"},isWindow:function(i){return i&&typeof i==="object"&&"setInterval"in i},isNaN:function(i){return i==
+null||!v.test(i)||isNaN(i)},type:function(i){return i==null?String(i):L[x.call(i)]||"object"},isPlainObject:function(i){if(!i||b.type(i)!=="object"||i.nodeType||b.isWindow(i))return false;if(i.constructor&&!C.call(i,"constructor")&&!C.call(i.constructor.prototype,"isPrototypeOf"))return false;for(var r in i);return r===A||C.call(i,r)},isEmptyObject:function(i){for(var r in i)return false;return true},error:function(i){throw i;},parseJSON:function(i){if(typeof i!=="string"||!i)return null;i=b.trim(i);
+if(D.test(i.replace(H,"@").replace(w,"]").replace(G,"")))return E.JSON&&E.JSON.parse?E.JSON.parse(i):(new Function("return "+i))();else b.error("Invalid JSON: "+i)},noop:function(){},globalEval:function(i){if(i&&k.test(i)){var r=u.getElementsByTagName("head")[0]||u.documentElement,y=u.createElement("script");y.type="text/javascript";if(b.support.scriptEval)y.appendChild(u.createTextNode(i));else y.text=i;r.insertBefore(y,r.firstChild);r.removeChild(y)}},nodeName:function(i,r){return i.nodeName&&i.nodeName.toUpperCase()===
+r.toUpperCase()},each:function(i,r,y){var z,F=0,I=i.length,K=I===A||b.isFunction(i);if(y)if(K)for(z in i){if(r.apply(i[z],y)===false)break}else for(;F<I;){if(r.apply(i[F++],y)===false)break}else if(K)for(z in i){if(r.call(i[z],z,i[z])===false)break}else for(y=i[0];F<I&&r.call(y,F,y)!==false;y=i[++F]);return i},trim:R?function(i){return i==null?"":R.call(i)}:function(i){return i==null?"":i.toString().replace(l,"").replace(n,"")},makeArray:function(i,r){var y=r||[];if(i!=null){var z=b.type(i);i.length==
+null||z==="string"||z==="function"||z==="regexp"||b.isWindow(i)?P.call(y,i):b.merge(y,i)}return y},inArray:function(i,r){if(r.indexOf)return r.indexOf(i);for(var y=0,z=r.length;y<z;y++)if(r[y]===i)return y;return-1},merge:function(i,r){var y=i.length,z=0;if(typeof r.length==="number")for(var F=r.length;z<F;z++)i[y++]=r[z];else for(;r[z]!==A;)i[y++]=r[z++];i.length=y;return i},grep:function(i,r,y){var z=[],F;y=!!y;for(var I=0,K=i.length;I<K;I++){F=!!r(i[I],I);y!==F&&z.push(i[I])}return z},map:function(i,
+r,y){for(var z=[],F,I=0,K=i.length;I<K;I++){F=r(i[I],I,y);if(F!=null)z[z.length]=F}return z.concat.apply([],z)},guid:1,proxy:function(i,r,y){if(arguments.length===2)if(typeof r==="string"){y=i;i=y[r];r=A}else if(r&&!b.isFunction(r)){y=r;r=A}if(!r&&i)r=function(){return i.apply(y||this,arguments)};if(i)r.guid=i.guid=i.guid||r.guid||b.guid++;return r},access:function(i,r,y,z,F,I){var K=i.length;if(typeof r==="object"){for(var J in r)b.access(i,J,r[J],z,F,y);return i}if(y!==A){z=!I&&z&&b.isFunction(y);
+for(J=0;J<K;J++)F(i[J],r,z?y.call(i[J],J,F(i[J],r)):y,I);return i}return K?F(i[0],r):A},now:function(){return(new Date).getTime()},uaMatch:function(i){i=i.toLowerCase();i=M.exec(i)||g.exec(i)||j.exec(i)||i.indexOf("compatible")<0&&o.exec(i)||[];return{browser:i[1]||"",version:i[2]||"0"}},browser:{}});b.each("Boolean Number String Function Array Date RegExp Object".split(" "),function(i,r){L["[object "+r+"]"]=r.toLowerCase()});m=b.uaMatch(m);if(m.browser){b.browser[m.browser]=true;b.browser.version=
+m.version}if(b.browser.webkit)b.browser.safari=true;if(Q)b.inArray=function(i,r){return Q.call(r,i)};if(!/\s/.test("\u00a0")){l=/^[\s\xA0]+/;n=/[\s\xA0]+$/}f=b(u);if(u.addEventListener)t=function(){u.removeEventListener("DOMContentLoaded",t,false);b.ready()};else if(u.attachEvent)t=function(){if(u.readyState==="complete"){u.detachEvent("onreadystatechange",t);b.ready()}};return E.jQuery=E.$=b}();(function(){c.support={};var a=u.documentElement,b=u.createElement("script"),d=u.createElement("div"),
+e="script"+c.now();d.style.display="none";d.innerHTML=" <link/><table></table><a href='/a' style='color:red;float:left;opacity:.55;'>a</a><input type='checkbox'/>";var f=d.getElementsByTagName("*"),h=d.getElementsByTagName("a")[0],k=u.createElement("select"),l=k.appendChild(u.createElement("option"));if(!(!f||!f.length||!h)){c.support={leadingWhitespace:d.firstChild.nodeType===3,tbody:!d.getElementsByTagName("tbody").length,htmlSerialize:!!d.getElementsByTagName("link").length,style:/red/.test(h.getAttribute("style")),
+hrefNormalized:h.getAttribute("href")==="/a",opacity:/^0.55$/.test(h.style.opacity),cssFloat:!!h.style.cssFloat,checkOn:d.getElementsByTagName("input")[0].value==="on",optSelected:l.selected,optDisabled:false,checkClone:false,scriptEval:false,noCloneEvent:true,boxModel:null,inlineBlockNeedsLayout:false,shrinkWrapBlocks:false,reliableHiddenOffsets:true};k.disabled=true;c.support.optDisabled=!l.disabled;b.type="text/javascript";try{b.appendChild(u.createTextNode("window."+e+"=1;"))}catch(n){}a.insertBefore(b,
+a.firstChild);if(E[e]){c.support.scriptEval=true;delete E[e]}a.removeChild(b);if(d.attachEvent&&d.fireEvent){d.attachEvent("onclick",function s(){c.support.noCloneEvent=false;d.detachEvent("onclick",s)});d.cloneNode(true).fireEvent("onclick")}d=u.createElement("div");d.innerHTML="<input type='radio' name='radiotest' checked='checked'/>";a=u.createDocumentFragment();a.appendChild(d.firstChild);c.support.checkClone=a.cloneNode(true).cloneNode(true).lastChild.checked;c(function(){var s=u.createElement("div");
+s.style.width=s.style.paddingLeft="1px";u.body.appendChild(s);c.boxModel=c.support.boxModel=s.offsetWidth===2;if("zoom"in s.style){s.style.display="inline";s.style.zoom=1;c.support.inlineBlockNeedsLayout=s.offsetWidth===2;s.style.display="";s.innerHTML="<div style='width:4px;'></div>";c.support.shrinkWrapBlocks=s.offsetWidth!==2}s.innerHTML="<table><tr><td style='padding:0;display:none'></td><td>t</td></tr></table>";var v=s.getElementsByTagName("td");c.support.reliableHiddenOffsets=v[0].offsetHeight===
+0;v[0].style.display="";v[1].style.display="none";c.support.reliableHiddenOffsets=c.support.reliableHiddenOffsets&&v[0].offsetHeight===0;s.innerHTML="";u.body.removeChild(s).style.display="none"});a=function(s){var v=u.createElement("div");s="on"+s;var B=s in v;if(!B){v.setAttribute(s,"return;");B=typeof v[s]==="function"}return B};c.support.submitBubbles=a("submit");c.support.changeBubbles=a("change");a=b=d=f=h=null}})();c.props={"for":"htmlFor","class":"className",readonly:"readOnly",maxlength:"maxLength",
+cellspacing:"cellSpacing",rowspan:"rowSpan",colspan:"colSpan",tabindex:"tabIndex",usemap:"useMap",frameborder:"frameBorder"};var pa={},Oa=/^(?:\{.*\}|\[.*\])$/;c.extend({cache:{},uuid:0,expando:"jQuery"+c.now(),noData:{embed:true,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:true},data:function(a,b,d){if(c.acceptData(a)){a=a==E?pa:a;var e=a.nodeType,f=e?a[c.expando]:null,h=c.cache;if(!(e&&!f&&typeof b==="string"&&d===A)){if(e)f||(a[c.expando]=f=++c.uuid);else h=a;if(typeof b==="object")if(e)h[f]=
+c.extend(h[f],b);else c.extend(h,b);else if(e&&!h[f])h[f]={};a=e?h[f]:h;if(d!==A)a[b]=d;return typeof b==="string"?a[b]:a}}},removeData:function(a,b){if(c.acceptData(a)){a=a==E?pa:a;var d=a.nodeType,e=d?a[c.expando]:a,f=c.cache,h=d?f[e]:e;if(b){if(h){delete h[b];d&&c.isEmptyObject(h)&&c.removeData(a)}}else if(d&&c.support.deleteExpando)delete a[c.expando];else if(a.removeAttribute)a.removeAttribute(c.expando);else if(d)delete f[e];else for(var k in a)delete a[k]}},acceptData:function(a){if(a.nodeName){var b=
+c.noData[a.nodeName.toLowerCase()];if(b)return!(b===true||a.getAttribute("classid")!==b)}return true}});c.fn.extend({data:function(a,b){if(typeof a==="undefined")return this.length?c.data(this[0]):null;else if(typeof a==="object")return this.each(function(){c.data(this,a)});var d=a.split(".");d[1]=d[1]?"."+d[1]:"";if(b===A){var e=this.triggerHandler("getData"+d[1]+"!",[d[0]]);if(e===A&&this.length){e=c.data(this[0],a);if(e===A&&this[0].nodeType===1){e=this[0].getAttribute("data-"+a);if(typeof e===
+"string")try{e=e==="true"?true:e==="false"?false:e==="null"?null:!c.isNaN(e)?parseFloat(e):Oa.test(e)?c.parseJSON(e):e}catch(f){}else e=A}}return e===A&&d[1]?this.data(d[0]):e}else return this.each(function(){var h=c(this),k=[d[0],b];h.triggerHandler("setData"+d[1]+"!",k);c.data(this,a,b);h.triggerHandler("changeData"+d[1]+"!",k)})},removeData:function(a){return this.each(function(){c.removeData(this,a)})}});c.extend({queue:function(a,b,d){if(a){b=(b||"fx")+"queue";var e=c.data(a,b);if(!d)return e||
+[];if(!e||c.isArray(d))e=c.data(a,b,c.makeArray(d));else e.push(d);return e}},dequeue:function(a,b){b=b||"fx";var d=c.queue(a,b),e=d.shift();if(e==="inprogress")e=d.shift();if(e){b==="fx"&&d.unshift("inprogress");e.call(a,function(){c.dequeue(a,b)})}}});c.fn.extend({queue:function(a,b){if(typeof a!=="string"){b=a;a="fx"}if(b===A)return c.queue(this[0],a);return this.each(function(){var d=c.queue(this,a,b);a==="fx"&&d[0]!=="inprogress"&&c.dequeue(this,a)})},dequeue:function(a){return this.each(function(){c.dequeue(this,
+a)})},delay:function(a,b){a=c.fx?c.fx.speeds[a]||a:a;b=b||"fx";return this.queue(b,function(){var d=this;setTimeout(function(){c.dequeue(d,b)},a)})},clearQueue:function(a){return this.queue(a||"fx",[])}});var qa=/[\n\t]/g,ga=/\s+/,Pa=/\r/g,Qa=/^(?:href|src|style)$/,Ra=/^(?:button|input)$/i,Sa=/^(?:button|input|object|select|textarea)$/i,Ta=/^a(?:rea)?$/i,ra=/^(?:radio|checkbox)$/i;c.fn.extend({attr:function(a,b){return c.access(this,a,b,true,c.attr)},removeAttr:function(a){return this.each(function(){c.attr(this,
+a,"");this.nodeType===1&&this.removeAttribute(a)})},addClass:function(a){if(c.isFunction(a))return this.each(function(s){var v=c(this);v.addClass(a.call(this,s,v.attr("class")))});if(a&&typeof a==="string")for(var b=(a||"").split(ga),d=0,e=this.length;d<e;d++){var f=this[d];if(f.nodeType===1)if(f.className){for(var h=" "+f.className+" ",k=f.className,l=0,n=b.length;l<n;l++)if(h.indexOf(" "+b[l]+" ")<0)k+=" "+b[l];f.className=c.trim(k)}else f.className=a}return this},removeClass:function(a){if(c.isFunction(a))return this.each(function(n){var s=
+c(this);s.removeClass(a.call(this,n,s.attr("class")))});if(a&&typeof a==="string"||a===A)for(var b=(a||"").split(ga),d=0,e=this.length;d<e;d++){var f=this[d];if(f.nodeType===1&&f.className)if(a){for(var h=(" "+f.className+" ").replace(qa," "),k=0,l=b.length;k<l;k++)h=h.replace(" "+b[k]+" "," ");f.className=c.trim(h)}else f.className=""}return this},toggleClass:function(a,b){var d=typeof a,e=typeof b==="boolean";if(c.isFunction(a))return this.each(function(f){var h=c(this);h.toggleClass(a.call(this,
+f,h.attr("class"),b),b)});return this.each(function(){if(d==="string")for(var f,h=0,k=c(this),l=b,n=a.split(ga);f=n[h++];){l=e?l:!k.hasClass(f);k[l?"addClass":"removeClass"](f)}else if(d==="undefined"||d==="boolean"){this.className&&c.data(this,"__className__",this.className);this.className=this.className||a===false?"":c.data(this,"__className__")||""}})},hasClass:function(a){a=" "+a+" ";for(var b=0,d=this.length;b<d;b++)if((" "+this[b].className+" ").replace(qa," ").indexOf(a)>-1)return true;return false},
+val:function(a){if(!arguments.length){var b=this[0];if(b){if(c.nodeName(b,"option")){var d=b.attributes.value;return!d||d.specified?b.value:b.text}if(c.nodeName(b,"select")){var e=b.selectedIndex;d=[];var f=b.options;b=b.type==="select-one";if(e<0)return null;var h=b?e:0;for(e=b?e+1:f.length;h<e;h++){var k=f[h];if(k.selected&&(c.support.optDisabled?!k.disabled:k.getAttribute("disabled")===null)&&(!k.parentNode.disabled||!c.nodeName(k.parentNode,"optgroup"))){a=c(k).val();if(b)return a;d.push(a)}}return d}if(ra.test(b.type)&&
+!c.support.checkOn)return b.getAttribute("value")===null?"on":b.value;return(b.value||"").replace(Pa,"")}return A}var l=c.isFunction(a);return this.each(function(n){var s=c(this),v=a;if(this.nodeType===1){if(l)v=a.call(this,n,s.val());if(v==null)v="";else if(typeof v==="number")v+="";else if(c.isArray(v))v=c.map(v,function(D){return D==null?"":D+""});if(c.isArray(v)&&ra.test(this.type))this.checked=c.inArray(s.val(),v)>=0;else if(c.nodeName(this,"select")){var B=c.makeArray(v);c("option",this).each(function(){this.selected=
+c.inArray(c(this).val(),B)>=0});if(!B.length)this.selectedIndex=-1}else this.value=v}})}});c.extend({attrFn:{val:true,css:true,html:true,text:true,data:true,width:true,height:true,offset:true},attr:function(a,b,d,e){if(!a||a.nodeType===3||a.nodeType===8)return A;if(e&&b in c.attrFn)return c(a)[b](d);e=a.nodeType!==1||!c.isXMLDoc(a);var f=d!==A;b=e&&c.props[b]||b;if(a.nodeType===1){var h=Qa.test(b);if((b in a||a[b]!==A)&&e&&!h){if(f){b==="type"&&Ra.test(a.nodeName)&&a.parentNode&&c.error("type property can't be changed");
+if(d===null)a.nodeType===1&&a.removeAttribute(b);else a[b]=d}if(c.nodeName(a,"form")&&a.getAttributeNode(b))return a.getAttributeNode(b).nodeValue;if(b==="tabIndex")return(b=a.getAttributeNode("tabIndex"))&&b.specified?b.value:Sa.test(a.nodeName)||Ta.test(a.nodeName)&&a.href?0:A;return a[b]}if(!c.support.style&&e&&b==="style"){if(f)a.style.cssText=""+d;return a.style.cssText}f&&a.setAttribute(b,""+d);if(!a.attributes[b]&&a.hasAttribute&&!a.hasAttribute(b))return A;a=!c.support.hrefNormalized&&e&&
+h?a.getAttribute(b,2):a.getAttribute(b);return a===null?A:a}}});var X=/\.(.*)$/,ha=/^(?:textarea|input|select)$/i,Ha=/\./g,Ia=/ /g,Ua=/[^\w\s.|`]/g,Va=function(a){return a.replace(Ua,"\\$&")},sa={focusin:0,focusout:0};c.event={add:function(a,b,d,e){if(!(a.nodeType===3||a.nodeType===8)){if(c.isWindow(a)&&a!==E&&!a.frameElement)a=E;if(d===false)d=U;var f,h;if(d.handler){f=d;d=f.handler}if(!d.guid)d.guid=c.guid++;if(h=c.data(a)){var k=a.nodeType?"events":"__events__",l=h[k],n=h.handle;if(typeof l===
+"function"){n=l.handle;l=l.events}else if(!l){a.nodeType||(h[k]=h=function(){});h.events=l={}}if(!n)h.handle=n=function(){return typeof c!=="undefined"&&!c.event.triggered?c.event.handle.apply(n.elem,arguments):A};n.elem=a;b=b.split(" ");for(var s=0,v;k=b[s++];){h=f?c.extend({},f):{handler:d,data:e};if(k.indexOf(".")>-1){v=k.split(".");k=v.shift();h.namespace=v.slice(0).sort().join(".")}else{v=[];h.namespace=""}h.type=k;if(!h.guid)h.guid=d.guid;var B=l[k],D=c.event.special[k]||{};if(!B){B=l[k]=[];
+if(!D.setup||D.setup.call(a,e,v,n)===false)if(a.addEventListener)a.addEventListener(k,n,false);else a.attachEvent&&a.attachEvent("on"+k,n)}if(D.add){D.add.call(a,h);if(!h.handler.guid)h.handler.guid=d.guid}B.push(h);c.event.global[k]=true}a=null}}},global:{},remove:function(a,b,d,e){if(!(a.nodeType===3||a.nodeType===8)){if(d===false)d=U;var f,h,k=0,l,n,s,v,B,D,H=a.nodeType?"events":"__events__",w=c.data(a),G=w&&w[H];if(w&&G){if(typeof G==="function"){w=G;G=G.events}if(b&&b.type){d=b.handler;b=b.type}if(!b||
+typeof b==="string"&&b.charAt(0)==="."){b=b||"";for(f in G)c.event.remove(a,f+b)}else{for(b=b.split(" ");f=b[k++];){v=f;l=f.indexOf(".")<0;n=[];if(!l){n=f.split(".");f=n.shift();s=RegExp("(^|\\.)"+c.map(n.slice(0).sort(),Va).join("\\.(?:.*\\.)?")+"(\\.|$)")}if(B=G[f])if(d){v=c.event.special[f]||{};for(h=e||0;h<B.length;h++){D=B[h];if(d.guid===D.guid){if(l||s.test(D.namespace)){e==null&&B.splice(h--,1);v.remove&&v.remove.call(a,D)}if(e!=null)break}}if(B.length===0||e!=null&&B.length===1){if(!v.teardown||
+v.teardown.call(a,n)===false)c.removeEvent(a,f,w.handle);delete G[f]}}else for(h=0;h<B.length;h++){D=B[h];if(l||s.test(D.namespace)){c.event.remove(a,v,D.handler,h);B.splice(h--,1)}}}if(c.isEmptyObject(G)){if(b=w.handle)b.elem=null;delete w.events;delete w.handle;if(typeof w==="function")c.removeData(a,H);else c.isEmptyObject(w)&&c.removeData(a)}}}}},trigger:function(a,b,d,e){var f=a.type||a;if(!e){a=typeof a==="object"?a[c.expando]?a:c.extend(c.Event(f),a):c.Event(f);if(f.indexOf("!")>=0){a.type=
+f=f.slice(0,-1);a.exclusive=true}if(!d){a.stopPropagation();c.event.global[f]&&c.each(c.cache,function(){this.events&&this.events[f]&&c.event.trigger(a,b,this.handle.elem)})}if(!d||d.nodeType===3||d.nodeType===8)return A;a.result=A;a.target=d;b=c.makeArray(b);b.unshift(a)}a.currentTarget=d;(e=d.nodeType?c.data(d,"handle"):(c.data(d,"__events__")||{}).handle)&&e.apply(d,b);e=d.parentNode||d.ownerDocument;try{if(!(d&&d.nodeName&&c.noData[d.nodeName.toLowerCase()]))if(d["on"+f]&&d["on"+f].apply(d,b)===
+false){a.result=false;a.preventDefault()}}catch(h){}if(!a.isPropagationStopped()&&e)c.event.trigger(a,b,e,true);else if(!a.isDefaultPrevented()){e=a.target;var k,l=f.replace(X,""),n=c.nodeName(e,"a")&&l==="click",s=c.event.special[l]||{};if((!s._default||s._default.call(d,a)===false)&&!n&&!(e&&e.nodeName&&c.noData[e.nodeName.toLowerCase()])){try{if(e[l]){if(k=e["on"+l])e["on"+l]=null;c.event.triggered=true;e[l]()}}catch(v){}if(k)e["on"+l]=k;c.event.triggered=false}}},handle:function(a){var b,d,e;
+d=[];var f,h=c.makeArray(arguments);a=h[0]=c.event.fix(a||E.event);a.currentTarget=this;b=a.type.indexOf(".")<0&&!a.exclusive;if(!b){e=a.type.split(".");a.type=e.shift();d=e.slice(0).sort();e=RegExp("(^|\\.)"+d.join("\\.(?:.*\\.)?")+"(\\.|$)")}a.namespace=a.namespace||d.join(".");f=c.data(this,this.nodeType?"events":"__events__");if(typeof f==="function")f=f.events;d=(f||{})[a.type];if(f&&d){d=d.slice(0);f=0;for(var k=d.length;f<k;f++){var l=d[f];if(b||e.test(l.namespace)){a.handler=l.handler;a.data=
+l.data;a.handleObj=l;l=l.handler.apply(this,h);if(l!==A){a.result=l;if(l===false){a.preventDefault();a.stopPropagation()}}if(a.isImmediatePropagationStopped())break}}}return a.result},props:"altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode layerX layerY metaKey newValue offsetX offsetY pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "),
+fix:function(a){if(a[c.expando])return a;var b=a;a=c.Event(b);for(var d=this.props.length,e;d;){e=this.props[--d];a[e]=b[e]}if(!a.target)a.target=a.srcElement||u;if(a.target.nodeType===3)a.target=a.target.parentNode;if(!a.relatedTarget&&a.fromElement)a.relatedTarget=a.fromElement===a.target?a.toElement:a.fromElement;if(a.pageX==null&&a.clientX!=null){b=u.documentElement;d=u.body;a.pageX=a.clientX+(b&&b.scrollLeft||d&&d.scrollLeft||0)-(b&&b.clientLeft||d&&d.clientLeft||0);a.pageY=a.clientY+(b&&b.scrollTop||
+d&&d.scrollTop||0)-(b&&b.clientTop||d&&d.clientTop||0)}if(a.which==null&&(a.charCode!=null||a.keyCode!=null))a.which=a.charCode!=null?a.charCode:a.keyCode;if(!a.metaKey&&a.ctrlKey)a.metaKey=a.ctrlKey;if(!a.which&&a.button!==A)a.which=a.button&1?1:a.button&2?3:a.button&4?2:0;return a},guid:1E8,proxy:c.proxy,special:{ready:{setup:c.bindReady,teardown:c.noop},live:{add:function(a){c.event.add(this,Y(a.origType,a.selector),c.extend({},a,{handler:Ga,guid:a.handler.guid}))},remove:function(a){c.event.remove(this,
+Y(a.origType,a.selector),a)}},beforeunload:{setup:function(a,b,d){if(c.isWindow(this))this.onbeforeunload=d},teardown:function(a,b){if(this.onbeforeunload===b)this.onbeforeunload=null}}}};c.removeEvent=u.removeEventListener?function(a,b,d){a.removeEventListener&&a.removeEventListener(b,d,false)}:function(a,b,d){a.detachEvent&&a.detachEvent("on"+b,d)};c.Event=function(a){if(!this.preventDefault)return new c.Event(a);if(a&&a.type){this.originalEvent=a;this.type=a.type}else this.type=a;this.timeStamp=
+c.now();this[c.expando]=true};c.Event.prototype={preventDefault:function(){this.isDefaultPrevented=ba;var a=this.originalEvent;if(a)if(a.preventDefault)a.preventDefault();else a.returnValue=false},stopPropagation:function(){this.isPropagationStopped=ba;var a=this.originalEvent;if(a){a.stopPropagation&&a.stopPropagation();a.cancelBubble=true}},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=ba;this.stopPropagation()},isDefaultPrevented:U,isPropagationStopped:U,isImmediatePropagationStopped:U};
+var ta=function(a){var b=a.relatedTarget;try{for(;b&&b!==this;)b=b.parentNode;if(b!==this){a.type=a.data;c.event.handle.apply(this,arguments)}}catch(d){}},ua=function(a){a.type=a.data;c.event.handle.apply(this,arguments)};c.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(a,b){c.event.special[a]={setup:function(d){c.event.add(this,b,d&&d.selector?ua:ta,a)},teardown:function(d){c.event.remove(this,b,d&&d.selector?ua:ta)}}});if(!c.support.submitBubbles)c.event.special.submit={setup:function(){if(this.nodeName.toLowerCase()!==
+"form"){c.event.add(this,"click.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="submit"||d==="image")&&c(b).closest("form").length){a.liveFired=A;return ja("submit",this,arguments)}});c.event.add(this,"keypress.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="text"||d==="password")&&c(b).closest("form").length&&a.keyCode===13){a.liveFired=A;return ja("submit",this,arguments)}})}else return false},teardown:function(){c.event.remove(this,".specialSubmit")}};if(!c.support.changeBubbles){var V,
+va=function(a){var b=a.type,d=a.value;if(b==="radio"||b==="checkbox")d=a.checked;else if(b==="select-multiple")d=a.selectedIndex>-1?c.map(a.options,function(e){return e.selected}).join("-"):"";else if(a.nodeName.toLowerCase()==="select")d=a.selectedIndex;return d},Z=function(a,b){var d=a.target,e,f;if(!(!ha.test(d.nodeName)||d.readOnly)){e=c.data(d,"_change_data");f=va(d);if(a.type!=="focusout"||d.type!=="radio")c.data(d,"_change_data",f);if(!(e===A||f===e))if(e!=null||f){a.type="change";a.liveFired=
+A;return c.event.trigger(a,b,d)}}};c.event.special.change={filters:{focusout:Z,beforedeactivate:Z,click:function(a){var b=a.target,d=b.type;if(d==="radio"||d==="checkbox"||b.nodeName.toLowerCase()==="select")return Z.call(this,a)},keydown:function(a){var b=a.target,d=b.type;if(a.keyCode===13&&b.nodeName.toLowerCase()!=="textarea"||a.keyCode===32&&(d==="checkbox"||d==="radio")||d==="select-multiple")return Z.call(this,a)},beforeactivate:function(a){a=a.target;c.data(a,"_change_data",va(a))}},setup:function(){if(this.type===
+"file")return false;for(var a in V)c.event.add(this,a+".specialChange",V[a]);return ha.test(this.nodeName)},teardown:function(){c.event.remove(this,".specialChange");return ha.test(this.nodeName)}};V=c.event.special.change.filters;V.focus=V.beforeactivate}u.addEventListener&&c.each({focus:"focusin",blur:"focusout"},function(a,b){function d(e){e=c.event.fix(e);e.type=b;return c.event.trigger(e,null,e.target)}c.event.special[b]={setup:function(){sa[b]++===0&&u.addEventListener(a,d,true)},teardown:function(){--sa[b]===
+0&&u.removeEventListener(a,d,true)}}});c.each(["bind","one"],function(a,b){c.fn[b]=function(d,e,f){if(typeof d==="object"){for(var h in d)this[b](h,e,d[h],f);return this}if(c.isFunction(e)||e===false){f=e;e=A}var k=b==="one"?c.proxy(f,function(n){c(this).unbind(n,k);return f.apply(this,arguments)}):f;if(d==="unload"&&b!=="one")this.one(d,e,f);else{h=0;for(var l=this.length;h<l;h++)c.event.add(this[h],d,k,e)}return this}});c.fn.extend({unbind:function(a,b){if(typeof a==="object"&&!a.preventDefault)for(var d in a)this.unbind(d,
+a[d]);else{d=0;for(var e=this.length;d<e;d++)c.event.remove(this[d],a,b)}return this},delegate:function(a,b,d,e){return this.live(b,d,e,a)},undelegate:function(a,b,d){return arguments.length===0?this.unbind("live"):this.die(b,null,d,a)},trigger:function(a,b){return this.each(function(){c.event.trigger(a,b,this)})},triggerHandler:function(a,b){if(this[0]){var d=c.Event(a);d.preventDefault();d.stopPropagation();c.event.trigger(d,b,this[0]);return d.result}},toggle:function(a){for(var b=arguments,d=
+1;d<b.length;)c.proxy(a,b[d++]);return this.click(c.proxy(a,function(e){var f=(c.data(this,"lastToggle"+a.guid)||0)%d;c.data(this,"lastToggle"+a.guid,f+1);e.preventDefault();return b[f].apply(this,arguments)||false}))},hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}});var wa={focus:"focusin",blur:"focusout",mouseenter:"mouseover",mouseleave:"mouseout"};c.each(["live","die"],function(a,b){c.fn[b]=function(d,e,f,h){var k,l=0,n,s,v=h||this.selector;h=h?this:c(this.context);if(typeof d===
+"object"&&!d.preventDefault){for(k in d)h[b](k,e,d[k],v);return this}if(c.isFunction(e)){f=e;e=A}for(d=(d||"").split(" ");(k=d[l++])!=null;){n=X.exec(k);s="";if(n){s=n[0];k=k.replace(X,"")}if(k==="hover")d.push("mouseenter"+s,"mouseleave"+s);else{n=k;if(k==="focus"||k==="blur"){d.push(wa[k]+s);k+=s}else k=(wa[k]||k)+s;if(b==="live"){s=0;for(var B=h.length;s<B;s++)c.event.add(h[s],"live."+Y(k,v),{data:e,selector:v,handler:f,origType:k,origHandler:f,preType:n})}else h.unbind("live."+Y(k,v),f)}}return this}});
+c.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error".split(" "),function(a,b){c.fn[b]=function(d,e){if(e==null){e=d;d=null}return arguments.length>0?this.bind(b,d,e):this.trigger(b)};if(c.attrFn)c.attrFn[b]=true});E.attachEvent&&!E.addEventListener&&c(E).bind("unload",function(){for(var a in c.cache)if(c.cache[a].handle)try{c.event.remove(c.cache[a].handle.elem)}catch(b){}});
+(function(){function a(g,j,o,m,p,q){p=0;for(var t=m.length;p<t;p++){var x=m[p];if(x){x=x[g];for(var C=false;x;){if(x.sizcache===o){C=m[x.sizset];break}if(x.nodeType===1&&!q){x.sizcache=o;x.sizset=p}if(x.nodeName.toLowerCase()===j){C=x;break}x=x[g]}m[p]=C}}}function b(g,j,o,m,p,q){p=0;for(var t=m.length;p<t;p++){var x=m[p];if(x){x=x[g];for(var C=false;x;){if(x.sizcache===o){C=m[x.sizset];break}if(x.nodeType===1){if(!q){x.sizcache=o;x.sizset=p}if(typeof j!=="string"){if(x===j){C=true;break}}else if(l.filter(j,
+[x]).length>0){C=x;break}}x=x[g]}m[p]=C}}}var d=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,e=0,f=Object.prototype.toString,h=false,k=true;[0,0].sort(function(){k=false;return 0});var l=function(g,j,o,m){o=o||[];var p=j=j||u;if(j.nodeType!==1&&j.nodeType!==9)return[];if(!g||typeof g!=="string")return o;var q=[],t,x,C,P,N=true,R=l.isXML(j),Q=g,L;do{d.exec("");if(t=d.exec(Q)){Q=t[3];q.push(t[1]);if(t[2]){P=t[3];
+break}}}while(t);if(q.length>1&&s.exec(g))if(q.length===2&&n.relative[q[0]])x=M(q[0]+q[1],j);else for(x=n.relative[q[0]]?[j]:l(q.shift(),j);q.length;){g=q.shift();if(n.relative[g])g+=q.shift();x=M(g,x)}else{if(!m&&q.length>1&&j.nodeType===9&&!R&&n.match.ID.test(q[0])&&!n.match.ID.test(q[q.length-1])){t=l.find(q.shift(),j,R);j=t.expr?l.filter(t.expr,t.set)[0]:t.set[0]}if(j){t=m?{expr:q.pop(),set:D(m)}:l.find(q.pop(),q.length===1&&(q[0]==="~"||q[0]==="+")&&j.parentNode?j.parentNode:j,R);x=t.expr?l.filter(t.expr,
+t.set):t.set;if(q.length>0)C=D(x);else N=false;for(;q.length;){t=L=q.pop();if(n.relative[L])t=q.pop();else L="";if(t==null)t=j;n.relative[L](C,t,R)}}else C=[]}C||(C=x);C||l.error(L||g);if(f.call(C)==="[object Array]")if(N)if(j&&j.nodeType===1)for(g=0;C[g]!=null;g++){if(C[g]&&(C[g]===true||C[g].nodeType===1&&l.contains(j,C[g])))o.push(x[g])}else for(g=0;C[g]!=null;g++)C[g]&&C[g].nodeType===1&&o.push(x[g]);else o.push.apply(o,C);else D(C,o);if(P){l(P,p,o,m);l.uniqueSort(o)}return o};l.uniqueSort=function(g){if(w){h=
+k;g.sort(w);if(h)for(var j=1;j<g.length;j++)g[j]===g[j-1]&&g.splice(j--,1)}return g};l.matches=function(g,j){return l(g,null,null,j)};l.matchesSelector=function(g,j){return l(j,null,null,[g]).length>0};l.find=function(g,j,o){var m;if(!g)return[];for(var p=0,q=n.order.length;p<q;p++){var t=n.order[p],x;if(x=n.leftMatch[t].exec(g)){var C=x[1];x.splice(1,1);if(C.substr(C.length-1)!=="\\"){x[1]=(x[1]||"").replace(/\\/g,"");m=n.find[t](x,j,o);if(m!=null){g=g.replace(n.match[t],"");break}}}}m||(m=j.getElementsByTagName("*"));
+return{set:m,expr:g}};l.filter=function(g,j,o,m){for(var p=g,q=[],t=j,x,C,P=j&&j[0]&&l.isXML(j[0]);g&&j.length;){for(var N in n.filter)if((x=n.leftMatch[N].exec(g))!=null&&x[2]){var R=n.filter[N],Q,L;L=x[1];C=false;x.splice(1,1);if(L.substr(L.length-1)!=="\\"){if(t===q)q=[];if(n.preFilter[N])if(x=n.preFilter[N](x,t,o,q,m,P)){if(x===true)continue}else C=Q=true;if(x)for(var i=0;(L=t[i])!=null;i++)if(L){Q=R(L,x,i,t);var r=m^!!Q;if(o&&Q!=null)if(r)C=true;else t[i]=false;else if(r){q.push(L);C=true}}if(Q!==
+A){o||(t=q);g=g.replace(n.match[N],"");if(!C)return[];break}}}if(g===p)if(C==null)l.error(g);else break;p=g}return t};l.error=function(g){throw"Syntax error, unrecognized expression: "+g;};var n=l.selectors={order:["ID","NAME","TAG"],match:{ID:/#((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,CLASS:/\.((?:[\w\u00c0-\uFFFF\-]|\\.)+)/,NAME:/\[name=['"]*((?:[\w\u00c0-\uFFFF\-]|\\.)+)['"]*\]/,ATTR:/\[\s*((?:[\w\u00c0-\uFFFF\-]|\\.)+)\s*(?:(\S?=)\s*(['"]*)(.*?)\3|)\s*\]/,TAG:/^((?:[\w\u00c0-\uFFFF\*\-]|\\.)+)/,CHILD:/:(only|nth|last|first)-child(?:\((even|odd|[\dn+\-]*)\))?/,
+POS:/:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^\-]|$)/,PSEUDO:/:((?:[\w\u00c0-\uFFFF\-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/},leftMatch:{},attrMap:{"class":"className","for":"htmlFor"},attrHandle:{href:function(g){return g.getAttribute("href")}},relative:{"+":function(g,j){var o=typeof j==="string",m=o&&!/\W/.test(j);o=o&&!m;if(m)j=j.toLowerCase();m=0;for(var p=g.length,q;m<p;m++)if(q=g[m]){for(;(q=q.previousSibling)&&q.nodeType!==1;);g[m]=o||q&&q.nodeName.toLowerCase()===
+j?q||false:q===j}o&&l.filter(j,g,true)},">":function(g,j){var o=typeof j==="string",m,p=0,q=g.length;if(o&&!/\W/.test(j))for(j=j.toLowerCase();p<q;p++){if(m=g[p]){o=m.parentNode;g[p]=o.nodeName.toLowerCase()===j?o:false}}else{for(;p<q;p++)if(m=g[p])g[p]=o?m.parentNode:m.parentNode===j;o&&l.filter(j,g,true)}},"":function(g,j,o){var m=e++,p=b,q;if(typeof j==="string"&&!/\W/.test(j)){q=j=j.toLowerCase();p=a}p("parentNode",j,m,g,q,o)},"~":function(g,j,o){var m=e++,p=b,q;if(typeof j==="string"&&!/\W/.test(j)){q=
+j=j.toLowerCase();p=a}p("previousSibling",j,m,g,q,o)}},find:{ID:function(g,j,o){if(typeof j.getElementById!=="undefined"&&!o)return(g=j.getElementById(g[1]))&&g.parentNode?[g]:[]},NAME:function(g,j){if(typeof j.getElementsByName!=="undefined"){for(var o=[],m=j.getElementsByName(g[1]),p=0,q=m.length;p<q;p++)m[p].getAttribute("name")===g[1]&&o.push(m[p]);return o.length===0?null:o}},TAG:function(g,j){return j.getElementsByTagName(g[1])}},preFilter:{CLASS:function(g,j,o,m,p,q){g=" "+g[1].replace(/\\/g,
+"")+" ";if(q)return g;q=0;for(var t;(t=j[q])!=null;q++)if(t)if(p^(t.className&&(" "+t.className+" ").replace(/[\t\n]/g," ").indexOf(g)>=0))o||m.push(t);else if(o)j[q]=false;return false},ID:function(g){return g[1].replace(/\\/g,"")},TAG:function(g){return g[1].toLowerCase()},CHILD:function(g){if(g[1]==="nth"){var j=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(g[2]==="even"&&"2n"||g[2]==="odd"&&"2n+1"||!/\D/.test(g[2])&&"0n+"+g[2]||g[2]);g[2]=j[1]+(j[2]||1)-0;g[3]=j[3]-0}g[0]=e++;return g},ATTR:function(g,j,o,
+m,p,q){j=g[1].replace(/\\/g,"");if(!q&&n.attrMap[j])g[1]=n.attrMap[j];if(g[2]==="~=")g[4]=" "+g[4]+" ";return g},PSEUDO:function(g,j,o,m,p){if(g[1]==="not")if((d.exec(g[3])||"").length>1||/^\w/.test(g[3]))g[3]=l(g[3],null,null,j);else{g=l.filter(g[3],j,o,true^p);o||m.push.apply(m,g);return false}else if(n.match.POS.test(g[0])||n.match.CHILD.test(g[0]))return true;return g},POS:function(g){g.unshift(true);return g}},filters:{enabled:function(g){return g.disabled===false&&g.type!=="hidden"},disabled:function(g){return g.disabled===
+true},checked:function(g){return g.checked===true},selected:function(g){return g.selected===true},parent:function(g){return!!g.firstChild},empty:function(g){return!g.firstChild},has:function(g,j,o){return!!l(o[3],g).length},header:function(g){return/h\d/i.test(g.nodeName)},text:function(g){return"text"===g.type},radio:function(g){return"radio"===g.type},checkbox:function(g){return"checkbox"===g.type},file:function(g){return"file"===g.type},password:function(g){return"password"===g.type},submit:function(g){return"submit"===
+g.type},image:function(g){return"image"===g.type},reset:function(g){return"reset"===g.type},button:function(g){return"button"===g.type||g.nodeName.toLowerCase()==="button"},input:function(g){return/input|select|textarea|button/i.test(g.nodeName)}},setFilters:{first:function(g,j){return j===0},last:function(g,j,o,m){return j===m.length-1},even:function(g,j){return j%2===0},odd:function(g,j){return j%2===1},lt:function(g,j,o){return j<o[3]-0},gt:function(g,j,o){return j>o[3]-0},nth:function(g,j,o){return o[3]-
+0===j},eq:function(g,j,o){return o[3]-0===j}},filter:{PSEUDO:function(g,j,o,m){var p=j[1],q=n.filters[p];if(q)return q(g,o,j,m);else if(p==="contains")return(g.textContent||g.innerText||l.getText([g])||"").indexOf(j[3])>=0;else if(p==="not"){j=j[3];o=0;for(m=j.length;o<m;o++)if(j[o]===g)return false;return true}else l.error("Syntax error, unrecognized expression: "+p)},CHILD:function(g,j){var o=j[1],m=g;switch(o){case "only":case "first":for(;m=m.previousSibling;)if(m.nodeType===1)return false;if(o===
+"first")return true;m=g;case "last":for(;m=m.nextSibling;)if(m.nodeType===1)return false;return true;case "nth":o=j[2];var p=j[3];if(o===1&&p===0)return true;var q=j[0],t=g.parentNode;if(t&&(t.sizcache!==q||!g.nodeIndex)){var x=0;for(m=t.firstChild;m;m=m.nextSibling)if(m.nodeType===1)m.nodeIndex=++x;t.sizcache=q}m=g.nodeIndex-p;return o===0?m===0:m%o===0&&m/o>=0}},ID:function(g,j){return g.nodeType===1&&g.getAttribute("id")===j},TAG:function(g,j){return j==="*"&&g.nodeType===1||g.nodeName.toLowerCase()===
+j},CLASS:function(g,j){return(" "+(g.className||g.getAttribute("class"))+" ").indexOf(j)>-1},ATTR:function(g,j){var o=j[1];o=n.attrHandle[o]?n.attrHandle[o](g):g[o]!=null?g[o]:g.getAttribute(o);var m=o+"",p=j[2],q=j[4];return o==null?p==="!=":p==="="?m===q:p==="*="?m.indexOf(q)>=0:p==="~="?(" "+m+" ").indexOf(q)>=0:!q?m&&o!==false:p==="!="?m!==q:p==="^="?m.indexOf(q)===0:p==="$="?m.substr(m.length-q.length)===q:p==="|="?m===q||m.substr(0,q.length+1)===q+"-":false},POS:function(g,j,o,m){var p=n.setFilters[j[2]];
+if(p)return p(g,o,j,m)}}},s=n.match.POS,v=function(g,j){return"\\"+(j-0+1)},B;for(B in n.match){n.match[B]=RegExp(n.match[B].source+/(?![^\[]*\])(?![^\(]*\))/.source);n.leftMatch[B]=RegExp(/(^(?:.|\r|\n)*?)/.source+n.match[B].source.replace(/\\(\d+)/g,v))}var D=function(g,j){g=Array.prototype.slice.call(g,0);if(j){j.push.apply(j,g);return j}return g};try{Array.prototype.slice.call(u.documentElement.childNodes,0)}catch(H){D=function(g,j){var o=j||[],m=0;if(f.call(g)==="[object Array]")Array.prototype.push.apply(o,
+g);else if(typeof g.length==="number")for(var p=g.length;m<p;m++)o.push(g[m]);else for(;g[m];m++)o.push(g[m]);return o}}var w,G;if(u.documentElement.compareDocumentPosition)w=function(g,j){if(g===j){h=true;return 0}if(!g.compareDocumentPosition||!j.compareDocumentPosition)return g.compareDocumentPosition?-1:1;return g.compareDocumentPosition(j)&4?-1:1};else{w=function(g,j){var o=[],m=[],p=g.parentNode,q=j.parentNode,t=p;if(g===j){h=true;return 0}else if(p===q)return G(g,j);else if(p){if(!q)return 1}else return-1;
+for(;t;){o.unshift(t);t=t.parentNode}for(t=q;t;){m.unshift(t);t=t.parentNode}p=o.length;q=m.length;for(t=0;t<p&&t<q;t++)if(o[t]!==m[t])return G(o[t],m[t]);return t===p?G(g,m[t],-1):G(o[t],j,1)};G=function(g,j,o){if(g===j)return o;for(g=g.nextSibling;g;){if(g===j)return-1;g=g.nextSibling}return 1}}l.getText=function(g){for(var j="",o,m=0;g[m];m++){o=g[m];if(o.nodeType===3||o.nodeType===4)j+=o.nodeValue;else if(o.nodeType!==8)j+=l.getText(o.childNodes)}return j};(function(){var g=u.createElement("div"),
+j="script"+(new Date).getTime();g.innerHTML="<a name='"+j+"'/>";var o=u.documentElement;o.insertBefore(g,o.firstChild);if(u.getElementById(j)){n.find.ID=function(m,p,q){if(typeof p.getElementById!=="undefined"&&!q)return(p=p.getElementById(m[1]))?p.id===m[1]||typeof p.getAttributeNode!=="undefined"&&p.getAttributeNode("id").nodeValue===m[1]?[p]:A:[]};n.filter.ID=function(m,p){var q=typeof m.getAttributeNode!=="undefined"&&m.getAttributeNode("id");return m.nodeType===1&&q&&q.nodeValue===p}}o.removeChild(g);
+o=g=null})();(function(){var g=u.createElement("div");g.appendChild(u.createComment(""));if(g.getElementsByTagName("*").length>0)n.find.TAG=function(j,o){var m=o.getElementsByTagName(j[1]);if(j[1]==="*"){for(var p=[],q=0;m[q];q++)m[q].nodeType===1&&p.push(m[q]);m=p}return m};g.innerHTML="<a href='#'></a>";if(g.firstChild&&typeof g.firstChild.getAttribute!=="undefined"&&g.firstChild.getAttribute("href")!=="#")n.attrHandle.href=function(j){return j.getAttribute("href",2)};g=null})();u.querySelectorAll&&
+function(){var g=l,j=u.createElement("div");j.innerHTML="<p class='TEST'></p>";if(!(j.querySelectorAll&&j.querySelectorAll(".TEST").length===0)){l=function(m,p,q,t){p=p||u;if(!t&&!l.isXML(p))if(p.nodeType===9)try{return D(p.querySelectorAll(m),q)}catch(x){}else if(p.nodeType===1&&p.nodeName.toLowerCase()!=="object"){var C=p.id,P=p.id="__sizzle__";try{return D(p.querySelectorAll("#"+P+" "+m),q)}catch(N){}finally{if(C)p.id=C;else p.removeAttribute("id")}}return g(m,p,q,t)};for(var o in g)l[o]=g[o];
+j=null}}();(function(){var g=u.documentElement,j=g.matchesSelector||g.mozMatchesSelector||g.webkitMatchesSelector||g.msMatchesSelector,o=false;try{j.call(u.documentElement,":sizzle")}catch(m){o=true}if(j)l.matchesSelector=function(p,q){try{if(o||!n.match.PSEUDO.test(q))return j.call(p,q)}catch(t){}return l(q,null,null,[p]).length>0}})();(function(){var g=u.createElement("div");g.innerHTML="<div class='test e'></div><div class='test'></div>";if(!(!g.getElementsByClassName||g.getElementsByClassName("e").length===
+0)){g.lastChild.className="e";if(g.getElementsByClassName("e").length!==1){n.order.splice(1,0,"CLASS");n.find.CLASS=function(j,o,m){if(typeof o.getElementsByClassName!=="undefined"&&!m)return o.getElementsByClassName(j[1])};g=null}}})();l.contains=u.documentElement.contains?function(g,j){return g!==j&&(g.contains?g.contains(j):true)}:function(g,j){return!!(g.compareDocumentPosition(j)&16)};l.isXML=function(g){return(g=(g?g.ownerDocument||g:0).documentElement)?g.nodeName!=="HTML":false};var M=function(g,
+j){for(var o=[],m="",p,q=j.nodeType?[j]:j;p=n.match.PSEUDO.exec(g);){m+=p[0];g=g.replace(n.match.PSEUDO,"")}g=n.relative[g]?g+"*":g;p=0;for(var t=q.length;p<t;p++)l(g,q[p],o);return l.filter(m,o)};c.find=l;c.expr=l.selectors;c.expr[":"]=c.expr.filters;c.unique=l.uniqueSort;c.text=l.getText;c.isXMLDoc=l.isXML;c.contains=l.contains})();var Wa=/Until$/,Xa=/^(?:parents|prevUntil|prevAll)/,Ya=/,/,Ja=/^.[^:#\[\.,]*$/,Za=Array.prototype.slice,$a=c.expr.match.POS;c.fn.extend({find:function(a){for(var b=this.pushStack("",
+"find",a),d=0,e=0,f=this.length;e<f;e++){d=b.length;c.find(a,this[e],b);if(e>0)for(var h=d;h<b.length;h++)for(var k=0;k<d;k++)if(b[k]===b[h]){b.splice(h--,1);break}}return b},has:function(a){var b=c(a);return this.filter(function(){for(var d=0,e=b.length;d<e;d++)if(c.contains(this,b[d]))return true})},not:function(a){return this.pushStack(ka(this,a,false),"not",a)},filter:function(a){return this.pushStack(ka(this,a,true),"filter",a)},is:function(a){return!!a&&c.filter(a,this).length>0},closest:function(a,
+b){var d=[],e,f,h=this[0];if(c.isArray(a)){var k={},l,n=1;if(h&&a.length){e=0;for(f=a.length;e<f;e++){l=a[e];k[l]||(k[l]=c.expr.match.POS.test(l)?c(l,b||this.context):l)}for(;h&&h.ownerDocument&&h!==b;){for(l in k){e=k[l];if(e.jquery?e.index(h)>-1:c(h).is(e))d.push({selector:l,elem:h,level:n})}h=h.parentNode;n++}}return d}k=$a.test(a)?c(a,b||this.context):null;e=0;for(f=this.length;e<f;e++)for(h=this[e];h;)if(k?k.index(h)>-1:c.find.matchesSelector(h,a)){d.push(h);break}else{h=h.parentNode;if(!h||
+!h.ownerDocument||h===b)break}d=d.length>1?c.unique(d):d;return this.pushStack(d,"closest",a)},index:function(a){if(!a||typeof a==="string")return c.inArray(this[0],a?c(a):this.parent().children());return c.inArray(a.jquery?a[0]:a,this)},add:function(a,b){var d=typeof a==="string"?c(a,b||this.context):c.makeArray(a),e=c.merge(this.get(),d);return this.pushStack(!d[0]||!d[0].parentNode||d[0].parentNode.nodeType===11||!e[0]||!e[0].parentNode||e[0].parentNode.nodeType===11?e:c.unique(e))},andSelf:function(){return this.add(this.prevObject)}});
+c.each({parent:function(a){return(a=a.parentNode)&&a.nodeType!==11?a:null},parents:function(a){return c.dir(a,"parentNode")},parentsUntil:function(a,b,d){return c.dir(a,"parentNode",d)},next:function(a){return c.nth(a,2,"nextSibling")},prev:function(a){return c.nth(a,2,"previousSibling")},nextAll:function(a){return c.dir(a,"nextSibling")},prevAll:function(a){return c.dir(a,"previousSibling")},nextUntil:function(a,b,d){return c.dir(a,"nextSibling",d)},prevUntil:function(a,b,d){return c.dir(a,"previousSibling",
+d)},siblings:function(a){return c.sibling(a.parentNode.firstChild,a)},children:function(a){return c.sibling(a.firstChild)},contents:function(a){return c.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:c.makeArray(a.childNodes)}},function(a,b){c.fn[a]=function(d,e){var f=c.map(this,b,d);Wa.test(a)||(e=d);if(e&&typeof e==="string")f=c.filter(e,f);f=this.length>1?c.unique(f):f;if((this.length>1||Ya.test(e))&&Xa.test(a))f=f.reverse();return this.pushStack(f,a,Za.call(arguments).join(","))}});
+c.extend({filter:function(a,b,d){if(d)a=":not("+a+")";return b.length===1?c.find.matchesSelector(b[0],a)?[b[0]]:[]:c.find.matches(a,b)},dir:function(a,b,d){var e=[];for(a=a[b];a&&a.nodeType!==9&&(d===A||a.nodeType!==1||!c(a).is(d));){a.nodeType===1&&e.push(a);a=a[b]}return e},nth:function(a,b,d){b=b||1;for(var e=0;a;a=a[d])if(a.nodeType===1&&++e===b)break;return a},sibling:function(a,b){for(var d=[];a;a=a.nextSibling)a.nodeType===1&&a!==b&&d.push(a);return d}});var xa=/ jQuery\d+="(?:\d+|null)"/g,
+$=/^\s+/,ya=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig,za=/<([\w:]+)/,ab=/<tbody/i,bb=/<|&#?\w+;/,Aa=/<(?:script|object|embed|option|style)/i,Ba=/checked\s*(?:[^=]|=\s*.checked.)/i,cb=/\=([^="'>\s]+\/)>/g,O={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],
+area:[1,"<map>","</map>"],_default:[0,"",""]};O.optgroup=O.option;O.tbody=O.tfoot=O.colgroup=O.caption=O.thead;O.th=O.td;if(!c.support.htmlSerialize)O._default=[1,"div<div>","</div>"];c.fn.extend({text:function(a){if(c.isFunction(a))return this.each(function(b){var d=c(this);d.text(a.call(this,b,d.text()))});if(typeof a!=="object"&&a!==A)return this.empty().append((this[0]&&this[0].ownerDocument||u).createTextNode(a));return c.text(this)},wrapAll:function(a){if(c.isFunction(a))return this.each(function(d){c(this).wrapAll(a.call(this,
+d))});if(this[0]){var b=c(a,this[0].ownerDocument).eq(0).clone(true);this[0].parentNode&&b.insertBefore(this[0]);b.map(function(){for(var d=this;d.firstChild&&d.firstChild.nodeType===1;)d=d.firstChild;return d}).append(this)}return this},wrapInner:function(a){if(c.isFunction(a))return this.each(function(b){c(this).wrapInner(a.call(this,b))});return this.each(function(){var b=c(this),d=b.contents();d.length?d.wrapAll(a):b.append(a)})},wrap:function(a){return this.each(function(){c(this).wrapAll(a)})},
+unwrap:function(){return this.parent().each(function(){c.nodeName(this,"body")||c(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.appendChild(a)})},prepend:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,this)});else if(arguments.length){var a=
+c(arguments[0]);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,this.nextSibling)});else if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,c(arguments[0]).toArray());return a}},remove:function(a,b){for(var d=0,e;(e=this[d])!=null;d++)if(!a||c.filter(a,[e]).length){if(!b&&e.nodeType===1){c.cleanData(e.getElementsByTagName("*"));
+c.cleanData([e])}e.parentNode&&e.parentNode.removeChild(e)}return this},empty:function(){for(var a=0,b;(b=this[a])!=null;a++)for(b.nodeType===1&&c.cleanData(b.getElementsByTagName("*"));b.firstChild;)b.removeChild(b.firstChild);return this},clone:function(a){var b=this.map(function(){if(!c.support.noCloneEvent&&!c.isXMLDoc(this)){var d=this.outerHTML,e=this.ownerDocument;if(!d){d=e.createElement("div");d.appendChild(this.cloneNode(true));d=d.innerHTML}return c.clean([d.replace(xa,"").replace(cb,'="$1">').replace($,
+"")],e)[0]}else return this.cloneNode(true)});if(a===true){la(this,b);la(this.find("*"),b.find("*"))}return b},html:function(a){if(a===A)return this[0]&&this[0].nodeType===1?this[0].innerHTML.replace(xa,""):null;else if(typeof a==="string"&&!Aa.test(a)&&(c.support.leadingWhitespace||!$.test(a))&&!O[(za.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(ya,"<$1></$2>");try{for(var b=0,d=this.length;b<d;b++)if(this[b].nodeType===1){c.cleanData(this[b].getElementsByTagName("*"));this[b].innerHTML=a}}catch(e){this.empty().append(a)}}else c.isFunction(a)?
+this.each(function(f){var h=c(this);h.html(a.call(this,f,h.html()))}):this.empty().append(a);return this},replaceWith:function(a){if(this[0]&&this[0].parentNode){if(c.isFunction(a))return this.each(function(b){var d=c(this),e=d.html();d.replaceWith(a.call(this,b,e))});if(typeof a!=="string")a=c(a).detach();return this.each(function(){var b=this.nextSibling,d=this.parentNode;c(this).remove();b?c(b).before(a):c(d).append(a)})}else return this.pushStack(c(c.isFunction(a)?a():a),"replaceWith",a)},detach:function(a){return this.remove(a,
+true)},domManip:function(a,b,d){var e,f,h=a[0],k=[],l;if(!c.support.checkClone&&arguments.length===3&&typeof h==="string"&&Ba.test(h))return this.each(function(){c(this).domManip(a,b,d,true)});if(c.isFunction(h))return this.each(function(s){var v=c(this);a[0]=h.call(this,s,b?v.html():A);v.domManip(a,b,d)});if(this[0]){e=h&&h.parentNode;e=c.support.parentNode&&e&&e.nodeType===11&&e.childNodes.length===this.length?{fragment:e}:c.buildFragment(a,this,k);l=e.fragment;if(f=l.childNodes.length===1?l=l.firstChild:
+l.firstChild){b=b&&c.nodeName(f,"tr");f=0;for(var n=this.length;f<n;f++)d.call(b?c.nodeName(this[f],"table")?this[f].getElementsByTagName("tbody")[0]||this[f].appendChild(this[f].ownerDocument.createElement("tbody")):this[f]:this[f],f>0||e.cacheable||this.length>1?l.cloneNode(true):l)}k.length&&c.each(k,Ka)}return this}});c.buildFragment=function(a,b,d){var e,f,h;b=b&&b[0]?b[0].ownerDocument||b[0]:u;if(a.length===1&&typeof a[0]==="string"&&a[0].length<512&&b===u&&!Aa.test(a[0])&&(c.support.checkClone||
+!Ba.test(a[0]))){f=true;if(h=c.fragments[a[0]])if(h!==1)e=h}if(!e){e=b.createDocumentFragment();c.clean(a,b,e,d)}if(f)c.fragments[a[0]]=h?e:1;return{fragment:e,cacheable:f}};c.fragments={};c.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){c.fn[a]=function(d){var e=[];d=c(d);var f=this.length===1&&this[0].parentNode;if(f&&f.nodeType===11&&f.childNodes.length===1&&d.length===1){d[b](this[0]);return this}else{f=0;for(var h=
+d.length;f<h;f++){var k=(f>0?this.clone(true):this).get();c(d[f])[b](k);e=e.concat(k)}return this.pushStack(e,a,d.selector)}}});c.extend({clean:function(a,b,d,e){b=b||u;if(typeof b.createElement==="undefined")b=b.ownerDocument||b[0]&&b[0].ownerDocument||u;for(var f=[],h=0,k;(k=a[h])!=null;h++){if(typeof k==="number")k+="";if(k){if(typeof k==="string"&&!bb.test(k))k=b.createTextNode(k);else if(typeof k==="string"){k=k.replace(ya,"<$1></$2>");var l=(za.exec(k)||["",""])[1].toLowerCase(),n=O[l]||O._default,
+s=n[0],v=b.createElement("div");for(v.innerHTML=n[1]+k+n[2];s--;)v=v.lastChild;if(!c.support.tbody){s=ab.test(k);l=l==="table"&&!s?v.firstChild&&v.firstChild.childNodes:n[1]==="<table>"&&!s?v.childNodes:[];for(n=l.length-1;n>=0;--n)c.nodeName(l[n],"tbody")&&!l[n].childNodes.length&&l[n].parentNode.removeChild(l[n])}!c.support.leadingWhitespace&&$.test(k)&&v.insertBefore(b.createTextNode($.exec(k)[0]),v.firstChild);k=v.childNodes}if(k.nodeType)f.push(k);else f=c.merge(f,k)}}if(d)for(h=0;f[h];h++)if(e&&
+c.nodeName(f[h],"script")&&(!f[h].type||f[h].type.toLowerCase()==="text/javascript"))e.push(f[h].parentNode?f[h].parentNode.removeChild(f[h]):f[h]);else{f[h].nodeType===1&&f.splice.apply(f,[h+1,0].concat(c.makeArray(f[h].getElementsByTagName("script"))));d.appendChild(f[h])}return f},cleanData:function(a){for(var b,d,e=c.cache,f=c.event.special,h=c.support.deleteExpando,k=0,l;(l=a[k])!=null;k++)if(!(l.nodeName&&c.noData[l.nodeName.toLowerCase()]))if(d=l[c.expando]){if((b=e[d])&&b.events)for(var n in b.events)f[n]?
+c.event.remove(l,n):c.removeEvent(l,n,b.handle);if(h)delete l[c.expando];else l.removeAttribute&&l.removeAttribute(c.expando);delete e[d]}}});var Ca=/alpha\([^)]*\)/i,db=/opacity=([^)]*)/,eb=/-([a-z])/ig,fb=/([A-Z])/g,Da=/^-?\d+(?:px)?$/i,gb=/^-?\d/,hb={position:"absolute",visibility:"hidden",display:"block"},La=["Left","Right"],Ma=["Top","Bottom"],W,ib=u.defaultView&&u.defaultView.getComputedStyle,jb=function(a,b){return b.toUpperCase()};c.fn.css=function(a,b){if(arguments.length===2&&b===A)return this;
+return c.access(this,a,b,true,function(d,e,f){return f!==A?c.style(d,e,f):c.css(d,e)})};c.extend({cssHooks:{opacity:{get:function(a,b){if(b){var d=W(a,"opacity","opacity");return d===""?"1":d}else return a.style.opacity}}},cssNumber:{zIndex:true,fontWeight:true,opacity:true,zoom:true,lineHeight:true},cssProps:{"float":c.support.cssFloat?"cssFloat":"styleFloat"},style:function(a,b,d,e){if(!(!a||a.nodeType===3||a.nodeType===8||!a.style)){var f,h=c.camelCase(b),k=a.style,l=c.cssHooks[h];b=c.cssProps[h]||
+h;if(d!==A){if(!(typeof d==="number"&&isNaN(d)||d==null)){if(typeof d==="number"&&!c.cssNumber[h])d+="px";if(!l||!("set"in l)||(d=l.set(a,d))!==A)try{k[b]=d}catch(n){}}}else{if(l&&"get"in l&&(f=l.get(a,false,e))!==A)return f;return k[b]}}},css:function(a,b,d){var e,f=c.camelCase(b),h=c.cssHooks[f];b=c.cssProps[f]||f;if(h&&"get"in h&&(e=h.get(a,true,d))!==A)return e;else if(W)return W(a,b,f)},swap:function(a,b,d){var e={},f;for(f in b){e[f]=a.style[f];a.style[f]=b[f]}d.call(a);for(f in b)a.style[f]=
+e[f]},camelCase:function(a){return a.replace(eb,jb)}});c.curCSS=c.css;c.each(["height","width"],function(a,b){c.cssHooks[b]={get:function(d,e,f){var h;if(e){if(d.offsetWidth!==0)h=ma(d,b,f);else c.swap(d,hb,function(){h=ma(d,b,f)});return h+"px"}},set:function(d,e){if(Da.test(e)){e=parseFloat(e);if(e>=0)return e+"px"}else return e}}});if(!c.support.opacity)c.cssHooks.opacity={get:function(a,b){return db.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?parseFloat(RegExp.$1)/100+"":
+b?"1":""},set:function(a,b){var d=a.style;d.zoom=1;var e=c.isNaN(b)?"":"alpha(opacity="+b*100+")",f=d.filter||"";d.filter=Ca.test(f)?f.replace(Ca,e):d.filter+" "+e}};if(ib)W=function(a,b,d){var e;d=d.replace(fb,"-$1").toLowerCase();if(!(b=a.ownerDocument.defaultView))return A;if(b=b.getComputedStyle(a,null)){e=b.getPropertyValue(d);if(e===""&&!c.contains(a.ownerDocument.documentElement,a))e=c.style(a,d)}return e};else if(u.documentElement.currentStyle)W=function(a,b){var d,e,f=a.currentStyle&&a.currentStyle[b],
+h=a.style;if(!Da.test(f)&&gb.test(f)){d=h.left;e=a.runtimeStyle.left;a.runtimeStyle.left=a.currentStyle.left;h.left=b==="fontSize"?"1em":f||0;f=h.pixelLeft+"px";h.left=d;a.runtimeStyle.left=e}return f};if(c.expr&&c.expr.filters){c.expr.filters.hidden=function(a){var b=a.offsetHeight;return a.offsetWidth===0&&b===0||!c.support.reliableHiddenOffsets&&(a.style.display||c.css(a,"display"))==="none"};c.expr.filters.visible=function(a){return!c.expr.filters.hidden(a)}}var kb=c.now(),lb=/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,
+mb=/^(?:select|textarea)/i,nb=/^(?:color|date|datetime|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,ob=/^(?:GET|HEAD|DELETE)$/,Na=/\[\]$/,T=/\=\?(&|$)/,ia=/\?/,pb=/([?&])_=[^&]*/,qb=/^(\w+:)?\/\/([^\/?#]+)/,rb=/%20/g,sb=/#.*$/,Ea=c.fn.load;c.fn.extend({load:function(a,b,d){if(typeof a!=="string"&&Ea)return Ea.apply(this,arguments);else if(!this.length)return this;var e=a.indexOf(" ");if(e>=0){var f=a.slice(e,a.length);a=a.slice(0,e)}e="GET";if(b)if(c.isFunction(b)){d=
+b;b=null}else if(typeof b==="object"){b=c.param(b,c.ajaxSettings.traditional);e="POST"}var h=this;c.ajax({url:a,type:e,dataType:"html",data:b,complete:function(k,l){if(l==="success"||l==="notmodified")h.html(f?c("<div>").append(k.responseText.replace(lb,"")).find(f):k.responseText);d&&h.each(d,[k.responseText,l,k])}});return this},serialize:function(){return c.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?c.makeArray(this.elements):this}).filter(function(){return this.name&&
+!this.disabled&&(this.checked||mb.test(this.nodeName)||nb.test(this.type))}).map(function(a,b){var d=c(this).val();return d==null?null:c.isArray(d)?c.map(d,function(e){return{name:b.name,value:e}}):{name:b.name,value:d}}).get()}});c.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),function(a,b){c.fn[b]=function(d){return this.bind(b,d)}});c.extend({get:function(a,b,d,e){if(c.isFunction(b)){e=e||d;d=b;b=null}return c.ajax({type:"GET",url:a,data:b,success:d,dataType:e})},
+getScript:function(a,b){return c.get(a,null,b,"script")},getJSON:function(a,b,d){return c.get(a,b,d,"json")},post:function(a,b,d,e){if(c.isFunction(b)){e=e||d;d=b;b={}}return c.ajax({type:"POST",url:a,data:b,success:d,dataType:e})},ajaxSetup:function(a){c.extend(c.ajaxSettings,a)},ajaxSettings:{url:location.href,global:true,type:"GET",contentType:"application/x-www-form-urlencoded",processData:true,async:true,xhr:function(){return new E.XMLHttpRequest},accepts:{xml:"application/xml, text/xml",html:"text/html",
+script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},ajax:function(a){var b=c.extend(true,{},c.ajaxSettings,a),d,e,f,h=b.type.toUpperCase(),k=ob.test(h);b.url=b.url.replace(sb,"");b.context=a&&a.context!=null?a.context:b;if(b.data&&b.processData&&typeof b.data!=="string")b.data=c.param(b.data,b.traditional);if(b.dataType==="jsonp"){if(h==="GET")T.test(b.url)||(b.url+=(ia.test(b.url)?"&":"?")+(b.jsonp||"callback")+"=?");else if(!b.data||
+!T.test(b.data))b.data=(b.data?b.data+"&":"")+(b.jsonp||"callback")+"=?";b.dataType="json"}if(b.dataType==="json"&&(b.data&&T.test(b.data)||T.test(b.url))){d=b.jsonpCallback||"jsonp"+kb++;if(b.data)b.data=(b.data+"").replace(T,"="+d+"$1");b.url=b.url.replace(T,"="+d+"$1");b.dataType="script";var l=E[d];E[d]=function(m){f=m;c.handleSuccess(b,w,e,f);c.handleComplete(b,w,e,f);if(c.isFunction(l))l(m);else{E[d]=A;try{delete E[d]}catch(p){}}v&&v.removeChild(B)}}if(b.dataType==="script"&&b.cache===null)b.cache=
+false;if(b.cache===false&&h==="GET"){var n=c.now(),s=b.url.replace(pb,"$1_="+n);b.url=s+(s===b.url?(ia.test(b.url)?"&":"?")+"_="+n:"")}if(b.data&&h==="GET")b.url+=(ia.test(b.url)?"&":"?")+b.data;b.global&&c.active++===0&&c.event.trigger("ajaxStart");n=(n=qb.exec(b.url))&&(n[1]&&n[1]!==location.protocol||n[2]!==location.host);if(b.dataType==="script"&&h==="GET"&&n){var v=u.getElementsByTagName("head")[0]||u.documentElement,B=u.createElement("script");if(b.scriptCharset)B.charset=b.scriptCharset;B.src=
+b.url;if(!d){var D=false;B.onload=B.onreadystatechange=function(){if(!D&&(!this.readyState||this.readyState==="loaded"||this.readyState==="complete")){D=true;c.handleSuccess(b,w,e,f);c.handleComplete(b,w,e,f);B.onload=B.onreadystatechange=null;v&&B.parentNode&&v.removeChild(B)}}}v.insertBefore(B,v.firstChild);return A}var H=false,w=b.xhr();if(w){b.username?w.open(h,b.url,b.async,b.username,b.password):w.open(h,b.url,b.async);try{if(b.data!=null&&!k||a&&a.contentType)w.setRequestHeader("Content-Type",
+b.contentType);if(b.ifModified){c.lastModified[b.url]&&w.setRequestHeader("If-Modified-Since",c.lastModified[b.url]);c.etag[b.url]&&w.setRequestHeader("If-None-Match",c.etag[b.url])}n||w.setRequestHeader("X-Requested-With","XMLHttpRequest");w.setRequestHeader("Accept",b.dataType&&b.accepts[b.dataType]?b.accepts[b.dataType]+", */*; q=0.01":b.accepts._default)}catch(G){}if(b.beforeSend&&b.beforeSend.call(b.context,w,b)===false){b.global&&c.active--===1&&c.event.trigger("ajaxStop");w.abort();return false}b.global&&
+c.triggerGlobal(b,"ajaxSend",[w,b]);var M=w.onreadystatechange=function(m){if(!w||w.readyState===0||m==="abort"){H||c.handleComplete(b,w,e,f);H=true;if(w)w.onreadystatechange=c.noop}else if(!H&&w&&(w.readyState===4||m==="timeout")){H=true;w.onreadystatechange=c.noop;e=m==="timeout"?"timeout":!c.httpSuccess(w)?"error":b.ifModified&&c.httpNotModified(w,b.url)?"notmodified":"success";var p;if(e==="success")try{f=c.httpData(w,b.dataType,b)}catch(q){e="parsererror";p=q}if(e==="success"||e==="notmodified")d||
+c.handleSuccess(b,w,e,f);else c.handleError(b,w,e,p);d||c.handleComplete(b,w,e,f);m==="timeout"&&w.abort();if(b.async)w=null}};try{var g=w.abort;w.abort=function(){w&&g.call&&g.call(w);M("abort")}}catch(j){}b.async&&b.timeout>0&&setTimeout(function(){w&&!H&&M("timeout")},b.timeout);try{w.send(k||b.data==null?null:b.data)}catch(o){c.handleError(b,w,null,o);c.handleComplete(b,w,e,f)}b.async||M();return w}},param:function(a,b){var d=[],e=function(h,k){k=c.isFunction(k)?k():k;d[d.length]=encodeURIComponent(h)+
+"="+encodeURIComponent(k)};if(b===A)b=c.ajaxSettings.traditional;if(c.isArray(a)||a.jquery)c.each(a,function(){e(this.name,this.value)});else for(var f in a)ca(f,a[f],b,e);return d.join("&").replace(rb,"+")}});c.extend({active:0,lastModified:{},etag:{},handleError:function(a,b,d,e){a.error&&a.error.call(a.context,b,d,e);a.global&&c.triggerGlobal(a,"ajaxError",[b,a,e])},handleSuccess:function(a,b,d,e){a.success&&a.success.call(a.context,e,d,b);a.global&&c.triggerGlobal(a,"ajaxSuccess",[b,a])},handleComplete:function(a,
+b,d){a.complete&&a.complete.call(a.context,b,d);a.global&&c.triggerGlobal(a,"ajaxComplete",[b,a]);a.global&&c.active--===1&&c.event.trigger("ajaxStop")},triggerGlobal:function(a,b,d){(a.context&&a.context.url==null?c(a.context):c.event).trigger(b,d)},httpSuccess:function(a){try{return!a.status&&location.protocol==="file:"||a.status>=200&&a.status<300||a.status===304||a.status===1223}catch(b){}return false},httpNotModified:function(a,b){var d=a.getResponseHeader("Last-Modified"),e=a.getResponseHeader("Etag");
+if(d)c.lastModified[b]=d;if(e)c.etag[b]=e;return a.status===304},httpData:function(a,b,d){var e=a.getResponseHeader("content-type")||"",f=b==="xml"||!b&&e.indexOf("xml")>=0;a=f?a.responseXML:a.responseText;f&&a.documentElement.nodeName==="parsererror"&&c.error("parsererror");if(d&&d.dataFilter)a=d.dataFilter(a,b);if(typeof a==="string")if(b==="json"||!b&&e.indexOf("json")>=0)a=c.parseJSON(a);else if(b==="script"||!b&&e.indexOf("javascript")>=0)c.globalEval(a);return a}});if(E.ActiveXObject)c.ajaxSettings.xhr=
+function(){if(E.location.protocol!=="file:")try{return new E.XMLHttpRequest}catch(a){}try{return new E.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}};c.support.ajax=!!c.ajaxSettings.xhr();var da={},tb=/^(?:toggle|show|hide)$/,ub=/^([+\-]=)?([\d+.\-]+)(.*)$/,aa,na=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]];c.fn.extend({show:function(a,b,d){if(a||a===0)return this.animate(S("show",3),a,b,d);else{a=
+0;for(b=this.length;a<b;a++){if(!c.data(this[a],"olddisplay")&&this[a].style.display==="none")this[a].style.display="";this[a].style.display===""&&c.css(this[a],"display")==="none"&&c.data(this[a],"olddisplay",oa(this[a].nodeName))}for(a=0;a<b;a++)this[a].style.display=c.data(this[a],"olddisplay")||"";return this}},hide:function(a,b,d){if(a||a===0)return this.animate(S("hide",3),a,b,d);else{a=0;for(b=this.length;a<b;a++){d=c.css(this[a],"display");d!=="none"&&c.data(this[a],"olddisplay",d)}for(a=
+0;a<b;a++)this[a].style.display="none";return this}},_toggle:c.fn.toggle,toggle:function(a,b,d){var e=typeof a==="boolean";if(c.isFunction(a)&&c.isFunction(b))this._toggle.apply(this,arguments);else a==null||e?this.each(function(){var f=e?a:c(this).is(":hidden");c(this)[f?"show":"hide"]()}):this.animate(S("toggle",3),a,b,d);return this},fadeTo:function(a,b,d,e){return this.filter(":hidden").css("opacity",0).show().end().animate({opacity:b},a,d,e)},animate:function(a,b,d,e){var f=c.speed(b,d,e);if(c.isEmptyObject(a))return this.each(f.complete);
+return this[f.queue===false?"each":"queue"](function(){var h=c.extend({},f),k,l=this.nodeType===1,n=l&&c(this).is(":hidden"),s=this;for(k in a){var v=c.camelCase(k);if(k!==v){a[v]=a[k];delete a[k];k=v}if(a[k]==="hide"&&n||a[k]==="show"&&!n)return h.complete.call(this);if(l&&(k==="height"||k==="width")){h.overflow=[this.style.overflow,this.style.overflowX,this.style.overflowY];if(c.css(this,"display")==="inline"&&c.css(this,"float")==="none")if(c.support.inlineBlockNeedsLayout)if(oa(this.nodeName)===
+"inline")this.style.display="inline-block";else{this.style.display="inline";this.style.zoom=1}else this.style.display="inline-block"}if(c.isArray(a[k])){(h.specialEasing=h.specialEasing||{})[k]=a[k][1];a[k]=a[k][0]}}if(h.overflow!=null)this.style.overflow="hidden";h.curAnim=c.extend({},a);c.each(a,function(B,D){var H=new c.fx(s,h,B);if(tb.test(D))H[D==="toggle"?n?"show":"hide":D](a);else{var w=ub.exec(D),G=H.cur(true)||0;if(w){var M=parseFloat(w[2]),g=w[3]||"px";if(g!=="px"){c.style(s,B,(M||1)+g);
+G=(M||1)/H.cur(true)*G;c.style(s,B,G+g)}if(w[1])M=(w[1]==="-="?-1:1)*M+G;H.custom(G,M,g)}else H.custom(G,D,"")}});return true})},stop:function(a,b){var d=c.timers;a&&this.queue([]);this.each(function(){for(var e=d.length-1;e>=0;e--)if(d[e].elem===this){b&&d[e](true);d.splice(e,1)}});b||this.dequeue();return this}});c.each({slideDown:S("show",1),slideUp:S("hide",1),slideToggle:S("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"}},function(a,b){c.fn[a]=function(d,e,f){return this.animate(b,
+d,e,f)}});c.extend({speed:function(a,b,d){var e=a&&typeof a==="object"?c.extend({},a):{complete:d||!d&&b||c.isFunction(a)&&a,duration:a,easing:d&&b||b&&!c.isFunction(b)&&b};e.duration=c.fx.off?0:typeof e.duration==="number"?e.duration:e.duration in c.fx.speeds?c.fx.speeds[e.duration]:c.fx.speeds._default;e.old=e.complete;e.complete=function(){e.queue!==false&&c(this).dequeue();c.isFunction(e.old)&&e.old.call(this)};return e},easing:{linear:function(a,b,d,e){return d+e*a},swing:function(a,b,d,e){return(-Math.cos(a*
+Math.PI)/2+0.5)*e+d}},timers:[],fx:function(a,b,d){this.options=b;this.elem=a;this.prop=d;if(!b.orig)b.orig={}}});c.fx.prototype={update:function(){this.options.step&&this.options.step.call(this.elem,this.now,this);(c.fx.step[this.prop]||c.fx.step._default)(this)},cur:function(){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null))return this.elem[this.prop];var a=parseFloat(c.css(this.elem,this.prop));return a&&a>-1E4?a:0},custom:function(a,b,d){function e(h){return f.step(h)}
+this.startTime=c.now();this.start=a;this.end=b;this.unit=d||this.unit||"px";this.now=this.start;this.pos=this.state=0;var f=this;a=c.fx;e.elem=this.elem;if(e()&&c.timers.push(e)&&!aa)aa=setInterval(a.tick,a.interval)},show:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.show=true;this.custom(this.prop==="width"||this.prop==="height"?1:0,this.cur());c(this.elem).show()},hide:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.hide=true;
+this.custom(this.cur(),0)},step:function(a){var b=c.now(),d=true;if(a||b>=this.options.duration+this.startTime){this.now=this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;for(var e in this.options.curAnim)if(this.options.curAnim[e]!==true)d=false;if(d){if(this.options.overflow!=null&&!c.support.shrinkWrapBlocks){var f=this.elem,h=this.options;c.each(["","X","Y"],function(l,n){f.style["overflow"+n]=h.overflow[l]})}this.options.hide&&c(this.elem).hide();if(this.options.hide||
+this.options.show)for(var k in this.options.curAnim)c.style(this.elem,k,this.options.orig[k]);this.options.complete.call(this.elem)}return false}else{a=b-this.startTime;this.state=a/this.options.duration;b=this.options.easing||(c.easing.swing?"swing":"linear");this.pos=c.easing[this.options.specialEasing&&this.options.specialEasing[this.prop]||b](this.state,a,0,1,this.options.duration);this.now=this.start+(this.end-this.start)*this.pos;this.update()}return true}};c.extend(c.fx,{tick:function(){for(var a=
+c.timers,b=0;b<a.length;b++)a[b]()||a.splice(b--,1);a.length||c.fx.stop()},interval:13,stop:function(){clearInterval(aa);aa=null},speeds:{slow:600,fast:200,_default:400},step:{opacity:function(a){c.style(a.elem,"opacity",a.now)},_default:function(a){if(a.elem.style&&a.elem.style[a.prop]!=null)a.elem.style[a.prop]=(a.prop==="width"||a.prop==="height"?Math.max(0,a.now):a.now)+a.unit;else a.elem[a.prop]=a.now}}});if(c.expr&&c.expr.filters)c.expr.filters.animated=function(a){return c.grep(c.timers,function(b){return a===
+b.elem}).length};var vb=/^t(?:able|d|h)$/i,Fa=/^(?:body|html)$/i;c.fn.offset="getBoundingClientRect"in u.documentElement?function(a){var b=this[0],d;if(a)return this.each(function(k){c.offset.setOffset(this,a,k)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return c.offset.bodyOffset(b);try{d=b.getBoundingClientRect()}catch(e){}var f=b.ownerDocument,h=f.documentElement;if(!d||!c.contains(h,b))return d||{top:0,left:0};b=f.body;f=ea(f);return{top:d.top+(f.pageYOffset||c.support.boxModel&&
+h.scrollTop||b.scrollTop)-(h.clientTop||b.clientTop||0),left:d.left+(f.pageXOffset||c.support.boxModel&&h.scrollLeft||b.scrollLeft)-(h.clientLeft||b.clientLeft||0)}}:function(a){var b=this[0];if(a)return this.each(function(s){c.offset.setOffset(this,a,s)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return c.offset.bodyOffset(b);c.offset.initialize();var d=b.offsetParent,e=b.ownerDocument,f,h=e.documentElement,k=e.body;f=(e=e.defaultView)?e.getComputedStyle(b,null):b.currentStyle;
+for(var l=b.offsetTop,n=b.offsetLeft;(b=b.parentNode)&&b!==k&&b!==h;){if(c.offset.supportsFixedPosition&&f.position==="fixed")break;f=e?e.getComputedStyle(b,null):b.currentStyle;l-=b.scrollTop;n-=b.scrollLeft;if(b===d){l+=b.offsetTop;n+=b.offsetLeft;if(c.offset.doesNotAddBorder&&!(c.offset.doesAddBorderForTableAndCells&&vb.test(b.nodeName))){l+=parseFloat(f.borderTopWidth)||0;n+=parseFloat(f.borderLeftWidth)||0}d=b.offsetParent}if(c.offset.subtractsBorderForOverflowNotVisible&&f.overflow!=="visible"){l+=
+parseFloat(f.borderTopWidth)||0;n+=parseFloat(f.borderLeftWidth)||0}f=f}if(f.position==="relative"||f.position==="static"){l+=k.offsetTop;n+=k.offsetLeft}if(c.offset.supportsFixedPosition&&f.position==="fixed"){l+=Math.max(h.scrollTop,k.scrollTop);n+=Math.max(h.scrollLeft,k.scrollLeft)}return{top:l,left:n}};c.offset={initialize:function(){var a=u.body,b=u.createElement("div"),d,e,f,h=parseFloat(c.css(a,"marginTop"))||0;c.extend(b.style,{position:"absolute",top:0,left:0,margin:0,border:0,width:"1px",
+height:"1px",visibility:"hidden"});b.innerHTML="<div style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;'><div></div></div><table style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;' cellpadding='0' cellspacing='0'><tr><td></td></tr></table>";a.insertBefore(b,a.firstChild);d=b.firstChild;e=d.firstChild;f=d.nextSibling.firstChild.firstChild;this.doesNotAddBorder=e.offsetTop!==5;this.doesAddBorderForTableAndCells=
+f.offsetTop===5;e.style.position="fixed";e.style.top="20px";this.supportsFixedPosition=e.offsetTop===20||e.offsetTop===15;e.style.position=e.style.top="";d.style.overflow="hidden";d.style.position="relative";this.subtractsBorderForOverflowNotVisible=e.offsetTop===-5;this.doesNotIncludeMarginInBodyOffset=a.offsetTop!==h;a.removeChild(b);c.offset.initialize=c.noop},bodyOffset:function(a){var b=a.offsetTop,d=a.offsetLeft;c.offset.initialize();if(c.offset.doesNotIncludeMarginInBodyOffset){b+=parseFloat(c.css(a,
+"marginTop"))||0;d+=parseFloat(c.css(a,"marginLeft"))||0}return{top:b,left:d}},setOffset:function(a,b,d){var e=c.css(a,"position");if(e==="static")a.style.position="relative";var f=c(a),h=f.offset(),k=c.css(a,"top"),l=c.css(a,"left"),n=e==="absolute"&&c.inArray("auto",[k,l])>-1;e={};var s={};if(n)s=f.position();k=n?s.top:parseInt(k,10)||0;l=n?s.left:parseInt(l,10)||0;if(c.isFunction(b))b=b.call(a,d,h);if(b.top!=null)e.top=b.top-h.top+k;if(b.left!=null)e.left=b.left-h.left+l;"using"in b?b.using.call(a,
+e):f.css(e)}};c.fn.extend({position:function(){if(!this[0])return null;var a=this[0],b=this.offsetParent(),d=this.offset(),e=Fa.test(b[0].nodeName)?{top:0,left:0}:b.offset();d.top-=parseFloat(c.css(a,"marginTop"))||0;d.left-=parseFloat(c.css(a,"marginLeft"))||0;e.top+=parseFloat(c.css(b[0],"borderTopWidth"))||0;e.left+=parseFloat(c.css(b[0],"borderLeftWidth"))||0;return{top:d.top-e.top,left:d.left-e.left}},offsetParent:function(){return this.map(function(){for(var a=this.offsetParent||u.body;a&&!Fa.test(a.nodeName)&&
+c.css(a,"position")==="static";)a=a.offsetParent;return a})}});c.each(["Left","Top"],function(a,b){var d="scroll"+b;c.fn[d]=function(e){var f=this[0],h;if(!f)return null;if(e!==A)return this.each(function(){if(h=ea(this))h.scrollTo(!a?e:c(h).scrollLeft(),a?e:c(h).scrollTop());else this[d]=e});else return(h=ea(f))?"pageXOffset"in h?h[a?"pageYOffset":"pageXOffset"]:c.support.boxModel&&h.document.documentElement[d]||h.document.body[d]:f[d]}});c.each(["Height","Width"],function(a,b){var d=b.toLowerCase();
+c.fn["inner"+b]=function(){return this[0]?parseFloat(c.css(this[0],d,"padding")):null};c.fn["outer"+b]=function(e){return this[0]?parseFloat(c.css(this[0],d,e?"margin":"border")):null};c.fn[d]=function(e){var f=this[0];if(!f)return e==null?null:this;if(c.isFunction(e))return this.each(function(h){var k=c(this);k[d](e.call(this,h,k[d]()))});return c.isWindow(f)?f.document.compatMode==="CSS1Compat"&&f.document.documentElement["client"+b]||f.document.body["client"+b]:f.nodeType===9?Math.max(f.documentElement["client"+
+b],f.body["scroll"+b],f.documentElement["scroll"+b],f.body["offset"+b],f.documentElement["offset"+b]):e===A?parseFloat(c.css(f,d)):this.css(d,typeof e==="string"?e:e+"px")}})})(window);
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/jquery.hotkeys.js b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/jquery.hotkeys.js
new file mode 100644
index 0000000..09b21e0
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/jquery.hotkeys.js
@@ -0,0 +1,99 @@
+/*
+ * jQuery Hotkeys Plugin
+ * Copyright 2010, John Resig
+ * Dual licensed under the MIT or GPL Version 2 licenses.
+ *
+ * Based upon the plugin by Tzury Bar Yochay:
+ * http://github.com/tzuryby/hotkeys
+ *
+ * Original idea by:
+ * Binny V A, http://www.openjs.com/scripts/events/keyboard_shortcuts/
+*/
+
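+/*
+ * Usage sketch (illustrative; not part of the original plugin source): the
+ * key combination is passed as the `data` argument to jQuery's .bind(), and
+ * keyHandler below picks it up from handleObj.data. Binding Ctrl+S:
+ *
+ *   jQuery(document).bind('keydown', 'ctrl+s', function(event) {
+ *     event.preventDefault();   // keep the browser's default save dialog away
+ *     // shortcut handling goes here
+ *   });
+ */
+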
+(function(jQuery){
+
+ jQuery.hotkeys = {
+ version: "0.8",
+
+ specialKeys: {
+ 8: "backspace", 9: "tab", 13: "return", 16: "shift", 17: "ctrl", 18: "alt", 19: "pause",
+ 20: "capslock", 27: "esc", 32: "space", 33: "pageup", 34: "pagedown", 35: "end", 36: "home",
+ 37: "left", 38: "up", 39: "right", 40: "down", 45: "insert", 46: "del",
+ 96: "0", 97: "1", 98: "2", 99: "3", 100: "4", 101: "5", 102: "6", 103: "7",
+ 104: "8", 105: "9", 106: "*", 107: "+", 109: "-", 110: ".", 111 : "/",
+ 112: "f1", 113: "f2", 114: "f3", 115: "f4", 116: "f5", 117: "f6", 118: "f7", 119: "f8",
+ 120: "f9", 121: "f10", 122: "f11", 123: "f12", 144: "numlock", 145: "scroll", 191: "/", 224: "meta"
+ },
+
+ shiftNums: {
+ "`": "~", "1": "!", "2": "@", "3": "#", "4": "$", "5": "%", "6": "^", "7": "&",
+ "8": "*", "9": "(", "0": ")", "-": "_", "=": "+", ";": ": ", "'": "\"", ",": "<",
+ ".": ">", "/": "?", "\\": "|"
+ }
+ };
+
+ function keyHandler( handleObj ) {
+ // Only care when a key-combination string has been specified
+ if ( typeof handleObj.data !== "string" ) {
+ return;
+ }
+
+ var origHandler = handleObj.handler,
+ keys = handleObj.data.toLowerCase().split(" ");
+
+ handleObj.handler = function( event ) {
+ // Don't fire in text-accepting inputs that we didn't directly bind to
+ if ( this !== event.target && (/textarea|select/i.test( event.target.nodeName ) ||
+ event.target.type === "text") ) {
+ return;
+ }
+
+ // Keypress represents characters, not special keys
+ var special = event.type !== "keypress" && jQuery.hotkeys.specialKeys[ event.which ],
+ character = String.fromCharCode( event.which ).toLowerCase(),
+ key, modif = "", possible = {};
+
+ // check combinations (alt|ctrl|shift+anything)
+ if ( event.altKey && special !== "alt" ) {
+ modif += "alt+";
+ }
+
+ if ( event.ctrlKey && special !== "ctrl" ) {
+ modif += "ctrl+";
+ }
+
+ // TODO: Need to make sure this works consistently across platforms
+ if ( event.metaKey && !event.ctrlKey && special !== "meta" ) {
+ modif += "meta+";
+ }
+
+ if ( event.shiftKey && special !== "shift" ) {
+ modif += "shift+";
+ }
+
+ if ( special ) {
+ possible[ modif + special ] = true;
+
+ } else {
+ possible[ modif + character ] = true;
+ possible[ modif + jQuery.hotkeys.shiftNums[ character ] ] = true;
+
+ // "$" can be triggered as "Shift+4" or "Shift+$" or just "$"
+ if ( modif === "shift+" ) {
+ possible[ jQuery.hotkeys.shiftNums[ character ] ] = true;
+ }
+ }
+
+ for ( var i = 0, l = keys.length; i < l; i++ ) {
+ if ( possible[ keys[i] ] ) {
+ return origHandler.apply( this, arguments );
+ }
+ }
+ };
+ }
+
+ jQuery.each([ "keydown", "keyup", "keypress" ], function() {
+ jQuery.event.special[ this ] = { add: keyHandler };
+ });
+
+})( jQuery );
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/jquery.isonscreen.js b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/jquery.isonscreen.js
new file mode 100644
index 0000000..0182ebd
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/jquery.isonscreen.js
@@ -0,0 +1,53 @@
+/* Copyright (c) 2010
+ * @author Laurence Wheway
+ * Dual licensed under the MIT (http://www.opensource.org/licenses/mit-license.php)
+ * and GPL (http://www.opensource.org/licenses/gpl-license.php) licenses.
+ *
+ * @version 1.2.0
+ */
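+
+/*
+ * Usage sketch (illustrative; not part of the original file): the element
+ * form tests visibility against the current window viewport, while the
+ * static form compares two explicit {left, top, width, height} rectangles:
+ *
+ *   $('#row').isOnScreen();                 // true if inside the viewport
+ *   jQuery.isOnScreen({left: 0, top: 500, width: 100, height: 20});
+ */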
+(function($) {
+ jQuery.extend({
+ isOnScreen: function(box, container) {
+ // Ensure numbers come in as integers (not strings) and strip 'px' if it's there.
+ for(var i in box){box[i] = parseFloat(box[i]);}
+ for(var i in container){container[i] = parseFloat(container[i]);}
+
+ if(!container){
+ container = {
+ left: $(window).scrollLeft(),
+ top: $(window).scrollTop(),
+ width: $(window).width(),
+ height: $(window).height()
+ }
+ }
+
+ if( box.left+box.width-container.left > 0 &&
+ box.left < container.width+container.left &&
+ box.top+box.height-container.top > 0 &&
+ box.top < container.height+container.top
+ ) return true;
+ return false;
+ }
+ })
+
+
+ jQuery.fn.isOnScreen = function (container) {
+ for(var i in container){container[i] = parseFloat(container[i]);}
+
+ if(!container){
+ container = {
+ left: $(window).scrollLeft(),
+ top: $(window).scrollTop(),
+ width: $(window).width(),
+ height: $(window).height()
+ }
+ }
+
+ if( $(this).offset().left+$(this).width()-container.left > 0 &&
+ $(this).offset().left < container.width+container.left &&
+ $(this).offset().top+$(this).height()-container.top > 0 &&
+ $(this).offset().top < container.height+container.top
+ ) return true;
+ return false;
+ }
+})(jQuery);
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/jquery.tablesorter.min.js b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/jquery.tablesorter.min.js
new file mode 100644
index 0000000..64c7007
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/jquery.tablesorter.min.js
@@ -0,0 +1,2 @@
+
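+/* Typical initialization (a sketch, assuming the default options): make any
+ * table that has a <thead> and <tbody> sortable, pre-sorted ascending on
+ * column 0:
+ *
+ *   $(function(){ $('table.sortable').tablesorter({sortList: [[0, 0]]}); });
+ */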
+(function($){$.extend({tablesorter:new function(){var parsers=[],widgets=[];this.defaults={cssHeader:"header",cssAsc:"headerSortUp",cssDesc:"headerSortDown",sortInitialOrder:"asc",sortMultiSortKey:"shiftKey",sortForce:null,sortAppend:null,textExtraction:"simple",parsers:{},widgets:[],widgetZebra:{css:["even","odd"]},headers:{},widthFixed:false,cancelSelection:true,sortList:[],headerList:[],dateFormat:"us",decimal:'.',debug:false};function benchmark(s,d){log(s+","+(new Date().getTime()-d.getTime())+"ms");}this.benchmark=benchmark;function log(s){if(typeof console!="undefined"&&typeof console.debug!="undefined"){console.log(s);}else{alert(s);}}function buildParserCache(table,$headers){if(table.config.debug){var parsersDebug="";}var rows=table.tBodies[0].rows;if(table.tBodies[0].rows[0]){var list=[],cells=rows[0].cells,l=cells.length;for(var i=0;i<l;i++){var p=false;if($.metadata&&($($headers[i]).metadata()&&$($headers[i]).metadata().sorter)){p=getParserById($($headers[i]).metadata().sorter);}else if((table.config.headers[i]&&table.config.headers[i].sorter)){p=getParserById(table.config.headers[i].sorter);}if(!p){p=detectParserForColumn(table,cells[i]);}if(table.config.debug){parsersDebug+="column:"+i+" parser:"+p.id+"\n";}list.push(p);}}if(table.config.debug){log(parsersDebug);}return list;};function detectParserForColumn(table,node){var l=parsers.length;for(var i=1;i<l;i++){if(parsers[i].is($.trim(getElementText(table.config,node)),table,node)){return parsers[i];}}return parsers[0];}function getParserById(name){var l=parsers.length;for(var i=0;i<l;i++){if(parsers[i].id.toLowerCase()==name.toLowerCase()){return parsers[i];}}return false;}function buildCache(table){if(table.config.debug){var cacheTime=new Date();}var totalRows=(table.tBodies[0]&&table.tBodies[0].rows.length)||0,totalCells=(table.tBodies[0].rows[0]&&table.tBodies[0].rows[0].cells.length)||0,parsers=table.config.parsers,cache={row:[],normalized:[]};for(var i=0;i<totalRows;++i){var c=table.tBodies[0].rows[i],cols=[];cache.row.push($(c));for(var j=0;j<totalCells;++j){cols.push(parsers[j].format(getElementText(table.config,c.cells[j]),table,c.cells[j]));}cols.push(i);cache.normalized.push(cols);cols=null;};if(table.config.debug){benchmark("Building cache for "+totalRows+" rows:",cacheTime);}return cache;};function getElementText(config,node){if(!node)return"";var t="";if(config.textExtraction=="simple"){if(node.childNodes[0]&&node.childNodes[0].hasChildNodes()){t=node.childNodes[0].innerHTML;}else{t=node.innerHTML;}}else{if(typeof(config.textExtraction)=="function"){t=config.textExtraction(node);}else{t=$(node).text();}}return t;}function appendToTable(table,cache){if(table.config.debug){var appendTime=new Date()}var c=cache,r=c.row,n=c.normalized,totalRows=n.length,checkCell=(n[0].length-1),tableBody=$(table.tBodies[0]),rows=[];for(var i=0;i<totalRows;i++){rows.push(r[n[i][checkCell]]);if(!table.config.appender){var o=r[n[i][checkCell]];var l=o.length;for(var j=0;j<l;j++){tableBody[0].appendChild(o[j]);}}}if(table.config.appender){table.config.appender(table,rows);}rows=null;if(table.config.debug){benchmark("Rebuilt table:",appendTime);}applyWidget(table);setTimeout(function(){$(table).trigger("sortEnd");},0);};function buildHeaders(table){if(table.config.debug){var time=new Date();}var meta=($.metadata)?true:false,tableHeadersRows=[];for(var i=0;i<table.tHead.rows.length;i++){tableHeadersRows[i]=0;};$tableHeaders=$("thead 
th",table);$tableHeaders.each(function(index){this.count=0;this.column=index;this.order=formatSortingOrder(table.config.sortInitialOrder);if(checkHeaderMetadata(this)||checkHeaderOptions(table,index))this.sortDisabled=true;if(!this.sortDisabled){$(this).addClass(table.config.cssHeader);}table.config.headerList[index]=this;});if(table.config.debug){benchmark("Built headers:",time);log($tableHeaders);}return $tableHeaders;};function checkCellColSpan(table,rows,row){var arr=[],r=table.tHead.rows,c=r[row].cells;for(var i=0;i<c.length;i++){var cell=c[i];if(cell.colSpan>1){arr=arr.concat(checkCellColSpan(table,headerArr,row++));}else{if(table.tHead.length==1||(cell.rowSpan>1||!r[row+1])){arr.push(cell);}}}return arr;};function checkHeaderMetadata(cell){if(($.metadata)&&($(cell).metadata().sorter===false)){return true;};return false;}function checkHeaderOptions(table,i){if((table.config.headers[i])&&(table.config.headers[i].sorter===false)){return true;};return false;}function applyWidget(table){var c=table.config.widgets;var l=c.length;for(var i=0;i<l;i++){getWidgetById(c[i]).format(table);}}function getWidgetById(name){var l=widgets.length;for(var i=0;i<l;i++){if(widgets[i].id.toLowerCase()==name.toLowerCase()){return widgets[i];}}};function formatSortingOrder(v){if(typeof(v)!="Number"){i=(v.toLowerCase()=="desc")?1:0;}else{i=(v==(0||1))?v:0;}return i;}function isValueInArray(v,a){var l=a.length;for(var i=0;i<l;i++){if(a[i][0]==v){return true;}}return false;}function setHeadersCss(table,$headers,list,css){$headers.removeClass(css[0]).removeClass(css[1]);var h=[];$headers.each(function(offset){if(!this.sortDisabled){h[this.column]=$(this);}});var l=list.length;for(var i=0;i<l;i++){h[list[i][0]].addClass(css[list[i][1]]);}}function fixColumnWidth(table,$headers){var c=table.config;if(c.widthFixed){var colgroup=$('<colgroup>');$("tr:first td",table.tBodies[0]).each(function(){colgroup.append($('<col>').css('width',$(this).width()));});$(table).prepend(colgroup);};}function updateHeaderSortCount(table,sortList){var c=table.config,l=sortList.length;for(var i=0;i<l;i++){var s=sortList[i],o=c.headerList[s[0]];o.count=s[1];o.count++;}}function multisort(table,sortList,cache){if(table.config.debug){var sortTime=new Date();}var dynamicExp="var sortWrapper = function(a,b) {",l=sortList.length;for(var i=0;i<l;i++){var c=sortList[i][0];var order=sortList[i][1];var s=(getCachedSortType(table.config.parsers,c)=="text")?((order==0)?"sortText":"sortTextDesc"):((order==0)?"sortNumeric":"sortNumericDesc");var e="e"+i;dynamicExp+="var "+e+" = "+s+"(a["+c+"],b["+c+"]); ";dynamicExp+="if("+e+") { return "+e+"; } ";dynamicExp+="else { ";}var orgOrderCol=cache.normalized[0].length-1;dynamicExp+="return a["+orgOrderCol+"]-b["+orgOrderCol+"];";for(var i=0;i<l;i++){dynamicExp+="}; ";}dynamicExp+="return 0; ";dynamicExp+="}; ";eval(dynamicExp);cache.normalized.sort(sortWrapper);if(table.config.debug){benchmark("Sorting on "+sortList.toString()+" and dir "+order+" time:",sortTime);}return cache;};function sortText(a,b){return((a<b)?-1:((a>b)?1:0));};function sortTextDesc(a,b){return((b<a)?-1:((b>a)?1:0));};function sortNumeric(a,b){return a-b;};function sortNumericDesc(a,b){return b-a;};function getCachedSortType(parsers,i){return parsers[i].type;};this.construct=function(settings){return this.each(function(){if(!this.tHead||!this.tBodies)return;var 
$this,$document,$headers,cache,config,shiftDown=0,sortOrder;this.config={};config=$.extend(this.config,$.tablesorter.defaults,settings);$this=$(this);$headers=buildHeaders(this);this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);var sortCSS=[config.cssDesc,config.cssAsc];fixColumnWidth(this);$headers.click(function(e){$this.trigger("sortStart");var totalRows=($this[0].tBodies[0]&&$this[0].tBodies[0].rows.length)||0;if(!this.sortDisabled&&totalRows>0){var $cell=$(this);var i=this.column;this.order=this.count++%2;if(!e[config.sortMultiSortKey]){config.sortList=[];if(config.sortForce!=null){var a=config.sortForce;for(var j=0;j<a.length;j++){if(a[j][0]!=i){config.sortList.push(a[j]);}}}config.sortList.push([i,this.order]);}else{if(isValueInArray(i,config.sortList)){for(var j=0;j<config.sortList.length;j++){var s=config.sortList[j],o=config.headerList[s[0]];if(s[0]==i){o.count=s[1];o.count++;s[1]=o.count%2;}}}else{config.sortList.push([i,this.order]);}};setTimeout(function(){setHeadersCss($this[0],$headers,config.sortList,sortCSS);appendToTable($this[0],multisort($this[0],config.sortList,cache));},1);return false;}}).mousedown(function(){if(config.cancelSelection){this.onselectstart=function(){return false};return false;}});$this.bind("update",function(){this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);}).bind("sorton",function(e,list){$(this).trigger("sortStart");config.sortList=list;var sortList=config.sortList;updateHeaderSortCount(this,sortList);setHeadersCss(this,$headers,sortList,sortCSS);appendToTable(this,multisort(this,sortList,cache));}).bind("appendCache",function(){appendToTable(this,cache);}).bind("applyWidgetId",function(e,id){getWidgetById(id).format(this);}).bind("applyWidgets",function(){applyWidget(this);});if($.metadata&&($(this).metadata()&&$(this).metadata().sortlist)){config.sortList=$(this).metadata().sortlist;}if(config.sortList.length>0){$this.trigger("sorton",[config.sortList]);}applyWidget(this);});};this.addParser=function(parser){var l=parsers.length,a=true;for(var i=0;i<l;i++){if(parsers[i].id.toLowerCase()==parser.id.toLowerCase()){a=false;}}if(a){parsers.push(parser);};};this.addWidget=function(widget){widgets.push(widget);};this.formatFloat=function(s){var i=parseFloat(s);return(isNaN(i))?0:i;};this.formatInt=function(s){var i=parseInt(s);return(isNaN(i))?0:i;};this.isDigit=function(s,config){var DECIMAL='\\'+config.decimal;var exp='/(^[+]?0('+DECIMAL+'0+)?$)|(^([-+]?[1-9][0-9]*)$)|(^([-+]?((0?|[1-9][0-9]*)'+DECIMAL+'(0*[1-9][0-9]*)))$)|(^[-+]?[1-9]+[0-9]*'+DECIMAL+'0+$)/';return RegExp(exp).test($.trim(s));};this.clearTableBody=function(table){if($.browser.msie){function empty(){while(this.firstChild)this.removeChild(this.firstChild);}empty.apply(table.tBodies[0]);}else{table.tBodies[0].innerHTML="";}};}});$.fn.extend({tablesorter:$.tablesorter.construct});var ts=$.tablesorter;ts.addParser({id:"text",is:function(s){return true;},format:function(s){return $.trim(s.toLowerCase());},type:"text"});ts.addParser({id:"digit",is:function(s,table){var c=table.config;return $.tablesorter.isDigit(s,c);},format:function(s){return $.tablesorter.formatFloat(s);},type:"numeric"});ts.addParser({id:"currency",is:function(s){return/^[£$€?.]/.test(s);},format:function(s){return $.tablesorter.formatFloat(s.replace(new RegExp(/[^0-9.]/g),""));},type:"numeric"});ts.addParser({id:"ipAddress",is:function(s){return/^\d{2,3}[\.]\d{2,3}[\.]\d{2,3}[\.]\d{2,3}$/.test(s);},format:function(s){var a=s.split("."),r="",l=a.length;for(var 
i=0;i<l;i++){var item=a[i];if(item.length==2){r+="0"+item;}else{r+=item;}}return $.tablesorter.formatFloat(r);},type:"numeric"});ts.addParser({id:"url",is:function(s){return/^(https?|ftp|file):\/\/$/.test(s);},format:function(s){return jQuery.trim(s.replace(new RegExp(/(https?|ftp|file):\/\//),''));},type:"text"});ts.addParser({id:"isoDate",is:function(s){return/^\d{4}[\/-]\d{1,2}[\/-]\d{1,2}$/.test(s);},format:function(s){return $.tablesorter.formatFloat((s!="")?new Date(s.replace(new RegExp(/-/g),"/")).getTime():"0");},type:"numeric"});ts.addParser({id:"percent",is:function(s){return/\%$/.test($.trim(s));},format:function(s){return $.tablesorter.formatFloat(s.replace(new RegExp(/%/g),""));},type:"numeric"});ts.addParser({id:"usLongDate",is:function(s){return s.match(new RegExp(/^[A-Za-z]{3,10}\.? [0-9]{1,2}, ([0-9]{4}|'?[0-9]{2}) (([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(AM|PM)))$/));},format:function(s){return $.tablesorter.formatFloat(new Date(s).getTime());},type:"numeric"});ts.addParser({id:"shortDate",is:function(s){return/\d{1,2}[\/\-]\d{1,2}[\/\-]\d{2,4}/.test(s);},format:function(s,table){var c=table.config;s=s.replace(/\-/g,"/");if(c.dateFormat=="us"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/,"$3/$1/$2");}else if(c.dateFormat=="uk"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/,"$3/$2/$1");}else if(c.dateFormat=="dd/mm/yy"||c.dateFormat=="dd-mm-yy"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{2})/,"$1/$2/$3");}return $.tablesorter.formatFloat(new Date(s).getTime());},type:"numeric"});ts.addParser({id:"time",is:function(s){return/^(([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(am|pm)))$/.test(s);},format:function(s){return $.tablesorter.formatFloat(new Date("2000/01/01 "+s).getTime());},type:"numeric"});ts.addParser({id:"metadata",is:function(s){return false;},format:function(s,table,cell){var c=table.config,p=(!c.parserMetadataName)?'sortValue':c.parserMetadataName;return $(cell).metadata()[p];},type:"numeric"});ts.addWidget({id:"zebra",format:function(table){if(table.config.debug){var time=new Date();}$("tr:visible",table.tBodies[0]).filter(':even').removeClass(table.config.widgetZebra.css[1]).addClass(table.config.widgetZebra.css[0]).end().filter(':odd').removeClass(table.config.widgetZebra.css[0]).addClass(table.config.widgetZebra.css[1]);if(table.config.debug){$.tablesorter.benchmark("Applying Zebra widget",time);}}});})(jQuery);
\ No newline at end of file
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/keybd_closed.png b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/keybd_closed.png
new file mode 100644
index 0000000..f2b0418
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/keybd_closed.png
Binary files differ
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/keybd_open.png b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/keybd_open.png
new file mode 100644
index 0000000..a77961d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/keybd_open.png
Binary files differ
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/pyfile.html b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/pyfile.html
new file mode 100644
index 0000000..ee0a3b1
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/pyfile.html
@@ -0,0 +1,87 @@
+<!doctype html PUBLIC "-//W3C//DTD html 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+ <meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
+ {# IE8 rounds line-height incorrectly, and adding this emulateIE7 line makes it right! #}
+ {# http://social.msdn.microsoft.com/Forums/en-US/iewebdevelopment/thread/7684445e-f080-4d8f-8529-132763348e21 #}
+ <meta http-equiv='X-UA-Compatible' content='IE=emulateIE7' />
+ <title>Coverage for {{cu.name|escape}}: {{nums.pc_covered_str}}%</title>
+ <link rel='stylesheet' href='style.css' type='text/css'>
+ <script type='text/javascript' src='jquery-1.4.3.min.js'></script>
+ <script type='text/javascript' src='jquery.hotkeys.js'></script>
+ <script type='text/javascript' src='jquery.isonscreen.js'></script>
+ <script type='text/javascript' src='coverage_html.js'></script>
+ <script type='text/javascript' charset='utf-8'>
+ jQuery(document).ready(coverage.pyfile_ready);
+ </script>
+</head>
+<body id='pyfile'>
+
+<div id='header'>
+ <div class='content'>
+ <h1>Coverage for <b>{{cu.name|escape}}</b> :
+ <span class='pc_cov'>{{nums.pc_covered_str}}%</span>
+ </h1>
+ <img id='keyboard_icon' src='keybd_closed.png'>
+ <h2 class='stats'>
+ {{nums.n_statements}} statements
+ <span class='{{c_run}} shortkey_r' onclick='coverage.toggle_lines(this, "run")'>{{nums.n_executed}} run</span>
+ <span class='{{c_mis}} shortkey_m' onclick='coverage.toggle_lines(this, "mis")'>{{nums.n_missing}} missing</span>
+ <span class='{{c_exc}} shortkey_x' onclick='coverage.toggle_lines(this, "exc")'>{{nums.n_excluded}} excluded</span>
+ {% if arcs %}
+ <span class='{{c_par}} shortkey_p' onclick='coverage.toggle_lines(this, "par")'>{{n_par}} partial</span>
+ {% endif %}
+ </h2>
+ </div>
+</div>
+
+<div class='help_panel'>
+ <img id='panel_icon' src='keybd_open.png'>
+ <p class='legend'>Hot-keys on this page</p>
+ <div>
+ <p class='keyhelp'>
+ <span class='key'>r</span>
+ <span class='key'>m</span>
+ <span class='key'>x</span>
+ <span class='key'>p</span> toggle line displays
+ </p>
+ <p class='keyhelp'>
+ <span class='key'>j</span>
+ <span class='key'>k</span> next/prev highlighted chunk
+ </p>
+ <p class='keyhelp'>
+ <span class='key'>0</span> (zero) top of page
+ </p>
+ <p class='keyhelp'>
+ <span class='key'>1</span> (one) first highlighted chunk
+ </p>
+ </div>
+</div>
+
+<div id='source'>
+ <table cellspacing='0' cellpadding='0'>
+ <tr>
+ <td class='linenos' valign='top'>
+ {% for line in lines %}
+ <p id='n{{line.number}}' class='{{line.class}}'><a href='#n{{line.number}}'>{{line.number}}</a></p>
+ {% endfor %}
+ </td>
+ <td class='text' valign='top'>
+ {% for line in lines %}
+ <p id='t{{line.number}}' class='{{line.class}}'>{% if line.annotate %}<span class='annotate' title='{{line.annotate_title}}'>{{line.annotate}}</span>{% endif %}{{line.html}}<span class='strut'> </span></p>
+ {% endfor %}
+ </td>
+ </tr>
+ </table>
+</div>
+
+<div id='footer'>
+ <div class='content'>
+ <p>
+ <a class='nav' href='index.html'>« index</a> <a class='nav' href='{{__url__}}'>coverage.py v{{__version__}}</a>
+ </p>
+ </div>
+</div>
+
+</body>
+</html>
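
This page template is rendered by the bundled Templite engine (added later in this same import), not by Django or Jinja: `{{...}}` substitutes expressions, `{%...%}` drives loops and conditionals, and `{#...#}` is a comment. The `escape` filter used above is supplied by the HTML reporter at render time. A minimal sketch of the substitution, with hypothetical stand-in objects::

    from coverage.templite import Templite

    class NS(object):
        # Tiny attribute bag standing in for coverage's code-unit objects.
        def __init__(self, **kw):
            self.__dict__.update(kw)

    tmpl = Templite("Coverage for {{cu.name}}: {{nums.pc_covered_str}}%")
    print(tmpl.render({'cu': NS(name='misc.py'), 'nums': NS(pc_covered_str='87')}))
    # Coverage for misc.py: 87%
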
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/style.css b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/style.css
new file mode 100644
index 0000000..c40357b
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/htmlfiles/style.css
@@ -0,0 +1,275 @@
+/* CSS styles for Coverage. */
+/* Page-wide styles */
+html, body, h1, h2, h3, p, td, th {
+ margin: 0;
+ padding: 0;
+ border: 0;
+ outline: 0;
+ font-weight: inherit;
+ font-style: inherit;
+ font-size: 100%;
+ font-family: inherit;
+ vertical-align: baseline;
+ }
+
+/* Set baseline grid to 16 pt. */
+body {
+ font-family: georgia, serif;
+ font-size: 1em;
+ }
+
+html>body {
+ font-size: 16px;
+ }
+
+/* Set base font size to 12/16 */
+p {
+ font-size: .75em; /* 12/16 */
+ line-height: 1.3333em; /* 16/12 */
+ }
+
+table {
+ border-collapse: collapse;
+ }
+
+a.nav {
+ text-decoration: none;
+ color: inherit;
+ }
+a.nav:hover {
+ text-decoration: underline;
+ color: inherit;
+ }
+
+/* Page structure */
+#header {
+ background: #f8f8f8;
+ width: 100%;
+ border-bottom: 1px solid #eee;
+ }
+
+#source {
+ padding: 1em;
+ font-family: "courier new", monospace;
+ }
+
+#indexfile #footer {
+ margin: 1em 3em;
+ }
+
+#pyfile #footer {
+ margin: 1em 1em;
+ }
+
+#footer .content {
+ padding: 0;
+ font-size: 85%;
+ font-family: verdana, sans-serif;
+ color: #666666;
+ font-style: italic;
+ }
+
+#index {
+ margin: 1em 0 0 3em;
+ }
+
+/* Header styles */
+#header .content {
+ padding: 1em 3em;
+ }
+
+h1 {
+ font-size: 1.25em;
+}
+
+h2.stats {
+ margin-top: .5em;
+ font-size: 1em;
+}
+.stats span {
+ border: 1px solid;
+ padding: .1em .25em;
+ margin: 0 .1em;
+ cursor: pointer;
+ border-color: #999 #ccc #ccc #999;
+}
+.stats span.hide_run, .stats span.hide_exc,
+.stats span.hide_mis, .stats span.hide_par,
+.stats span.par.hide_run.hide_par {
+ border-color: #ccc #999 #999 #ccc;
+}
+.stats span.par.hide_run {
+ border-color: #999 #ccc #ccc #999;
+}
+
+/* Help panel */
+#keyboard_icon {
+ float: right;
+ cursor: pointer;
+}
+
+.help_panel {
+ position: absolute;
+ background: #ffc;
+ padding: .5em;
+ border: 1px solid #883;
+ display: none;
+}
+
+#indexfile .help_panel {
+ width: 20em; height: 4em;
+}
+
+#pyfile .help_panel {
+ width: 16em; height: 8em;
+}
+
+.help_panel .legend {
+ font-style: italic;
+ margin-bottom: 1em;
+}
+
+#panel_icon {
+ float: right;
+ cursor: pointer;
+}
+
+.keyhelp {
+ margin: .75em;
+}
+
+.keyhelp .key {
+ border: 1px solid black;
+ border-color: #888 #333 #333 #888;
+ padding: .1em .35em;
+ font-family: monospace;
+ font-weight: bold;
+ background: #eee;
+}
+
+/* Source file styles */
+.linenos p {
+ text-align: right;
+ margin: 0;
+ padding: 0 .5em;
+ color: #999999;
+ font-family: verdana, sans-serif;
+ font-size: .625em; /* 10/16 */
+ line-height: 1.6em; /* 16/10 */
+ }
+.linenos p.highlight {
+ background: #ffdd00;
+ }
+.linenos p a {
+ text-decoration: none;
+ color: #999999;
+ }
+.linenos p a:hover {
+ text-decoration: underline;
+ color: #999999;
+ }
+
+td.text {
+ width: 100%;
+ }
+.text p {
+ margin: 0;
+ padding: 0 0 0 .5em;
+ border-left: 2px solid #ffffff;
+ white-space: nowrap;
+ }
+
+.text p.mis {
+ background: #ffdddd;
+ border-left: 2px solid #ff0000;
+ }
+.text p.run, .text p.run.hide_par {
+ background: #ddffdd;
+ border-left: 2px solid #00ff00;
+ }
+.text p.exc {
+ background: #eeeeee;
+ border-left: 2px solid #808080;
+ }
+.text p.par, .text p.par.hide_run {
+ background: #ffffaa;
+ border-left: 2px solid #eeee99;
+ }
+.text p.hide_run, .text p.hide_exc, .text p.hide_mis, .text p.hide_par,
+.text p.hide_run.hide_par {
+ background: inherit;
+ }
+
+.text span.annotate {
+ font-family: georgia;
+ font-style: italic;
+ color: #666;
+ float: right;
+ padding-right: .5em;
+ }
+.text p.hide_par span.annotate {
+ display: none;
+ }
+
+/* Syntax coloring */
+.text .com {
+ color: green;
+ font-style: italic;
+ line-height: 1px;
+ }
+.text .key {
+ font-weight: bold;
+ line-height: 1px;
+ }
+.text .str {
+ color: #000080;
+ }
+
+/* index styles */
+#index td, #index th {
+ text-align: right;
+ width: 5em;
+ padding: .25em .5em;
+ border-bottom: 1px solid #eee;
+ }
+#index th {
+ font-style: italic;
+ color: #333;
+ border-bottom: 1px solid #ccc;
+ cursor: pointer;
+ }
+#index th:hover {
+ background: #eee;
+ border-bottom: 1px solid #999;
+ }
+#index td.left, #index th.left {
+ padding-left: 0;
+ }
+#index td.right, #index th.right {
+ padding-right: 0;
+ }
+#index th.headerSortDown, #index th.headerSortUp {
+ border-bottom: 1px solid #000;
+ }
+#index td.name, #index th.name {
+ text-align: left;
+ width: auto;
+ }
+#index td.name a {
+ text-decoration: none;
+ color: #000;
+ }
+#index td.name a:hover {
+ text-decoration: underline;
+ color: #000;
+ }
+#index tr.total {
+ }
+#index tr.total td {
+ font-weight: bold;
+ border-top: 1px solid #ccc;
+ border-bottom: none;
+ }
+#index tr.file:hover {
+ background: #eeeeee;
+ }
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/misc.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/misc.py
new file mode 100644
index 0000000..fd9be85
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/misc.py
@@ -0,0 +1,139 @@
+"""Miscellaneous stuff for Coverage."""
+
+import inspect
+from coverage.backward import md5, sorted # pylint: disable=W0622
+from coverage.backward import string_class, to_bytes
+
+
+def nice_pair(pair):
+ """Make a nice string representation of a pair of numbers.
+
+ If the numbers are equal, just return the number; otherwise return the pair
+ with a dash between them, indicating the range.
+
+ """
+ start, end = pair
+ if start == end:
+ return "%d" % start
+ else:
+ return "%d-%d" % (start, end)
+
+
+def format_lines(statements, lines):
+ """Nicely format a list of line numbers.
+
+ Format a list of line numbers for printing by coalescing groups of lines as
+ long as the lines represent consecutive statements. This will coalesce
+ even if there are gaps between statements.
+
+ For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
+ `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".
+
+ """
+ pairs = []
+ i = 0
+ j = 0
+ start = None
+ while i < len(statements) and j < len(lines):
+ if statements[i] == lines[j]:
+ if start is None:
+ start = lines[j]
+ end = lines[j]
+ j += 1
+ elif start:
+ pairs.append((start, end))
+ start = None
+ i += 1
+ if start:
+ pairs.append((start, end))
+ ret = ', '.join(map(nice_pair, pairs))
+ return ret
+
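
A quick doctest-style check of the coalescing described above (a minimal sketch, assuming the module imports as `coverage.misc`)::

    from coverage.misc import format_lines, nice_pair

    assert nice_pair((5, 5)) == "5"
    assert nice_pair((1, 3)) == "1-3"
    statements = [1, 2, 3, 4, 5, 10, 11, 12, 13, 14]
    lines = [1, 2, 5, 10, 11, 13, 14]
    assert format_lines(statements, lines) == "1-2, 5-11, 13-14"
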
+
+def expensive(fn):
+ """A decorator to cache the result of an expensive operation.
+
+ Only applies to methods with no arguments.
+
+ """
+ attr = "_cache_" + fn.__name__
+ def _wrapped(self):
+ """Inner fn that checks the cache."""
+ if not hasattr(self, attr):
+ setattr(self, attr, fn(self))
+ return getattr(self, attr)
+ return _wrapped
+
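
A sketch of the decorator in use (hypothetical class; the cached value is stored on the instance as `_cache_<name>`)::

    class Stats(object):
        computed = 0
        def total(self):
            Stats.computed += 1
            return 42
        total = expensive(total)   # equivalent to decorating with @expensive

    s = Stats()
    assert s.total() == 42 and s.total() == 42
    assert Stats.computed == 1     # the body ran once; the result was cached
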
+
+def bool_or_none(b):
+ """Return bool(b), but preserve None."""
+ if b is None:
+ return None
+ else:
+ return bool(b)
+
+
+def join_regex(regexes):
+ """Combine a list of regexes into one that matches any of them."""
+ if len(regexes) > 1:
+ return "(" + ")|(".join(regexes) + ")"
+ elif regexes:
+ return regexes[0]
+ else:
+ return ""
+
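
For example::

    import re
    combined = join_regex([r"#\s*pragma: no cover", r"if 0:"])
    # combined == r"(#\s*pragma: no cover)|(if 0:)"
    assert re.search(combined, "x = 1   # pragma: no cover")
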
+
+class Hasher(object):
+ """Hashes Python data into md5."""
+ def __init__(self):
+ self.md5 = md5()
+
+ def update(self, v):
+ """Add `v` to the hash, recursively if needed."""
+ self.md5.update(to_bytes(str(type(v))))
+ if isinstance(v, string_class):
+ self.md5.update(to_bytes(v))
+ elif isinstance(v, (int, float)):
+ self.update(str(v))
+ elif isinstance(v, (tuple, list)):
+ for e in v:
+ self.update(e)
+ elif isinstance(v, dict):
+ keys = v.keys()
+ for k in sorted(keys):
+ self.update(k)
+ self.update(v[k])
+ else:
+ for k in dir(v):
+ if k.startswith('__'):
+ continue
+ a = getattr(v, k)
+ if inspect.isroutine(a):
+ continue
+ self.update(k)
+ self.update(a)
+
+ def digest(self):
+ """Retrieve the digest of the hash."""
+ return self.md5.digest()
+
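
A sketch of why the recursive update matters: logically equal structures hash equally, independent of dict key order::

    h1, h2 = Hasher(), Hasher()
    h1.update({"b": 2, "a": [1, 2]})
    h2.update({"a": [1, 2], "b": 2})
    assert h1.digest() == h2.digest()
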
+
+class CoverageException(Exception):
+ """An exception specific to Coverage."""
+ pass
+
+class NoSource(CoverageException):
+ """We couldn't find the source for a module."""
+ pass
+
+class NotPython(CoverageException):
+ """A source file turned out not to be parsable Python."""
+ pass
+
+class ExceptionDuringRun(CoverageException):
+ """An exception happened while running customer code.
+
+ Construct it with three arguments, the values from `sys.exc_info`.
+
+ """
+ pass
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/parser.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/parser.py
new file mode 100644
index 0000000..d380eda
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/parser.py
@@ -0,0 +1,650 @@
+"""Code parsing for Coverage."""
+
+import opcode, re, sys, token, tokenize
+
+from coverage.backward import set, sorted, StringIO # pylint: disable=W0622
+from coverage.backward import open_source
+from coverage.bytecode import ByteCodes, CodeObjects
+from coverage.misc import nice_pair, expensive, join_regex
+from coverage.misc import CoverageException, NoSource, NotPython
+
+
+class CodeParser(object):
+ """Parse code to find executable lines, excluded lines, etc."""
+
+ def __init__(self, text=None, filename=None, exclude=None):
+ """
+ Source can be provided as `text`, the text itself, or `filename`, from
+ which the text will be read. Excluded lines are those that match
+ `exclude`, a regex.
+
+ """
+ assert text or filename, "CodeParser needs either text or filename"
+ self.filename = filename or "<code>"
+ self.text = text
+ if not self.text:
+ try:
+ sourcef = open_source(self.filename)
+ try:
+ self.text = sourcef.read()
+ finally:
+ sourcef.close()
+ except IOError:
+ _, err, _ = sys.exc_info()
+ raise NoSource(
+ "No source for code: %r: %s" % (self.filename, err)
+ )
+
+ self.exclude = exclude
+
+ self.show_tokens = False
+
+ # The text lines of the parsed code.
+ self.lines = self.text.split('\n')
+
+ # The line numbers of excluded lines of code.
+ self.excluded = set()
+
+ # The line numbers of docstring lines.
+ self.docstrings = set()
+
+ # The line numbers of class definitions.
+ self.classdefs = set()
+
+ # A dict mapping line numbers to (lo,hi) for multi-line statements.
+ self.multiline = {}
+
+ # The line numbers that start statements.
+ self.statement_starts = set()
+
+ # Lazily-created ByteParser
+ self._byte_parser = None
+
+ def _get_byte_parser(self):
+ """Create a ByteParser on demand."""
+ if not self._byte_parser:
+ self._byte_parser = \
+ ByteParser(text=self.text, filename=self.filename)
+ return self._byte_parser
+ byte_parser = property(_get_byte_parser)
+
+ def lines_matching(self, *regexes):
+ """Find the lines matching one of a list of regexes.
+
+ Returns a set of line numbers, the lines that contain a match for one
+ of the regexes in `regexes`. The entire line needn't match, just a
+ part of it.
+
+ """
+ regex_c = re.compile(join_regex(regexes))
+ matches = set()
+ for i, ltext in enumerate(self.lines):
+ if regex_c.search(ltext):
+ matches.add(i+1)
+ return matches
+
+ def _raw_parse(self):
+ """Parse the source to find the interesting facts about its lines.
+
+ A handful of member fields are updated.
+
+ """
+ # Find lines which match an exclusion pattern.
+ if self.exclude:
+ self.excluded = self.lines_matching(self.exclude)
+
+ # Tokenize, to find excluded suites, to find docstrings, and to find
+ # multi-line statements.
+ indent = 0
+ exclude_indent = 0
+ excluding = False
+ prev_toktype = token.INDENT
+ first_line = None
+ empty = True
+
+ tokgen = tokenize.generate_tokens(StringIO(self.text).readline)
+ for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
+ if self.show_tokens: # pragma: no cover
+ print("%10s %5s %-20r %r" % (
+ tokenize.tok_name.get(toktype, toktype),
+ nice_pair((slineno, elineno)), ttext, ltext
+ ))
+ if toktype == token.INDENT:
+ indent += 1
+ elif toktype == token.DEDENT:
+ indent -= 1
+ elif toktype == token.NAME and ttext == 'class':
+ # Class definitions look like branches in the byte code, so
+ # we need to exclude them. The simplest way is to note the
+ # lines with the 'class' keyword.
+ self.classdefs.add(slineno)
+ elif toktype == token.OP and ttext == ':':
+ if not excluding and elineno in self.excluded:
+ # Start excluding a suite. We trigger off of the colon
+ # token so that the #pragma comment will be recognized on
+ # the same line as the colon.
+ exclude_indent = indent
+ excluding = True
+ elif toktype == token.STRING and prev_toktype == token.INDENT:
+ # Strings that are first on an indented line are docstrings.
+ # (a trick from trace.py in the stdlib.) This works for
+ # 99.9999% of cases. For the rest (!) see:
+ # http://stackoverflow.com/questions/1769332/x/1769794#1769794
+ for i in range(slineno, elineno+1):
+ self.docstrings.add(i)
+ elif toktype == token.NEWLINE:
+ if first_line is not None and elineno != first_line:
+ # We're at the end of a line, and we've ended on a
+ # different line than the first line of the statement,
+ # so record a multi-line range.
+ rng = (first_line, elineno)
+ for l in range(first_line, elineno+1):
+ self.multiline[l] = rng
+ first_line = None
+
+ if ttext.strip() and toktype != tokenize.COMMENT:
+ # A non-whitespace token.
+ empty = False
+ if first_line is None:
+ # The token is not whitespace, and is the first in a
+ # statement.
+ first_line = slineno
+ # Check whether to end an excluded suite.
+ if excluding and indent <= exclude_indent:
+ excluding = False
+ if excluding:
+ self.excluded.add(elineno)
+
+ prev_toktype = toktype
+
+ # Find the starts of the executable statements.
+ if not empty:
+ self.statement_starts.update(self.byte_parser._find_statements())
+
+ def first_line(self, line):
+ """Return the first line number of the statement including `line`."""
+ rng = self.multiline.get(line)
+ if rng:
+ first_line = rng[0]
+ else:
+ first_line = line
+ return first_line
+
+ def first_lines(self, lines, ignore=None):
+ """Map the line numbers in `lines` to the correct first line of the
+ statement.
+
+ Skip any line mentioned in `ignore`.
+
+ Returns a sorted list of the first lines.
+
+ """
+ ignore = ignore or []
+ lset = set()
+ for l in lines:
+ if l in ignore:
+ continue
+ new_l = self.first_line(l)
+ if new_l not in ignore:
+ lset.add(new_l)
+ return sorted(lset)
+
+ def parse_source(self):
+ """Parse source text to find executable lines, excluded lines, etc.
+
+ Return values are 1) a sorted list of executable line numbers, and
+ 2) a sorted list of excluded line numbers.
+
+ Reported line numbers are normalized to the first line of multi-line
+ statements.
+
+ """
+ self._raw_parse()
+
+ excluded_lines = self.first_lines(self.excluded)
+ ignore = excluded_lines + list(self.docstrings)
+ lines = self.first_lines(self.statement_starts, ignore)
+
+ return lines, excluded_lines
+
+ def arcs(self):
+ """Get information about the arcs available in the code.
+
+ Returns a sorted list of line number pairs. Line numbers have been
+ normalized to the first line of multiline statements.
+
+ """
+ all_arcs = []
+ for l1, l2 in self.byte_parser._all_arcs():
+ fl1 = self.first_line(l1)
+ fl2 = self.first_line(l2)
+ if fl1 != fl2:
+ all_arcs.append((fl1, fl2))
+ return sorted(all_arcs)
+ arcs = expensive(arcs)
+
+ def exit_counts(self):
+ """Get a mapping from line numbers to count of exits from that line.
+
+ Excluded lines are excluded.
+
+ """
+ excluded_lines = self.first_lines(self.excluded)
+ exit_counts = {}
+ for l1, l2 in self.arcs():
+ if l1 < 0:
+ # Don't ever report -1 as a line number
+ continue
+ if l1 in excluded_lines:
+ # Don't report excluded lines as line numbers.
+ continue
+ if l2 in excluded_lines:
+ # Arcs to excluded lines shouldn't count.
+ continue
+ if l1 not in exit_counts:
+ exit_counts[l1] = 0
+ exit_counts[l1] += 1
+
+ # Class definitions have one extra exit, so remove one for each:
+ for l in self.classdefs:
+ # Ensure key is there: classdefs can include excluded lines.
+ if l in exit_counts:
+ exit_counts[l] -= 1
+
+ return exit_counts
+ exit_counts = expensive(exit_counts)
+
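
To make the parse flow concrete, a minimal sketch (assuming the package imports as `coverage`) of excluding a suite via the colon trigger described above::

    from coverage.parser import CodeParser

    SRC = (
        "def f(x):\n"
        "    if x:               # pragma: no cover\n"
        "        return 1\n"
        "    return 2\n"
    )
    p = CodeParser(text=SRC, exclude=r"#\s*pragma: no cover")
    lines, excluded = p.parse_source()
    # lines == [1, 4]: the excluded `if` suite is reported separately,
    # normalized to statement starts, as excluded == [2, 3]
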
+
+## Opcodes that guide the ByteParser.
+
+def _opcode(name):
+ """Return the opcode by name from the opcode module."""
+ return opcode.opmap[name]
+
+def _opcode_set(*names):
+ """Return a set of opcodes by the names in `names`."""
+ s = set()
+ for name in names:
+ try:
+ s.add(_opcode(name))
+ except KeyError:
+ pass
+ return s
+
+# Opcodes that leave the code object.
+OPS_CODE_END = _opcode_set('RETURN_VALUE')
+
+# Opcodes that unconditionally end the code chunk.
+OPS_CHUNK_END = _opcode_set(
+ 'JUMP_ABSOLUTE', 'JUMP_FORWARD', 'RETURN_VALUE', 'RAISE_VARARGS',
+ 'BREAK_LOOP', 'CONTINUE_LOOP',
+ )
+
+# Opcodes that unconditionally begin a new code chunk. By starting new chunks
+# with unconditional jump instructions, we neatly deal with jumps to jumps
+# properly.
+OPS_CHUNK_BEGIN = _opcode_set('JUMP_ABSOLUTE', 'JUMP_FORWARD')
+
+# Opcodes that push a block on the block stack.
+OPS_PUSH_BLOCK = _opcode_set(
+ 'SETUP_LOOP', 'SETUP_EXCEPT', 'SETUP_FINALLY', 'SETUP_WITH'
+ )
+
+# Block types for exception handling.
+OPS_EXCEPT_BLOCKS = _opcode_set('SETUP_EXCEPT', 'SETUP_FINALLY')
+
+# Opcodes that pop a block from the block stack.
+OPS_POP_BLOCK = _opcode_set('POP_BLOCK')
+
+# Opcodes that have a jump destination, but aren't really a jump.
+OPS_NO_JUMP = OPS_PUSH_BLOCK
+
+# Individual opcodes we need below.
+OP_BREAK_LOOP = _opcode('BREAK_LOOP')
+OP_END_FINALLY = _opcode('END_FINALLY')
+OP_COMPARE_OP = _opcode('COMPARE_OP')
+COMPARE_EXCEPTION = 10 # just have to get this const from the code.
+OP_LOAD_CONST = _opcode('LOAD_CONST')
+OP_RETURN_VALUE = _opcode('RETURN_VALUE')
+
+
+class ByteParser(object):
+ """Parse byte codes to understand the structure of code."""
+
+ def __init__(self, code=None, text=None, filename=None):
+ if code:
+ self.code = code
+ self.text = text
+ else:
+ if not text:
+ assert filename, "If no code or text, need a filename"
+ sourcef = open_source(filename)
+ try:
+ text = sourcef.read()
+ finally:
+ sourcef.close()
+ self.text = text
+
+ try:
+ # Python 2.3 and 2.4 don't like partial last lines, so be sure
+ # the text ends nicely for them.
+ self.code = compile(text + '\n', filename, "exec")
+ except SyntaxError:
+ _, synerr, _ = sys.exc_info()
+ raise NotPython(
+ "Couldn't parse '%s' as Python source: '%s' at line %d" %
+ (filename, synerr.msg, synerr.lineno)
+ )
+
+ # Alternative Python implementations don't always provide all the
+ # attributes on code objects that we need to do the analysis.
+ for attr in ['co_lnotab', 'co_firstlineno', 'co_consts', 'co_code']:
+ if not hasattr(self.code, attr):
+ raise CoverageException(
+ "This implementation of Python doesn't support code "
+ "analysis.\n"
+ "Run coverage.py under CPython for this command."
+ )
+
+ def child_parsers(self):
+ """Iterate over all the code objects nested within this one.
+
+ The iteration includes `self` as its first value.
+
+ """
+ children = CodeObjects(self.code)
+ return [ByteParser(code=c, text=self.text) for c in children]
+
+ # Getting numbers from the lnotab value changed in Py3.0.
+ if sys.version_info >= (3, 0):
+ def _lnotab_increments(self, lnotab):
+ """Return a list of ints from the lnotab bytes in 3.x"""
+ return list(lnotab)
+ else:
+ def _lnotab_increments(self, lnotab):
+ """Return a list of ints from the lnotab string in 2.x"""
+ return [ord(c) for c in lnotab]
+
+ def _bytes_lines(self):
+ """Map byte offsets to line numbers in `code`.
+
+ Uses co_lnotab described in Python/compile.c to map byte offsets to
+ line numbers. Returns a list: [(b0, l0), (b1, l1), ...]
+
+ """
+ # Adapted from dis.py in the standard library.
+ byte_increments = self._lnotab_increments(self.code.co_lnotab[0::2])
+ line_increments = self._lnotab_increments(self.code.co_lnotab[1::2])
+
+ bytes_lines = []
+ last_line_num = None
+ line_num = self.code.co_firstlineno
+ byte_num = 0
+ for byte_incr, line_incr in zip(byte_increments, line_increments):
+ if byte_incr:
+ if line_num != last_line_num:
+ bytes_lines.append((byte_num, line_num))
+ last_line_num = line_num
+ byte_num += byte_incr
+ line_num += line_incr
+ if line_num != last_line_num:
+ bytes_lines.append((byte_num, line_num))
+ return bytes_lines
+
+ def _find_statements(self):
+ """Find the statements in `self.code`.
+
+ Return a set of line numbers that start statements. Recurses into all
+ code objects reachable from `self.code`.
+
+ """
+ stmts = set()
+ for bp in self.child_parsers():
+ # Get all of the lineno information from this code.
+ for _, l in bp._bytes_lines():
+ stmts.add(l)
+ return stmts
+
+ def _split_into_chunks(self):
+ """Split the code object into a list of `Chunk` objects.
+
+ Each chunk is only entered at its first instruction, though there can
+ be many exits from a chunk.
+
+ Returns a list of `Chunk` objects.
+
+ """
+
+ # The list of chunks so far, and the one we're working on.
+ chunks = []
+ chunk = None
+ bytes_lines_map = dict(self._bytes_lines())
+
+ # The block stack: loops and try blocks get pushed here for the
+ # implicit jumps that can occur.
+ # Each entry is a tuple: (block type, destination)
+ block_stack = []
+
+ # Some op codes are followed by branches that should be ignored. This
+ # is a count of how many ignores are left.
+ ignore_branch = 0
+
+ # We have to handle the last two bytecodes specially.
+ ult = penult = None
+
+ for bc in ByteCodes(self.code.co_code):
+ # Maybe have to start a new chunk
+ if bc.offset in bytes_lines_map:
+ # Start a new chunk for each source line number.
+ if chunk:
+ chunk.exits.add(bc.offset)
+ chunk = Chunk(bc.offset, bytes_lines_map[bc.offset])
+ chunks.append(chunk)
+ elif bc.op in OPS_CHUNK_BEGIN:
+ # Jumps deserve their own unnumbered chunk. This fixes
+ # problems with jumps to jumps getting confused.
+ if chunk:
+ chunk.exits.add(bc.offset)
+ chunk = Chunk(bc.offset)
+ chunks.append(chunk)
+
+ if not chunk:
+ chunk = Chunk(bc.offset)
+ chunks.append(chunk)
+
+ # Look at the opcode
+ if bc.jump_to >= 0 and bc.op not in OPS_NO_JUMP:
+ if ignore_branch:
+ # Someone earlier wanted us to ignore this branch.
+ ignore_branch -= 1
+ else:
+ # The opcode has a jump, it's an exit for this chunk.
+ chunk.exits.add(bc.jump_to)
+
+ if bc.op in OPS_CODE_END:
+ # The opcode can exit the code object.
+ chunk.exits.add(-self.code.co_firstlineno)
+ if bc.op in OPS_PUSH_BLOCK:
+ # The opcode adds a block to the block_stack.
+ block_stack.append((bc.op, bc.jump_to))
+ if bc.op in OPS_POP_BLOCK:
+ # The opcode pops a block from the block stack.
+ block_stack.pop()
+ if bc.op in OPS_CHUNK_END:
+ # This opcode forces the end of the chunk.
+ if bc.op == OP_BREAK_LOOP:
+ # A break is implicit: jump where the top of the
+ # block_stack points.
+ chunk.exits.add(block_stack[-1][1])
+ chunk = None
+ if bc.op == OP_END_FINALLY:
+ if block_stack:
+ # A break that goes through a finally will jump to whatever
+ # block is on top of the stack.
+ chunk.exits.add(block_stack[-1][1])
+ # For the finally clause we need to find the closest exception
+ # block, and use its jump target as an exit.
+ for iblock in range(len(block_stack)-1, -1, -1):
+ if block_stack[iblock][0] in OPS_EXCEPT_BLOCKS:
+ chunk.exits.add(block_stack[iblock][1])
+ break
+ if bc.op == OP_COMPARE_OP and bc.arg == COMPARE_EXCEPTION:
+ # This is an except clause. We want to overlook the next
+ # branch, so that except clauses don't count as branches.
+ ignore_branch += 1
+
+ penult = ult
+ ult = bc
+
+ if chunks:
+ # The last two bytecodes could be a dummy "return None" that
+ # shouldn't be counted as real code. Every Python code object seems
+ # to end with a return, and a "return None" is inserted if there
+ # isn't an explicit return in the source.
+ if ult and penult:
+ if penult.op == OP_LOAD_CONST and ult.op == OP_RETURN_VALUE:
+ if self.code.co_consts[penult.arg] is None:
+ # This is "return None", but is it dummy? A real line
+ # would be a last chunk all by itself.
+ if chunks[-1].byte != penult.offset:
+ ex = -self.code.co_firstlineno
+ # Split the last chunk
+ last_chunk = chunks[-1]
+ last_chunk.exits.remove(ex)
+ last_chunk.exits.add(penult.offset)
+ chunk = Chunk(penult.offset)
+ chunk.exits.add(ex)
+ chunks.append(chunk)
+
+ # Give all the chunks a length.
+ chunks[-1].length = bc.next_offset - chunks[-1].byte
+ for i in range(len(chunks)-1):
+ chunks[i].length = chunks[i+1].byte - chunks[i].byte
+
+ return chunks
+
+ def _arcs(self):
+ """Find the executable arcs in the code.
+
+ Returns a set of pairs, (from,to). From and to are integer line
+ numbers. If from is < 0, then the arc is an entrance into the code
+ object. If to is < 0, the arc is an exit from the code object.
+
+ """
+ chunks = self._split_into_chunks()
+
+ # A map from byte offsets to chunks jumped into.
+ byte_chunks = dict([(c.byte, c) for c in chunks])
+
+ # Build a map from byte offsets to actual lines reached.
+ byte_lines = {}
+ bytes_to_add = set([c.byte for c in chunks])
+
+ while bytes_to_add:
+ byte_to_add = bytes_to_add.pop()
+ if byte_to_add in byte_lines or byte_to_add < 0:
+ continue
+
+ # Which lines does this chunk lead to?
+ bytes_considered = set()
+ bytes_to_consider = [byte_to_add]
+ lines = set()
+
+ while bytes_to_consider:
+ byte = bytes_to_consider.pop()
+ bytes_considered.add(byte)
+
+ # Find chunk for byte
+ try:
+ ch = byte_chunks[byte]
+ except KeyError:
+ for ch in chunks:
+ if ch.byte <= byte < ch.byte+ch.length:
+ break
+ else:
+ # No chunk for this byte!
+ raise Exception("Couldn't find chunk @ %d" % byte)
+ byte_chunks[byte] = ch
+
+ if ch.line:
+ lines.add(ch.line)
+ else:
+ for ex in ch.exits:
+ if ex < 0:
+ lines.add(ex)
+ elif ex not in bytes_considered:
+ bytes_to_consider.append(ex)
+
+ bytes_to_add.update(ch.exits)
+
+ byte_lines[byte_to_add] = lines
+
+ # Figure out for each chunk where the exits go.
+ arcs = set()
+ for chunk in chunks:
+ if chunk.line:
+ for ex in chunk.exits:
+ if ex < 0:
+ exit_lines = [ex]
+ else:
+ exit_lines = byte_lines[ex]
+ for exit_line in exit_lines:
+ if chunk.line != exit_line:
+ arcs.add((chunk.line, exit_line))
+ for line in byte_lines[0]:
+ arcs.add((-1, line))
+
+ return arcs
+
+ def _all_chunks(self):
+ """Returns a list of `Chunk` objects for this code and its children.
+
+ See `_split_into_chunks` for details.
+
+ """
+ chunks = []
+ for bp in self.child_parsers():
+ chunks.extend(bp._split_into_chunks())
+
+ return chunks
+
+ def _all_arcs(self):
+ """Get the set of all arcs in this code object and its children.
+
+ See `_arcs` for details.
+
+ """
+ arcs = set()
+ for bp in self.child_parsers():
+ arcs.update(bp._arcs())
+
+ return arcs
+
+
+class Chunk(object):
+ """A sequence of bytecodes with a single entrance.
+
+ To analyze byte code, we have to divide it into chunks, sequences of byte
+ codes such that each basic block has only one entrance, the first
+ instruction in the block.
+
+ This is almost the CS concept of `basic block`_, except that we're willing
+ to have many exits from a chunk, and "basic block" is a more cumbersome
+ term.
+
+ .. _basic block: http://en.wikipedia.org/wiki/Basic_block
+
+ An exit < 0 means the chunk can leave the code (return). The exit is
+ the negative of the starting line number of the code block.
+
+ """
+ def __init__(self, byte, line=0):
+ self.byte = byte
+ self.line = line
+ self.length = 0
+ self.exits = set()
+
+ def __repr__(self):
+ return "<%d+%d @%d %r>" % (
+ self.byte, self.length, self.line, list(self.exits)
+ )
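
The lnotab decoding in `_bytes_lines` mirrors the standard library's; a small comparison sketch (byte offsets vary across Python versions)::

    import dis
    from coverage.parser import ByteParser

    bp = ByteParser(text="a = 1\nb = 2\n", filename="<demo>")
    print(bp._bytes_lines())                  # e.g. [(0, 1), (6, 2)]
    print(list(dis.findlinestarts(bp.code)))  # the stdlib's equivalent mapping
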
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/phystokens.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/phystokens.py
new file mode 100644
index 0000000..fc4f2c9
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/phystokens.py
@@ -0,0 +1,108 @@
+"""Better tokenizing for coverage.py."""
+
+import keyword, re, token, tokenize
+from coverage.backward import StringIO # pylint: disable=W0622
+
+def phys_tokens(toks):
+ """Return all physical tokens, even line continuations.
+
+ tokenize.generate_tokens() doesn't return a token for the backslash that
+ continues lines. This wrapper provides those tokens so that we can
+ re-create a faithful representation of the original source.
+
+ Returns the same values as generate_tokens().
+
+ """
+ last_line = None
+ last_lineno = -1
+ last_ttype = None
+ for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
+ if last_lineno != elineno:
+ if last_line and last_line[-2:] == "\\\n":
+ # We are at the beginning of a new line, and the last line
+ # ended with a backslash. We probably have to inject a
+ # backslash token into the stream. Unfortunately, there's more
+ # to figure out. This code::
+ #
+ # usage = """\
+ # HEY THERE
+ # """
+ #
+ # triggers this condition, but the token text is::
+ #
+ # '"""\\\nHEY THERE\n"""'
+ #
+ # so we need to figure out if the backslash is already in the
+ # string token or not.
+ inject_backslash = True
+ if last_ttype == tokenize.COMMENT:
+ # Comments like this \
+ # should never result in a new token.
+ inject_backslash = False
+ elif ttype == token.STRING:
+ if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\':
+ # It's a multiline string and the first line ends with
+ # a backslash, so we don't need to inject another.
+ inject_backslash = False
+ if inject_backslash:
+ # Figure out what column the backslash is in.
+ ccol = len(last_line.split("\n")[-2]) - 1
+ # Yield the token, with a fake token type.
+ yield (
+ 99999, "\\\n",
+ (slineno, ccol), (slineno, ccol+2),
+ last_line
+ )
+ last_line = ltext
+ last_ttype = ttype
+ yield ttype, ttext, (slineno, scol), (elineno, ecol), ltext
+ last_lineno = elineno
+
+
+def source_token_lines(source):
+ """Generate a series of lines, one for each line in `source`.
+
+ Each line is a list of pairs, each pair is a token::
+
+ [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]
+
+ Each pair has a token class, and the token text.
+
+ If you concatenate all the token texts, and then join them with newlines,
+ you should have your original `source` back, with two differences:
+ trailing whitespace is not preserved, and a final line with no newline
+ is indistinguishable from a final line with a newline.
+
+ """
+ ws_tokens = [token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL]
+ line = []
+ col = 0
+ source = source.expandtabs(8).replace('\r\n', '\n')
+ tokgen = tokenize.generate_tokens(StringIO(source).readline)
+ for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen):
+ mark_start = True
+ for part in re.split('(\n)', ttext):
+ if part == '\n':
+ yield line
+ line = []
+ col = 0
+ mark_end = False
+ elif part == '':
+ mark_end = False
+ elif ttype in ws_tokens:
+ mark_end = False
+ else:
+ if mark_start and scol > col:
+ line.append(("ws", " " * (scol - col)))
+ mark_start = False
+ tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
+ if ttype == token.NAME and keyword.iskeyword(ttext):
+ tok_class = "key"
+ line.append((tok_class, part))
+ mark_end = True
+ scol = 0
+ if mark_end:
+ col = ecol
+
+ if line:
+ yield line
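
For example, tokenizing a two-line snippet yields one list of (class, text) pairs per physical line::

    from coverage.phystokens import source_token_lines

    for line in source_token_lines("def hello():\n    return 1\n"):
        print(line)
    # [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ('op', ')'), ('op', ':')]
    # [('ws', '    '), ('key', 'return'), ('ws', ' '), ('num', '1')]
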
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/report.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/report.py
new file mode 100644
index 0000000..6c5510a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/report.py
@@ -0,0 +1,89 @@
+"""Reporter foundation for Coverage."""
+
+import fnmatch, os
+from coverage.codeunit import code_unit_factory
+from coverage.misc import CoverageException, NoSource, NotPython
+
+class Reporter(object):
+ """A base class for all reporters."""
+
+ def __init__(self, coverage, ignore_errors=False):
+ """Create a reporter.
+
+ `coverage` is the coverage instance. `ignore_errors` controls how
+ skittish the reporter will be during file processing.
+
+ """
+ self.coverage = coverage
+ self.ignore_errors = ignore_errors
+
+ # The code units to report on. Set by find_code_units.
+ self.code_units = []
+
+ # The directory into which to place the report, used by some derived
+ # classes.
+ self.directory = None
+
+ def find_code_units(self, morfs, config):
+ """Find the code units we'll report on.
+
+ `morfs` is a list of modules or filenames. `config` is a
+ CoverageConfig instance.
+
+ """
+ morfs = morfs or self.coverage.data.measured_files()
+ file_locator = self.coverage.file_locator
+ self.code_units = code_unit_factory(morfs, file_locator)
+
+ if config.include:
+ patterns = [file_locator.abs_file(p) for p in config.include]
+ filtered = []
+ for cu in self.code_units:
+ for pattern in patterns:
+ if fnmatch.fnmatch(cu.filename, pattern):
+ filtered.append(cu)
+ break
+ self.code_units = filtered
+
+ if config.omit:
+ patterns = [file_locator.abs_file(p) for p in config.omit]
+ filtered = []
+ for cu in self.code_units:
+ for pattern in patterns:
+ if fnmatch.fnmatch(cu.filename, pattern):
+ break
+ else:
+ filtered.append(cu)
+ self.code_units = filtered
+
+ self.code_units.sort()
+
+ def report_files(self, report_fn, morfs, config, directory=None):
+ """Run a reporting function on a number of morfs.
+
+ `report_fn` is called for each relative morf in `morfs`. It is called
+ as::
+
+ report_fn(code_unit, analysis)
+
+ where `code_unit` is the `CodeUnit` for the morf, and `analysis` is
+ the `Analysis` for the morf.
+
+ `config` is a CoverageConfig instance.
+
+ """
+ self.find_code_units(morfs, config)
+
+ if not self.code_units:
+ raise CoverageException("No data to report.")
+
+ self.directory = directory
+ if self.directory and not os.path.exists(self.directory):
+ os.makedirs(self.directory)
+
+ for cu in self.code_units:
+ try:
+ report_fn(cu, self.coverage._analyze(cu))
+ except (NoSource, NotPython):
+ if not self.ignore_errors:
+ raise
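
The include/omit passes above reduce to `fnmatch` tests; the same logic stands alone as (hypothetical file names)::

    import fnmatch

    files = ["proj/app.py", "proj/tests/test_app.py", "site-packages/six.py"]
    include, omit = ["proj/*"], ["proj/tests/*"]

    kept = [f for f in files if any(fnmatch.fnmatch(f, p) for p in include)]
    kept = [f for f in kept if not any(fnmatch.fnmatch(f, p) for p in omit)]
    assert kept == ["proj/app.py"]
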
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/results.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/results.py
new file mode 100644
index 0000000..d7e2a9d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/results.py
@@ -0,0 +1,247 @@
+"""Results of coverage measurement."""
+
+import os
+
+from coverage.backward import set, sorted # pylint: disable=W0622
+from coverage.misc import format_lines, join_regex, NoSource
+from coverage.parser import CodeParser
+
+
+class Analysis(object):
+ """The results of analyzing a code unit."""
+
+ def __init__(self, cov, code_unit):
+ self.coverage = cov
+ self.code_unit = code_unit
+
+ self.filename = self.code_unit.filename
+ ext = os.path.splitext(self.filename)[1]
+ source = None
+ if ext == '.py':
+ if not os.path.exists(self.filename):
+ source = self.coverage.file_locator.get_zip_data(self.filename)
+ if not source:
+ raise NoSource("No source for code: %r" % self.filename)
+
+ self.parser = CodeParser(
+ text=source, filename=self.filename,
+ exclude=self.coverage._exclude_regex('exclude')
+ )
+ self.statements, self.excluded = self.parser.parse_source()
+
+ # Identify missing statements.
+ executed = self.coverage.data.executed_lines(self.filename)
+ exec1 = self.parser.first_lines(executed)
+ self.missing = sorted(set(self.statements) - set(exec1))
+
+ if self.coverage.data.has_arcs():
+ self.no_branch = self.parser.lines_matching(
+ join_regex(self.coverage.config.partial_list),
+ join_regex(self.coverage.config.partial_always_list)
+ )
+ n_branches = self.total_branches()
+ mba = self.missing_branch_arcs()
+ n_missing_branches = sum(
+ [len(v) for k,v in mba.items() if k not in self.missing]
+ )
+ else:
+ n_branches = n_missing_branches = 0
+ self.no_branch = set()
+
+ self.numbers = Numbers(
+ n_files=1,
+ n_statements=len(self.statements),
+ n_excluded=len(self.excluded),
+ n_missing=len(self.missing),
+ n_branches=n_branches,
+ n_missing_branches=n_missing_branches,
+ )
+
+ def missing_formatted(self):
+ """The missing line numbers, formatted nicely.
+
+ Returns a string like "1-2, 5-11, 13-14".
+
+ """
+ return format_lines(self.statements, self.missing)
+
+ def has_arcs(self):
+ """Were arcs measured in this result?"""
+ return self.coverage.data.has_arcs()
+
+ def arc_possibilities(self):
+ """Returns a sorted list of the arcs in the code."""
+ arcs = self.parser.arcs()
+ return arcs
+
+ def arcs_executed(self):
+ """Returns a sorted list of the arcs actually executed in the code."""
+ executed = self.coverage.data.executed_arcs(self.filename)
+ m2fl = self.parser.first_line
+ executed = [(m2fl(l1), m2fl(l2)) for (l1,l2) in executed]
+ return sorted(executed)
+
+ def arcs_missing(self):
+ """Returns a sorted list of the arcs in the code not executed."""
+ possible = self.arc_possibilities()
+ executed = self.arcs_executed()
+ missing = [
+ p for p in possible
+ if p not in executed
+ and p[0] not in self.no_branch
+ ]
+ return sorted(missing)
+
+ def arcs_unpredicted(self):
+ """Returns a sorted list of the executed arcs missing from the code."""
+ possible = self.arc_possibilities()
+ executed = self.arcs_executed()
+ # Exclude arcs here which connect a line to itself. They can occur
+ # in executed data in some cases. This is where they can cause
+ # trouble, and here is where it's the least burden to remove them.
+ unpredicted = [
+ e for e in executed
+ if e not in possible
+ and e[0] != e[1]
+ ]
+ return sorted(unpredicted)
+
+ def branch_lines(self):
+ """Returns a list of line numbers that have more than one exit."""
+ exit_counts = self.parser.exit_counts()
+ return [l1 for l1,count in exit_counts.items() if count > 1]
+
+ def total_branches(self):
+ """How many total branches are there?"""
+ exit_counts = self.parser.exit_counts()
+ return sum([count for count in exit_counts.values() if count > 1])
+
+ def missing_branch_arcs(self):
+ """Return arcs that weren't executed from branch lines.
+
+ Returns {l1:[l2a,l2b,...], ...}
+
+ """
+ missing = self.arcs_missing()
+ branch_lines = set(self.branch_lines())
+ mba = {}
+ for l1, l2 in missing:
+ if l1 in branch_lines:
+ if l1 not in mba:
+ mba[l1] = []
+ mba[l1].append(l2)
+ return mba
+
+ def branch_stats(self):
+ """Get stats about branches.
+
+ Returns a dict mapping line numbers to a tuple:
+ (total_exits, taken_exits).
+ """
+
+ exit_counts = self.parser.exit_counts()
+ missing_arcs = self.missing_branch_arcs()
+ stats = {}
+ for lnum in self.branch_lines():
+ exits = exit_counts[lnum]
+ try:
+ missing = len(missing_arcs[lnum])
+ except KeyError:
+ missing = 0
+ stats[lnum] = (exits, exits - missing)
+ return stats
+
+
+class Numbers(object):
+ """The numerical results of measuring coverage.
+
+ This holds the basic statistics from `Analysis`, and is used to roll
+ up statistics across files.
+
+ """
+ # A global to determine the precision on coverage percentages, the number
+ # of decimal places.
+ _precision = 0
+ _near0 = 1.0 # These will change when _precision is changed.
+ _near100 = 99.0
+
+ def __init__(self, n_files=0, n_statements=0, n_excluded=0, n_missing=0,
+ n_branches=0, n_missing_branches=0
+ ):
+ self.n_files = n_files
+ self.n_statements = n_statements
+ self.n_excluded = n_excluded
+ self.n_missing = n_missing
+ self.n_branches = n_branches
+ self.n_missing_branches = n_missing_branches
+
+ def set_precision(cls, precision):
+ """Set the number of decimal places used to report percentages."""
+ assert 0 <= precision < 10
+ cls._precision = precision
+ cls._near0 = 1.0 / 10**precision
+ cls._near100 = 100.0 - cls._near0
+ set_precision = classmethod(set_precision)
+
+ def _get_n_executed(self):
+ """Returns the number of executed statements."""
+ return self.n_statements - self.n_missing
+ n_executed = property(_get_n_executed)
+
+ def _get_n_executed_branches(self):
+ """Returns the number of executed branches."""
+ return self.n_branches - self.n_missing_branches
+ n_executed_branches = property(_get_n_executed_branches)
+
+ def _get_pc_covered(self):
+ """Returns a single percentage value for coverage."""
+ if self.n_statements > 0:
+ pc_cov = (100.0 * (self.n_executed + self.n_executed_branches) /
+ (self.n_statements + self.n_branches))
+ else:
+ pc_cov = 100.0
+ return pc_cov
+ pc_covered = property(_get_pc_covered)
+
+ def _get_pc_covered_str(self):
+ """Returns the percent covered, as a string, without a percent sign.
+
+ Note that "0" is only returned when the value is truly zero, and "100"
+ is only returned when the value is truly 100. Rounding can never
+ result in either "0" or "100".
+
+ """
+ pc = self.pc_covered
+ if 0 < pc < self._near0:
+ pc = self._near0
+ elif self._near100 < pc < 100:
+ pc = self._near100
+ else:
+ pc = round(pc, self._precision)
+ return "%.*f" % (self._precision, pc)
+ pc_covered_str = property(_get_pc_covered_str)
+
+ def pc_str_width(cls):
+ """How many characters wide can pc_covered_str be?"""
+ width = 3 # "100"
+ if cls._precision > 0:
+ width += 1 + cls._precision
+ return width
+ pc_str_width = classmethod(pc_str_width)
+
+ def __add__(self, other):
+ nums = Numbers()
+ nums.n_files = self.n_files + other.n_files
+ nums.n_statements = self.n_statements + other.n_statements
+ nums.n_excluded = self.n_excluded + other.n_excluded
+ nums.n_missing = self.n_missing + other.n_missing
+ nums.n_branches = self.n_branches + other.n_branches
+ nums.n_missing_branches = (self.n_missing_branches +
+ other.n_missing_branches)
+ return nums
+
+ def __radd__(self, other):
+ # Implementing 0+Numbers allows us to sum() a list of Numbers.
+ if other == 0:
+ return self
+ return NotImplemented
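
The `__add__`/`__radd__` pair is what lets totals be rolled up with a plain `sum()`::

    from coverage.results import Numbers

    a = Numbers(n_files=1, n_statements=10, n_missing=2)
    b = Numbers(n_files=1, n_statements=40, n_missing=8)
    total = sum([a, b])            # 0 + a triggers __radd__, then a + b
    assert (total.n_files, total.n_statements) == (2, 50)
    assert total.pc_covered_str == "80"
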
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/summary.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/summary.py
new file mode 100644
index 0000000..599ae78
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/summary.py
@@ -0,0 +1,81 @@
+"""Summary reporting"""
+
+import sys
+
+from coverage.report import Reporter
+from coverage.results import Numbers
+
+
+class SummaryReporter(Reporter):
+ """A reporter for writing the summary report."""
+
+ def __init__(self, coverage, show_missing=True, ignore_errors=False):
+ super(SummaryReporter, self).__init__(coverage, ignore_errors)
+ self.show_missing = show_missing
+ self.branches = coverage.data.has_arcs()
+
+ def report(self, morfs, outfile=None, config=None):
+ """Writes a report summarizing coverage statistics per module.
+
+ `outfile` is a file object to write the summary to. `config` is a
+ CoverageConfig instance.
+
+ """
+ self.find_code_units(morfs, config)
+
+ # Prepare the formatting strings
+ max_name = max([len(cu.name) for cu in self.code_units] + [5])
+ fmt_name = "%%- %ds " % max_name
+ fmt_err = "%s %s: %s\n"
+ header = (fmt_name % "Name") + " Stmts Miss"
+ fmt_coverage = fmt_name + "%6d %6d"
+ if self.branches:
+ header += " Branch BrPart"
+ fmt_coverage += " %6d %6d"
+ width100 = Numbers.pc_str_width()
+ header += "%*s" % (width100+4, "Cover")
+ fmt_coverage += "%%%ds%%%%" % (width100+3,)
+ if self.show_missing:
+ header += " Missing"
+ fmt_coverage += " %s"
+ rule = "-" * len(header) + "\n"
+ header += "\n"
+ fmt_coverage += "\n"
+
+ if not outfile:
+ outfile = sys.stdout
+
+ # Write the header
+ outfile.write(header)
+ outfile.write(rule)
+
+ total = Numbers()
+
+ for cu in self.code_units:
+ try:
+ analysis = self.coverage._analyze(cu)
+ nums = analysis.numbers
+ args = (cu.name, nums.n_statements, nums.n_missing)
+ if self.branches:
+ args += (nums.n_branches, nums.n_missing_branches)
+ args += (nums.pc_covered_str,)
+ if self.show_missing:
+ args += (analysis.missing_formatted(),)
+ outfile.write(fmt_coverage % args)
+ total += nums
+ except KeyboardInterrupt: # pragma: no cover
+ raise
+ except:
+ if not self.ignore_errors:
+ typ, msg = sys.exc_info()[:2]
+ outfile.write(fmt_err % (cu.name, typ.__name__, msg))
+
+ if total.n_files > 1:
+ outfile.write(rule)
+ args = ("TOTAL", total.n_statements, total.n_missing)
+ if self.branches:
+ args += (total.n_branches, total.n_missing_branches)
+ args += (total.pc_covered_str,)
+ if self.show_missing:
+ args += ("",)
+ outfile.write(fmt_coverage % args)
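
With branch data absent and `show_missing` on, the resulting report looks roughly like::

    Name      Stmts   Miss  Cover   Missing
    ---------------------------------------
    mod_a        10      2    80%   7, 12-13
    mod_b        40      8    80%   22-29
    ---------------------------------------
    TOTAL        50     10    80%
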
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/templite.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/templite.py
new file mode 100644
index 0000000..c39e061
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/templite.py
@@ -0,0 +1,166 @@
+"""A simple Python template renderer, for a nano-subset of Django syntax."""
+
+# Coincidentally named the same as http://code.activestate.com/recipes/496702/
+
+import re, sys
+
+class Templite(object):
+ """A simple template renderer, for a nano-subset of Django syntax.
+
+ Supported constructs are extended variable access::
+
+ {{var.modifier.modifier|filter|filter}}
+
+ loops::
+
+ {% for var in list %}...{% endfor %}
+
+ and ifs::
+
+ {% if var %}...{% endif %}
+
+ Comments are within curly-hash markers::
+
+ {# This will be ignored #}
+
+ Construct a Templite with the template text, then use `render` against a
+ dictionary context to create a finished string.
+
+ """
+ def __init__(self, text, *contexts):
+ """Construct a Templite with the given `text`.
+
+ `contexts` are dictionaries of values to use for future renderings.
+ These are good for filters and global values.
+
+ """
+ self.text = text
+ self.context = {}
+ for context in contexts:
+ self.context.update(context)
+
+ # Split the text to form a list of tokens.
+ toks = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
+
+ # Parse the tokens into a nested list of operations. Each item in the
+ # list is a tuple with an opcode, and arguments. They'll be
+ # interpreted by TempliteEngine.
+ #
+ # When parsing an action tag with nested content (if, for), the current
+ # ops list is pushed onto ops_stack, and the parsing continues in a new
+ # ops list that is part of the arguments to the if or for op.
+ ops = []
+ ops_stack = []
+ for tok in toks:
+ if tok.startswith('{{'):
+ # Expression: ('exp', expr)
+ ops.append(('exp', tok[2:-2].strip()))
+ elif tok.startswith('{#'):
+ # Comment: ignore it and move on.
+ continue
+ elif tok.startswith('{%'):
+ # Action tag: split into words and parse further.
+ words = tok[2:-2].strip().split()
+ if words[0] == 'if':
+ # If: ('if', (expr, body_ops))
+ if_ops = []
+ assert len(words) == 2
+ ops.append(('if', (words[1], if_ops)))
+ ops_stack.append(ops)
+ ops = if_ops
+ elif words[0] == 'for':
+ # For: ('for', (varname, listexpr, body_ops))
+ assert len(words) == 4 and words[2] == 'in'
+ for_ops = []
+ ops.append(('for', (words[1], words[3], for_ops)))
+ ops_stack.append(ops)
+ ops = for_ops
+ elif words[0].startswith('end'):
+ # An "end..." tag (endif/endfor): pop the ops stack.
+ ops = ops_stack.pop()
+ assert ops[-1][0] == words[0][3:]
+ else:
+ raise SyntaxError("Don't understand tag %r" % words)
+ else:
+ ops.append(('lit', tok))
+
+ assert not ops_stack, "Unmatched action tag: %r" % ops_stack[-1][0]
+ self.ops = ops
+
+ def render(self, context=None):
+ """Render this template by applying it to `context`.
+
+ `context` is a dictionary of values to use in this rendering.
+
+ """
+ # Make the complete context we'll use.
+ ctx = dict(self.context)
+ if context:
+ ctx.update(context)
+
+ # Run it through an engine, and return the result.
+ engine = _TempliteEngine(ctx)
+ engine.execute(self.ops)
+ return "".join(engine.result)
+
+
+class _TempliteEngine(object):
+ """Executes Templite objects to produce strings."""
+ def __init__(self, context):
+ self.context = context
+ self.result = []
+
+ def execute(self, ops):
+ """Execute `ops` in the engine.
+
+ Called recursively for the bodies of if's and loops.
+
+ """
+ for op, args in ops:
+ if op == 'lit':
+ self.result.append(args)
+ elif op == 'exp':
+ try:
+ self.result.append(str(self.evaluate(args)))
+ except:
+ exc_class, exc, _ = sys.exc_info()
+ new_exc = exc_class("Couldn't evaluate {{ %s }}: %s"
+ % (args, exc))
+ raise new_exc
+ elif op == 'if':
+ expr, body = args
+ if self.evaluate(expr):
+ self.execute(body)
+ elif op == 'for':
+ var, lis, body = args
+ vals = self.evaluate(lis)
+ for val in vals:
+ self.context[var] = val
+ self.execute(body)
+ else:
+ raise AssertionError("TempliteEngine doesn't grok op %r" % op)
+
+ def evaluate(self, expr):
+ """Evaluate an expression.
+
+ `expr` can have pipes and dots to indicate data access and filtering.
+
+ """
+ if "|" in expr:
+ pipes = expr.split("|")
+ value = self.evaluate(pipes[0])
+ for func in pipes[1:]:
+ value = self.evaluate(func)(value)
+ elif "." in expr:
+ dots = expr.split('.')
+ value = self.evaluate(dots[0])
+ for dot in dots[1:]:
+ try:
+ value = getattr(value, dot)
+ except AttributeError:
+ value = value[dot]
+ if hasattr(value, '__call__'):
+ value = value()
+ else:
+ value = self.context[expr]
+ return value
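
A short usage sketch of the engine end to end::

    from coverage.templite import Templite

    tmpl = Templite(
        "Hello {{name|upper}}!\n"
        "{% for t in topics %}* {{t}}\n{% endfor %}",
        {'upper': lambda s: s.upper()},     # filters come from the contexts
    )
    print(tmpl.render({'name': 'Ned', 'topics': ['Python', 'Coverage']}))
    # Hello NED!
    # * Python
    # * Coverage
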
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/tracer.c b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/tracer.c
new file mode 100644
index 0000000..cff0bd8
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/tracer.c
@@ -0,0 +1,719 @@
+/* C-based Tracer for Coverage. */
+
+#include "Python.h"
+#include "compile.h" /* in 2.3, this wasn't part of Python.h */
+#include "eval.h" /* or this. */
+#include "structmember.h"
+#include "frameobject.h"
+
+/* Compile-time debugging helpers */
+#undef WHAT_LOG /* Define to log the WHAT params in the trace function. */
+#undef TRACE_LOG /* Define to log our bookkeeping. */
+#undef COLLECT_STATS /* Collect counters: stats are printed when tracer is stopped. */
+
+#if COLLECT_STATS
+#define STATS(x) x
+#else
+#define STATS(x)
+#endif
+
+/* Py 2.x and 3.x compatibility */
+
+#ifndef Py_TYPE
+#define Py_TYPE(o) (((PyObject*)(o))->ob_type)
+#endif
+
+#if PY_MAJOR_VERSION >= 3
+
+#define MyText_Type PyUnicode_Type
+#define MyText_Check(o) PyUnicode_Check(o)
+#define MyText_AS_STRING(o) PyBytes_AS_STRING(PyUnicode_AsASCIIString(o))
+#define MyInt_FromLong(l) PyLong_FromLong(l)
+
+#define MyType_HEAD_INIT PyVarObject_HEAD_INIT(NULL, 0)
+
+#else
+
+#define MyText_Type PyString_Type
+#define MyText_Check(o) PyString_Check(o)
+#define MyText_AS_STRING(o) PyString_AS_STRING(o)
+#define MyInt_FromLong(l) PyInt_FromLong(l)
+
+#define MyType_HEAD_INIT PyObject_HEAD_INIT(NULL) 0,
+
+#endif /* Py3k */
+
+/* The values returned to indicate ok or error. */
+#define RET_OK 0
+#define RET_ERROR -1
+
+/* An entry on the data stack. For each call frame, we need to record the
+ dictionary to capture data, and the last line number executed in that
+ frame.
+*/
+typedef struct {
+ PyObject * file_data; /* PyMem_Malloc'ed, a borrowed ref. */
+ int last_line;
+} DataStackEntry;
+
+/* The CTracer type. */
+
+typedef struct {
+ PyObject_HEAD
+
+ /* Python objects manipulated directly by the Collector class. */
+ PyObject * should_trace;
+ PyObject * warn;
+ PyObject * data;
+ PyObject * should_trace_cache;
+ PyObject * arcs;
+
+ /* Has the tracer been started? */
+ int started;
+ /* Are we tracing arcs, or just lines? */
+ int tracing_arcs;
+
+ /*
+ The data stack is a stack of dictionaries. Each dictionary collects
+ data for a single source file. The data stack parallels the call stack:
+ each call pushes the new frame's file data onto the data stack, and each
+ return pops file data off.
+
+ The file data is a dictionary whose form depends on the tracing options.
+ If tracing arcs, the keys are line number pairs. If not tracing arcs,
+ the keys are line numbers. In both cases, the value is irrelevant
+ (None).
+ */
+ /* The index of the last-used entry in data_stack. */
+ int depth;
+ /* The file data at each level, or NULL if not recording. */
+ DataStackEntry * data_stack;
+ int data_stack_alloc; /* number of entries allocated at data_stack. */
+
+ /* The current file_data dictionary. Borrowed. */
+ PyObject * cur_file_data;
+
+ /* The line number of the last line recorded, for tracing arcs.
+ -1 means there was no previous line, as when entering a code object.
+ */
+ int last_line;
+
+ /* The parent frame for the last exception event, to fix missing returns. */
+ PyFrameObject * last_exc_back;
+ int last_exc_firstlineno;
+
+#if COLLECT_STATS
+ struct {
+ unsigned calls;
+ unsigned lines;
+ unsigned returns;
+ unsigned exceptions;
+ unsigned others;
+ unsigned new_files;
+ unsigned missed_returns;
+ unsigned stack_reallocs;
+ unsigned errors;
+ } stats;
+#endif /* COLLECT_STATS */
+} CTracer;
+
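
The pure-Python collector that this C tracer accelerates follows the same shape; a minimal sketch of the push-on-call / pop-on-return bookkeeping (not coverage.py's actual trace function)::

    import sys

    def make_tracer(data):
        stack = []                  # parallels data_stack above
        state = {'cur': None}       # parallels cur_file_data

        def tracer(frame, event, arg):
            if event == 'call':
                stack.append(state['cur'])
                fname = frame.f_code.co_filename
                state['cur'] = data.setdefault(fname, {})
            elif event == 'line' and state['cur'] is not None:
                state['cur'][frame.f_lineno] = None
            elif event == 'return' and stack:
                state['cur'] = stack.pop()
            return tracer
        return tracer

    data = {}                       # maps filename -> {lineno: None}
    sys.settrace(make_tracer(data))
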
+#define STACK_DELTA 100
+
+static int
+CTracer_init(CTracer *self, PyObject *args_unused, PyObject *kwds_unused)
+{
+#if COLLECT_STATS
+ self->stats.calls = 0;
+ self->stats.lines = 0;
+ self->stats.returns = 0;
+ self->stats.exceptions = 0;
+ self->stats.others = 0;
+ self->stats.new_files = 0;
+ self->stats.missed_returns = 0;
+ self->stats.stack_reallocs = 0;
+ self->stats.errors = 0;
+#endif /* COLLECT_STATS */
+
+ self->should_trace = NULL;
+ self->warn = NULL;
+ self->data = NULL;
+ self->should_trace_cache = NULL;
+ self->arcs = NULL;
+
+ self->started = 0;
+ self->tracing_arcs = 0;
+
+ self->depth = -1;
+ self->data_stack = PyMem_Malloc(STACK_DELTA*sizeof(DataStackEntry));
+ if (self->data_stack == NULL) {
+ STATS( self->stats.errors++; )
+ PyErr_NoMemory();
+ return RET_ERROR;
+ }
+ self->data_stack_alloc = STACK_DELTA;
+
+ self->cur_file_data = NULL;
+ self->last_line = -1;
+
+ self->last_exc_back = NULL;
+
+ return RET_OK;
+}
+
+static void
+CTracer_dealloc(CTracer *self)
+{
+ if (self->started) {
+ PyEval_SetTrace(NULL, NULL);
+ }
+
+ Py_XDECREF(self->should_trace);
+ Py_XDECREF(self->warn);
+ Py_XDECREF(self->data);
+ Py_XDECREF(self->should_trace_cache);
+
+ PyMem_Free(self->data_stack);
+
+ Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+#if TRACE_LOG
+static const char *
+indent(int n)
+{
+ static const char * spaces =
+ " "
+ " "
+ " "
+ " "
+ ;
+ return spaces + strlen(spaces) - n*2;
+}
+
+static int logging = 0;
+/* Set these constants to be a file substring and line number to start logging. */
+static const char * start_file = "tests/views";
+static int start_line = 27;
+
+static void
+showlog(int depth, int lineno, PyObject * filename, const char * msg)
+{
+ if (logging) {
+ printf("%s%3d ", indent(depth), depth);
+ if (lineno) {
+ printf("%4d", lineno);
+ }
+ else {
+ printf(" ");
+ }
+ if (filename) {
+ printf(" %s", MyText_AS_STRING(filename));
+ }
+ if (msg) {
+ printf(" %s", msg);
+ }
+ printf("\n");
+ }
+}
+
+#define SHOWLOG(a,b,c,d) showlog(a,b,c,d)
+#else
+#define SHOWLOG(a,b,c,d)
+#endif /* TRACE_LOG */
+
+#if WHAT_LOG
+static const char * what_sym[] = {"CALL", "EXC ", "LINE", "RET "};
+#endif
+
+/* Record a pair of integers in self->cur_file_data. */
+static int
+CTracer_record_pair(CTracer *self, int l1, int l2)
+{
+ int ret = RET_OK;
+
+ PyObject * t = PyTuple_New(2);
+ if (t != NULL) {
+ PyTuple_SET_ITEM(t, 0, MyInt_FromLong(l1));
+ PyTuple_SET_ITEM(t, 1, MyInt_FromLong(l2));
+ if (PyDict_SetItem(self->cur_file_data, t, Py_None) < 0) {
+ STATS( self->stats.errors++; )
+ ret = RET_ERROR;
+ }
+ Py_DECREF(t);
+ }
+ else {
+ STATS( self->stats.errors++; )
+ ret = RET_ERROR;
+ }
+ return ret;
+}
+
+/*
+ * The Trace Function
+ */
+static int
+CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unused)
+{
+ int ret = RET_OK;
+ PyObject * filename = NULL;
+ PyObject * tracename = NULL;
+
+ #if WHAT_LOG
+ /* Note '<', not '<=': what_sym has only 4 entries (indices 0..3). */
+ if (what < (int)(sizeof(what_sym)/sizeof(const char *))) {
+ printf("trace: %s @ %s %d\n", what_sym[what], MyText_AS_STRING(frame->f_code->co_filename), frame->f_lineno);
+ }
+ #endif
+
+ #if TRACE_LOG
+ if (strstr(MyText_AS_STRING(frame->f_code->co_filename), start_file) && frame->f_lineno == start_line) {
+ logging = 1;
+ }
+ #endif
+
+ /* See below for details on missing-return detection. */
+ if (self->last_exc_back) {
+ if (frame == self->last_exc_back) {
+ /* Looks like someone forgot to send a return event. We'll clear
+ the exception state and do the RETURN code here. Notice that the
+ frame we have in hand here is not the correct frame for the RETURN;
+ that frame is gone. Our handling for RETURN doesn't need the
+ actual frame, but we do log it, so that will look a little off if
+ you're looking at the detailed log.
+
+ If someday we need to examine the frame when doing RETURN, then
+ we'll need to keep more of the missed frame's state.
+ */
+ STATS( self->stats.missed_returns++; )
+ if (self->depth >= 0) {
+ if (self->tracing_arcs && self->cur_file_data) {
+ if (CTracer_record_pair(self, self->last_line, -self->last_exc_firstlineno) < 0) {
+ return RET_ERROR;
+ }
+ }
+ SHOWLOG(self->depth, frame->f_lineno, frame->f_code->co_filename, "missedreturn");
+ self->cur_file_data = self->data_stack[self->depth].file_data;
+ self->last_line = self->data_stack[self->depth].last_line;
+ self->depth--;
+ }
+ }
+ self->last_exc_back = NULL;
+ }
+
+
+ switch (what) {
+ case PyTrace_CALL: /* 0 */
+ STATS( self->stats.calls++; )
+ /* Grow the stack. */
+ self->depth++;
+ if (self->depth >= self->data_stack_alloc) {
+ STATS( self->stats.stack_reallocs++; )
+ /* We've outgrown our data_stack array: make it bigger. */
+ int bigger = self->data_stack_alloc + STACK_DELTA;
+ DataStackEntry * bigger_data_stack = PyMem_Realloc(self->data_stack, bigger * sizeof(DataStackEntry));
+ if (bigger_data_stack == NULL) {
+ STATS( self->stats.errors++; )
+ PyErr_NoMemory();
+ self->depth--;
+ return RET_ERROR;
+ }
+ self->data_stack = bigger_data_stack;
+ self->data_stack_alloc = bigger;
+ }
+
+ /* Push the current state on the stack. */
+ self->data_stack[self->depth].file_data = self->cur_file_data;
+ self->data_stack[self->depth].last_line = self->last_line;
+
+ /* Check if we should trace this line. */
+ filename = frame->f_code->co_filename;
+ tracename = PyDict_GetItem(self->should_trace_cache, filename);
+ if (tracename == NULL) {
+ STATS( self->stats.new_files++; )
+ /* We've never considered this file before. */
+ /* Ask should_trace about it. */
+ PyObject * args = Py_BuildValue("(OO)", filename, frame);
+ tracename = PyObject_Call(self->should_trace, args, NULL);
+ Py_DECREF(args);
+ if (tracename == NULL) {
+ /* An error occurred inside should_trace. */
+ STATS( self->stats.errors++; )
+ return RET_ERROR;
+ }
+ if (PyDict_SetItem(self->should_trace_cache, filename, tracename) < 0) {
+ STATS( self->stats.errors++; )
+ return RET_ERROR;
+ }
+ }
+ else {
+ Py_INCREF(tracename);
+ }
+
+ /* If tracename is a string, then we're supposed to trace. */
+ if (MyText_Check(tracename)) {
+ PyObject * file_data = PyDict_GetItem(self->data, tracename);
+ if (file_data == NULL) {
+ file_data = PyDict_New();
+ if (file_data == NULL) {
+ STATS( self->stats.errors++; )
+ return RET_ERROR;
+ }
+ ret = PyDict_SetItem(self->data, tracename, file_data);
+ Py_DECREF(file_data);
+ if (ret < 0) {
+ STATS( self->stats.errors++; )
+ return RET_ERROR;
+ }
+ }
+ self->cur_file_data = file_data;
+ /* Make the frame right in case settrace(gettrace()) happens. */
+ Py_INCREF(self);
+ frame->f_trace = (PyObject*)self;
+ SHOWLOG(self->depth, frame->f_lineno, filename, "traced");
+ }
+ else {
+ self->cur_file_data = NULL;
+ SHOWLOG(self->depth, frame->f_lineno, filename, "skipped");
+ }
+
+ Py_DECREF(tracename);
+
+ self->last_line = -1;
+ break;
+
+ case PyTrace_RETURN: /* 3 */
+ STATS( self->stats.returns++; )
+ /* A near-copy of this code is above in the missing-return handler. */
+ if (self->depth >= 0) {
+ if (self->tracing_arcs && self->cur_file_data) {
+ int first = frame->f_code->co_firstlineno;
+ if (CTracer_record_pair(self, self->last_line, -first) < 0) {
+ return RET_ERROR;
+ }
+ }
+
+ SHOWLOG(self->depth, frame->f_lineno, frame->f_code->co_filename, "return");
+ self->cur_file_data = self->data_stack[self->depth].file_data;
+ self->last_line = self->data_stack[self->depth].last_line;
+ self->depth--;
+ }
+ break;
+
+ case PyTrace_LINE: /* 2 */
+ STATS( self->stats.lines++; )
+ if (self->depth >= 0) {
+ SHOWLOG(self->depth, frame->f_lineno, frame->f_code->co_filename, "line");
+ if (self->cur_file_data) {
+ /* We're tracing in this frame: record something. */
+ if (self->tracing_arcs) {
+ /* Tracing arcs: key is (last_line,this_line). */
+ if (CTracer_record_pair(self, self->last_line, frame->f_lineno) < 0) {
+ return RET_ERROR;
+ }
+ }
+ else {
+ /* Tracing lines: key is simply this_line. */
+ PyObject * this_line = MyInt_FromLong(frame->f_lineno);
+ if (this_line == NULL) {
+ STATS( self->stats.errors++; )
+ return RET_ERROR;
+ }
+ ret = PyDict_SetItem(self->cur_file_data, this_line, Py_None);
+ Py_DECREF(this_line);
+ if (ret < 0) {
+ STATS( self->stats.errors++; )
+ return RET_ERROR;
+ }
+ }
+ }
+ self->last_line = frame->f_lineno;
+ }
+ break;
+
+ case PyTrace_EXCEPTION:
+ /* Some code (Python 2.3, and pyexpat anywhere) fires an exception event
+ without a return event. To detect that, we'll keep a copy of the
+ parent frame for an exception event. If the next event is in that
+ frame, then we must have returned without a return event. We can
+ synthesize the missing event then.
+
+ Python itself fixed this problem in 2.4. Pyexpat still has the bug.
+ I've reported the problem with pyexpat as http://bugs.python.org/issue6359 .
+ If it gets fixed, this code should still work properly. Maybe some day
+ the bug will be fixed everywhere coverage.py is supported, and we can
+ remove this missing-return detection.
+
+ More about this fix: http://nedbatchelder.com/blog/200907/a_nasty_little_bug.html
+ */
+ STATS( self->stats.exceptions++; )
+ self->last_exc_back = frame->f_back;
+ self->last_exc_firstlineno = frame->f_code->co_firstlineno;
+ break;
+
+ default:
+ STATS( self->stats.others++; )
+ break;
+ }
+
+ return RET_OK;
+}
+
+/*
+ * Python has two ways to set the trace function: sys.settrace(fn), which
+ * takes a Python callable, and PyEval_SetTrace(func, obj), which takes
+ * a C function and a Python object. The way these work together is that
+ * sys.settrace(pyfn) calls PyEval_SetTrace(builtin_func, pyfn), using the
+ * Python callable as the object in PyEval_SetTrace. So sys.gettrace()
+ * simply returns the Python object used as the second argument to
+ * PyEval_SetTrace. In our case that object is the CTracer instance
+ * itself, which means it must be callable if it is to be passed back
+ * into sys.settrace().
+ *
+ * So we make our self callable, equivalent to invoking our trace function.
+ *
+ * To help with the process of replaying stored frames, this function has an
+ * optional keyword argument:
+ *
+ * def CTracer_call(frame, event, arg, lineno=0)
+ *
+ * If provided, the lineno argument is used as the line number, and the
+ * frame's f_lineno member is ignored.
+ */
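+/*
+ * Roughly, in Python terms (an illustrative sketch, not part of this
+ * module):
+ *
+ *     tracer = CTracer()
+ *     tracer.start()        # installs CTracer_trace via PyEval_SetTrace
+ *     fn = sys.gettrace()   # returns the tracer instance itself
+ *     sys.settrace(fn)      # works because the instance is callable
+ */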
+static PyObject *
+CTracer_call(CTracer *self, PyObject *args, PyObject *kwds)
+{
+ PyFrameObject *frame;
+ PyObject *what_str;
+ PyObject *arg;
+ int lineno = 0;
+ int what;
+ int orig_lineno;
+ PyObject *ret = NULL;
+
+ static char *what_names[] = {
+ "call", "exception", "line", "return",
+ "c_call", "c_exception", "c_return",
+ NULL
+ };
+
+ #if WHAT_LOG
+ printf("pytrace\n");
+ #endif
+
+ static char *kwlist[] = {"frame", "event", "arg", "lineno", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O!O|i:Tracer_call", kwlist,
+ &PyFrame_Type, &frame, &MyText_Type, &what_str, &arg, &lineno)) {
+ goto done;
+ }
+
+ /* In Python, the what argument is a string; we need to find the
+ matching int for the C function. */
+ for (what = 0; what_names[what]; what++) {
+ if (!strcmp(MyText_AS_STRING(what_str), what_names[what])) {
+ break;
+ }
+ }
+
+ /* Save off the frame's lineno, and use the forced one, if provided. */
+ orig_lineno = frame->f_lineno;
+ if (lineno > 0) {
+ frame->f_lineno = lineno;
+ }
+
+ /* Invoke the C function, and return ourselves. */
+ if (CTracer_trace(self, frame, what, arg) == RET_OK) {
+ Py_INCREF(self);
+ ret = (PyObject *)self;
+ }
+
+ /* Clean up. */
+ frame->f_lineno = orig_lineno;
+
+done:
+ return ret;
+}
+
+static PyObject *
+CTracer_start(CTracer *self, PyObject *args_unused)
+{
+ PyEval_SetTrace((Py_tracefunc)CTracer_trace, (PyObject*)self);
+ self->started = 1;
+ self->tracing_arcs = self->arcs && PyObject_IsTrue(self->arcs);
+ self->last_line = -1;
+
+ /* start() returns a trace function usable with sys.settrace() */
+ Py_INCREF(self);
+ return (PyObject *)self;
+}
+
+static PyObject *
+CTracer_stop(CTracer *self, PyObject *args_unused)
+{
+ if (self->started) {
+ PyEval_SetTrace(NULL, NULL);
+ self->started = 0;
+ }
+
+ return Py_BuildValue("");
+}
+
+static PyObject *
+CTracer_get_stats(CTracer *self)
+{
+#if COLLECT_STATS
+ return Py_BuildValue(
+ "{sI,sI,sI,sI,sI,sI,sI,sI,si,sI}",
+ "calls", self->stats.calls,
+ "lines", self->stats.lines,
+ "returns", self->stats.returns,
+ "exceptions", self->stats.exceptions,
+ "others", self->stats.others,
+ "new_files", self->stats.new_files,
+ "missed_returns", self->stats.missed_returns,
+ "stack_reallocs", self->stats.stack_reallocs,
+ "stack_alloc", self->data_stack_alloc,
+ "errors", self->stats.errors
+ );
+#else
+ return Py_BuildValue("");
+#endif /* COLLECT_STATS */
+}
+
+static PyMemberDef
+CTracer_members[] = {
+ { "should_trace", T_OBJECT, offsetof(CTracer, should_trace), 0,
+ PyDoc_STR("Function indicating whether to trace a file.") },
+
+ { "warn", T_OBJECT, offsetof(CTracer, warn), 0,
+ PyDoc_STR("Function for issuing warnings.") },
+
+ { "data", T_OBJECT, offsetof(CTracer, data), 0,
+ PyDoc_STR("The raw dictionary of trace data.") },
+
+ { "should_trace_cache", T_OBJECT, offsetof(CTracer, should_trace_cache), 0,
+ PyDoc_STR("Dictionary caching should_trace results.") },
+
+ { "arcs", T_OBJECT, offsetof(CTracer, arcs), 0,
+ PyDoc_STR("Should we trace arcs, or just lines?") },
+
+ { NULL }
+};
+
+static PyMethodDef
+CTracer_methods[] = {
+ { "start", (PyCFunction) CTracer_start, METH_VARARGS,
+ PyDoc_STR("Start the tracer") },
+
+ { "stop", (PyCFunction) CTracer_stop, METH_VARARGS,
+ PyDoc_STR("Stop the tracer") },
+
+ { "get_stats", (PyCFunction) CTracer_get_stats, METH_VARARGS,
+ PyDoc_STR("Get statistics about the tracing") },
+
+ { NULL }
+};
+
+static PyTypeObject
+CTracerType = {
+ MyType_HEAD_INIT
+ "coverage.CTracer", /*tp_name*/
+ sizeof(CTracer), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ (destructor)CTracer_dealloc, /*tp_dealloc*/
+ 0, /*tp_print*/
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ 0, /*tp_compare*/
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash */
+ (ternaryfunc)CTracer_call, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
+ "CTracer objects", /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ CTracer_methods, /* tp_methods */
+ CTracer_members, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc)CTracer_init, /* tp_init */
+ 0, /* tp_alloc */
+ 0, /* tp_new */
+};
+
+/* Module definition */
+
+#define MODULE_DOC PyDoc_STR("Fast coverage tracer.")
+
+#if PY_MAJOR_VERSION >= 3
+
+static PyModuleDef
+moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "coverage.tracer",
+ MODULE_DOC,
+ -1,
+ NULL, /* methods */
+ NULL,
+ NULL, /* traverse */
+ NULL, /* clear */
+ NULL
+};
+
+
+PyObject *
+PyInit_tracer(void)
+{
+ PyObject * mod = PyModule_Create(&moduledef);
+ if (mod == NULL) {
+ return NULL;
+ }
+
+ CTracerType.tp_new = PyType_GenericNew;
+ if (PyType_Ready(&CTracerType) < 0) {
+ Py_DECREF(mod);
+ return NULL;
+ }
+
+ Py_INCREF(&CTracerType);
+ PyModule_AddObject(mod, "CTracer", (PyObject *)&CTracerType);
+
+ return mod;
+}
+
+#else
+
+void
+inittracer(void)
+{
+ PyObject * mod;
+
+ mod = Py_InitModule3("coverage.tracer", NULL, MODULE_DOC);
+ if (mod == NULL) {
+ return;
+ }
+
+ CTracerType.tp_new = PyType_GenericNew;
+ if (PyType_Ready(&CTracerType) < 0) {
+ return;
+ }
+
+ Py_INCREF(&CTracerType);
+ PyModule_AddObject(mod, "CTracer", (PyObject *)&CTracerType);
+}
+
+#endif /* Py3k */
+
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/xmlreport.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/xmlreport.py
new file mode 100644
index 0000000..5f6cc87
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/coverage/xmlreport.py
@@ -0,0 +1,147 @@
+"""XML reporting for coverage.py"""
+
+import os, sys, time
+import xml.dom.minidom
+
+from coverage import __url__, __version__
+from coverage.backward import sorted # pylint: disable=W0622
+from coverage.report import Reporter
+
+def rate(hit, num):
+ """Return the fraction of `hit`/`num`, as a string."""
+ return "%.4g" % (float(hit) / (num or 1.0))
+
+
+class XmlReporter(Reporter):
+ """A reporter for writing Cobertura-style XML coverage results."""
+
+ def __init__(self, coverage, ignore_errors=False):
+ super(XmlReporter, self).__init__(coverage, ignore_errors)
+
+ self.packages = None
+ self.xml_out = None
+ self.arcs = coverage.data.has_arcs()
+
+ def report(self, morfs, outfile=None, config=None):
+ """Generate a Cobertura-compatible XML report for `morfs`.
+
+ `morfs` is a list of modules or filenames.
+
+ `outfile` is a file object to write the XML to. `config` is a
+ CoverageConfig instance.
+
+ """
+ # Initial setup.
+ outfile = outfile or sys.stdout
+
+ # Create the DOM that will store the data.
+ impl = xml.dom.minidom.getDOMImplementation()
+ docType = impl.createDocumentType(
+ "coverage", None,
+ "http://cobertura.sourceforge.net/xml/coverage-03.dtd"
+ )
+ self.xml_out = impl.createDocument(None, "coverage", docType)
+
+ # Write header stuff.
+ xcoverage = self.xml_out.documentElement
+ xcoverage.setAttribute("version", __version__)
+ xcoverage.setAttribute("timestamp", str(int(time.time()*1000)))
+ xcoverage.appendChild(self.xml_out.createComment(
+ " Generated by coverage.py: %s " % __url__
+ ))
+ xpackages = self.xml_out.createElement("packages")
+ xcoverage.appendChild(xpackages)
+
+ # Call xml_file for each file in the data.
+ self.packages = {}
+ self.report_files(self.xml_file, morfs, config)
+
+ lnum_tot, lhits_tot = 0, 0
+ bnum_tot, bhits_tot = 0, 0
+
+ # Populate the XML DOM with the package info.
+ for pkg_name in sorted(self.packages.keys()):
+ pkg_data = self.packages[pkg_name]
+ class_elts, lhits, lnum, bhits, bnum = pkg_data
+ xpackage = self.xml_out.createElement("package")
+ xpackages.appendChild(xpackage)
+ xclasses = self.xml_out.createElement("classes")
+ xpackage.appendChild(xclasses)
+ for class_name in sorted(class_elts.keys()):
+ xclasses.appendChild(class_elts[class_name])
+ xpackage.setAttribute("name", pkg_name.replace(os.sep, '.'))
+ xpackage.setAttribute("line-rate", rate(lhits, lnum))
+ xpackage.setAttribute("branch-rate", rate(bhits, bnum))
+ xpackage.setAttribute("complexity", "0")
+
+ lnum_tot += lnum
+ lhits_tot += lhits
+ bnum_tot += bnum
+ bhits_tot += bhits
+
+ xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot))
+ xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot))
+
+ # Use the DOM to write the output file.
+ outfile.write(self.xml_out.toprettyxml())
+
+ def xml_file(self, cu, analysis):
+ """Add to the XML report for a single file."""
+
+ # Create the 'lines' and 'package' XML elements, which
+ # are populated later. Note that a package == a directory.
+ dirname, fname = os.path.split(cu.name)
+ dirname = dirname or '.'
+ package = self.packages.setdefault(dirname, [ {}, 0, 0, 0, 0 ])
+
+ xclass = self.xml_out.createElement("class")
+
+ xclass.appendChild(self.xml_out.createElement("methods"))
+
+ xlines = self.xml_out.createElement("lines")
+ xclass.appendChild(xlines)
+ className = fname.replace('.', '_')
+ xclass.setAttribute("name", className)
+ ext = os.path.splitext(cu.filename)[1]
+ xclass.setAttribute("filename", cu.name + ext)
+ xclass.setAttribute("complexity", "0")
+
+ branch_stats = analysis.branch_stats()
+
+ # For each statement, create an XML 'line' element.
+ for line in analysis.statements:
+ xline = self.xml_out.createElement("line")
+ xline.setAttribute("number", str(line))
+
+ # Q: can we get info about the number of times a statement is
+ # executed? If so, that should be recorded here.
+ xline.setAttribute("hits", str(int(not line in analysis.missing)))
+
+ if self.arcs:
+ if line in branch_stats:
+ total, taken = branch_stats[line]
+ xline.setAttribute("branch", "true")
+ xline.setAttribute("condition-coverage",
+ "%d%% (%d/%d)" % (100*taken/total, taken, total)
+ )
+ xlines.appendChild(xline)
+
+ class_lines = len(analysis.statements)
+ class_hits = class_lines - len(analysis.missing)
+
+ if self.arcs:
+ class_branches = sum([t for t,k in branch_stats.values()])
+ missing_branches = sum([t-k for t,k in branch_stats.values()])
+ class_br_hits = class_branches - missing_branches
+ else:
+ class_branches = 0.0
+ class_br_hits = 0.0
+
+ # Finalize the statistics that are collected in the XML DOM.
+ xclass.setAttribute("line-rate", rate(class_hits, class_lines))
+ xclass.setAttribute("branch-rate", rate(class_br_hits, class_branches))
+ package[0][className] = xclass
+ package[1] += class_hits
+ package[2] += class_lines
+ package[3] += class_br_hits
+ package[4] += class_branches
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/irc/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/irc/__init__.py
new file mode 100644
index 0000000..c1e4c6d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/irc/__init__.py
@@ -0,0 +1 @@
+# This file is required for Python to search this directory for modules.
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/irc/ircbot.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/irc/ircbot.py
new file mode 100644
index 0000000..6f29a65
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/irc/ircbot.py
@@ -0,0 +1,438 @@
+# Copyright (C) 1999--2002 Joel Rosdahl
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# Joel Rosdahl <joel@rosdahl.net>
+#
+# $Id: ircbot.py,v 1.23 2008/09/11 07:38:30 keltus Exp $
+
+"""ircbot -- Simple IRC bot library.
+
+This module contains a single-server IRC bot class that can be used to
+write simpler bots.
+"""
+
+import sys
+from UserDict import UserDict
+
+from irclib import SimpleIRCClient
+from irclib import nm_to_n, irc_lower, all_events
+from irclib import parse_channel_modes, is_channel
+from irclib import ServerConnectionError
+
+class SingleServerIRCBot(SimpleIRCClient):
+ """A single-server IRC bot class.
+
+ The bot tries to reconnect if it is disconnected.
+
+ The bot keeps track of the channels it has joined, the other
+ clients present in those channels, and which of them have
+ operator or voice modes. The "database" is kept in the
+ self.channels attribute, which is an IRCDict of Channels.
+ """
+ def __init__(self, server_list, nickname, realname, reconnection_interval=60):
+ """Constructor for SingleServerIRCBot objects.
+
+ Arguments:
+
+ server_list -- A list of tuples (server, port) that
+ defines which servers the bot should try to
+ connect to.
+
+ nickname -- The bot's nickname.
+
+ realname -- The bot's realname.
+
+ reconnection_interval -- How long the bot should wait
+ before trying to reconnect.
+
+ Note: dcc_connections (a list of initiated/accepted DCC
+ connections) is an instance attribute inherited from
+ SimpleIRCClient, not a constructor argument.
+ """
+
+ SimpleIRCClient.__init__(self)
+ self.channels = IRCDict()
+ self.server_list = server_list
+ if not reconnection_interval or reconnection_interval < 0:
+ reconnection_interval = 2**31
+ self.reconnection_interval = reconnection_interval
+
+ self._nickname = nickname
+ self._realname = realname
+ for i in ["disconnect", "join", "kick", "mode",
+ "namreply", "nick", "part", "quit"]:
+ self.connection.add_global_handler(i,
+ getattr(self, "_on_" + i),
+ -10)
+
+ def _connected_checker(self):
+ """[Internal]"""
+ if not self.connection.is_connected():
+ self.connection.execute_delayed(self.reconnection_interval,
+ self._connected_checker)
+ self.jump_server()
+
+ def _connect(self):
+ """[Internal]"""
+ password = None
+ if len(self.server_list[0]) > 2:
+ password = self.server_list[0][2]
+ try:
+ self.connect(self.server_list[0][0],
+ self.server_list[0][1],
+ self._nickname,
+ password,
+ ircname=self._realname)
+ except ServerConnectionError:
+ pass
+
+ def _on_disconnect(self, c, e):
+ """[Internal]"""
+ self.channels = IRCDict()
+ self.connection.execute_delayed(self.reconnection_interval,
+ self._connected_checker)
+
+ def _on_join(self, c, e):
+ """[Internal]"""
+ ch = e.target()
+ nick = nm_to_n(e.source())
+ if nick == c.get_nickname():
+ self.channels[ch] = Channel()
+ self.channels[ch].add_user(nick)
+
+ def _on_kick(self, c, e):
+ """[Internal]"""
+ nick = e.arguments()[0]
+ channel = e.target()
+
+ if nick == c.get_nickname():
+ del self.channels[channel]
+ else:
+ self.channels[channel].remove_user(nick)
+
+ def _on_mode(self, c, e):
+ """[Internal]"""
+ modes = parse_channel_modes(" ".join(e.arguments()))
+ t = e.target()
+ if is_channel(t):
+ ch = self.channels[t]
+ for mode in modes:
+ if mode[0] == "+":
+ f = ch.set_mode
+ else:
+ f = ch.clear_mode
+ f(mode[1], mode[2])
+ else:
+ # Mode on self... XXX
+ pass
+
+ def _on_namreply(self, c, e):
+ """[Internal]"""
+
+ # e.arguments()[0] == "@" for secret channels,
+ # "*" for private channels,
+ # "=" for others (public channels)
+ # e.arguments()[1] == channel
+ # e.arguments()[2] == nick list
+
+ ch = e.arguments()[1]
+ for nick in e.arguments()[2].split():
+ if nick[0] == "@":
+ nick = nick[1:]
+ self.channels[ch].set_mode("o", nick)
+ elif nick[0] == "+":
+ nick = nick[1:]
+ self.channels[ch].set_mode("v", nick)
+ self.channels[ch].add_user(nick)
+
+ def _on_nick(self, c, e):
+ """[Internal]"""
+ before = nm_to_n(e.source())
+ after = e.target()
+ for ch in self.channels.values():
+ if ch.has_user(before):
+ ch.change_nick(before, after)
+
+ def _on_part(self, c, e):
+ """[Internal]"""
+ nick = nm_to_n(e.source())
+ channel = e.target()
+
+ if nick == c.get_nickname():
+ del self.channels[channel]
+ else:
+ self.channels[channel].remove_user(nick)
+
+ def _on_quit(self, c, e):
+ """[Internal]"""
+ nick = nm_to_n(e.source())
+ for ch in self.channels.values():
+ if ch.has_user(nick):
+ ch.remove_user(nick)
+
+ def die(self, msg="Bye, cruel world!"):
+ """Let the bot die.
+
+ Arguments:
+
+ msg -- Quit message.
+ """
+
+ self.connection.disconnect(msg)
+ sys.exit(0)
+
+ def disconnect(self, msg="I'll be back!"):
+ """Disconnect the bot.
+
+ The bot will try to reconnect after a while.
+
+ Arguments:
+
+ msg -- Quit message.
+ """
+ self.connection.disconnect(msg)
+
+ def get_version(self):
+ """Returns the bot version.
+
+ Used when answering a CTCP VERSION request.
+ """
+ return "ircbot.py by Joel Rosdahl <joel@rosdahl.net>"
+
+ def jump_server(self, msg="Changing servers"):
+ """Connect to a new server, possibly disconnecting from the current.
+
+ The bot will skip to the next server in the server_list each time
+ jump_server is called.
+ """
+ if self.connection.is_connected():
+ self.connection.disconnect(msg)
+
+ self.server_list.append(self.server_list.pop(0))
+ self._connect()
+
+ def on_ctcp(self, c, e):
+ """Default handler for ctcp events.
+
+ Replies to VERSION and PING requests and relays DCC requests
+ to the on_dccchat method.
+ """
+ if e.arguments()[0] == "VERSION":
+ c.ctcp_reply(nm_to_n(e.source()),
+ "VERSION " + self.get_version())
+ elif e.arguments()[0] == "PING":
+ if len(e.arguments()) > 1:
+ c.ctcp_reply(nm_to_n(e.source()),
+ "PING " + e.arguments()[1])
+ elif e.arguments()[0] == "DCC" and e.arguments()[1].split(" ", 1)[0] == "CHAT":
+ self.on_dccchat(c, e)
+
+ def on_dccchat(self, c, e):
+ pass
+
+ def start(self):
+ """Start the bot."""
+ self._connect()
+ SimpleIRCClient.start(self)
+
+
+class IRCDict:
+ """A dictionary suitable for storing IRC-related things.
+
+ Dictionary keys a and b are considered equal if and only if
+ irc_lower(a) == irc_lower(b)
+
+ Otherwise, it should behave exactly as a normal dictionary.
+ """
+
+ def __init__(self, dict=None):
+ self.data = {}
+ self.canon_keys = {} # Canonical keys
+ if dict is not None:
+ self.update(dict)
+ def __repr__(self):
+ return repr(self.data)
+ def __cmp__(self, dict):
+ if isinstance(dict, IRCDict):
+ return cmp(self.data, dict.data)
+ else:
+ return cmp(self.data, dict)
+ def __len__(self):
+ return len(self.data)
+ def __getitem__(self, key):
+ return self.data[self.canon_keys[irc_lower(key)]]
+ def __setitem__(self, key, item):
+ if key in self:
+ del self[key]
+ self.data[key] = item
+ self.canon_keys[irc_lower(key)] = key
+ def __delitem__(self, key):
+ ck = irc_lower(key)
+ del self.data[self.canon_keys[ck]]
+ del self.canon_keys[ck]
+ def __iter__(self):
+ return iter(self.data)
+ def __contains__(self, key):
+ return self.has_key(key)
+ def clear(self):
+ self.data.clear()
+ self.canon_keys.clear()
+ def copy(self):
+ if self.__class__ is UserDict:
+ return UserDict(self.data)
+ import copy
+ return copy.copy(self)
+ def keys(self):
+ return self.data.keys()
+ def items(self):
+ return self.data.items()
+ def values(self):
+ return self.data.values()
+ def has_key(self, key):
+ return irc_lower(key) in self.canon_keys
+ def update(self, dict):
+ for k, v in dict.items():
+ self.data[k] = v
+ def get(self, key, failobj=None):
+ return self.data.get(key, failobj)
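+
+# Illustrative example (the channel name is made up): keys differing only
+# by IRC case rules compare equal, so
+#
+#     d = IRCDict()
+#     d["#Chan"] = 1
+#     assert d["#chan"] == 1 and "#CHAN" in d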
+
+
+class Channel:
+ """A class for keeping information about an IRC channel.
+
+ This class can be improved a lot.
+ """
+
+ def __init__(self):
+ self.userdict = IRCDict()
+ self.operdict = IRCDict()
+ self.voiceddict = IRCDict()
+ self.modes = {}
+
+ def users(self):
+ """Returns an unsorted list of the channel's users."""
+ return self.userdict.keys()
+
+ def opers(self):
+ """Returns an unsorted list of the channel's operators."""
+ return self.operdict.keys()
+
+ def voiced(self):
+ """Returns an unsorted list of the persons that have voice
+ mode set in the channel."""
+ return self.voiceddict.keys()
+
+ def has_user(self, nick):
+ """Check whether the channel has a user."""
+ return nick in self.userdict
+
+ def is_oper(self, nick):
+ """Check whether a user has operator status in the channel."""
+ return nick in self.operdict
+
+ def is_voiced(self, nick):
+ """Check whether a user has voice mode set in the channel."""
+ return nick in self.voiceddict
+
+ def add_user(self, nick):
+ self.userdict[nick] = 1
+
+ def remove_user(self, nick):
+ for d in self.userdict, self.operdict, self.voiceddict:
+ if nick in d:
+ del d[nick]
+
+ def change_nick(self, before, after):
+ self.userdict[after] = 1
+ del self.userdict[before]
+ if before in self.operdict:
+ self.operdict[after] = 1
+ del self.operdict[before]
+ if before in self.voiceddict:
+ self.voiceddict[after] = 1
+ del self.voiceddict[before]
+
+ def set_mode(self, mode, value=None):
+ """Set mode on the channel.
+
+ Arguments:
+
+ mode -- The mode (a single-character string).
+
+ value -- The mode's value, if any (a nick for the "o" and "v" modes).
+ """
+ if mode == "o":
+ self.operdict[value] = 1
+ elif mode == "v":
+ self.voiceddict[value] = 1
+ else:
+ self.modes[mode] = value
+
+ def clear_mode(self, mode, value=None):
+ """Clear mode on the channel.
+
+ Arguments:
+
+ mode -- The mode (a single-character string).
+
+ value -- The mode's value, if any (a nick for the "o" and "v" modes).
+ """
+ try:
+ if mode == "o":
+ del self.operdict[value]
+ elif mode == "v":
+ del self.voiceddict[value]
+ else:
+ del self.modes[mode]
+ except KeyError:
+ pass
+
+ def has_mode(self, mode):
+ return mode in self.modes
+
+ def is_moderated(self):
+ return self.has_mode("m")
+
+ def is_secret(self):
+ return self.has_mode("s")
+
+ def is_protected(self):
+ return self.has_mode("p")
+
+ def has_topic_lock(self):
+ return self.has_mode("t")
+
+ def is_invite_only(self):
+ return self.has_mode("i")
+
+ def has_allow_external_messages(self):
+ return self.has_mode("n")
+
+ def has_limit(self):
+ return self.has_mode("l")
+
+ def limit(self):
+ if self.has_limit():
+ return self.modes["l"]
+ else:
+ return None
+
+ def has_key(self):
+ return self.has_mode("k")
+
+ def key(self):
+ if self.has_key():
+ return self.modes["k"]
+ else:
+ return None
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/irc/irclib.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/irc/irclib.py
new file mode 100644
index 0000000..5f7141c
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/irc/irclib.py
@@ -0,0 +1,1560 @@
+# Copyright (C) 1999--2002 Joel Rosdahl
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# keltus <keltus@users.sourceforge.net>
+#
+# $Id: irclib.py,v 1.47 2008/09/25 22:00:59 keltus Exp $
+
+"""irclib -- Internet Relay Chat (IRC) protocol client library.
+
+This library is intended to encapsulate the IRC protocol at a quite
+low level. It provides an event-driven IRC client framework. It has
+ fairly thorough support for the basic IRC protocol, CTCP and DCC chat,
+ but DCC file transfers are not yet supported.
+
+In order to understand how to make an IRC client, I'm afraid you more
+or less must understand the IRC specifications. They are available
+here: [IRC specifications].
+
+The main features of the IRC client framework are:
+
+ * Abstraction of the IRC protocol.
+ * Handles multiple simultaneous IRC server connections.
+ * Handles server PONGing transparently.
+ * Messages to the IRC server are done by calling methods on an IRC
+ connection object.
+ * Messages from an IRC server trigger events, which can be caught
+ by event handlers.
+ * Reading from and writing to IRC server sockets are normally done
+ by an internal select() loop, but the select()ing may be done by
+ an external main loop.
+ * Functions can be registered to execute at specified times by the
+ event-loop.
+ * Decodes CTCP tagging correctly (hopefully); I haven't seen any
+ other IRC client implementation that handles the CTCP
+ specification subtleties.
+ * A kind of simple, single-server, object-oriented IRC client class
+ that dispatches events to instance methods is included.
+
+Current limitations:
+
+ * The IRC protocol shines through the abstraction a bit too much.
+ * Data is not written asynchronously to the server, i.e. the write()
+ may block if the TCP buffers are stuffed.
+ * There is no support for DCC file transfers.
+ * The author hasn't even read RFC 2810, 2811, 2812 and 2813.
+ * As in most projects, documentation is lacking...
+
+.. [IRC specifications] http://www.irchelp.org/irchelp/rfc/
+"""
+
+import bisect
+import re
+import select
+import socket
+import string
+import sys
+import time
+import types
+
+VERSION = 0, 4, 8
+DEBUG = 0
+
+# TODO
+# ----
+# (maybe) thread safety
+# (maybe) color parser convenience functions
+# documentation (including all event types)
+# (maybe) add awareness of different types of ircds
+# send data asynchronously to the server (and DCC connections)
+# (maybe) automatically close unused, passive DCC connections after a while
+
+# NOTES
+# -----
+# connection.quit() only sends QUIT to the server.
+# ERROR from the server triggers the error event and the disconnect event.
+# dropping of the connection triggers the disconnect event.
+
+class IRCError(Exception):
+ """Represents an IRC exception."""
+ pass
+
+
+class IRC:
+ """Class that handles one or several IRC server connections.
+
+ When an IRC object has been instantiated, it can be used to create
+ Connection objects that represent the IRC connections. The
+ responsibility of the IRC object is to provide an event-driven
+ framework for the connections and to keep the connections alive.
+ It runs a select loop to poll each connection's TCP socket and
+ hands over the sockets with incoming data for processing by the
+ corresponding connection.
+
+ The methods of most interest for an IRC client writer are server,
+ add_global_handler, remove_global_handler, execute_at,
+ execute_delayed, process_once and process_forever.
+
+ Here is an example:
+
+ irc = irclib.IRC()
+ server = irc.server()
+ server.connect(\"irc.some.where\", 6667, \"my_nickname\")
+ server.privmsg(\"a_nickname\", \"Hi there!\")
+ irc.process_forever()
+
+ This will connect to the IRC server irc.some.where on port 6667
+ using the nickname my_nickname and send the message \"Hi there!\"
+ to the nickname a_nickname.
+ """
+
+ def __init__(self, fn_to_add_socket=None,
+ fn_to_remove_socket=None,
+ fn_to_add_timeout=None):
+ """Constructor for IRC objects.
+
+ Optional arguments are fn_to_add_socket, fn_to_remove_socket
+ and fn_to_add_timeout. The first two specify functions that
+ will be called with a socket object as argument when the IRC
+ object wants to be notified (or stop being notified) of data
+ coming on a new socket. When new data arrives, the method
+ process_data should be called. Similarly, fn_to_add_timeout
+ is called with a number of seconds (a floating point number)
+ as first argument when the IRC object wants to receive a
+ notification (by calling the process_timeout method). So, if
+ e.g. the argument is 42.17, the object wants the
+ process_timeout method to be called after 42 seconds and 170
+ milliseconds.
+
+ The three arguments mainly exist to be able to use an external
+ main loop (for example Tkinter's or PyGTK's main app loop)
+ instead of calling the process_forever method.
+
+ An alternative is to just call ServerConnection.process_once()
+ once in a while.
+ """
+
+ if fn_to_add_socket and fn_to_remove_socket:
+ self.fn_to_add_socket = fn_to_add_socket
+ self.fn_to_remove_socket = fn_to_remove_socket
+ else:
+ self.fn_to_add_socket = None
+ self.fn_to_remove_socket = None
+
+ self.fn_to_add_timeout = fn_to_add_timeout
+ self.connections = []
+ self.handlers = {}
+ self.delayed_commands = [] # list of tuples in the format (time, function, arguments)
+
+ self.add_global_handler("ping", _ping_ponger, -42)
+
+ def server(self):
+ """Creates and returns a ServerConnection object."""
+
+ c = ServerConnection(self)
+ self.connections.append(c)
+ return c
+
+ def process_data(self, sockets):
+ """Called when there is more data to read on connection sockets.
+
+ Arguments:
+
+ sockets -- A list of socket objects.
+
+ See documentation for IRC.__init__.
+ """
+ for s in sockets:
+ for c in self.connections:
+ if s == c._get_socket():
+ c.process_data()
+
+ def process_timeout(self):
+ """Called when a timeout notification is due.
+
+ See documentation for IRC.__init__.
+ """
+ t = time.time()
+ while self.delayed_commands:
+ if t >= self.delayed_commands[0][0]:
+ self.delayed_commands[0][1](*self.delayed_commands[0][2])
+ del self.delayed_commands[0]
+ else:
+ break
+
+ def process_once(self, timeout=0):
+ """Process data from connections once.
+
+ Arguments:
+
+ timeout -- How long the select() call should wait if no
+ data is available.
+
+ This method should be called periodically to check and process
+ incoming data, if there are any. If that seems boring, look
+ at the process_forever method.
+ """
+ sockets = map(lambda x: x._get_socket(), self.connections)
+ sockets = filter(lambda x: x is not None, sockets)
+ if sockets:
+ (i, o, e) = select.select(sockets, [], [], timeout)
+ self.process_data(i)
+ else:
+ time.sleep(timeout)
+ self.process_timeout()
+
+ def process_forever(self, timeout=0.2):
+ """Run an infinite loop, processing data from connections.
+
+ This method repeatedly calls process_once.
+
+ Arguments:
+
+ timeout -- Parameter to pass to process_once.
+ """
+ while 1:
+ self.process_once(timeout)
+
+ def disconnect_all(self, message=""):
+ """Disconnects all connections."""
+ for c in self.connections:
+ c.disconnect(message)
+
+ def add_global_handler(self, event, handler, priority=0):
+ """Adds a global handler function for a specific event type.
+
+ Arguments:
+
+ event -- Event type (a string). Check the values of the
+ numeric_events dictionary in irclib.py for possible event
+ types.
+
+ handler -- Callback function.
+
+ priority -- A number (the lower number, the higher priority).
+
+ The handler function is called whenever the specified event is
+ triggered in any of the connections. See documentation for
+ the Event class.
+
+ The handler functions are called in priority order (lowest
+ number is highest priority). If a handler function returns
+ \"NO MORE\", no more handlers will be called.
+ """
+ if event not in self.handlers:
+ self.handlers[event] = []
+ bisect.insort(self.handlers[event], ((priority, handler)))
+
+ def remove_global_handler(self, event, handler):
+ """Removes a global handler function.
+
+ Arguments:
+
+ event -- Event type (a string).
+
+ handler -- Callback function.
+
+ Returns 1 on success, otherwise 0.
+ """
+ if event not in self.handlers:
+ return 0
+ for h in self.handlers[event]:
+ if handler == h[1]:
+ self.handlers[event].remove(h)
+ return 1
+
+ def execute_at(self, at, function, arguments=()):
+ """Execute a function at a specified time.
+
+ Arguments:
+
+ at -- Execute at this time (standard \"time_t\" time).
+
+ function -- Function to call.
+
+ arguments -- Arguments to give the function.
+ """
+ self.execute_delayed(at-time.time(), function, arguments)
+
+ def execute_delayed(self, delay, function, arguments=()):
+ """Execute a function after a specified time.
+
+ Arguments:
+
+ delay -- How many seconds to wait.
+
+ function -- Function to call.
+
+ arguments -- Arguments to give the function.
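+
+ For example (illustrative; assumes a ServerConnection named conn):
+
+ irc.execute_delayed(30, conn.privmsg, ("#chan", "sent 30s later"))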
+ """
+ bisect.insort(self.delayed_commands, (delay+time.time(), function, arguments))
+ if self.fn_to_add_timeout:
+ self.fn_to_add_timeout(delay)
+
+ def dcc(self, dcctype="chat"):
+ """Creates and returns a DCCConnection object.
+
+ Arguments:
+
+ dcctype -- "chat" for DCC CHAT connections or "raw" for
+ DCC SEND (or other DCC types). If "chat",
+ incoming data will be split in newline-separated
+ chunks. If "raw", incoming data is not touched.
+ """
+ c = DCCConnection(self, dcctype)
+ self.connections.append(c)
+ return c
+
+ def _handle_event(self, connection, event):
+ """[Internal]"""
+ h = self.handlers
+ for handler in h.get("all_events", []) + h.get(event.eventtype(), []):
+ if handler[1](connection, event) == "NO MORE":
+ return
+
+ def _remove_connection(self, connection):
+ """[Internal]"""
+ self.connections.remove(connection)
+ if self.fn_to_remove_socket:
+ self.fn_to_remove_socket(connection._get_socket())
+
+_rfc_1459_command_regexp = re.compile("^(:(?P<prefix>[^ ]+) +)?(?P<command>[^ ]+)( *(?P<argument> .+))?")
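+# For example (an assumed raw message, shown for illustration):
+#   ":nick!user@host PRIVMSG #chan :hello" matches with
+#   prefix="nick!user@host", command="PRIVMSG", argument=" #chan :hello";
+# ServerConnection.process_data() then splits that argument into
+# ["#chan", "hello"].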
+
+class Connection:
+ """Base class for IRC connections.
+
+ Must be overridden.
+ """
+ def __init__(self, irclibobj):
+ self.irclibobj = irclibobj
+
+ def _get_socket(self):
+ raise IRCError, "Not overridden"
+
+ ##############################
+ ### Convenience wrappers.
+
+ def execute_at(self, at, function, arguments=()):
+ self.irclibobj.execute_at(at, function, arguments)
+
+ def execute_delayed(self, delay, function, arguments=()):
+ self.irclibobj.execute_delayed(delay, function, arguments)
+
+
+class ServerConnectionError(IRCError):
+ pass
+
+class ServerNotConnectedError(ServerConnectionError):
+ pass
+
+
+# Huh!? Crrrrazy EFNet doesn't follow the RFC: their ircd seems to
+# use \n as message separator! :P
+_linesep_regexp = re.compile("\r?\n")
+
+class ServerConnection(Connection):
+ """This class represents an IRC server connection.
+
+ ServerConnection objects are instantiated by calling the server
+ method on an IRC object.
+ """
+
+ def __init__(self, irclibobj):
+ Connection.__init__(self, irclibobj)
+ self.connected = 0 # Not connected yet.
+ self.socket = None
+ self.ssl = None
+
+ def connect(self, server, port, nickname, password=None, username=None,
+ ircname=None, localaddress="", localport=0, ssl=False, ipv6=False):
+ """Connect/reconnect to a server.
+
+ Arguments:
+
+ server -- Server name.
+
+ port -- Port number.
+
+ nickname -- The nickname.
+
+ password -- Password (if any).
+
+ username -- The username.
+
+ ircname -- The IRC name ("realname").
+
+ localaddress -- Bind the connection to a specific local IP address.
+
+ localport -- Bind the connection to a specific local port.
+
+ ssl -- Enable support for ssl.
+
+ ipv6 -- Enable support for ipv6.
+
+ This function can be called to reconnect a closed connection.
+
+ Returns the ServerConnection object.
+ """
+ if self.connected:
+ self.disconnect("Changing servers")
+
+ self.previous_buffer = ""
+ self.handlers = {}
+ self.real_server_name = ""
+ self.real_nickname = nickname
+ self.server = server
+ self.port = port
+ self.nickname = nickname
+ self.username = username or nickname
+ self.ircname = ircname or nickname
+ self.password = password
+ self.localaddress = localaddress
+ self.localport = localport
+ self.localhost = socket.gethostname()
+ if ipv6:
+ self.socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ else:
+ self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ self.socket.bind((self.localaddress, self.localport))
+ self.socket.connect((self.server, self.port))
+ if ssl:
+ self.ssl = socket.ssl(self.socket)
+ except socket.error, x:
+ self.socket.close()
+ self.socket = None
+ raise ServerConnectionError, "Couldn't connect to socket: %s" % x
+ self.connected = 1
+ if self.irclibobj.fn_to_add_socket:
+ self.irclibobj.fn_to_add_socket(self.socket)
+
+ # Log on...
+ if self.password:
+ self.pass_(self.password)
+ self.nick(self.nickname)
+ self.user(self.username, self.ircname)
+ return self
+
+ def close(self):
+ """Close the connection.
+
+ This method closes the connection permanently; after it has
+ been called, the object is unusable.
+ """
+
+ self.disconnect("Closing object")
+ self.irclibobj._remove_connection(self)
+
+ def _get_socket(self):
+ """[Internal]"""
+ return self.socket
+
+ def get_server_name(self):
+ """Get the (real) server name.
+
+ This method returns the (real) server name, or, more
+ specifically, what the server calls itself.
+ """
+
+ if self.real_server_name:
+ return self.real_server_name
+ else:
+ return ""
+
+ def get_nickname(self):
+ """Get the (real) nick name.
+
+ This method returns the (real) nickname. The library keeps
+ track of nick changes, so it might not be the nick name that
+ was passed to the connect() method. """
+
+ return self.real_nickname
+
+ def process_data(self):
+ """[Internal]"""
+
+ try:
+ if self.ssl:
+ new_data = self.ssl.read(2**14)
+ else:
+ new_data = self.socket.recv(2**14)
+ except socket.error, x:
+ # The server hung up.
+ self.disconnect("Connection reset by peer")
+ return
+ if not new_data:
+ # Read nothing: connection must be down.
+ self.disconnect("Connection reset by peer")
+ return
+
+ lines = _linesep_regexp.split(self.previous_buffer + new_data)
+
+ # Save the last, unfinished line.
+ self.previous_buffer = lines.pop()
+
+ for line in lines:
+ if DEBUG:
+ print "FROM SERVER:", line
+
+ if not line:
+ continue
+
+ prefix = None
+ command = None
+ arguments = None
+ self._handle_event(Event("all_raw_messages",
+ self.get_server_name(),
+ None,
+ [line]))
+
+ m = _rfc_1459_command_regexp.match(line)
+ if m.group("prefix"):
+ prefix = m.group("prefix")
+ if not self.real_server_name:
+ self.real_server_name = prefix
+
+ if m.group("command"):
+ command = m.group("command").lower()
+
+ if m.group("argument"):
+ a = m.group("argument").split(" :", 1)
+ arguments = a[0].split()
+ if len(a) == 2:
+ arguments.append(a[1])
+
+ # Translate numerics into more readable strings.
+ if command in numeric_events:
+ command = numeric_events[command]
+
+ if command == "nick":
+ if nm_to_n(prefix) == self.real_nickname:
+ self.real_nickname = arguments[0]
+ elif command == "welcome":
+ # Record the nickname in case the client changed nick
+ # in a nicknameinuse callback.
+ self.real_nickname = arguments[0]
+
+ if command in ["privmsg", "notice"]:
+ target, message = arguments[0], arguments[1]
+ messages = _ctcp_dequote(message)
+
+ if command == "privmsg":
+ if is_channel(target):
+ command = "pubmsg"
+ else:
+ if is_channel(target):
+ command = "pubnotice"
+ else:
+ command = "privnotice"
+
+ for m in messages:
+ if type(m) is types.TupleType:
+ if command in ["privmsg", "pubmsg"]:
+ command = "ctcp"
+ else:
+ command = "ctcpreply"
+
+ m = list(m)
+ if DEBUG:
+ print "command: %s, source: %s, target: %s, arguments: %s" % (
+ command, prefix, target, m)
+ self._handle_event(Event(command, prefix, target, m))
+ if command == "ctcp" and m[0] == "ACTION":
+ self._handle_event(Event("action", prefix, target, m[1:]))
+ else:
+ if DEBUG:
+ print "command: %s, source: %s, target: %s, arguments: %s" % (
+ command, prefix, target, [m])
+ self._handle_event(Event(command, prefix, target, [m]))
+ else:
+ target = None
+
+ if command == "quit":
+ arguments = [arguments[0]]
+ elif command == "ping":
+ target = arguments[0]
+ else:
+ target = arguments[0]
+ arguments = arguments[1:]
+
+ if command == "mode":
+ if not is_channel(target):
+ command = "umode"
+
+ if DEBUG:
+ print "command: %s, source: %s, target: %s, arguments: %s" % (
+ command, prefix, target, arguments)
+ self._handle_event(Event(command, prefix, target, arguments))
+
+ def _handle_event(self, event):
+ """[Internal]"""
+ self.irclibobj._handle_event(self, event)
+ if event.eventtype() in self.handlers:
+ for fn in self.handlers[event.eventtype()]:
+ fn(self, event)
+
+ def is_connected(self):
+ """Return connection status.
+
+ Returns true if connected, otherwise false.
+ """
+ return self.connected
+
+ def add_global_handler(self, *args):
+ """Add global handler.
+
+ See documentation for IRC.add_global_handler.
+ """
+ self.irclibobj.add_global_handler(*args)
+
+ def remove_global_handler(self, *args):
+ """Remove global handler.
+
+ See documentation for IRC.remove_global_handler.
+ """
+ self.irclibobj.remove_global_handler(*args)
+
+ def action(self, target, action):
+ """Send a CTCP ACTION command."""
+ self.ctcp("ACTION", target, action)
+
+ def admin(self, server=""):
+ """Send an ADMIN command."""
+ self.send_raw(" ".join(["ADMIN", server]).strip())
+
+ def ctcp(self, ctcptype, target, parameter=""):
+ """Send a CTCP command."""
+ ctcptype = ctcptype.upper()
+ self.privmsg(target, "\001%s%s\001" % (ctcptype, parameter and (" " + parameter) or ""))
+
+ def ctcp_reply(self, target, parameter):
+ """Send a CTCP REPLY command."""
+ self.notice(target, "\001%s\001" % parameter)
+
+ def disconnect(self, message=""):
+ """Hang up the connection.
+
+ Arguments:
+
+ message -- Quit message.
+ """
+ if not self.connected:
+ return
+
+ self.connected = 0
+
+ self.quit(message)
+
+ try:
+ self.socket.close()
+ except socket.error, x:
+ pass
+ self.socket = None
+ self._handle_event(Event("disconnect", self.server, "", [message]))
+
+ def globops(self, text):
+ """Send a GLOBOPS command."""
+ self.send_raw("GLOBOPS :" + text)
+
+ def info(self, server=""):
+ """Send an INFO command."""
+ self.send_raw(" ".join(["INFO", server]).strip())
+
+ def invite(self, nick, channel):
+ """Send an INVITE command."""
+ self.send_raw(" ".join(["INVITE", nick, channel]).strip())
+
+ def ison(self, nicks):
+ """Send an ISON command.
+
+ Arguments:
+
+ nicks -- List of nicks.
+ """
+ self.send_raw("ISON " + " ".join(nicks))
+
+ def join(self, channel, key=""):
+ """Send a JOIN command."""
+ self.send_raw("JOIN %s%s" % (channel, (key and (" " + key))))
+
+ def kick(self, channel, nick, comment=""):
+ """Send a KICK command."""
+ self.send_raw("KICK %s %s%s" % (channel, nick, (comment and (" :" + comment))))
+
+ def links(self, remote_server="", server_mask=""):
+ """Send a LINKS command."""
+ command = "LINKS"
+ if remote_server:
+ command = command + " " + remote_server
+ if server_mask:
+ command = command + " " + server_mask
+ self.send_raw(command)
+
+ def list(self, channels=None, server=""):
+ """Send a LIST command."""
+ command = "LIST"
+ if channels:
+ command = command + " " + ",".join(channels)
+ if server:
+ command = command + " " + server
+ self.send_raw(command)
+
+ def lusers(self, server=""):
+ """Send a LUSERS command."""
+ self.send_raw("LUSERS" + (server and (" " + server)))
+
+ def mode(self, target, command):
+ """Send a MODE command."""
+ self.send_raw("MODE %s %s" % (target, command))
+
+ def motd(self, server=""):
+ """Send an MOTD command."""
+ self.send_raw("MOTD" + (server and (" " + server)))
+
+ def names(self, channels=None):
+ """Send a NAMES command."""
+ self.send_raw("NAMES" + (channels and (" " + ",".join(channels)) or ""))
+
+ def nick(self, newnick):
+ """Send a NICK command."""
+ self.send_raw("NICK " + newnick)
+
+ def notice(self, target, text):
+ """Send a NOTICE command."""
+ # Should limit len(text) here!
+ self.send_raw("NOTICE %s :%s" % (target, text))
+
+ def oper(self, nick, password):
+ """Send an OPER command."""
+ self.send_raw("OPER %s %s" % (nick, password))
+
+ def part(self, channels, message=""):
+ """Send a PART command."""
+ if type(channels) == types.StringType:
+ self.send_raw("PART " + channels + (message and (" " + message)))
+ else:
+ self.send_raw("PART " + ",".join(channels) + (message and (" " + message)))
+
+ def pass_(self, password):
+ """Send a PASS command."""
+ self.send_raw("PASS " + password)
+
+ def ping(self, target, target2=""):
+ """Send a PING command."""
+ self.send_raw("PING %s%s" % (target, target2 and (" " + target2)))
+
+ def pong(self, target, target2=""):
+ """Send a PONG command."""
+ self.send_raw("PONG %s%s" % (target, target2 and (" " + target2)))
+
+ def privmsg(self, target, text):
+ """Send a PRIVMSG command."""
+ # Should limit len(text) here!
+ self.send_raw("PRIVMSG %s :%s" % (target, text))
+
+ def privmsg_many(self, targets, text):
+ """Send a PRIVMSG command to multiple targets."""
+ # Should limit len(text) here!
+ self.send_raw("PRIVMSG %s :%s" % (",".join(targets), text))
+
+ def quit(self, message=""):
+ """Send a QUIT command."""
+ # Note that many IRC servers don't use your QUIT message
+ # unless you've been connected for at least 5 minutes!
+ self.send_raw("QUIT" + (message and (" :" + message)))
+
+ def send_raw(self, string):
+ """Send raw string to the server.
+
+ The string will be padded with appropriate CR LF.
+ """
+ if self.socket is None:
+ raise ServerNotConnectedError, "Not connected."
+ try:
+ if self.ssl:
+ self.ssl.write(string + "\r\n")
+ else:
+ self.socket.send(string + "\r\n")
+ if DEBUG:
+ print "TO SERVER:", string
+ except socket.error, x:
+ # Ouch!
+ self.disconnect("Connection reset by peer.")
+
+ def squit(self, server, comment=""):
+ """Send an SQUIT command."""
+ self.send_raw("SQUIT %s%s" % (server, comment and (" :" + comment)))
+
+ def stats(self, statstype, server=""):
+ """Send a STATS command."""
+ self.send_raw("STATS %s%s" % (statstype, server and (" " + server)))
+
+ def time(self, server=""):
+ """Send a TIME command."""
+ self.send_raw("TIME" + (server and (" " + server)))
+
+ def topic(self, channel, new_topic=None):
+ """Send a TOPIC command."""
+ if new_topic is None:
+ self.send_raw("TOPIC " + channel)
+ else:
+ self.send_raw("TOPIC %s :%s" % (channel, new_topic))
+
+ def trace(self, target=""):
+ """Send a TRACE command."""
+ self.send_raw("TRACE" + (target and (" " + target)))
+
+ def user(self, username, realname):
+ """Send a USER command."""
+ self.send_raw("USER %s 0 * :%s" % (username, realname))
+
+ def userhost(self, nicks):
+ """Send a USERHOST command."""
+ self.send_raw("USERHOST " + ",".join(nicks))
+
+ def users(self, server=""):
+ """Send a USERS command."""
+ self.send_raw("USERS" + (server and (" " + server)))
+
+ def version(self, server=""):
+ """Send a VERSION command."""
+ self.send_raw("VERSION" + (server and (" " + server)))
+
+ def wallops(self, text):
+ """Send a WALLOPS command."""
+ self.send_raw("WALLOPS :" + text)
+
+ def who(self, target="", op=""):
+ """Send a WHO command."""
+ self.send_raw("WHO%s%s" % (target and (" " + target), op and (" o")))
+
+ def whois(self, targets):
+ """Send a WHOIS command."""
+ self.send_raw("WHOIS " + ",".join(targets))
+
+ def whowas(self, nick, max="", server=""):
+ """Send a WHOWAS command."""
+ self.send_raw("WHOWAS %s%s%s" % (nick,
+ max and (" " + max),
+ server and (" " + server)))
+
+class DCCConnectionError(IRCError):
+ pass
+
+
+class DCCConnection(Connection):
+ """This class represents a DCC connection.
+
+ DCCConnection objects are instantiated by calling the dcc
+ method on an IRC object.
+ """
+ def __init__(self, irclibobj, dcctype):
+ Connection.__init__(self, irclibobj)
+ self.connected = 0
+ self.passive = 0
+ self.dcctype = dcctype
+ self.peeraddress = None
+ self.peerport = None
+
+ def connect(self, address, port):
+ """Connect/reconnect to a DCC peer.
+
+ Arguments:
+ address -- Host/IP address of the peer.
+
+ port -- The port number to connect to.
+
+ Returns the DCCConnection object.
+ """
+ self.peeraddress = socket.gethostbyname(address)
+ self.peerport = port
+ self.socket = None
+ self.previous_buffer = ""
+ self.handlers = {}
+ self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.passive = 0
+ try:
+ self.socket.connect((self.peeraddress, self.peerport))
+ except socket.error, x:
+ raise DCCConnectionError, "Couldn't connect to socket: %s" % x
+ self.connected = 1
+ if self.irclibobj.fn_to_add_socket:
+ self.irclibobj.fn_to_add_socket(self.socket)
+ return self
+
+ def listen(self):
+ """Wait for a connection/reconnection from a DCC peer.
+
+ Returns the DCCConnection object.
+
+ The local IP address and port are available as
+ self.localaddress and self.localport. After connection from a
+ peer, the peer address and port are available as
+ self.peeraddress and self.peerport.
+ """
+ self.previous_buffer = ""
+ self.handlers = {}
+ self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.passive = 1
+ try:
+ self.socket.bind((socket.gethostbyname(socket.gethostname()), 0))
+ self.localaddress, self.localport = self.socket.getsockname()
+ self.socket.listen(10)
+ except socket.error, x:
+ raise DCCConnectionError, "Couldn't bind socket: %s" % x
+ return self
+
+ def disconnect(self, message=""):
+ """Hang up the connection and close the object.
+
+ Arguments:
+
+ message -- Quit message.
+ """
+ if not self.connected:
+ return
+
+ self.connected = 0
+ try:
+ self.socket.close()
+ except socket.error, x:
+ pass
+ self.socket = None
+ self.irclibobj._handle_event(
+ self,
+ Event("dcc_disconnect", self.peeraddress, "", [message]))
+ self.irclibobj._remove_connection(self)
+
+ def process_data(self):
+ """[Internal]"""
+
+ if self.passive and not self.connected:
+ conn, (self.peeraddress, self.peerport) = self.socket.accept()
+ self.socket.close()
+ self.socket = conn
+ self.connected = 1
+ if DEBUG:
+ print "DCC connection from %s:%d" % (
+ self.peeraddress, self.peerport)
+ self.irclibobj._handle_event(
+ self,
+ Event("dcc_connect", self.peeraddress, None, None))
+ return
+
+ try:
+ new_data = self.socket.recv(2**14)
+ except socket.error, x:
+ # The server hung up.
+ self.disconnect("Connection reset by peer")
+ return
+ if not new_data:
+ # Read nothing: connection must be down.
+ self.disconnect("Connection reset by peer")
+ return
+
+ if self.dcctype == "chat":
+ # The specification says lines are terminated with LF, but
+ # it seems safer to handle CR LF terminations too.
+ chunks = _linesep_regexp.split(self.previous_buffer + new_data)
+
+ # Save the last, unfinished line.
+ self.previous_buffer = chunks[-1]
+ if len(self.previous_buffer) > 2**14:
+ # Bad peer! Naughty peer!
+ self.disconnect()
+ return
+ chunks = chunks[:-1]
+ else:
+ chunks = [new_data]
+
+ command = "dccmsg"
+ prefix = self.peeraddress
+ target = None
+ for chunk in chunks:
+ if DEBUG:
+ print "FROM PEER:", chunk
+ arguments = [chunk]
+ if DEBUG:
+ print "command: %s, source: %s, target: %s, arguments: %s" % (
+ command, prefix, target, arguments)
+ self.irclibobj._handle_event(
+ self,
+ Event(command, prefix, target, arguments))
+
+ def _get_socket(self):
+ """[Internal]"""
+ return self.socket
+
+ def privmsg(self, string):
+ """Send data to DCC peer.
+
+        The string will be terminated with an LF if this is a DCC
+        CHAT session.
+ """
+ try:
+ self.socket.send(string)
+ if self.dcctype == "chat":
+ self.socket.send("\n")
+ if DEBUG:
+ print "TO PEER: %s\n" % string
+ except socket.error, x:
+ # Ouch!
+ self.disconnect("Connection reset by peer.")
+
+class SimpleIRCClient:
+ """A simple single-server IRC client class.
+
+ This is an example of an object-oriented wrapper of the IRC
+ framework. A real IRC client can be made by subclassing this
+ class and adding appropriate methods.
+
+ The method on_join will be called when a "join" event is created
+    (which is done when the server sends a JOIN message/command),
+ on_privmsg will be called for "privmsg" events, and so on. The
+ handler methods get two arguments: the connection object (same as
+ self.connection) and the event object.
+
+    Instance attributes that can be used by subclasses:
+
+ ircobj -- The IRC instance.
+
+ connection -- The ServerConnection instance.
+
+ dcc_connections -- A list of DCCConnection instances.
+ """
+ def __init__(self):
+ self.ircobj = IRC()
+ self.connection = self.ircobj.server()
+ self.dcc_connections = []
+ self.ircobj.add_global_handler("all_events", self._dispatcher, -10)
+ self.ircobj.add_global_handler("dcc_disconnect", self._dcc_disconnect, -10)
+
+ def _dispatcher(self, c, e):
+ """[Internal]"""
+ m = "on_" + e.eventtype()
+ if hasattr(self, m):
+ getattr(self, m)(c, e)
+
+ def _dcc_disconnect(self, c, e):
+ self.dcc_connections.remove(c)
+
+ def connect(self, server, port, nickname, password=None, username=None,
+ ircname=None, localaddress="", localport=0, ssl=False, ipv6=False):
+ """Connect/reconnect to a server.
+
+ Arguments:
+
+ server -- Server name.
+
+ port -- Port number.
+
+ nickname -- The nickname.
+
+ password -- Password (if any).
+
+ username -- The username.
+
+ ircname -- The IRC name.
+
+ localaddress -- Bind the connection to a specific local IP address.
+
+ localport -- Bind the connection to a specific local port.
+
+ ssl -- Enable support for ssl.
+
+ ipv6 -- Enable support for ipv6.
+
+ This function can be called to reconnect a closed connection.
+ """
+ self.connection.connect(server, port, nickname,
+ password, username, ircname,
+ localaddress, localport, ssl, ipv6)
+
+ def dcc_connect(self, address, port, dcctype="chat"):
+ """Connect to a DCC peer.
+
+ Arguments:
+
+ address -- IP address of the peer.
+
+ port -- Port to connect to.
+
+ Returns a DCCConnection instance.
+ """
+ dcc = self.ircobj.dcc(dcctype)
+ self.dcc_connections.append(dcc)
+ dcc.connect(address, port)
+ return dcc
+
+ def dcc_listen(self, dcctype="chat"):
+ """Listen for connections from a DCC peer.
+
+ Returns a DCCConnection instance.
+ """
+ dcc = self.ircobj.dcc(dcctype)
+ self.dcc_connections.append(dcc)
+ dcc.listen()
+ return dcc
+
+ def start(self):
+ """Start the IRC client."""
+ self.ircobj.process_forever()
+
+
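+# Editor's usage sketch (not part of the library): a minimal bot built by
+# subclassing SimpleIRCClient. The server, port, nickname, and channel are
+# hypothetical. on_welcome/on_pubmsg are dispatched by _dispatcher from the
+# "welcome" (001) and "pubmsg" events; join() and privmsg() are
+# ServerConnection helpers from this module.
+#
+#     class EchoBot(SimpleIRCClient):
+#         def on_welcome(self, connection, event):
+#             connection.join("#test")
+#
+#         def on_pubmsg(self, connection, event):
+#             # Echo each public message back to the channel.
+#             connection.privmsg(event.target(), event.arguments()[0])
+#
+#     bot = EchoBot()
+#     bot.connect("irc.example.org", 6667, "echobot")
+#     bot.start()
+
+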
+class Event:
+ """Class representing an IRC event."""
+ def __init__(self, eventtype, source, target, arguments=None):
+ """Constructor of Event objects.
+
+ Arguments:
+
+ eventtype -- A string describing the event.
+
+ source -- The originator of the event (a nick mask or a server).
+
+ target -- The target of the event (a nick or a channel).
+
+ arguments -- Any event specific arguments.
+ """
+ self._eventtype = eventtype
+ self._source = source
+ self._target = target
+ if arguments:
+ self._arguments = arguments
+ else:
+ self._arguments = []
+
+ def eventtype(self):
+ """Get the event type."""
+ return self._eventtype
+
+ def source(self):
+ """Get the event source."""
+ return self._source
+
+ def target(self):
+ """Get the event target."""
+ return self._target
+
+ def arguments(self):
+ """Get the event arguments."""
+ return self._arguments
+
+_LOW_LEVEL_QUOTE = "\020"
+_CTCP_LEVEL_QUOTE = "\134"
+_CTCP_DELIMITER = "\001"
+
+_low_level_mapping = {
+ "0": "\000",
+ "n": "\n",
+ "r": "\r",
+ _LOW_LEVEL_QUOTE: _LOW_LEVEL_QUOTE
+}
+
+_low_level_regexp = re.compile(_LOW_LEVEL_QUOTE + "(.)")
+
+def mask_matches(nick, mask):
+ """Check if a nick matches a mask.
+
+ Returns true if the nick matches, otherwise false.
+ """
+ nick = irc_lower(nick)
+ mask = irc_lower(mask)
+ mask = mask.replace("\\", "\\\\")
+ for ch in ".$|[](){}+":
+ mask = mask.replace(ch, "\\" + ch)
+ mask = mask.replace("?", ".")
+ mask = mask.replace("*", ".*")
+ r = re.compile(mask, re.IGNORECASE)
+ return r.match(nick)
+
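+# Editor's example: a typical ban-mask check (hypothetical values).
+#
+#     mask_matches("nick!user@host.example.com", "*!*@*.example.com")
+#     # => truthy match object; '*' becomes '.*' and '?' becomes '.' above
+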
+_special = "-[]\\`^{}"
+nick_characters = string.ascii_letters + string.digits + _special
+_ircstring_translation = string.maketrans(string.ascii_uppercase + "[]\\^",
+ string.ascii_lowercase + "{}|~")
+
+def irc_lower(s):
+ """Returns a lowercased string.
+
+ The definition of lowercased comes from the IRC specification (RFC
+ 1459).
+ """
+ return s.translate(_ircstring_translation)
+
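+# Editor's example of the RFC 1459 casemapping implemented above, where
+# "[]\^" are the uppercase forms of "{}|~":
+#
+#     irc_lower("Nick[away]")  # => 'nick{away}'
+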
+def _ctcp_dequote(message):
+ """[Internal] Dequote a message according to CTCP specifications.
+
+ The function returns a list where each element can be either a
+ string (normal message) or a tuple of one or two strings (tagged
+    messages). If a tuple has only one element (i.e. is a singleton),
+ that element is the tag; otherwise the tuple has two elements: the
+ tag and the data.
+
+ Arguments:
+
+ message -- The message to be decoded.
+ """
+
+ def _low_level_replace(match_obj):
+ ch = match_obj.group(1)
+
+ # If low_level_mapping doesn't have the character as key, we
+ # should just return the character.
+ return _low_level_mapping.get(ch, ch)
+
+ if _LOW_LEVEL_QUOTE in message:
+ # Yup, there was a quote. Release the dequoter, man!
+ message = _low_level_regexp.sub(_low_level_replace, message)
+
+ if _CTCP_DELIMITER not in message:
+ return [message]
+ else:
+ # Split it into parts. (Does any IRC client actually *use*
+ # CTCP stacking like this?)
+ chunks = message.split(_CTCP_DELIMITER)
+
+ messages = []
+ i = 0
+ while i < len(chunks)-1:
+ # Add message if it's non-empty.
+ if len(chunks[i]) > 0:
+ messages.append(chunks[i])
+
+ if i < len(chunks)-2:
+ # Aye! CTCP tagged data ahead!
+ messages.append(tuple(chunks[i+1].split(" ", 1)))
+
+ i = i + 2
+
+ if len(chunks) % 2 == 0:
+ # Hey, a lonely _CTCP_DELIMITER at the end! This means
+ # that the last chunk, including the delimiter, is a
+ # normal message! (This is according to the CTCP
+ # specification.)
+ messages.append(_CTCP_DELIMITER + chunks[-1])
+
+ return messages
+
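+# Editor's example: dequoting a PRIVMSG payload that carries one CTCP
+# message ("\001" is the CTCP delimiter):
+#
+#     _ctcp_dequote("Hi\001ACTION waves\001")
+#     # => ['Hi', ('ACTION', 'waves')]
+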
+def is_channel(string):
+ """Check if a string is a channel name.
+
+ Returns true if the argument is a channel name, otherwise false.
+ """
+ return string and string[0] in "#&+!"
+
+def ip_numstr_to_quad(num):
+ """Convert an IP number as an integer given in ASCII
+ representation (e.g. '3232235521') to an IP address string
+ (e.g. '192.168.0.1')."""
+ n = long(num)
+ p = map(str, map(int, [n >> 24 & 0xFF, n >> 16 & 0xFF,
+ n >> 8 & 0xFF, n & 0xFF]))
+ return ".".join(p)
+
+def ip_quad_to_numstr(quad):
+ """Convert an IP address string (e.g. '192.168.0.1') to an IP
+ number as an integer given in ASCII representation
+ (e.g. '3232235521')."""
+ p = map(long, quad.split("."))
+ s = str((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3])
+ if s[-1] == "L":
+ s = s[:-1]
+ return s
+
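+# Editor's worked example: 192.168.0.1 is
+# 192*2**24 + 168*2**16 + 0*2**8 + 1 = 3232235521, so the two helpers above
+# round-trip:
+#
+#     ip_quad_to_numstr("192.168.0.1")  # => '3232235521'
+#     ip_numstr_to_quad("3232235521")   # => '192.168.0.1'
+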
+def nm_to_n(s):
+ """Get the nick part of a nickmask.
+
+ (The source of an Event is a nickmask.)
+ """
+ return s.split("!")[0]
+
+def nm_to_uh(s):
+ """Get the userhost part of a nickmask.
+
+ (The source of an Event is a nickmask.)
+ """
+ return s.split("!")[1]
+
+def nm_to_h(s):
+ """Get the host part of a nickmask.
+
+ (The source of an Event is a nickmask.)
+ """
+ return s.split("@")[1]
+
+def nm_to_u(s):
+ """Get the user part of a nickmask.
+
+ (The source of an Event is a nickmask.)
+ """
+ s = s.split("!")[1]
+ return s.split("@")[0]
+
+def parse_nick_modes(mode_string):
+ """Parse a nick mode string.
+
+ The function returns a list of lists with three members: sign,
+ mode and argument. The sign is \"+\" or \"-\". The argument is
+ always None.
+
+ Example:
+
+ >>> irclib.parse_nick_modes(\"+ab-c\")
+ [['+', 'a', None], ['+', 'b', None], ['-', 'c', None]]
+ """
+
+ return _parse_modes(mode_string, "")
+
+def parse_channel_modes(mode_string):
+ """Parse a channel mode string.
+
+ The function returns a list of lists with three members: sign,
+ mode and argument. The sign is \"+\" or \"-\". The argument is
+ None if mode isn't one of \"b\", \"k\", \"l\", \"v\" or \"o\".
+
+ Example:
+
+ >>> irclib.parse_channel_modes(\"+ab-c foo\")
+ [['+', 'a', None], ['+', 'b', 'foo'], ['-', 'c', None]]
+ """
+
+ return _parse_modes(mode_string, "bklvo")
+
+def _parse_modes(mode_string, unary_modes=""):
+ """[Internal]"""
+ modes = []
+ arg_count = 0
+
+ # State variable.
+ sign = ""
+
+ a = mode_string.split()
+ if len(a) == 0:
+ return []
+ else:
+ mode_part, args = a[0], a[1:]
+
+ if mode_part[0] not in "+-":
+ return []
+ for ch in mode_part:
+ if ch in "+-":
+ sign = ch
+ elif ch == " ":
+ collecting_arguments = 1
+ elif ch in unary_modes:
+ if len(args) >= arg_count + 1:
+ modes.append([sign, ch, args[arg_count]])
+ arg_count = arg_count + 1
+ else:
+ modes.append([sign, ch, None])
+ else:
+ modes.append([sign, ch, None])
+ return modes
+
+def _ping_ponger(connection, event):
+ """[Internal]"""
+ connection.pong(event.target())
+
+# Numeric table mostly stolen from the Perl IRC module (Net::IRC).
+numeric_events = {
+ "001": "welcome",
+ "002": "yourhost",
+ "003": "created",
+ "004": "myinfo",
+ "005": "featurelist", # XXX
+ "200": "tracelink",
+ "201": "traceconnecting",
+ "202": "tracehandshake",
+ "203": "traceunknown",
+ "204": "traceoperator",
+ "205": "traceuser",
+ "206": "traceserver",
+ "207": "traceservice",
+ "208": "tracenewtype",
+ "209": "traceclass",
+ "210": "tracereconnect",
+ "211": "statslinkinfo",
+ "212": "statscommands",
+ "213": "statscline",
+ "214": "statsnline",
+ "215": "statsiline",
+ "216": "statskline",
+ "217": "statsqline",
+ "218": "statsyline",
+ "219": "endofstats",
+ "221": "umodeis",
+ "231": "serviceinfo",
+ "232": "endofservices",
+ "233": "service",
+ "234": "servlist",
+ "235": "servlistend",
+ "241": "statslline",
+ "242": "statsuptime",
+ "243": "statsoline",
+ "244": "statshline",
+ "250": "luserconns",
+ "251": "luserclient",
+ "252": "luserop",
+ "253": "luserunknown",
+ "254": "luserchannels",
+ "255": "luserme",
+ "256": "adminme",
+ "257": "adminloc1",
+ "258": "adminloc2",
+ "259": "adminemail",
+ "261": "tracelog",
+ "262": "endoftrace",
+ "263": "tryagain",
+ "265": "n_local",
+ "266": "n_global",
+ "300": "none",
+ "301": "away",
+ "302": "userhost",
+ "303": "ison",
+ "305": "unaway",
+ "306": "nowaway",
+ "311": "whoisuser",
+ "312": "whoisserver",
+ "313": "whoisoperator",
+ "314": "whowasuser",
+ "315": "endofwho",
+ "316": "whoischanop",
+ "317": "whoisidle",
+ "318": "endofwhois",
+ "319": "whoischannels",
+ "321": "liststart",
+ "322": "list",
+ "323": "listend",
+ "324": "channelmodeis",
+ "329": "channelcreate",
+ "331": "notopic",
+ "332": "currenttopic",
+ "333": "topicinfo",
+ "341": "inviting",
+ "342": "summoning",
+ "346": "invitelist",
+ "347": "endofinvitelist",
+ "348": "exceptlist",
+ "349": "endofexceptlist",
+ "351": "version",
+ "352": "whoreply",
+ "353": "namreply",
+ "361": "killdone",
+ "362": "closing",
+ "363": "closeend",
+ "364": "links",
+ "365": "endoflinks",
+ "366": "endofnames",
+ "367": "banlist",
+ "368": "endofbanlist",
+ "369": "endofwhowas",
+ "371": "info",
+ "372": "motd",
+ "373": "infostart",
+ "374": "endofinfo",
+ "375": "motdstart",
+ "376": "endofmotd",
+ "377": "motd2", # 1997-10-16 -- tkil
+ "381": "youreoper",
+ "382": "rehashing",
+ "384": "myportis",
+ "391": "time",
+ "392": "usersstart",
+ "393": "users",
+ "394": "endofusers",
+ "395": "nousers",
+ "401": "nosuchnick",
+ "402": "nosuchserver",
+ "403": "nosuchchannel",
+ "404": "cannotsendtochan",
+ "405": "toomanychannels",
+ "406": "wasnosuchnick",
+ "407": "toomanytargets",
+ "409": "noorigin",
+ "411": "norecipient",
+ "412": "notexttosend",
+ "413": "notoplevel",
+ "414": "wildtoplevel",
+ "421": "unknowncommand",
+ "422": "nomotd",
+ "423": "noadmininfo",
+ "424": "fileerror",
+ "431": "nonicknamegiven",
+ "432": "erroneusnickname", # Thiss iz how its speld in thee RFC.
+ "433": "nicknameinuse",
+ "436": "nickcollision",
+ "437": "unavailresource", # "Nick temporally unavailable"
+ "441": "usernotinchannel",
+ "442": "notonchannel",
+ "443": "useronchannel",
+ "444": "nologin",
+ "445": "summondisabled",
+ "446": "usersdisabled",
+ "451": "notregistered",
+ "461": "needmoreparams",
+ "462": "alreadyregistered",
+ "463": "nopermforhost",
+ "464": "passwdmismatch",
+ "465": "yourebannedcreep", # I love this one...
+ "466": "youwillbebanned",
+ "467": "keyset",
+ "471": "channelisfull",
+ "472": "unknownmode",
+ "473": "inviteonlychan",
+ "474": "bannedfromchan",
+ "475": "badchannelkey",
+ "476": "badchanmask",
+ "477": "nochanmodes", # "Channel doesn't support modes"
+ "478": "banlistfull",
+ "481": "noprivileges",
+ "482": "chanoprivsneeded",
+ "483": "cantkillserver",
+ "484": "restricted", # Connection is restricted
+ "485": "uniqopprivsneeded",
+ "491": "nooperhost",
+ "492": "noservicehost",
+ "501": "umodeunknownflag",
+ "502": "usersdontmatch",
+}
+
+generated_events = [
+ # Generated events
+ "dcc_connect",
+ "dcc_disconnect",
+ "dccmsg",
+ "disconnect",
+ "ctcp",
+ "ctcpreply",
+]
+
+protocol_events = [
+ # IRC protocol events
+ "error",
+ "join",
+ "kick",
+ "mode",
+ "part",
+ "ping",
+ "privmsg",
+ "privnotice",
+ "pubmsg",
+ "pubnotice",
+ "quit",
+ "invite",
+ "pong",
+]
+
+all_events = generated_events + protocol_events + numeric_events.values()
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/COPYING b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/COPYING
new file mode 100644
index 0000000..989d02e
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/COPYING
@@ -0,0 +1,28 @@
+Copyright 2012, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/__init__.py
new file mode 100644
index 0000000..70933a2
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/__init__.py
@@ -0,0 +1,224 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket extension for Apache HTTP Server.
+
+mod_pywebsocket is a WebSocket extension for Apache HTTP Server
+intended for testing or experimental purposes. mod_python is required.
+
+
+Installation
+============
+
+0. Prepare an Apache HTTP Server for which mod_python is enabled.
+
+1. Specify the following Apache HTTP Server directives to suit your
+ configuration.
+
+ If mod_pywebsocket is not in the Python path, specify the following.
+ <websock_lib> is the directory where mod_pywebsocket is installed.
+
+ PythonPath "sys.path+['<websock_lib>']"
+
+ Always specify the following. <websock_handlers> is the directory where
+ user-written WebSocket handlers are placed.
+
+ PythonOption mod_pywebsocket.handler_root <websock_handlers>
+ PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
+
+ To limit the search for WebSocket handlers to a directory <scan_dir>
+ under <websock_handlers>, configure as follows:
+
+ PythonOption mod_pywebsocket.handler_scan <scan_dir>
+
+ <scan_dir> is useful in saving scan time when <websock_handlers>
+ contains many non-WebSocket handler files.
+
+ If you want to allow handlers whose canonical path is not under the root
+   directory (i.e. a symbolic link is in the root directory but its target is not),
+ configure as follows:
+
+ PythonOption mod_pywebsocket.allow_handlers_outside_root_dir On
+
+ Example snippet of httpd.conf:
+ (mod_pywebsocket is in /websock_lib, WebSocket handlers are in
+ /websock_handlers, port is 80 for ws, 443 for wss.)
+
+ <IfModule python_module>
+ PythonPath "sys.path+['/websock_lib']"
+ PythonOption mod_pywebsocket.handler_root /websock_handlers
+ PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
+ </IfModule>
+
+2. Tune Apache parameters for serving WebSocket. Note that at least the
+   TimeOut directive from the core features and the RequestReadTimeout
+   directive from mod_reqtimeout should be adjusted so that they do not
+   kill connections after only a few seconds of idle time.
+
+3. Verify installation. You can use example/console.html to poke the server.
+
+
+Writing WebSocket handlers
+==========================
+
+When a WebSocket request comes in, the resource name specified in the
+handshake is treated as a file path under <websock_handlers> and the
+handler defined in <websock_handlers>/<resource_name>_wsh.py is invoked.
+(A sketch of such a handler module appears after this docstring.)
+
+For example, if the resource name is /example/chat, the handler defined in
+<websock_handlers>/example/chat_wsh.py is invoked.
+
+A WebSocket handler is composed of the following three functions:
+
+ web_socket_do_extra_handshake(request)
+ web_socket_transfer_data(request)
+ web_socket_passive_closing_handshake(request)
+
+where:
+ request: mod_python request.
+
+web_socket_do_extra_handshake is called during the handshake after the
+headers are successfully parsed and WebSocket properties (ws_location,
+ws_origin, and ws_resource) are added to request. A handler
+can reject the request by raising an exception.
+
+A request object has the following properties that you can use during the
+extra handshake (web_socket_do_extra_handshake):
+- ws_resource
+- ws_origin
+- ws_version
+- ws_location (HyBi 00 only)
+- ws_extensions (HyBi 06 and later)
+- ws_deflate (HyBi 06 and later)
+- ws_protocol
+- ws_requested_protocols (HyBi 06 and later)
+
+The last two are a bit tricky. See the next subsection.
+
+
+Subprotocol Negotiation
+-----------------------
+
+For HyBi 06 and later, ws_protocol is always set to None when
+web_socket_do_extra_handshake is called. If ws_requested_protocols is not
+None, you must choose one subprotocol from this list and assign it to
+ws_protocol.
+
+For HyBi 00, when web_socket_do_extra_handshake is called,
+ws_protocol is set to the value given by the client in the
+Sec-WebSocket-Protocol header, or None if no
+such header was found in the opening handshake request. Finish the extra
+handshake with ws_protocol untouched to accept the requested subprotocol.
+The Sec-WebSocket-Protocol header will then be sent to
+the client in the response with the same value as requested. Raise an
+exception in web_socket_do_extra_handshake to reject the requested
+subprotocol.
+
+
+Data Transfer
+-------------
+
+web_socket_transfer_data is called after the handshake has completed
+successfully. A handler can receive/send messages from/to the client
+using request. The mod_pywebsocket.msgutil module provides utilities
+for data transfer.
+
+You can receive a message by the following statement.
+
+ message = request.ws_stream.receive_message()
+
+This call blocks until a complete text frame arrives, and the payload data
+of the incoming frame is stored into message. When you're using the IETF
+HyBi 00 or later protocol, receive_message() returns None on receiving a
+client-initiated closing handshake. When any error occurs, receive_message()
+raises an exception.
+
+You can send a message by the following statement.
+
+ request.ws_stream.send_message(message)
+
+
+Closing Connection
+------------------
+
+Executing the following statement, or simply returning from
+web_socket_transfer_data, causes the connection to close.
+
+ request.ws_stream.close_connection()
+
+close_connection waits for a closing handshake acknowledgement from the
+client and raises an exception if it cannot receive a valid one.
+
+web_socket_passive_closing_handshake is called immediately after the server
+receives an incoming closing frame from the client peer. You can specify the
+code and reason via its return values; they are sent in an outgoing closing
+frame from the server. A request object has the following properties that
+you can use in web_socket_passive_closing_handshake.
+- ws_close_code
+- ws_close_reason
+
+
+Threading
+---------
+
+A WebSocket handler must be thread-safe if the server (Apache or
+standalone.py) is configured to use threads.
+
+
+Configuring WebSocket Extension Processors
+------------------------------------------
+
+See extensions.py for supported WebSocket extensions. Note that they are
+unstable and their APIs are subject to change substantially.
+
+A request object has these extension processing related attributes.
+
+- ws_requested_extensions:
+
+ A list of common.ExtensionParameter instances representing extension
+ parameters received from the client in the client's opening handshake.
+ You shouldn't modify it manually.
+
+- ws_extensions:
+
+ A list of common.ExtensionParameter instances representing extension
+ parameters to send back to the client in the server's opening handshake.
+ You shouldn't touch it directly. Instead, call methods on extension
+ processors.
+
+- ws_extension_processors:
+
+ A list of loaded extension processors. Find the processor for the
+ extension you want to configure from it, and call its methods.
+"""
+
+
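+# Editor's sketch of a handler module following the conventions described in
+# the docstring above. The file name (echo_wsh.py) and its location under
+# <websock_handlers> are hypothetical.
+#
+#     def web_socket_do_extra_handshake(request):
+#         pass  # accept every request; raise an exception to reject it
+#
+#     def web_socket_transfer_data(request):
+#         while True:
+#             message = request.ws_stream.receive_message()
+#             if message is None:
+#                 return  # the client started the closing handshake
+#             request.ws_stream.send_message(message)
+#
+#     def web_socket_passive_closing_handshake(request):
+#         return 1000, ''  # 1000 = normal closure in RFC 6455
+
+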
+# vi:sts=4 sw=4 et tw=72
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_base.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_base.py
new file mode 100644
index 0000000..8235666
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_base.py
@@ -0,0 +1,181 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Base stream class.
+"""
+
+
+# Note: request.connection.write/read are used in this module, even though
+# the mod_python documentation says that they should be used only in
+# connection handlers. Unfortunately, we have no other options. For example,
+# request.write/read are not suitable because they don't allow direct raw
+# bytes writing/reading.
+
+
+import socket
+
+from mod_pywebsocket import util
+
+
+# Exceptions
+
+
+class ConnectionTerminatedException(Exception):
+ """This exception will be raised when a connection is terminated
+ unexpectedly.
+ """
+
+ pass
+
+
+class InvalidFrameException(ConnectionTerminatedException):
+ """This exception will be raised when we received an invalid frame we
+ cannot parse.
+ """
+
+ pass
+
+
+class BadOperationException(Exception):
+ """This exception will be raised when send_message() is called on
+ server-terminated connection or receive_message() is called on
+ client-terminated connection.
+ """
+
+ pass
+
+
+class UnsupportedFrameException(Exception):
+ """This exception will be raised when we receive a frame with flag, opcode
+ we cannot handle. Handlers can just catch and ignore this exception and
+ call receive_message() again to continue processing the next frame.
+ """
+
+ pass
+
+
+class InvalidUTF8Exception(Exception):
+ """This exception will be raised when we receive a text frame which
+ contains invalid UTF-8 strings.
+ """
+
+ pass
+
+
+class StreamBase(object):
+ """Base stream class."""
+
+ def __init__(self, request):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+
+ def _read(self, length):
+ """Reads length bytes from connection. In case we catch any exception,
+ prepends remote address to the exception message and raise again.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ try:
+ read_bytes = self._request.connection.read(length)
+ if not read_bytes:
+ raise ConnectionTerminatedException(
+                    'Receiving %d bytes failed. Peer (%r) closed connection' %
+ (length, (self._request.connection.remote_addr,)))
+ return read_bytes
+ except socket.error, e:
+            # Catch a socket.error. Because it's not a child class of
+            # IOError prior to Python 2.6, we cannot omit this except clause.
+ # Use %s rather than %r for the exception to use human friendly
+ # format.
+ raise ConnectionTerminatedException(
+                'Receiving %d bytes failed. socket.error (%s) occurred' %
+ (length, e))
+ except IOError, e:
+ # Also catch an IOError because mod_python throws it.
+ raise ConnectionTerminatedException(
+                'Receiving %d bytes failed. IOError (%s) occurred' %
+ (length, e))
+
+ def _write(self, bytes_to_write):
+ """Writes given bytes to connection. In case we catch any exception,
+ prepends remote address to the exception message and raise again.
+ """
+
+ try:
+ self._request.connection.write(bytes_to_write)
+ except Exception, e:
+ util.prepend_message_to_exception(
+ 'Failed to send message to %r: ' %
+ (self._request.connection.remote_addr,),
+ e)
+ raise
+
+ def receive_bytes(self, length):
+ """Receives multiple bytes. Retries read when we couldn't receive the
+ specified amount.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ read_bytes = []
+ while length > 0:
+ new_read_bytes = self._read(length)
+ read_bytes.append(new_read_bytes)
+ length -= len(new_read_bytes)
+ return ''.join(read_bytes)
+
+ def _read_until(self, delim_char):
+ """Reads bytes until we encounter delim_char. The result will not
+ contain delim_char.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ read_bytes = []
+ while True:
+ ch = self._read(1)
+ if ch == delim_char:
+ break
+ read_bytes.append(ch)
+ return ''.join(read_bytes)
+
+
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hixie75.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hixie75.py
new file mode 100644
index 0000000..94cf5b3
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hixie75.py
@@ -0,0 +1,229 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides a class for parsing/building frames of the WebSocket
+protocol versions HyBi 00 and Hixie 75.
+
+Specification:
+- HyBi 00 http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
+- Hixie 75 http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-75
+"""
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import StreamBase
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+from mod_pywebsocket import util
+
+
+class StreamHixie75(StreamBase):
+ """A class for parsing/building frames of the WebSocket protocol version
+ HyBi 00 and Hixie 75.
+ """
+
+ def __init__(self, request, enable_closing_handshake=False):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ enable_closing_handshake: to let StreamHixie75 perform closing
+ handshake as specified in HyBi 00, set
+ this option to True.
+ """
+
+ StreamBase.__init__(self, request)
+
+ self._logger = util.get_class_logger(self)
+
+ self._enable_closing_handshake = enable_closing_handshake
+
+ self._request.client_terminated = False
+ self._request.server_terminated = False
+
+ def send_message(self, message, end=True, binary=False):
+ """Send message.
+
+ Args:
+ message: unicode string to send.
+ binary: not used in hixie75.
+
+ Raises:
+ BadOperationException: when called on a server-terminated
+ connection.
+ """
+
+ if not end:
+ raise BadOperationException(
+ 'StreamHixie75 doesn\'t support send_message with end=False')
+
+ if binary:
+ raise BadOperationException(
+ 'StreamHixie75 doesn\'t support send_message with binary=True')
+
+ if self._request.server_terminated:
+ raise BadOperationException(
+ 'Requested send_message after sending out a closing handshake')
+
+ self._write(''.join(['\x00', message.encode('utf-8'), '\xff']))
+
+ def _read_payload_length_hixie75(self):
+ """Reads a length header in a Hixie75 version frame with length.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ length = 0
+ while True:
+ b_str = self._read(1)
+ b = ord(b_str)
+ length = length * 128 + (b & 0x7f)
+ if (b & 0x80) == 0:
+ break
+ return length
+
+ def receive_message(self):
+ """Receive a WebSocket frame and return its payload an unicode string.
+
+ Returns:
+ payload unicode string in a WebSocket frame.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty
+ string.
+ BadOperationException: when called on a client-terminated
+ connection.
+ """
+
+ if self._request.client_terminated:
+ raise BadOperationException(
+ 'Requested receive_message after receiving a closing '
+ 'handshake')
+
+ while True:
+ # Read 1 byte.
+ # mp_conn.read will block if no bytes are available.
+ # Timeout is controlled by TimeOut directive of Apache.
+ frame_type_str = self.receive_bytes(1)
+ frame_type = ord(frame_type_str)
+ if (frame_type & 0x80) == 0x80:
+ # The payload length is specified in the frame.
+ # Read and discard.
+ length = self._read_payload_length_hixie75()
+ if length > 0:
+ _ = self.receive_bytes(length)
+ # 5.3 3. 12. if /type/ is 0xFF and /length/ is 0, then set the
+ # /client terminated/ flag and abort these steps.
+ if not self._enable_closing_handshake:
+ continue
+
+ if frame_type == 0xFF and length == 0:
+ self._request.client_terminated = True
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Received ack for server-initiated closing '
+ 'handshake')
+ return None
+
+ self._logger.debug(
+ 'Received client-initiated closing handshake')
+
+ self._send_closing_handshake()
+ self._logger.debug(
+ 'Sent ack for client-initiated closing handshake')
+ return None
+ else:
+ # The payload is delimited with \xff.
+ bytes = self._read_until('\xff')
+ # The WebSocket protocol section 4.4 specifies that invalid
+ # characters must be replaced with U+fffd REPLACEMENT
+ # CHARACTER.
+ message = bytes.decode('utf-8', 'replace')
+ if frame_type == 0x00:
+ return message
+ # Discard data of other types.
+
+ def _send_closing_handshake(self):
+ if not self._enable_closing_handshake:
+ raise BadOperationException(
+ 'Closing handshake is not supported in Hixie 75 protocol')
+
+ self._request.server_terminated = True
+
+ # 5.3 the server may decide to terminate the WebSocket connection by
+ # running through the following steps:
+ # 1. send a 0xFF byte and a 0x00 byte to the client to indicate the
+ # start of the closing handshake.
+ self._write('\xff\x00')
+
+ def close_connection(self, unused_code='', unused_reason=''):
+ """Closes a WebSocket connection.
+
+ Raises:
+ ConnectionTerminatedException: when closing handshake was
+                not successful.
+ """
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Requested close_connection but server is already terminated')
+ return
+
+ if not self._enable_closing_handshake:
+ self._request.server_terminated = True
+ self._logger.debug('Connection closed')
+ return
+
+ self._send_closing_handshake()
+ self._logger.debug('Sent server-initiated closing handshake')
+
+ # TODO(ukai): 2. wait until the /client terminated/ flag has been set,
+ # or until a server-defined timeout expires.
+ #
+ # For now, we expect receiving closing handshake right after sending
+ # out closing handshake, and if we couldn't receive non-handshake
+ # frame, we take it as ConnectionTerminatedException.
+ message = self.receive_message()
+ if message is not None:
+ raise ConnectionTerminatedException(
+ 'Didn\'t receive valid ack for closing handshake')
+ # TODO: 3. close the WebSocket connection.
+ # note: mod_python Connection (mp_conn) doesn't have close method.
+
+ def send_ping(self, body):
+ raise BadOperationException(
+ 'StreamHixie75 doesn\'t support send_ping')
+
+
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hybi.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hybi.py
new file mode 100644
index 0000000..a8a49e3
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hybi.py
@@ -0,0 +1,887 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides classes and helper functions for parsing/building frames
+of the WebSocket protocol (RFC 6455).
+
+Specification:
+http://tools.ietf.org/html/rfc6455
+"""
+
+
+from collections import deque
+import logging
+import os
+import struct
+import time
+
+from mod_pywebsocket import common
+from mod_pywebsocket import util
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import InvalidUTF8Exception
+from mod_pywebsocket._stream_base import StreamBase
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+
+
+_NOOP_MASKER = util.NoopMasker()
+
+
+class Frame(object):
+
+ def __init__(self, fin=1, rsv1=0, rsv2=0, rsv3=0,
+ opcode=None, payload=''):
+ self.fin = fin
+ self.rsv1 = rsv1
+ self.rsv2 = rsv2
+ self.rsv3 = rsv3
+ self.opcode = opcode
+ self.payload = payload
+
+
+# Helper functions made public to be used for writing unittests for WebSocket
+# clients.
+
+
+def create_length_header(length, mask):
+ """Creates a length header.
+
+ Args:
+ length: Frame length. Must be less than 2^63.
+ mask: Mask bit. Must be boolean.
+
+ Raises:
+ ValueError: when bad data is given.
+ """
+
+ if mask:
+ mask_bit = 1 << 7
+ else:
+ mask_bit = 0
+
+ if length < 0:
+ raise ValueError('length must be non negative integer')
+ elif length <= 125:
+ return chr(mask_bit | length)
+ elif length < (1 << 16):
+ return chr(mask_bit | 126) + struct.pack('!H', length)
+ elif length < (1 << 63):
+ return chr(mask_bit | 127) + struct.pack('!Q', length)
+ else:
+ raise ValueError('Payload is too big for one frame')
+
+
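+# Editor's worked examples of the three length encodings above (mask=False):
+#
+#     create_length_header(5, False)      # '\x05' (7-bit length)
+#     create_length_header(300, False)    # '\x7e\x01\x2c' (126 + 16-bit)
+#     create_length_header(70000, False)  # '\x7f' + 8-byte big-endian length
+
+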
+def create_header(opcode, payload_length, fin, rsv1, rsv2, rsv3, mask):
+ """Creates a frame header.
+
+ Raises:
+ Exception: when bad data is given.
+ """
+
+ if opcode < 0 or 0xf < opcode:
+ raise ValueError('Opcode out of range')
+
+ if payload_length < 0 or (1 << 63) <= payload_length:
+ raise ValueError('payload_length out of range')
+
+ if (fin | rsv1 | rsv2 | rsv3) & ~1:
+ raise ValueError('FIN bit and Reserved bit parameter must be 0 or 1')
+
+ header = ''
+
+ first_byte = ((fin << 7)
+ | (rsv1 << 6) | (rsv2 << 5) | (rsv3 << 4)
+ | opcode)
+ header += chr(first_byte)
+ header += create_length_header(payload_length, mask)
+
+ return header
+
+
+def _build_frame(header, body, mask):
+ if not mask:
+ return header + body
+
+ masking_nonce = os.urandom(4)
+ masker = util.RepeatedXorMasker(masking_nonce)
+
+ return header + masking_nonce + masker.mask(body)
+
+
+def _filter_and_format_frame_object(frame, mask, frame_filters):
+ for frame_filter in frame_filters:
+ frame_filter.filter(frame)
+
+ header = create_header(
+ frame.opcode, len(frame.payload), frame.fin,
+ frame.rsv1, frame.rsv2, frame.rsv3, mask)
+ return _build_frame(header, frame.payload, mask)
+
+
+def create_binary_frame(
+ message, opcode=common.OPCODE_BINARY, fin=1, mask=False, frame_filters=[]):
+ """Creates a simple binary frame with no extension, reserved bit."""
+
+ frame = Frame(fin=fin, opcode=opcode, payload=message)
+ return _filter_and_format_frame_object(frame, mask, frame_filters)
+
+
+def create_text_frame(
+ message, opcode=common.OPCODE_TEXT, fin=1, mask=False, frame_filters=[]):
+ """Creates a simple text frame with no extension, reserved bit."""
+
+ encoded_message = message.encode('utf-8')
+ return create_binary_frame(encoded_message, opcode, fin, mask,
+ frame_filters)
+
+
+def parse_frame(receive_bytes, logger=None,
+ ws_version=common.VERSION_HYBI_LATEST,
+ unmask_receive=True):
+ """Parses a frame. Returns a tuple containing each header field and
+ payload.
+
+ Args:
+ receive_bytes: a function that reads frame data from a stream or
+            something similar. The function takes the number of bytes to be
+            read. The function must raise ConnectionTerminatedException if
+ there is not enough data to be read.
+ logger: a logging object.
+ ws_version: the version of WebSocket protocol.
+        unmask_receive: unmask received frames. When an unmasked frame is
+            received while this is set, raises InvalidFrameException.
+
+ Raises:
+ ConnectionTerminatedException: when receive_bytes raises it.
+ InvalidFrameException: when the frame contains invalid data.
+ """
+
+ if not logger:
+ logger = logging.getLogger()
+
+ logger.log(common.LOGLEVEL_FINE, 'Receive the first 2 octets of a frame')
+
+ received = receive_bytes(2)
+
+ first_byte = ord(received[0])
+ fin = (first_byte >> 7) & 1
+ rsv1 = (first_byte >> 6) & 1
+ rsv2 = (first_byte >> 5) & 1
+ rsv3 = (first_byte >> 4) & 1
+ opcode = first_byte & 0xf
+
+ second_byte = ord(received[1])
+ mask = (second_byte >> 7) & 1
+ payload_length = second_byte & 0x7f
+
+ logger.log(common.LOGLEVEL_FINE,
+ 'FIN=%s, RSV1=%s, RSV2=%s, RSV3=%s, opcode=%s, '
+ 'Mask=%s, Payload_length=%s',
+ fin, rsv1, rsv2, rsv3, opcode, mask, payload_length)
+
+ if (mask == 1) != unmask_receive:
+ raise InvalidFrameException(
+            'Mask bit on the received frame didn\'t match masking '
+ 'configuration for received frames')
+
+ # The HyBi and later specs disallow putting a value in 0x0-0xFFFF
+ # into the 8-octet extended payload length field (or 0x0-0xFD in
+ # 2-octet field).
+ valid_length_encoding = True
+ length_encoding_bytes = 1
+ if payload_length == 127:
+ logger.log(common.LOGLEVEL_FINE,
+ 'Receive 8-octet extended payload length')
+
+ extended_payload_length = receive_bytes(8)
+ payload_length = struct.unpack(
+ '!Q', extended_payload_length)[0]
+ if payload_length > 0x7FFFFFFFFFFFFFFF:
+ raise InvalidFrameException(
+ 'Extended payload length >= 2^63')
+ if ws_version >= 13 and payload_length < 0x10000:
+ valid_length_encoding = False
+ length_encoding_bytes = 8
+
+ logger.log(common.LOGLEVEL_FINE,
+ 'Decoded_payload_length=%s', payload_length)
+ elif payload_length == 126:
+ logger.log(common.LOGLEVEL_FINE,
+ 'Receive 2-octet extended payload length')
+
+ extended_payload_length = receive_bytes(2)
+ payload_length = struct.unpack(
+ '!H', extended_payload_length)[0]
+ if ws_version >= 13 and payload_length < 126:
+ valid_length_encoding = False
+ length_encoding_bytes = 2
+
+ logger.log(common.LOGLEVEL_FINE,
+ 'Decoded_payload_length=%s', payload_length)
+
+ if not valid_length_encoding:
+ logger.warning(
+ 'Payload length is not encoded using the minimal number of '
+ 'bytes (%d is encoded using %d bytes)',
+ payload_length,
+ length_encoding_bytes)
+
+ if mask == 1:
+ logger.log(common.LOGLEVEL_FINE, 'Receive mask')
+
+ masking_nonce = receive_bytes(4)
+ masker = util.RepeatedXorMasker(masking_nonce)
+
+ logger.log(common.LOGLEVEL_FINE, 'Mask=%r', masking_nonce)
+ else:
+ masker = _NOOP_MASKER
+
+ logger.log(common.LOGLEVEL_FINE, 'Receive payload data')
+ if logger.isEnabledFor(common.LOGLEVEL_FINE):
+ receive_start = time.time()
+
+ raw_payload_bytes = receive_bytes(payload_length)
+
+ if logger.isEnabledFor(common.LOGLEVEL_FINE):
+ logger.log(
+ common.LOGLEVEL_FINE,
+ 'Done receiving payload data at %s MB/s',
+ payload_length / (time.time() - receive_start) / 1000 / 1000)
+ logger.log(common.LOGLEVEL_FINE, 'Unmask payload data')
+
+ if logger.isEnabledFor(common.LOGLEVEL_FINE):
+ unmask_start = time.time()
+
+ unmasked_bytes = masker.mask(raw_payload_bytes)
+
+ if logger.isEnabledFor(common.LOGLEVEL_FINE):
+ logger.log(
+ common.LOGLEVEL_FINE,
+ 'Done unmasking payload data at %s MB/s',
+ payload_length / (time.time() - unmask_start) / 1000 / 1000)
+
+ return opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3
+
+
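+# Editor's sketch: parsing one unmasked text frame from a fixed byte string.
+# The one-element list is just a mutable buffer for the closure; note
+# unmask_receive=False, since parse_frame rejects unmasked frames otherwise.
+#
+#     data = ['\x81\x05Hello']  # FIN=1, opcode=TEXT (0x1), 5-byte payload
+#
+#     def receive_bytes(length):
+#         chunk, data[0] = data[0][:length], data[0][length:]
+#         return chunk
+#
+#     parse_frame(receive_bytes, unmask_receive=False)
+#     # => (1, 'Hello', 1, 0, 0, 0)
+
+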
+class FragmentedFrameBuilder(object):
+ """A stateful class to send a message as fragments."""
+
+ def __init__(self, mask, frame_filters=[], encode_utf8=True):
+ """Constructs an instance."""
+
+ self._mask = mask
+ self._frame_filters = frame_filters
+ # This is for skipping UTF-8 encoding when building text type frames
+ # from compressed data.
+ self._encode_utf8 = encode_utf8
+
+ self._started = False
+
+        # Holds the opcode of the first frame of a message to verify that the
+        # types of the other frames in the message are all the same.
+ self._opcode = common.OPCODE_TEXT
+
+ def build(self, payload_data, end, binary):
+ if binary:
+ frame_type = common.OPCODE_BINARY
+ else:
+ frame_type = common.OPCODE_TEXT
+ if self._started:
+ if self._opcode != frame_type:
+ raise ValueError('Message types are different in frames for '
+ 'the same message')
+ opcode = common.OPCODE_CONTINUATION
+ else:
+ opcode = frame_type
+ self._opcode = frame_type
+
+ if end:
+ self._started = False
+ fin = 1
+ else:
+ self._started = True
+ fin = 0
+
+ if binary or not self._encode_utf8:
+ return create_binary_frame(
+ payload_data, opcode, fin, self._mask, self._frame_filters)
+ else:
+ return create_text_frame(
+ payload_data, opcode, fin, self._mask, self._frame_filters)
+
+
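+# Editor's sketch: sending one text message as two fragments. The first
+# build() call emits a TEXT frame with FIN=0, the second a CONTINUATION
+# frame with FIN=1.
+#
+#     builder = FragmentedFrameBuilder(mask=False)
+#     first_frame = builder.build('Hel', end=False, binary=False)
+#     last_frame = builder.build('lo', end=True, binary=False)
+
+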
+def _create_control_frame(opcode, body, mask, frame_filters):
+ frame = Frame(opcode=opcode, payload=body)
+
+ for frame_filter in frame_filters:
+ frame_filter.filter(frame)
+
+ if len(frame.payload) > 125:
+ raise BadOperationException(
+ 'Payload data size of control frames must be 125 bytes or less')
+
+ header = create_header(
+ frame.opcode, len(frame.payload), frame.fin,
+ frame.rsv1, frame.rsv2, frame.rsv3, mask)
+ return _build_frame(header, frame.payload, mask)
+
+
+def create_ping_frame(body, mask=False, frame_filters=[]):
+ return _create_control_frame(common.OPCODE_PING, body, mask, frame_filters)
+
+
+def create_pong_frame(body, mask=False, frame_filters=[]):
+ return _create_control_frame(common.OPCODE_PONG, body, mask, frame_filters)
+
+
+def create_close_frame(body, mask=False, frame_filters=[]):
+ return _create_control_frame(
+ common.OPCODE_CLOSE, body, mask, frame_filters)
+
+
+def create_closing_handshake_body(code, reason):
+ body = ''
+ if code is not None:
+ if (code > common.STATUS_USER_PRIVATE_MAX or
+ code < common.STATUS_NORMAL_CLOSURE):
+ raise BadOperationException('Status code is out of range')
+ if (code == common.STATUS_NO_STATUS_RECEIVED or
+ code == common.STATUS_ABNORMAL_CLOSURE or
+ code == common.STATUS_TLS_HANDSHAKE):
+ raise BadOperationException('Status code is reserved pseudo '
+ 'code')
+ encoded_reason = reason.encode('utf-8')
+ body = struct.pack('!H', code) + encoded_reason
+ return body
+
+
+class StreamOptions(object):
+ """Holds option values to configure Stream objects."""
+
+ def __init__(self):
+ """Constructs StreamOptions."""
+
+ # Filters applied to frames.
+ self.outgoing_frame_filters = []
+ self.incoming_frame_filters = []
+
+ # Filters applied to messages. Control frames are not affected by them.
+ self.outgoing_message_filters = []
+ self.incoming_message_filters = []
+
+ self.encode_text_message_to_utf8 = True
+ self.mask_send = False
+ self.unmask_receive = True
+
+
+class Stream(StreamBase):
+ """A class for parsing/building frames of the WebSocket protocol
+ (RFC 6455).
+ """
+
+ def __init__(self, request, options):
+ """Constructs an instance.
+
+ Args:
+ request: mod_python request.
+ """
+
+ StreamBase.__init__(self, request)
+
+ self._logger = util.get_class_logger(self)
+
+ self._options = options
+
+ self._request.client_terminated = False
+ self._request.server_terminated = False
+
+ # Holds body of received fragments.
+ self._received_fragments = []
+ # Holds the opcode of the first fragment.
+ self._original_opcode = None
+
+ self._writer = FragmentedFrameBuilder(
+ self._options.mask_send, self._options.outgoing_frame_filters,
+ self._options.encode_text_message_to_utf8)
+
+ self._ping_queue = deque()
+
+ def _receive_frame(self):
+ """Receives a frame and return data in the frame as a tuple containing
+ each header field and payload separately.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty
+ string.
+ InvalidFrameException: when the frame contains invalid data.
+ """
+
+ def _receive_bytes(length):
+ return self.receive_bytes(length)
+
+ return parse_frame(receive_bytes=_receive_bytes,
+ logger=self._logger,
+ ws_version=self._request.ws_version,
+ unmask_receive=self._options.unmask_receive)
+
+ def _receive_frame_as_frame_object(self):
+ opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame()
+
+ return Frame(fin=fin, rsv1=rsv1, rsv2=rsv2, rsv3=rsv3,
+ opcode=opcode, payload=unmasked_bytes)
+
+ def receive_filtered_frame(self):
+ """Receives a frame and applies frame filters and message filters.
+        The frame to be received must satisfy the following conditions:
+ - The frame is not fragmented.
+ - The opcode of the frame is TEXT or BINARY.
+
+        DO NOT USE this method except for testing purposes.
+ """
+
+ frame = self._receive_frame_as_frame_object()
+ if not frame.fin:
+ raise InvalidFrameException(
+ 'Segmented frames must not be received via '
+ 'receive_filtered_frame()')
+ if (frame.opcode != common.OPCODE_TEXT and
+ frame.opcode != common.OPCODE_BINARY):
+ raise InvalidFrameException(
+ 'Control frames must not be received via '
+ 'receive_filtered_frame()')
+
+ for frame_filter in self._options.incoming_frame_filters:
+ frame_filter.filter(frame)
+ for message_filter in self._options.incoming_message_filters:
+ frame.payload = message_filter.filter(frame.payload)
+ return frame
+
+ def send_message(self, message, end=True, binary=False):
+ """Send message.
+
+ Args:
+            message: text as a unicode string or binary data as a str.
+            binary: send the message as a binary frame.
+
+ Raises:
+ BadOperationException: when called on a server-terminated
+ connection or called with inconsistent message type or
+ binary parameter.
+ """
+
+ if self._request.server_terminated:
+ raise BadOperationException(
+ 'Requested send_message after sending out a closing handshake')
+
+ if binary and isinstance(message, unicode):
+ raise BadOperationException(
+ 'Message for binary frame must be instance of str')
+
+ for message_filter in self._options.outgoing_message_filters:
+ message = message_filter.filter(message, end, binary)
+
+ try:
+ # Set this to any positive integer to limit maximum size of data in
+ # payload data of each frame.
+ MAX_PAYLOAD_DATA_SIZE = -1
+
+ if MAX_PAYLOAD_DATA_SIZE <= 0:
+ self._write(self._writer.build(message, end, binary))
+ return
+
+ bytes_written = 0
+ while True:
+ end_for_this_frame = end
+ bytes_to_write = len(message) - bytes_written
+ if (MAX_PAYLOAD_DATA_SIZE > 0 and
+ bytes_to_write > MAX_PAYLOAD_DATA_SIZE):
+ end_for_this_frame = False
+ bytes_to_write = MAX_PAYLOAD_DATA_SIZE
+
+ frame = self._writer.build(
+ message[bytes_written:bytes_written + bytes_to_write],
+ end_for_this_frame,
+ binary)
+ self._write(frame)
+
+ bytes_written += bytes_to_write
+
+ # This if must be placed here (the end of while block) so that
+ # at least one frame is sent.
+ if len(message) <= bytes_written:
+ break
+ except ValueError, e:
+ raise BadOperationException(e)
+
+ def _get_message_from_frame(self, frame):
+ """Gets a message from frame. If the message is composed of fragmented
+ frames and the frame is not the last fragmented frame, this method
+ returns None. The whole message will be returned when the last
+ fragmented frame is passed to this method.
+
+ Raises:
+ InvalidFrameException: when the frame doesn't match defragmentation
+ context, or the frame contains invalid data.
+ """
+
+ if frame.opcode == common.OPCODE_CONTINUATION:
+ if not self._received_fragments:
+ if frame.fin:
+ raise InvalidFrameException(
+ 'Received a termination frame but fragmentation '
+ 'not started')
+ else:
+ raise InvalidFrameException(
+ 'Received an intermediate frame but '
+ 'fragmentation not started')
+
+ if frame.fin:
+ # End of fragmentation frame
+ self._received_fragments.append(frame.payload)
+ message = ''.join(self._received_fragments)
+ self._received_fragments = []
+ return message
+ else:
+ # Intermediate frame
+ self._received_fragments.append(frame.payload)
+ return None
+ else:
+ if self._received_fragments:
+ if frame.fin:
+ raise InvalidFrameException(
+ 'Received an unfragmented frame without '
+ 'terminating existing fragmentation')
+ else:
+ raise InvalidFrameException(
+ 'New fragmentation started without terminating '
+ 'existing fragmentation')
+
+ if frame.fin:
+ # Unfragmented frame
+
+ self._original_opcode = frame.opcode
+ return frame.payload
+ else:
+ # Start of fragmentation frame
+
+ if common.is_control_opcode(frame.opcode):
+ raise InvalidFrameException(
+ 'Control frames must not be fragmented')
+
+ self._original_opcode = frame.opcode
+ self._received_fragments.append(frame.payload)
+ return None
+
+ def _process_close_message(self, message):
+ """Processes close message.
+
+ Args:
+ message: close message.
+
+ Raises:
+ InvalidFrameException: when the message is invalid.
+ """
+
+ self._request.client_terminated = True
+
+ # The status code is optional. We can have a status reason only if
+ # we have a status code. The status reason can be an empty string.
+ # So, the allowed cases are:
+ # - no application data: no code, no reason
+ # - 2 octets of application data: code but no reason
+ # - 3 or more octets of application data: both code and reason
+ if len(message) == 0:
+ self._logger.debug('Received close frame (empty body)')
+ self._request.ws_close_code = (
+ common.STATUS_NO_STATUS_RECEIVED)
+ elif len(message) == 1:
+ raise InvalidFrameException(
+ 'If a close frame has a status code, the status code '
+ 'must be 2 octets long')
+ elif len(message) >= 2:
+ self._request.ws_close_code = struct.unpack(
+ '!H', message[0:2])[0]
+ self._request.ws_close_reason = message[2:].decode(
+ 'utf-8', 'replace')
+ self._logger.debug(
+ 'Received close frame (code=%d, reason=%r)',
+ self._request.ws_close_code,
+ self._request.ws_close_reason)
+
+ # As we've received a close frame, no more data is coming over the
+ # socket. We can now safely close the socket without worrying about
+ # an RST being sent.
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Received ack for server-initiated closing handshake')
+ return
+
+ self._logger.debug(
+ 'Received client-initiated closing handshake')
+
+ code = common.STATUS_NORMAL_CLOSURE
+ reason = ''
+ if hasattr(self._request, '_dispatcher'):
+ dispatcher = self._request._dispatcher
+ code, reason = dispatcher.passive_closing_handshake(
+ self._request)
+ if code is None and reason is not None and len(reason) > 0:
+ self._logger.warning(
+ 'Handler specified reason despite code being None')
+ reason = ''
+ if reason is None:
+ reason = ''
+ self._send_closing_handshake(code, reason)
+ self._logger.debug(
+ 'Acknowledged closing handshake initiated by the peer '
+ '(code=%r, reason=%r)', code, reason)
+
+ def _process_ping_message(self, message):
+ """Processes ping message.
+
+ Args:
+ message: ping message.
+ """
+
+ try:
+ handler = self._request.on_ping_handler
+ if handler:
+ handler(self._request, message)
+ return
+ except AttributeError, e:
+ pass
+ self._send_pong(message)
+
+ def _process_pong_message(self, message):
+ """Processes pong message.
+
+ Args:
+ message: pong message.
+ """
+
+ # TODO(tyoshino): Add ping timeout handling.
+
+ inflight_pings = deque()
+
+ while True:
+ try:
+ expected_body = self._ping_queue.popleft()
+ if expected_body == message:
+ # inflight_pings contains pings ignored by the
+ # other peer. Just forget them.
+ self._logger.debug(
+ 'Ping %r is acked (%d pings were ignored)',
+ expected_body, len(inflight_pings))
+ break
+ else:
+ inflight_pings.append(expected_body)
+ except IndexError, e:
+ # The received pong was an unsolicited pong. Keep the
+ # ping queue as is.
+ self._ping_queue = inflight_pings
+ self._logger.debug('Received an unsolicited pong')
+ break
+
+ try:
+ handler = self._request.on_pong_handler
+ if handler:
+ handler(self._request, message)
+ except AttributeError, e:
+ pass
+
+ def receive_message(self):
+ """Receive a WebSocket frame and return its payload as a text in
+ unicode or a binary in str.
+
+ Returns:
+ payload data of the frame
+ - as unicode instance if received text frame
+ - as str instance if received binary frame
+ or None iff received closing handshake.
+ Raises:
+ BadOperationException: when called on a client-terminated
+ connection.
+ ConnectionTerminatedException: when read returns empty
+ string.
+ InvalidFrameException: when the frame contains invalid
+ data.
+ UnsupportedFrameException: when the received frame has
+ flags, opcode we cannot handle. You can ignore this
+ exception and continue receiving the next frame.
+ """
+
+ if self._request.client_terminated:
+ raise BadOperationException(
+ 'Requested receive_message after receiving a closing '
+ 'handshake')
+
+ while True:
+ # mp_conn.read will block if no bytes are available.
+ # The timeout is controlled by the TimeOut directive of Apache.
+
+ frame = self._receive_frame_as_frame_object()
+
+ # Check the constraint on the payload size for control frames
+ # before an extension processes the frame.
+ # See also http://tools.ietf.org/html/rfc6455#section-5.5
+ if (common.is_control_opcode(frame.opcode) and
+ len(frame.payload) > 125):
+ raise InvalidFrameException(
+ 'Payload data size of control frames must be 125 bytes or '
+ 'less')
+
+ for frame_filter in self._options.incoming_frame_filters:
+ frame_filter.filter(frame)
+
+ if frame.rsv1 or frame.rsv2 or frame.rsv3:
+ raise UnsupportedFrameException(
+ 'Unsupported flag is set (rsv = %d%d%d)' %
+ (frame.rsv1, frame.rsv2, frame.rsv3))
+
+ message = self._get_message_from_frame(frame)
+ if message is None:
+ continue
+
+ for message_filter in self._options.incoming_message_filters:
+ message = message_filter.filter(message)
+
+ if self._original_opcode == common.OPCODE_TEXT:
+ # RFC 6455 requires that a text message be valid UTF-8;
+ # raise here so that the caller can fail the connection
+ # on invalid data.
+ try:
+ return message.decode('utf-8')
+ except UnicodeDecodeError, e:
+ raise InvalidUTF8Exception(e)
+ elif self._original_opcode == common.OPCODE_BINARY:
+ return message
+ elif self._original_opcode == common.OPCODE_CLOSE:
+ self._process_close_message(message)
+ return None
+ elif self._original_opcode == common.OPCODE_PING:
+ self._process_ping_message(message)
+ elif self._original_opcode == common.OPCODE_PONG:
+ self._process_pong_message(message)
+ else:
+ raise UnsupportedFrameException(
+ 'Opcode %d is not supported' % self._original_opcode)
+
+ def _send_closing_handshake(self, code, reason):
+ body = create_closing_handshake_body(code, reason)
+ frame = create_close_frame(
+ body, mask=self._options.mask_send,
+ frame_filters=self._options.outgoing_frame_filters)
+
+ self._request.server_terminated = True
+
+ self._write(frame)
+
+ def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason='',
+ wait_response=True):
+ """Closes a WebSocket connection.
+
+ Args:
+ code: Status code for the close frame. If code is None, a close
+ frame with an empty body will be sent.
+ reason: string representing the close reason.
+ wait_response: True when the caller wants to wait for the
+ response.
+ Raises:
+ BadOperationException: when a reason is specified together with
+ code None, or when reason is neither a str nor a unicode
+ instance.
+ """
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Requested close_connection but server is already terminated')
+ return
+
+ if code is None:
+ if reason is not None and len(reason) > 0:
+ raise BadOperationException(
+ 'close reason must not be specified if code is None')
+ reason = ''
+ else:
+ if not isinstance(reason, str) and not isinstance(reason, unicode):
+ raise BadOperationException(
+ 'close reason must be an instance of str or unicode')
+
+ self._send_closing_handshake(code, reason)
+ self._logger.debug(
+ 'Initiated closing handshake (code=%r, reason=%r)',
+ code, reason)
+
+ if (code == common.STATUS_GOING_AWAY or
+ code == common.STATUS_PROTOCOL_ERROR) or not wait_response:
+ # It doesn't make sense to wait for a close frame if the reason is
+ # a protocol error or the server going away. For some other
+ # reasons it might not make sense to wait for a close frame
+ # either, but that's not clear yet.
+ return
+
+ # TODO(ukai): 2. wait until the /client terminated/ flag has been set,
+ # or until a server-defined timeout expires.
+ #
+ # For now, we expect receiving closing handshake right after sending
+ # out closing handshake.
+ message = self.receive_message()
+ if message is not None:
+ raise ConnectionTerminatedException(
+ 'Didn\'t receive valid ack for closing handshake')
+ # TODO: 3. close the WebSocket connection.
+ # note: mod_python Connection (mp_conn) doesn't have close method.
+
+ def send_ping(self, body=''):
+ frame = create_ping_frame(
+ body,
+ self._options.mask_send,
+ self._options.outgoing_frame_filters)
+ self._write(frame)
+
+ self._ping_queue.append(body)
+
+ def _send_pong(self, body):
+ frame = create_pong_frame(
+ body,
+ self._options.mask_send,
+ self._options.outgoing_frame_filters)
+ self._write(frame)
+
+ def get_last_received_opcode(self):
+ """Returns the opcode of the WebSocket message which the last received
+ frame belongs to. The return value is valid iff immediately after
+ receive_message call.
+ """
+
+ return self._original_opcode
+
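+
+# Illustrative sketch, not part of the original code: the close frame body
+# handled by _process_close_message above is an optional big-endian 16-bit
+# status code followed by a UTF-8 encoded reason.
+def _example_close_frame_body():
+ body = struct.pack('!H', common.STATUS_NORMAL_CLOSURE) + 'bye'
+ assert struct.unpack('!H', body[0:2])[0] == 1000
+ assert body[2:].decode('utf-8', 'replace') == u'bye'
+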
+
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/common.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/common.py
new file mode 100644
index 0000000..2fc2ead
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/common.py
@@ -0,0 +1,303 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file must not depend on any module specific to the WebSocket protocol.
+"""
+
+
+from mod_pywebsocket import http_header_util
+
+
+# Additional log level definitions.
+LOGLEVEL_FINE = 9
+
+# Constants indicating WebSocket protocol version.
+VERSION_HIXIE75 = -1
+VERSION_HYBI00 = 0
+VERSION_HYBI01 = 1
+VERSION_HYBI02 = 2
+VERSION_HYBI03 = 2
+VERSION_HYBI04 = 4
+VERSION_HYBI05 = 5
+VERSION_HYBI06 = 6
+VERSION_HYBI07 = 7
+VERSION_HYBI08 = 8
+VERSION_HYBI09 = 8
+VERSION_HYBI10 = 8
+VERSION_HYBI11 = 8
+VERSION_HYBI12 = 8
+VERSION_HYBI13 = 13
+VERSION_HYBI14 = 13
+VERSION_HYBI15 = 13
+VERSION_HYBI16 = 13
+VERSION_HYBI17 = 13
+
+# Constant indicating the latest WebSocket protocol version.
+VERSION_HYBI_LATEST = VERSION_HYBI13
+
+# Port numbers
+DEFAULT_WEB_SOCKET_PORT = 80
+DEFAULT_WEB_SOCKET_SECURE_PORT = 443
+
+# Schemes
+WEB_SOCKET_SCHEME = 'ws'
+WEB_SOCKET_SECURE_SCHEME = 'wss'
+
+# Frame opcodes defined in the spec.
+OPCODE_CONTINUATION = 0x0
+OPCODE_TEXT = 0x1
+OPCODE_BINARY = 0x2
+OPCODE_CLOSE = 0x8
+OPCODE_PING = 0x9
+OPCODE_PONG = 0xa
+
+# UUID used by the opening handshake and frame masking in HyBi 04 and later.
+WEBSOCKET_ACCEPT_UUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
+
+# Opening handshake header names and expected values.
+UPGRADE_HEADER = 'Upgrade'
+WEBSOCKET_UPGRADE_TYPE = 'websocket'
+WEBSOCKET_UPGRADE_TYPE_HIXIE75 = 'WebSocket'
+CONNECTION_HEADER = 'Connection'
+UPGRADE_CONNECTION_TYPE = 'Upgrade'
+HOST_HEADER = 'Host'
+ORIGIN_HEADER = 'Origin'
+SEC_WEBSOCKET_ORIGIN_HEADER = 'Sec-WebSocket-Origin'
+SEC_WEBSOCKET_KEY_HEADER = 'Sec-WebSocket-Key'
+SEC_WEBSOCKET_ACCEPT_HEADER = 'Sec-WebSocket-Accept'
+SEC_WEBSOCKET_VERSION_HEADER = 'Sec-WebSocket-Version'
+SEC_WEBSOCKET_PROTOCOL_HEADER = 'Sec-WebSocket-Protocol'
+SEC_WEBSOCKET_EXTENSIONS_HEADER = 'Sec-WebSocket-Extensions'
+SEC_WEBSOCKET_DRAFT_HEADER = 'Sec-WebSocket-Draft'
+SEC_WEBSOCKET_KEY1_HEADER = 'Sec-WebSocket-Key1'
+SEC_WEBSOCKET_KEY2_HEADER = 'Sec-WebSocket-Key2'
+SEC_WEBSOCKET_LOCATION_HEADER = 'Sec-WebSocket-Location'
+
+# Extensions
+DEFLATE_FRAME_EXTENSION = 'deflate-frame'
+PERMESSAGE_COMPRESSION_EXTENSION = 'permessage-compress'
+PERMESSAGE_DEFLATE_EXTENSION = 'permessage-deflate'
+X_WEBKIT_DEFLATE_FRAME_EXTENSION = 'x-webkit-deflate-frame'
+X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION = 'x-webkit-permessage-compress'
+MUX_EXTENSION = 'mux_DO_NOT_USE'
+
+# Status codes
+# The codes STATUS_NO_STATUS_RECEIVED, STATUS_ABNORMAL_CLOSURE, and
+# STATUS_TLS_HANDSHAKE are pseudo codes indicating specific error cases.
+# They must not be used as codes in actual closing frames.
+# Application level errors must use codes in the range
+# STATUS_USER_REGISTERED_BASE to STATUS_USER_PRIVATE_MAX. The codes in the
+# range STATUS_USER_REGISTERED_BASE to STATUS_USER_REGISTERED_MAX are managed
+# by IANA. Usually, applications should define user protocol level errors in
+# the range STATUS_USER_PRIVATE_BASE to STATUS_USER_PRIVATE_MAX.
+STATUS_NORMAL_CLOSURE = 1000
+STATUS_GOING_AWAY = 1001
+STATUS_PROTOCOL_ERROR = 1002
+STATUS_UNSUPPORTED_DATA = 1003
+STATUS_NO_STATUS_RECEIVED = 1005
+STATUS_ABNORMAL_CLOSURE = 1006
+STATUS_INVALID_FRAME_PAYLOAD_DATA = 1007
+STATUS_POLICY_VIOLATION = 1008
+STATUS_MESSAGE_TOO_BIG = 1009
+STATUS_MANDATORY_EXTENSION = 1010
+STATUS_INTERNAL_ENDPOINT_ERROR = 1011
+STATUS_TLS_HANDSHAKE = 1015
+STATUS_USER_REGISTERED_BASE = 3000
+STATUS_USER_REGISTERED_MAX = 3999
+STATUS_USER_PRIVATE_BASE = 4000
+STATUS_USER_PRIVATE_MAX = 4999
+# The following definitions are aliases kept for compatibility. Applications
+# must not use these obsolete definitions anymore.
+STATUS_NORMAL = STATUS_NORMAL_CLOSURE
+STATUS_UNSUPPORTED = STATUS_UNSUPPORTED_DATA
+STATUS_CODE_NOT_AVAILABLE = STATUS_NO_STATUS_RECEIVED
+STATUS_ABNORMAL_CLOSE = STATUS_ABNORMAL_CLOSURE
+STATUS_INVALID_FRAME_PAYLOAD = STATUS_INVALID_FRAME_PAYLOAD_DATA
+STATUS_MANDATORY_EXT = STATUS_MANDATORY_EXTENSION
+
+# HTTP status codes
+HTTP_STATUS_BAD_REQUEST = 400
+HTTP_STATUS_FORBIDDEN = 403
+HTTP_STATUS_NOT_FOUND = 404
+
+
+def is_control_opcode(opcode):
+ return (opcode >> 3) == 1
+
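+
+# Illustrative sketch, not part of the original module: control opcodes
+# occupy the range 0x8-0xF, so testing the most significant bit of the
+# 4-bit opcode, as is_control_opcode does above, is sufficient.
+def _example_is_control_opcode():
+ assert is_control_opcode(OPCODE_CLOSE)
+ assert is_control_opcode(OPCODE_PONG)
+ assert not is_control_opcode(OPCODE_TEXT)
+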
+
+class ExtensionParameter(object):
+ """Holds information about an extension which is exchanged on extension
+ negotiation in opening handshake.
+ """
+
+ def __init__(self, name):
+ self._name = name
+ # TODO(tyoshino): Change the data structure to more efficient one such
+ # as dict when the spec changes to say like
+ # - Parameter names must be unique
+ # - The order of parameters is not significant
+ self._parameters = []
+
+ def name(self):
+ return self._name
+
+ def add_parameter(self, name, value):
+ self._parameters.append((name, value))
+
+ def get_parameters(self):
+ return self._parameters
+
+ def get_parameter_names(self):
+ return [name for name, unused_value in self._parameters]
+
+ def has_parameter(self, name):
+ for param_name, param_value in self._parameters:
+ if param_name == name:
+ return True
+ return False
+
+ def get_parameter_value(self, name):
+ for param_name, param_value in self._parameters:
+ if param_name == name:
+ return param_value
+
+
+class ExtensionParsingException(Exception):
+ def __init__(self, name):
+ super(ExtensionParsingException, self).__init__(name)
+
+
+def _parse_extension_param(state, definition):
+ param_name = http_header_util.consume_token(state)
+
+ if param_name is None:
+ raise ExtensionParsingException('No valid parameter name found')
+
+ http_header_util.consume_lwses(state)
+
+ if not http_header_util.consume_string(state, '='):
+ definition.add_parameter(param_name, None)
+ return
+
+ http_header_util.consume_lwses(state)
+
+ # TODO(tyoshino): Add code to validate that parsed param_value is token
+ param_value = http_header_util.consume_token_or_quoted_string(state)
+ if param_value is None:
+ raise ExtensionParsingException(
+ 'No valid parameter value found on the right-hand side of '
+ 'parameter %r' % param_name)
+
+ definition.add_parameter(param_name, param_value)
+
+
+def _parse_extension(state):
+ extension_token = http_header_util.consume_token(state)
+ if extension_token is None:
+ return None
+
+ extension = ExtensionParameter(extension_token)
+
+ while True:
+ http_header_util.consume_lwses(state)
+
+ if not http_header_util.consume_string(state, ';'):
+ break
+
+ http_header_util.consume_lwses(state)
+
+ try:
+ _parse_extension_param(state, extension)
+ except ExtensionParsingException, e:
+ raise ExtensionParsingException(
+ 'Failed to parse parameter for %r (%r)' %
+ (extension_token, e))
+
+ return extension
+
+
+def parse_extensions(data):
+ """Parses Sec-WebSocket-Extensions header value returns a list of
+ ExtensionParameter objects.
+
+ Leading LWSes must be trimmed.
+ """
+
+ state = http_header_util.ParsingState(data)
+
+ extension_list = []
+ while True:
+ extension = _parse_extension(state)
+ if extension is not None:
+ extension_list.append(extension)
+
+ http_header_util.consume_lwses(state)
+
+ if http_header_util.peek(state) is None:
+ break
+
+ if not http_header_util.consume_string(state, ','):
+ raise ExtensionParsingException(
+ 'Failed to parse Sec-WebSocket-Extensions header: '
+ 'Expected a comma but found %r' %
+ http_header_util.peek(state))
+
+ http_header_util.consume_lwses(state)
+
+ if len(extension_list) == 0:
+ raise ExtensionParsingException(
+ 'No valid extension entry found')
+
+ return extension_list
+
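+
+# Illustrative sketch, not part of the original module: parsing a made-up
+# Sec-WebSocket-Extensions header value with parse_extensions.
+def _example_parse_extensions():
+ extensions = parse_extensions(
+ 'permessage-deflate; client_max_window_bits, deflate-frame')
+ assert [e.name() for e in extensions] == [
+ 'permessage-deflate', 'deflate-frame']
+ assert extensions[0].has_parameter('client_max_window_bits')
+ assert extensions[0].get_parameter_value('client_max_window_bits') is None
+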
+
+def format_extension(extension):
+ """Formats an ExtensionParameter object."""
+
+ formatted_params = [extension.name()]
+ for param_name, param_value in extension.get_parameters():
+ if param_value is None:
+ formatted_params.append(param_name)
+ else:
+ quoted_value = http_header_util.quote_if_necessary(param_value)
+ formatted_params.append('%s=%s' % (param_name, quoted_value))
+ return '; '.join(formatted_params)
+
+
+def format_extensions(extension_list):
+ """Formats a list of ExtensionParameter objects."""
+
+ formatted_extension_list = []
+ for extension in extension_list:
+ formatted_extension_list.append(format_extension(extension))
+ return ', '.join(formatted_extension_list)
+
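+
+# Illustrative sketch, not part of the original module: formatting is the
+# inverse of parsing; a parameter whose value is None is emitted bare.
+def _example_format_extension():
+ extension = ExtensionParameter('permessage-deflate')
+ extension.add_parameter('server_max_window_bits', '10')
+ extension.add_parameter('server_no_context_takeover', None)
+ assert format_extension(extension) == (
+ 'permessage-deflate; server_max_window_bits=10; '
+ 'server_no_context_takeover')
+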
+
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/dispatch.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/dispatch.py
new file mode 100644
index 0000000..96c91e0
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/dispatch.py
@@ -0,0 +1,393 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Dispatch WebSocket request.
+"""
+
+
+import logging
+import os
+import re
+
+from mod_pywebsocket import common
+from mod_pywebsocket import handshake
+from mod_pywebsocket import msgutil
+from mod_pywebsocket import mux
+from mod_pywebsocket import stream
+from mod_pywebsocket import util
+
+
+_SOURCE_PATH_PATTERN = re.compile(r'(?i)_wsh\.py$')
+_SOURCE_SUFFIX = '_wsh.py'
+_DO_EXTRA_HANDSHAKE_HANDLER_NAME = 'web_socket_do_extra_handshake'
+_TRANSFER_DATA_HANDLER_NAME = 'web_socket_transfer_data'
+_PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME = (
+ 'web_socket_passive_closing_handshake')
+
+
+class DispatchException(Exception):
+ """Exception in dispatching WebSocket request."""
+
+ def __init__(self, name, status=common.HTTP_STATUS_NOT_FOUND):
+ super(DispatchException, self).__init__(name)
+ self.status = status
+
+
+def _default_passive_closing_handshake_handler(request):
+ """Default web_socket_passive_closing_handshake handler."""
+
+ return common.STATUS_NORMAL_CLOSURE, ''
+
+
+def _normalize_path(path):
+ """Normalize path.
+
+ Args:
+ path: the path to normalize.
+
+ The path is converted to an absolute path.
+ The input path can use either '\\' or '/' as the separator.
+ The normalized path always uses '/' regardless of the platform.
+ """
+
+ path = path.replace('\\', os.path.sep)
+ path = os.path.realpath(path)
+ path = path.replace('\\', '/')
+ return path
+
+
+def _create_path_to_resource_converter(base_dir):
+ """Returns a function that converts the path of a WebSocket handler source
+ file to a resource string by removing the path to the base directory from
+ its head, removing _SOURCE_SUFFIX from its tail, and replacing path
+ separators in it with '/'.
+
+ Args:
+ base_dir: the path to the base directory.
+ """
+
+ base_dir = _normalize_path(base_dir)
+
+ base_len = len(base_dir)
+ suffix_len = len(_SOURCE_SUFFIX)
+
+ def converter(path):
+ if not path.endswith(_SOURCE_SUFFIX):
+ return None
+ # _normalize_path must not be used here because resolving symlinks
+ # would break the path check below.
+ path = path.replace('\\', '/')
+ if not path.startswith(base_dir):
+ return None
+ return path[base_len:-suffix_len]
+
+ return converter
+
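+
+# Illustrative sketch with hypothetical paths (assuming no symlinks are
+# involved): converting handler file paths to resource strings.
+def _example_path_to_resource():
+ convert = _create_path_to_resource_converter('/var/www/handlers')
+ assert convert('/var/www/handlers/chat/echo_wsh.py') == '/chat/echo'
+ assert convert('/var/www/handlers/readme.txt') is None
+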
+
+def _enumerate_handler_file_paths(directory):
+ """Returns a generator that enumerates WebSocket Handler source file names
+ in the given directory.
+ """
+
+ for root, unused_dirs, files in os.walk(directory):
+ for base in files:
+ path = os.path.join(root, base)
+ if _SOURCE_PATH_PATTERN.search(path):
+ yield path
+
+
+class _HandlerSuite(object):
+ """A handler suite holder class."""
+
+ def __init__(self, do_extra_handshake, transfer_data,
+ passive_closing_handshake):
+ self.do_extra_handshake = do_extra_handshake
+ self.transfer_data = transfer_data
+ self.passive_closing_handshake = passive_closing_handshake
+
+
+def _source_handler_file(handler_definition):
+ """Source a handler definition string.
+
+ Args:
+ handler_definition: a string containing Python statements that define
+ handler functions.
+ """
+
+ global_dic = {}
+ try:
+ exec handler_definition in global_dic
+ except Exception:
+ raise DispatchException('Error in sourcing handler:' +
+ util.get_stack_trace())
+ passive_closing_handshake_handler = None
+ try:
+ passive_closing_handshake_handler = _extract_handler(
+ global_dic, _PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME)
+ except Exception:
+ passive_closing_handshake_handler = (
+ _default_passive_closing_handshake_handler)
+ return _HandlerSuite(
+ _extract_handler(global_dic, _DO_EXTRA_HANDSHAKE_HANDLER_NAME),
+ _extract_handler(global_dic, _TRANSFER_DATA_HANDLER_NAME),
+ passive_closing_handshake_handler)
+
+
+def _extract_handler(dic, name):
+ """Extracts a callable with the specified name from the given dictionary
+ dic.
+ """
+
+ if name not in dic:
+ raise DispatchException('%s is not defined.' % name)
+ handler = dic[name]
+ if not callable(handler):
+ raise DispatchException('%s is not callable.' % name)
+ return handler
+
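+
+# Illustrative sketch, not part of the original module: the minimal module
+# shape accepted by _source_handler_file. Saved under the root directory as
+# e.g. echo_wsh.py (a hypothetical name), it would be registered for the
+# resource '/echo'.
+_EXAMPLE_HANDLER_DEFINITION = """
+def web_socket_do_extra_handshake(request):
+ pass # Accept any handshake; raise to reject it.
+
+
+def web_socket_transfer_data(request):
+ # Echo one message back; receive_message returns None when the client
+ # starts the closing handshake.
+ message = request.ws_stream.receive_message()
+ if message is not None:
+ request.ws_stream.send_message(
+ message, binary=isinstance(message, str))
+"""
+
+
+def _example_source_handler_file():
+ handler_suite = _source_handler_file(_EXAMPLE_HANDLER_DEFINITION)
+ assert callable(handler_suite.transfer_data)
+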
+
+class Dispatcher(object):
+ """Dispatches WebSocket requests.
+
+ This class maintains a map from resource name to handlers.
+ """
+
+ def __init__(
+ self, root_dir, scan_dir=None,
+ allow_handlers_outside_root_dir=True):
+ """Construct an instance.
+
+ Args:
+ root_dir: The directory where handler definition files are
+ placed.
+ scan_dir: The directory where handler definition files are
+ searched. scan_dir must be a directory under root_dir,
+ including root_dir itself. If scan_dir is None,
+ root_dir is used as scan_dir. scan_dir can be useful
+ in saving scan time when root_dir contains many
+ subdirectories.
+ allow_handlers_outside_root_dir: When True, handler files are
+ sourced even if their canonical path is not under root_dir.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._handler_suite_map = {}
+ self._source_warnings = []
+ if scan_dir is None:
+ scan_dir = root_dir
+ if not os.path.realpath(scan_dir).startswith(
+ os.path.realpath(root_dir)):
+ raise DispatchException('scan_dir:%s must be a directory under '
+ 'root_dir:%s.' % (scan_dir, root_dir))
+ self._source_handler_files_in_dir(
+ root_dir, scan_dir, allow_handlers_outside_root_dir)
+
+ def add_resource_path_alias(self,
+ alias_resource_path, existing_resource_path):
+ """Add resource path alias.
+
+ Once added, requests to alias_resource_path will be handled by the
+ handler registered for existing_resource_path.
+
+ Args:
+ alias_resource_path: alias resource path
+ existing_resource_path: existing resource path
+ """
+ try:
+ handler_suite = self._handler_suite_map[existing_resource_path]
+ self._handler_suite_map[alias_resource_path] = handler_suite
+ except KeyError:
+ raise DispatchException('No handler for: %r' %
+ existing_resource_path)
+
+ def source_warnings(self):
+ """Return warnings in sourcing handlers."""
+
+ return self._source_warnings
+
+ def do_extra_handshake(self, request):
+ """Do extra checking in WebSocket handshake.
+
+ Select a handler based on request.ws_resource and call its
+ web_socket_do_extra_handshake function.
+
+ Args:
+ request: mod_python request.
+
+ Raises:
+ DispatchException: when no handler is found for the request
+ AbortedByUserException: when the user handler aborts the connection
+ HandshakeException: when the opening handshake fails
+ """
+
+ handler_suite = self.get_handler_suite(request.ws_resource)
+ if handler_suite is None:
+ raise DispatchException('No handler for: %r' % request.ws_resource)
+ do_extra_handshake_ = handler_suite.do_extra_handshake
+ try:
+ do_extra_handshake_(request)
+ except handshake.AbortedByUserException, e:
+ # Re-raise to tell the caller of this function to finish this
+ # connection without sending any error.
+ self._logger.debug('%s', util.get_stack_trace())
+ raise
+ except Exception, e:
+ util.prepend_message_to_exception(
+ '%s raised exception for %s: ' % (
+ _DO_EXTRA_HANDSHAKE_HANDLER_NAME,
+ request.ws_resource),
+ e)
+ raise handshake.HandshakeException(e, common.HTTP_STATUS_FORBIDDEN)
+
+ def transfer_data(self, request):
+ """Let a handler transfer_data with a WebSocket client.
+
+ Select a handler based on request.ws_resource and call its
+ web_socket_transfer_data function.
+
+ Args:
+ request: mod_python request.
+
+ Raises:
+ DispatchException: when no handler is found for the request
+ AbortedByUserException: when the user handler aborts the connection
+ """
+
+ # TODO(tyoshino): Terminate underlying TCP connection if possible.
+ try:
+ if mux.use_mux(request):
+ mux.start(request, self)
+ else:
+ handler_suite = self.get_handler_suite(request.ws_resource)
+ if handler_suite is None:
+ raise DispatchException('No handler for: %r' %
+ request.ws_resource)
+ transfer_data_ = handler_suite.transfer_data
+ transfer_data_(request)
+
+ if not request.server_terminated:
+ request.ws_stream.close_connection()
+ # Catch non-critical exceptions the handler didn't handle.
+ except handshake.AbortedByUserException, e:
+ self._logger.debug('%s', util.get_stack_trace())
+ raise
+ except msgutil.BadOperationException, e:
+ self._logger.debug('%s', e)
+ request.ws_stream.close_connection(
+ common.STATUS_INTERNAL_ENDPOINT_ERROR)
+ except msgutil.InvalidFrameException, e:
+ # InvalidFrameException is a subclass of
+ # ConnectionTerminatedException, so the more specific class
+ # must be caught first.
+ self._logger.debug('%s', e)
+ request.ws_stream.close_connection(common.STATUS_PROTOCOL_ERROR)
+ except msgutil.UnsupportedFrameException, e:
+ self._logger.debug('%s', e)
+ request.ws_stream.close_connection(common.STATUS_UNSUPPORTED_DATA)
+ except stream.InvalidUTF8Exception, e:
+ self._logger.debug('%s', e)
+ request.ws_stream.close_connection(
+ common.STATUS_INVALID_FRAME_PAYLOAD_DATA)
+ except msgutil.ConnectionTerminatedException, e:
+ self._logger.debug('%s', e)
+ except Exception, e:
+ # Any other exceptions are forwarded to the caller of this
+ # function.
+ util.prepend_message_to_exception(
+ '%s raised exception for %s: ' % (
+ _TRANSFER_DATA_HANDLER_NAME, request.ws_resource),
+ e)
+ raise
+
+ def passive_closing_handshake(self, request):
+ """Prepare code and reason for responding client initiated closing
+ handshake.
+ """
+
+ handler_suite = self.get_handler_suite(request.ws_resource)
+ if handler_suite is None:
+ return _default_passive_closing_handshake_handler(request)
+ return handler_suite.passive_closing_handshake(request)
+
+ def get_handler_suite(self, resource):
+ """Retrieves two handlers (one for extra handshake processing, and one
+ for data transfer) for the given request as a HandlerSuite object.
+ """
+
+ fragment = None
+ if '#' in resource:
+ resource, fragment = resource.split('#', 1)
+ if '?' in resource:
+ resource = resource.split('?', 1)[0]
+ handler_suite = self._handler_suite_map.get(resource)
+ if handler_suite and fragment:
+ raise DispatchException('Fragment identifiers MUST NOT be used on '
+ 'WebSocket URIs',
+ common.HTTP_STATUS_BAD_REQUEST)
+ return handler_suite
+
+ def _source_handler_files_in_dir(
+ self, root_dir, scan_dir, allow_handlers_outside_root_dir):
+ """Source all the handler source files in the scan_dir directory.
+
+ The resource path is determined relative to root_dir.
+ """
+
+ # We build a map from resource to handler code assuming that there's
+ # only one path from root_dir to scan_dir and that it can be obtained
+ # by comparing their realpaths.
+
+ # Here we cannot use abspath. See
+ # https://bugs.webkit.org/show_bug.cgi?id=31603
+
+ convert = _create_path_to_resource_converter(root_dir)
+ scan_realpath = os.path.realpath(scan_dir)
+ root_realpath = os.path.realpath(root_dir)
+ for path in _enumerate_handler_file_paths(scan_realpath):
+ if (not allow_handlers_outside_root_dir and
+ (not os.path.realpath(path).startswith(root_realpath))):
+ self._logger.debug(
+ 'Canonical path of %s is not under root directory' %
+ path)
+ continue
+ try:
+ handler_suite = _source_handler_file(open(path).read())
+ except DispatchException, e:
+ self._source_warnings.append('%s: %s' % (path, e))
+ continue
+ resource = convert(path)
+ if resource is None:
+ self._logger.debug(
+ 'Path to resource conversion on %s failed' % path)
+ else:
+ self._handler_suite_map[resource] = handler_suite
+
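+
+# Illustrative usage sketch with a hypothetical handler directory.
+def _example_dispatcher_usage():
+ dispatcher = Dispatcher('/var/www/handlers')
+ for warning in dispatcher.source_warnings():
+ logging.warning('mod_pywebsocket: %s', warning)
+ # Requests for /echo2 are then served by the handler registered for
+ # /echo.
+ dispatcher.add_resource_path_alias('/echo2', '/echo')
+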
+
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/extensions.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/extensions.py
new file mode 100644
index 0000000..49a9fdc
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/extensions.py
@@ -0,0 +1,885 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket import util
+from mod_pywebsocket.http_header_util import quote_if_necessary
+
+
+# Map from extension name to the server side extension processor class.
+_available_processors = {}
+_compression_extension_names = []
+
+
+class ExtensionProcessorInterface(object):
+
+ def __init__(self, request):
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+ self._active = True
+
+ def request(self):
+ return self._request
+
+ def name(self):
+ return None
+
+ def check_consistency_with_other_processors(self, processors):
+ pass
+
+ def set_active(self, active):
+ self._active = active
+
+ def is_active(self):
+ return self._active
+
+ def _get_extension_response_internal(self):
+ return None
+
+ def get_extension_response(self):
+ if not self._active:
+ self._logger.debug('Extension %s is deactivated', self.name())
+ return None
+
+ response = self._get_extension_response_internal()
+ if response is None:
+ self._active = False
+ return response
+
+ def _setup_stream_options_internal(self, stream_options):
+ pass
+
+ def setup_stream_options(self, stream_options):
+ if self._active:
+ self._setup_stream_options_internal(stream_options)
+
+
+def _log_outgoing_compression_ratio(
+ logger, original_bytes, filtered_bytes, average_ratio):
+ # Print inf when ratio is not available.
+ ratio = float('inf')
+ if original_bytes != 0:
+ ratio = float(filtered_bytes) / original_bytes
+
+ logger.debug('Outgoing compression ratio: %f (average: %f)' %
+ (ratio, average_ratio))
+
+
+def _log_incoming_compression_ratio(
+ logger, received_bytes, filtered_bytes, average_ratio):
+ # Print inf when ratio is not available.
+ ratio = float('inf')
+ if filtered_bytes != 0:
+ ratio = float(received_bytes) / filtered_bytes
+
+ logger.debug('Incoming compression ratio: %f (average: %f)' %
+ (ratio, average_ratio))
+
+
+def _parse_window_bits(bits):
+ """Return parsed integer value iff the given string conforms to the
+ grammar of the window bits extension parameters.
+ """
+
+ if bits is None:
+ raise ValueError('Value is required')
+
+ # For non-integer values such as "10.0", ValueError will be raised.
+ int_bits = int(bits)
+
+ # The first condition rejects the leading-zero case, e.g. "08".
+ if bits != str(int_bits) or int_bits < 8 or int_bits > 15:
+ raise ValueError('Invalid value: %r' % bits)
+
+ return int_bits
+
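+
+# Illustrative sketch, not part of the original module: only the canonical
+# decimal forms of 8-15 are accepted by _parse_window_bits.
+def _example_parse_window_bits():
+ assert _parse_window_bits('8') == 8
+ assert _parse_window_bits('15') == 15
+ # None, '08' (leading zero), '7', '16' and '10.0' all raise ValueError.
+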
+
+class _AverageRatioCalculator(object):
+ """Stores total bytes of original and result data, and calculates average
+ result / original ratio.
+ """
+
+ def __init__(self):
+ self._total_original_bytes = 0
+ self._total_result_bytes = 0
+
+ def add_original_bytes(self, value):
+ self._total_original_bytes += value
+
+ def add_result_bytes(self, value):
+ self._total_result_bytes += value
+
+ def get_average_ratio(self):
+ if self._total_original_bytes != 0:
+ return (float(self._total_result_bytes) /
+ self._total_original_bytes)
+ else:
+ return float('inf')
+
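+
+# Illustrative sketch, not part of the original module: after 1000 original
+# bytes have been compressed into 300 result bytes, the average ratio is
+# 300 / 1000 = 0.3.
+def _example_average_ratio():
+ calculator = _AverageRatioCalculator()
+ calculator.add_original_bytes(1000)
+ calculator.add_result_bytes(300)
+ assert calculator.get_average_ratio() == 0.3
+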
+
+class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
+ """deflate-frame extension processor.
+
+ Specification:
+ http://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate
+ """
+
+ _WINDOW_BITS_PARAM = 'max_window_bits'
+ _NO_CONTEXT_TAKEOVER_PARAM = 'no_context_takeover'
+
+ def __init__(self, request):
+ ExtensionProcessorInterface.__init__(self, request)
+ self._logger = util.get_class_logger(self)
+
+ self._response_window_bits = None
+ self._response_no_context_takeover = False
+ self._bfinal = False
+
+ # Calculates
+ # (Total outgoing bytes supplied to this filter) /
+ # (Total bytes sent to the network after applying this filter)
+ self._outgoing_average_ratio_calculator = _AverageRatioCalculator()
+
+ # Calculates
+ # (Total bytes received from the network) /
+ # (Total incoming bytes obtained after applying this filter)
+ self._incoming_average_ratio_calculator = _AverageRatioCalculator()
+
+ def name(self):
+ return common.DEFLATE_FRAME_EXTENSION
+
+ def _get_extension_response_internal(self):
+ # Any unknown parameter is simply ignored.
+
+ window_bits = None
+ if self._request.has_parameter(self._WINDOW_BITS_PARAM):
+ window_bits = self._request.get_parameter_value(
+ self._WINDOW_BITS_PARAM)
+ try:
+ window_bits = _parse_window_bits(window_bits)
+ except ValueError, e:
+ return None
+
+ no_context_takeover = self._request.has_parameter(
+ self._NO_CONTEXT_TAKEOVER_PARAM)
+ if (no_context_takeover and
+ self._request.get_parameter_value(
+ self._NO_CONTEXT_TAKEOVER_PARAM) is not None):
+ return None
+
+ self._rfc1979_deflater = util._RFC1979Deflater(
+ window_bits, no_context_takeover)
+
+ self._rfc1979_inflater = util._RFC1979Inflater()
+
+ self._compress_outgoing = True
+
+ response = common.ExtensionParameter(self._request.name())
+
+ if self._response_window_bits is not None:
+ response.add_parameter(
+ self._WINDOW_BITS_PARAM, str(self._response_window_bits))
+ if self._response_no_context_takeover:
+ response.add_parameter(
+ self._NO_CONTEXT_TAKEOVER_PARAM, None)
+
+ self._logger.debug(
+ 'Enable %s extension ('
+ 'request: window_bits=%s; no_context_takeover=%r, '
+ 'response: window_bits=%s; no_context_takeover=%r)' %
+ (self._request.name(),
+ window_bits,
+ no_context_takeover,
+ self._response_window_bits,
+ self._response_no_context_takeover))
+
+ return response
+
+ def _setup_stream_options_internal(self, stream_options):
+
+ class _OutgoingFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+
+ def filter(self, frame):
+ self._parent._outgoing_filter(frame)
+
+ class _IncomingFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+
+ def filter(self, frame):
+ self._parent._incoming_filter(frame)
+
+ stream_options.outgoing_frame_filters.append(
+ _OutgoingFilter(self))
+ stream_options.incoming_frame_filters.insert(
+ 0, _IncomingFilter(self))
+
+ def set_response_window_bits(self, value):
+ self._response_window_bits = value
+
+ def set_response_no_context_takeover(self, value):
+ self._response_no_context_takeover = value
+
+ def set_bfinal(self, value):
+ self._bfinal = value
+
+ def enable_outgoing_compression(self):
+ self._compress_outgoing = True
+
+ def disable_outgoing_compression(self):
+ self._compress_outgoing = False
+
+ def _outgoing_filter(self, frame):
+ """Transform outgoing frames. This method is called only by
+ an _OutgoingFilter instance.
+ """
+
+ original_payload_size = len(frame.payload)
+ self._outgoing_average_ratio_calculator.add_original_bytes(
+ original_payload_size)
+
+ if (not self._compress_outgoing or
+ common.is_control_opcode(frame.opcode)):
+ self._outgoing_average_ratio_calculator.add_result_bytes(
+ original_payload_size)
+ return
+
+ frame.payload = self._rfc1979_deflater.filter(
+ frame.payload, bfinal=self._bfinal)
+ frame.rsv1 = 1
+
+ filtered_payload_size = len(frame.payload)
+ self._outgoing_average_ratio_calculator.add_result_bytes(
+ filtered_payload_size)
+
+ _log_outgoing_compression_ratio(
+ self._logger,
+ original_payload_size,
+ filtered_payload_size,
+ self._outgoing_average_ratio_calculator.get_average_ratio())
+
+ def _incoming_filter(self, frame):
+ """Transform incoming frames. This method is called only by
+ an _IncomingFilter instance.
+ """
+
+ received_payload_size = len(frame.payload)
+ self._incoming_average_ratio_calculator.add_result_bytes(
+ received_payload_size)
+
+ if frame.rsv1 != 1 or common.is_control_opcode(frame.opcode):
+ self._incoming_average_ratio_calculator.add_original_bytes(
+ received_payload_size)
+ return
+
+ frame.payload = self._rfc1979_inflater.filter(frame.payload)
+ frame.rsv1 = 0
+
+ filtered_payload_size = len(frame.payload)
+ self._incoming_average_ratio_calculator.add_original_bytes(
+ filtered_payload_size)
+
+ _log_incoming_compression_ratio(
+ self._logger,
+ received_payload_size,
+ filtered_payload_size,
+ self._incoming_average_ratio_calculator.get_average_ratio())
+
+
+_available_processors[common.DEFLATE_FRAME_EXTENSION] = (
+ DeflateFrameExtensionProcessor)
+_compression_extension_names.append(common.DEFLATE_FRAME_EXTENSION)
+
+_available_processors[common.X_WEBKIT_DEFLATE_FRAME_EXTENSION] = (
+ DeflateFrameExtensionProcessor)
+_compression_extension_names.append(common.X_WEBKIT_DEFLATE_FRAME_EXTENSION)
+
+
+def _parse_compression_method(data):
+ """Parses the value of "method" extension parameter."""
+
+ return common.parse_extensions(data)
+
+
+def _create_accepted_method_desc(method_name, method_params):
+ """Creates accepted-method-desc from given method name and parameters"""
+
+ extension = common.ExtensionParameter(method_name)
+ for name, value in method_params:
+ extension.add_parameter(name, value)
+ return common.format_extension(extension)
+
+
+class CompressionExtensionProcessorBase(ExtensionProcessorInterface):
+ """Base class for perframe-compress and permessage-compress extension."""
+
+ _METHOD_PARAM = 'method'
+
+ def __init__(self, request):
+ ExtensionProcessorInterface.__init__(self, request)
+ self._logger = util.get_class_logger(self)
+ self._compression_method_name = None
+ self._compression_processor = None
+ self._compression_processor_hook = None
+
+ def name(self):
+ return ''
+
+ def _lookup_compression_processor(self, method_desc):
+ return None
+
+ def _get_compression_processor_response(self):
+ """Looks up the compression processor based on the self._request and
+ returns the compression processor's response.
+ """
+
+ method_list = self._request.get_parameter_value(self._METHOD_PARAM)
+ if method_list is None:
+ return None
+ methods = _parse_compression_method(method_list)
+ if methods is None:
+ return None
+ compression_processor = None
+ # The current implementation tries only the first method that matches
+ # a supported algorithm. The following methods aren't tried even if
+ # the first one is rejected.
+ # TODO(bashi): Need to clarify this behavior.
+ for method_desc in methods:
+ compression_processor = self._lookup_compression_processor(
+ method_desc)
+ if compression_processor is not None:
+ self._compression_method_name = method_desc.name()
+ break
+ if compression_processor is None:
+ return None
+
+ if self._compression_processor_hook:
+ self._compression_processor_hook(compression_processor)
+
+ processor_response = compression_processor.get_extension_response()
+ if processor_response is None:
+ return None
+ self._compression_processor = compression_processor
+ return processor_response
+
+ def _get_extension_response_internal(self):
+ processor_response = self._get_compression_processor_response()
+ if processor_response is None:
+ return None
+
+ response = common.ExtensionParameter(self._request.name())
+ accepted_method_desc = _create_accepted_method_desc(
+ self._compression_method_name,
+ processor_response.get_parameters())
+ response.add_parameter(self._METHOD_PARAM, accepted_method_desc)
+ self._logger.debug(
+ 'Enable %s extension (method: %s)' %
+ (self._request.name(), self._compression_method_name))
+ return response
+
+ def _setup_stream_options_internal(self, stream_options):
+ if self._compression_processor is None:
+ return
+ self._compression_processor.setup_stream_options(stream_options)
+
+ def set_compression_processor_hook(self, hook):
+ self._compression_processor_hook = hook
+
+ def get_compression_processor(self):
+ return self._compression_processor
+
+
+class PerMessageDeflateExtensionProcessor(ExtensionProcessorInterface):
+ """permessage-deflate extension processor. It's also used for
+ permessage-compress extension when the deflate method is chosen.
+
+ Specification:
+ http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression-08
+ """
+
+ _SERVER_MAX_WINDOW_BITS_PARAM = 'server_max_window_bits'
+ _SERVER_NO_CONTEXT_TAKEOVER_PARAM = 'server_no_context_takeover'
+ _CLIENT_MAX_WINDOW_BITS_PARAM = 'client_max_window_bits'
+ _CLIENT_NO_CONTEXT_TAKEOVER_PARAM = 'client_no_context_takeover'
+
+ def __init__(self, request, draft08=True):
+ """Construct PerMessageDeflateExtensionProcessor
+
+ Args:
+ draft08: Follow the constraints on the parameters that were not
+ specified for permessage-compress but are specified for
+ permessage-deflate as on
+ draft-ietf-hybi-permessage-compression-08.
+ """
+
+ ExtensionProcessorInterface.__init__(self, request)
+ self._logger = util.get_class_logger(self)
+
+ self._preferred_client_max_window_bits = None
+ self._client_no_context_takeover = False
+
+ self._draft08 = draft08
+
+ def name(self):
+ return 'deflate'
+
+ def _get_extension_response_internal(self):
+ if self._draft08:
+ for name in self._request.get_parameter_names():
+ if name not in [self._SERVER_MAX_WINDOW_BITS_PARAM,
+ self._SERVER_NO_CONTEXT_TAKEOVER_PARAM,
+ self._CLIENT_MAX_WINDOW_BITS_PARAM]:
+ self._logger.debug('Unknown parameter: %r', name)
+ return None
+ else:
+ # Any unknown parameter is simply ignored.
+ pass
+
+ server_max_window_bits = None
+ if self._request.has_parameter(self._SERVER_MAX_WINDOW_BITS_PARAM):
+ server_max_window_bits = self._request.get_parameter_value(
+ self._SERVER_MAX_WINDOW_BITS_PARAM)
+ try:
+ server_max_window_bits = _parse_window_bits(
+ server_max_window_bits)
+ except ValueError, e:
+ self._logger.debug('Bad %s parameter: %r',
+ self._SERVER_MAX_WINDOW_BITS_PARAM,
+ e)
+ return None
+
+ server_no_context_takeover = self._request.has_parameter(
+ self._SERVER_NO_CONTEXT_TAKEOVER_PARAM)
+ if (server_no_context_takeover and
+ self._request.get_parameter_value(
+ self._SERVER_NO_CONTEXT_TAKEOVER_PARAM) is not None):
+ self._logger.debug('%s parameter must not have a value: %r',
+ self._SERVER_NO_CONTEXT_TAKEOVER_PARAM,
+ server_no_context_takeover)
+ return None
+
+ # client_max_window_bits from a client indicates whether the client can
+ # accept client_max_window_bits from a server or not.
+ client_client_max_window_bits = self._request.has_parameter(
+ self._CLIENT_MAX_WINDOW_BITS_PARAM)
+ if (self._draft08 and
+ client_client_max_window_bits and
+ self._request.get_parameter_value(
+ self._CLIENT_MAX_WINDOW_BITS_PARAM) is not None):
+ self._logger.debug('%s parameter must not have a value in a '
+ 'client\'s opening handshake: %r',
+ self._CLIENT_MAX_WINDOW_BITS_PARAM,
+ client_client_max_window_bits)
+ return None
+
+ self._rfc1979_deflater = util._RFC1979Deflater(
+ server_max_window_bits, server_no_context_takeover)
+
+ # Note that we prepare for incoming messages compressed with window
+ # bits up to 15 regardless of the client_max_window_bits value to be
+ # sent to the client.
+ self._rfc1979_inflater = util._RFC1979Inflater()
+
+ self._framer = _PerMessageDeflateFramer(
+ server_max_window_bits, server_no_context_takeover)
+ self._framer.set_bfinal(False)
+ self._framer.set_compress_outgoing_enabled(True)
+
+ response = common.ExtensionParameter(self._request.name())
+
+ if server_max_window_bits is not None:
+ response.add_parameter(
+ self._SERVER_MAX_WINDOW_BITS_PARAM,
+ str(server_max_window_bits))
+
+ if server_no_context_takeover:
+ response.add_parameter(
+ self._SERVER_NO_CONTEXT_TAKEOVER_PARAM, None)
+
+ if self._preferred_client_max_window_bits is not None:
+ if self._draft08 and not client_client_max_window_bits:
+ self._logger.debug('Processor is configured to use %s but '
+ 'the client cannot accept it',
+ self._CLIENT_MAX_WINDOW_BITS_PARAM)
+ return None
+ response.add_parameter(
+ self._CLIENT_MAX_WINDOW_BITS_PARAM,
+ str(self._preferred_client_max_window_bits))
+
+ if self._client_no_context_takeover:
+ response.add_parameter(
+ self._CLIENT_NO_CONTEXT_TAKEOVER_PARAM, None)
+
+ self._logger.debug(
+ 'Enable %s extension ('
+ 'request: server_max_window_bits=%s; '
+ 'server_no_context_takeover=%r, '
+ 'response: client_max_window_bits=%s; '
+ 'client_no_context_takeover=%r)' %
+ (self._request.name(),
+ server_max_window_bits,
+ server_no_context_takeover,
+ self._preferred_client_max_window_bits,
+ self._client_no_context_takeover))
+
+ return response
+
+ def _setup_stream_options_internal(self, stream_options):
+ self._framer.setup_stream_options(stream_options)
+
+ def set_client_max_window_bits(self, value):
+ """If this option is specified, this class adds the
+ client_max_window_bits extension parameter to the handshake response,
+ but doesn't reduce the LZ77 sliding window size of its inflater.
+ I.e., you can use this for testing a client implementation, but it
+ doesn't reduce the memory usage of this class.
+
+ If this method has been called with True and an offer without the
+ client_max_window_bits extension parameter is received,
+ - (When processing the permessage-deflate extension) this processor
+ declines the request.
+ - (When processing the permessage-compress extension) this processor
+ accepts the request.
+ """
+
+ self._preferred_client_max_window_bits = value
+
+ def set_client_no_context_takeover(self, value):
+ """If this option is specified, this class adds the
+ client_no_context_takeover extension parameter to the handshake
+ response, but doesn't reset the inflater for each message. I.e., you
+ can use this for testing a client implementation, but it doesn't
+ reduce the memory usage of this class.
+ """
+
+ self._client_no_context_takeover = value
+
+ def set_bfinal(self, value):
+ self._framer.set_bfinal(value)
+
+ def enable_outgoing_compression(self):
+ self._framer.set_compress_outgoing_enabled(True)
+
+ def disable_outgoing_compression(self):
+ self._framer.set_compress_outgoing_enabled(False)
+
+
+class _PerMessageDeflateFramer(object):
+ """A framer for extensions with per-message DEFLATE feature."""
+
+ def __init__(self, deflate_max_window_bits, deflate_no_context_takeover):
+ self._logger = util.get_class_logger(self)
+
+ self._rfc1979_deflater = util._RFC1979Deflater(
+ deflate_max_window_bits, deflate_no_context_takeover)
+
+ self._rfc1979_inflater = util._RFC1979Inflater()
+
+ self._bfinal = False
+
+ self._compress_outgoing_enabled = False
+
+ # True if a message is fragmented and compression is ongoing.
+ self._compress_ongoing = False
+
+ # Calculates
+ # (Total outgoing bytes supplied to this filter) /
+ # (Total bytes sent to the network after applying this filter)
+ self._outgoing_average_ratio_calculator = _AverageRatioCalculator()
+
+ # Calculates
+ # (Total bytes received from the network) /
+ # (Total incoming bytes obtained after applying this filter)
+ self._incoming_average_ratio_calculator = _AverageRatioCalculator()
+
+ def set_bfinal(self, value):
+ self._bfinal = value
+
+ def set_compress_outgoing_enabled(self, value):
+ self._compress_outgoing_enabled = value
+
+ def _process_incoming_message(self, message, decompress):
+ if not decompress:
+ return message
+
+ received_payload_size = len(message)
+ self._incoming_average_ratio_calculator.add_result_bytes(
+ received_payload_size)
+
+ message = self._rfc1979_inflater.filter(message)
+
+ filtered_payload_size = len(message)
+ self._incoming_average_ratio_calculator.add_original_bytes(
+ filtered_payload_size)
+
+ _log_incoming_compression_ratio(
+ self._logger,
+ received_payload_size,
+ filtered_payload_size,
+ self._incoming_average_ratio_calculator.get_average_ratio())
+
+ return message
+
+ def _process_outgoing_message(self, message, end, binary):
+ if not binary:
+ message = message.encode('utf-8')
+
+ if not self._compress_outgoing_enabled:
+ return message
+
+ original_payload_size = len(message)
+ self._outgoing_average_ratio_calculator.add_original_bytes(
+ original_payload_size)
+
+ message = self._rfc1979_deflater.filter(
+ message, end=end, bfinal=self._bfinal)
+
+ filtered_payload_size = len(message)
+ self._outgoing_average_ratio_calculator.add_result_bytes(
+ filtered_payload_size)
+
+ _log_outgoing_compression_ratio(
+ self._logger,
+ original_payload_size,
+ filtered_payload_size,
+ self._outgoing_average_ratio_calculator.get_average_ratio())
+
+ if not self._compress_ongoing:
+ self._outgoing_frame_filter.set_compression_bit()
+ self._compress_ongoing = not end
+ return message
+
+ def _process_incoming_frame(self, frame):
+ if frame.rsv1 == 1 and not common.is_control_opcode(frame.opcode):
+ self._incoming_message_filter.decompress_next_message()
+ frame.rsv1 = 0
+
+ def _process_outgoing_frame(self, frame, compression_bit):
+ if (not compression_bit or
+ common.is_control_opcode(frame.opcode)):
+ return
+
+ frame.rsv1 = 1
+
+ def setup_stream_options(self, stream_options):
+ """Creates filters and sets them to the StreamOptions."""
+
+ class _OutgoingMessageFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+
+ def filter(self, message, end=True, binary=False):
+ return self._parent._process_outgoing_message(
+ message, end, binary)
+
+ class _IncomingMessageFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+ self._decompress_next_message = False
+
+ def decompress_next_message(self):
+ self._decompress_next_message = True
+
+ def filter(self, message):
+ message = self._parent._process_incoming_message(
+ message, self._decompress_next_message)
+ self._decompress_next_message = False
+ return message
+
+ self._outgoing_message_filter = _OutgoingMessageFilter(self)
+ self._incoming_message_filter = _IncomingMessageFilter(self)
+ stream_options.outgoing_message_filters.append(
+ self._outgoing_message_filter)
+ stream_options.incoming_message_filters.append(
+ self._incoming_message_filter)
+
+ class _OutgoingFrameFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+ self._set_compression_bit = False
+
+ def set_compression_bit(self):
+ self._set_compression_bit = True
+
+ def filter(self, frame):
+ self._parent._process_outgoing_frame(
+ frame, self._set_compression_bit)
+ self._set_compression_bit = False
+
+ class _IncomingFrameFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+
+ def filter(self, frame):
+ self._parent._process_incoming_frame(frame)
+
+ self._outgoing_frame_filter = _OutgoingFrameFilter(self)
+ self._incoming_frame_filter = _IncomingFrameFilter(self)
+ stream_options.outgoing_frame_filters.append(
+ self._outgoing_frame_filter)
+ stream_options.incoming_frame_filters.append(
+ self._incoming_frame_filter)
+
+ stream_options.encode_text_message_to_utf8 = False
+
+
+_available_processors[common.PERMESSAGE_DEFLATE_EXTENSION] = (
+ PerMessageDeflateExtensionProcessor)
+# TODO(tyoshino): Reorganize class names.
+_compression_extension_names.append('deflate')
+
+
+class PerMessageCompressExtensionProcessor(
+ CompressionExtensionProcessorBase):
+ """permessage-compress extension processor.
+
+ Specification:
+ http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression
+ """
+
+ _DEFLATE_METHOD = 'deflate'
+
+ def __init__(self, request):
+ CompressionExtensionProcessorBase.__init__(self, request)
+
+ def name(self):
+ return common.PERMESSAGE_COMPRESSION_EXTENSION
+
+ def _lookup_compression_processor(self, method_desc):
+ if method_desc.name() == self._DEFLATE_METHOD:
+ return PerMessageDeflateExtensionProcessor(method_desc, False)
+ return None
+
+
+_available_processors[common.PERMESSAGE_COMPRESSION_EXTENSION] = (
+ PerMessageCompressExtensionProcessor)
+_compression_extension_names.append(common.PERMESSAGE_COMPRESSION_EXTENSION)
+
+
+class MuxExtensionProcessor(ExtensionProcessorInterface):
+ """WebSocket multiplexing extension processor."""
+
+ _QUOTA_PARAM = 'quota'
+
+ def __init__(self, request):
+ ExtensionProcessorInterface.__init__(self, request)
+ self._quota = 0
+ self._extensions = []
+
+ def name(self):
+ return common.MUX_EXTENSION
+
+ def check_consistency_with_other_processors(self, processors):
+ before_mux = True
+ for processor in processors:
+ name = processor.name()
+ if name == self.name():
+ before_mux = False
+ continue
+ if not processor.is_active():
+ continue
+ if before_mux:
+                # The mux extension cannot be used after extensions that
+                # depend on frame boundaries, the extension data field, or
+                # any reserved bits attributed to each frame.
+ if (name == common.DEFLATE_FRAME_EXTENSION or
+ name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION):
+ self.set_active(False)
+ return
+ else:
+                # The mux extension should not be applied before any
+                # history-based compression extension.
+ if (name == common.DEFLATE_FRAME_EXTENSION or
+ name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION or
+ name == common.PERMESSAGE_COMPRESSION_EXTENSION or
+ name == common.X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION):
+ self.set_active(False)
+ return
+
+ def _get_extension_response_internal(self):
+ self._active = False
+ quota = self._request.get_parameter_value(self._QUOTA_PARAM)
+ if quota is not None:
+ try:
+ quota = int(quota)
+ except ValueError, e:
+ return None
+ if quota < 0 or quota >= 2 ** 32:
+ return None
+ self._quota = quota
+
+ self._active = True
+ return common.ExtensionParameter(common.MUX_EXTENSION)
+
+ def _setup_stream_options_internal(self, stream_options):
+ pass
+
+ def set_quota(self, quota):
+ self._quota = quota
+
+ def quota(self):
+ return self._quota
+
+ def set_extensions(self, extensions):
+ self._extensions = extensions
+
+ def extensions(self):
+ return self._extensions
+
+
+_available_processors[common.MUX_EXTENSION] = MuxExtensionProcessor
+
+
+def get_extension_processor(extension_request):
+ """Given an ExtensionParameter representing an extension offer received
+ from a client, configures and returns an instance of the corresponding
+ extension processor class.
+ """
+
+ processor_class = _available_processors.get(extension_request.name())
+ if processor_class is None:
+ return None
+ return processor_class(extension_request)
+
+
+def is_compression_extension(extension_name):
+ return extension_name in _compression_extension_names
+
+
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/fast_masking.i b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/fast_masking.i
new file mode 100644
index 0000000..ddaad27
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/fast_masking.i
@@ -0,0 +1,98 @@
+// Copyright 2013, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+%module fast_masking
+
+%include "cstring.i"
+
+%{
+#include <cstring>
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+#endif
+%}
+
+%apply (char *STRING, int LENGTH) {
+ (const char* payload, int payload_length),
+ (const char* masking_key, int masking_key_length) };
+%cstring_output_allocate_size(
+ char** result, int* result_length, delete [] *$1);
+
+%inline %{
+
+void mask(
+ const char* payload, int payload_length,
+ const char* masking_key, int masking_key_length,
+ int masking_key_index,
+ char** result, int* result_length) {
+ *result = new char[payload_length];
+ *result_length = payload_length;
+ memcpy(*result, payload, payload_length);
+
+ char* cursor = *result;
+ char* cursor_end = *result + *result_length;
+
+#ifdef __SSE2__
+ while ((cursor < cursor_end) &&
+ (reinterpret_cast<size_t>(cursor) & 0xf)) {
+ *cursor ^= masking_key[masking_key_index];
+ ++cursor;
+ masking_key_index = (masking_key_index + 1) % masking_key_length;
+ }
+ if (cursor == cursor_end) {
+ return;
+ }
+
+ const int kBlockSize = 16;
+ __m128i masking_key_block;
+ for (int i = 0; i < kBlockSize; ++i) {
+ *(reinterpret_cast<char*>(&masking_key_block) + i) =
+ masking_key[masking_key_index];
+ masking_key_index = (masking_key_index + 1) % masking_key_length;
+ }
+
+ while (cursor + kBlockSize <= cursor_end) {
+ __m128i payload_block =
+ _mm_load_si128(reinterpret_cast<__m128i*>(cursor));
+ _mm_stream_si128(reinterpret_cast<__m128i*>(cursor),
+ _mm_xor_si128(payload_block, masking_key_block));
+ cursor += kBlockSize;
+ }
+#endif
+
+ while (cursor < cursor_end) {
+ *cursor ^= masking_key[masking_key_index];
+ ++cursor;
+ masking_key_index = (masking_key_index + 1) % masking_key_length;
+ }
+}
+
+%}
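+
+// A minimal usage sketch from Python, assuming this interface has been
+// built with SWIG into an importable fast_masking module (the build step is
+// not part of this file):
+//
+//   import fast_masking
+//   masked = fast_masking.mask(payload, masking_key, masking_key_index)
+//
+// Under the typemaps applied above, the (pointer, length) input pairs and
+// the allocated output pair all map to plain Python strings.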
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/__init__.py
new file mode 100644
index 0000000..194f6b3
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/__init__.py
@@ -0,0 +1,110 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket opening handshake processor. This class try to apply available
+opening handshake processors for each protocol version until a connection is
+successfully established.
+"""
+
+
+import logging
+
+from mod_pywebsocket import common
+from mod_pywebsocket.handshake import hybi00
+from mod_pywebsocket.handshake import hybi
+# Export AbortedByUserException, HandshakeException, and VersionException
+# symbols from this module.
+from mod_pywebsocket.handshake._base import AbortedByUserException
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import VersionException
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def do_handshake(request, dispatcher, allowDraft75=False, strict=False):
+ """Performs WebSocket handshake.
+
+ Args:
+ request: mod_python request.
+ dispatcher: Dispatcher (dispatch.Dispatcher).
+        allowDraft75: obsolete argument; ignored.
+        strict: obsolete argument; ignored.
+
+    Handshaker will add attributes such as ws_resource while performing the
+    handshake.
+ """
+
+ _LOGGER.debug('Client\'s opening handshake resource: %r', request.uri)
+    # To print mimetools.Message as an escaped one-line string, we convert
+    # headers_in to a dict object. Without the conversion, %r just prints
+    # the type and address, and %s prints the original header string as
+    # multiple lines.
+    #
+    # Both mimetools.Message and mod_python's MpTable_Type can be converted
+    # to dict.
+    #
+    # mimetools.Message.__str__ returns the original header string, while
+    # dict(mimetools.Message object) returns a map from header names to
+    # header values. MpTable_Type has no such __str__, just a __repr__ that
+    # formats itself like a dictionary object.
+ _LOGGER.debug(
+ 'Client\'s opening handshake headers: %r', dict(request.headers_in))
+
+ handshakers = []
+ handshakers.append(
+ ('RFC 6455', hybi.Handshaker(request, dispatcher)))
+ handshakers.append(
+ ('HyBi 00', hybi00.Handshaker(request, dispatcher)))
+
+ for name, handshaker in handshakers:
+ _LOGGER.debug('Trying protocol version %s', name)
+ try:
+ handshaker.do_handshake()
+ _LOGGER.info('Established (%s protocol)', name)
+ return
+ except HandshakeException, e:
+ _LOGGER.debug(
+ 'Failed to complete opening handshake as %s protocol: %r',
+ name, e)
+ if e.status:
+ raise e
+ except AbortedByUserException, e:
+ raise
+ except VersionException, e:
+ raise
+
+ # TODO(toyoshim): Add a test to cover the case all handshakers fail.
+ raise HandshakeException(
+ 'Failed to complete opening handshake for all available protocols',
+ status=common.HTTP_STATUS_BAD_REQUEST)
+
+
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/_base.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/_base.py
new file mode 100644
index 0000000..c993a58
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/_base.py
@@ -0,0 +1,182 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Common functions and exceptions used by WebSocket opening handshake
+processors.
+"""
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket import http_header_util
+
+
+class AbortedByUserException(Exception):
+ """Exception for aborting a connection intentionally.
+
+    If this exception is raised in the do_extra_handshake handler, the
+    connection will be abandoned. No other WebSocket or HTTP(S) handler will
+    be invoked.
+
+    If this exception is raised in the transfer_data_handler, the connection
+    will be closed without a closing handshake. No other WebSocket or
+    HTTP(S) handler will be invoked.
+ """
+
+ pass
+
+
+class HandshakeException(Exception):
+ """This exception will be raised when an error occurred while processing
+ WebSocket initial handshake.
+ """
+
+ def __init__(self, name, status=None):
+ super(HandshakeException, self).__init__(name)
+ self.status = status
+
+
+class VersionException(Exception):
+ """This exception will be raised when a version of client request does not
+ match with version the server supports.
+ """
+
+ def __init__(self, name, supported_versions=''):
+ """Construct an instance.
+
+ Args:
+            supported_versions: a str object listing the supported hybi
+                                versions (e.g. '8, 13').
+ """
+ super(VersionException, self).__init__(name)
+ self.supported_versions = supported_versions
+
+
+def get_default_port(is_secure):
+ if is_secure:
+ return common.DEFAULT_WEB_SOCKET_SECURE_PORT
+ else:
+ return common.DEFAULT_WEB_SOCKET_PORT
+
+
+def validate_subprotocol(subprotocol):
+ """Validate a value in the Sec-WebSocket-Protocol field.
+
+ See the Section 4.1., 4.2.2., and 4.3. of RFC 6455.
+ """
+
+ if not subprotocol:
+ raise HandshakeException('Invalid subprotocol name: empty')
+
+ # Parameter should be encoded HTTP token.
+ state = http_header_util.ParsingState(subprotocol)
+ token = http_header_util.consume_token(state)
+ rest = http_header_util.peek(state)
+    # If |rest| is not None, |subprotocol| is not a single token or is
+    # invalid. If |rest| is None, |token| must not be None because
+    # |subprotocol| is the concatenation of |token| and |rest| and is not
+    # None.
+ if rest is not None:
+ raise HandshakeException('Invalid non-token string in subprotocol '
+ 'name: %r' % rest)
+
+
+def parse_host_header(request):
+ fields = request.headers_in[common.HOST_HEADER].split(':', 1)
+ if len(fields) == 1:
+ return fields[0], get_default_port(request.is_https())
+ try:
+ return fields[0], int(fields[1])
+ except ValueError, e:
+ raise HandshakeException('Invalid port number format: %r' % e)
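+
+# For example (illustrative): a Host header of 'example.com:8080' yields
+# ('example.com', 8080), while 'example.com' alone yields the scheme's
+# default port from get_default_port().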
+
+
+def format_header(name, value):
+ return '%s: %s\r\n' % (name, value)
+
+
+def get_mandatory_header(request, key):
+ value = request.headers_in.get(key)
+ if value is None:
+ raise HandshakeException('Header %s is not defined' % key)
+ return value
+
+
+def validate_mandatory_header(request, key, expected_value, fail_status=None):
+ value = get_mandatory_header(request, key)
+
+ if value.lower() != expected_value.lower():
+ raise HandshakeException(
+ 'Expected %r for header %s but found %r (case-insensitive)' %
+ (expected_value, key, value), status=fail_status)
+
+
+def check_request_line(request):
+ # 5.1 1. The three character UTF-8 string "GET".
+ # 5.1 2. A UTF-8-encoded U+0020 SPACE character (0x20 byte).
+ if request.method != 'GET':
+ raise HandshakeException('Method is not GET: %r' % request.method)
+
+ if request.protocol != 'HTTP/1.1':
+ raise HandshakeException('Version is not HTTP/1.1: %r' %
+ request.protocol)
+
+
+def parse_token_list(data):
+ """Parses a header value which follows 1#token and returns parsed elements
+ as a list of strings.
+
+ Leading LWSes must be trimmed.
+ """
+
+ state = http_header_util.ParsingState(data)
+
+ token_list = []
+
+ while True:
+ token = http_header_util.consume_token(state)
+ if token is not None:
+ token_list.append(token)
+
+ http_header_util.consume_lwses(state)
+
+ if http_header_util.peek(state) is None:
+ break
+
+ if not http_header_util.consume_string(state, ','):
+ raise HandshakeException(
+ 'Expected a comma but found %r' % http_header_util.peek(state))
+
+ http_header_util.consume_lwses(state)
+
+ if len(token_list) == 0:
+ raise HandshakeException('No valid token found')
+
+ return token_list
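+
+# A usage sketch (illustrative):
+#
+#   >>> parse_token_list('permessage-deflate, mux')
+#   ['permessage-deflate', 'mux']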
+
+
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi.py
new file mode 100644
index 0000000..1ad10ea
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi.py
@@ -0,0 +1,420 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides the opening handshake processor for the WebSocket
+protocol (RFC 6455).
+
+Specification:
+http://tools.ietf.org/html/rfc6455
+"""
+
+
+# Note: request.connection.write is used in this module, even though the
+# mod_python documentation says that it should be used only in connection
+# handlers. Unfortunately, we have no other options. For example,
+# request.write is not suitable because it doesn't allow direct raw bytes
+# writing.
+
+
+import base64
+import logging
+import os
+import re
+
+from mod_pywebsocket import common
+from mod_pywebsocket.extensions import get_extension_processor
+from mod_pywebsocket.extensions import is_compression_extension
+from mod_pywebsocket.handshake._base import check_request_line
+from mod_pywebsocket.handshake._base import format_header
+from mod_pywebsocket.handshake._base import get_mandatory_header
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import parse_token_list
+from mod_pywebsocket.handshake._base import validate_mandatory_header
+from mod_pywebsocket.handshake._base import validate_subprotocol
+from mod_pywebsocket.handshake._base import VersionException
+from mod_pywebsocket.stream import Stream
+from mod_pywebsocket.stream import StreamOptions
+from mod_pywebsocket import util
+
+
+# Used to validate the value in the Sec-WebSocket-Key header strictly. RFC
+# 4648 disallows non-zero padding, so the character right before == must be
+# one of A, Q, g, and w.
+_SEC_WEBSOCKET_KEY_REGEX = re.compile('^[+/0-9A-Za-z]{21}[AQgw]==$')
+
+# Aliases for frequently used values.
+_VERSION_LATEST = common.VERSION_HYBI_LATEST
+_VERSION_LATEST_STRING = str(_VERSION_LATEST)
+_SUPPORTED_VERSIONS = [
+ _VERSION_LATEST,
+]
+
+
+def compute_accept(key):
+ """Computes value for the Sec-WebSocket-Accept header from value of the
+ Sec-WebSocket-Key header.
+ """
+
+ accept_binary = util.sha1_hash(
+ key + common.WEBSOCKET_ACCEPT_UUID).digest()
+ accept = base64.b64encode(accept_binary)
+
+ return (accept, accept_binary)
+
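+# A minimal check of compute_accept() against the sample handshake in
+# RFC 6455 section 1.3 (illustrative comment, not part of the module):
+#
+#   >>> accept, accept_binary = compute_accept('dGhlIHNhbXBsZSBub25jZQ==')
+#   >>> accept
+#   's3pPLMBiTxaQ9kYGzzhZRbK+xOo='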
+
+class Handshaker(object):
+ """Opening handshake processor for the WebSocket protocol (RFC 6455)."""
+
+ def __init__(self, request, dispatcher):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ dispatcher: Dispatcher (dispatch.Dispatcher).
+
+        Handshaker will add attributes such as ws_resource during the handshake.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+ self._dispatcher = dispatcher
+
+ def _validate_connection_header(self):
+ connection = get_mandatory_header(
+ self._request, common.CONNECTION_HEADER)
+
+ try:
+ connection_tokens = parse_token_list(connection)
+ except HandshakeException, e:
+ raise HandshakeException(
+ 'Failed to parse %s: %s' % (common.CONNECTION_HEADER, e))
+
+ connection_is_valid = False
+ for token in connection_tokens:
+ if token.lower() == common.UPGRADE_CONNECTION_TYPE.lower():
+ connection_is_valid = True
+ break
+ if not connection_is_valid:
+ raise HandshakeException(
+ '%s header doesn\'t contain "%s"' %
+ (common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+
+ def do_handshake(self):
+ self._request.ws_close_code = None
+ self._request.ws_close_reason = None
+
+ # Parsing.
+
+ check_request_line(self._request)
+
+ validate_mandatory_header(
+ self._request,
+ common.UPGRADE_HEADER,
+ common.WEBSOCKET_UPGRADE_TYPE)
+
+ self._validate_connection_header()
+
+ self._request.ws_resource = self._request.uri
+
+ unused_host = get_mandatory_header(self._request, common.HOST_HEADER)
+
+ self._request.ws_version = self._check_version()
+
+ try:
+ self._get_origin()
+ self._set_protocol()
+ self._parse_extensions()
+
+ # Key validation, response generation.
+
+ key = self._get_key()
+ (accept, accept_binary) = compute_accept(key)
+ self._logger.debug(
+ '%s: %r (%s)',
+ common.SEC_WEBSOCKET_ACCEPT_HEADER,
+ accept,
+ util.hexify(accept_binary))
+
+ self._logger.debug('Protocol version is RFC 6455')
+
+ # Setup extension processors.
+
+ processors = []
+ if self._request.ws_requested_extensions is not None:
+ for extension_request in self._request.ws_requested_extensions:
+ processor = get_extension_processor(extension_request)
+ # Unknown extension requests are just ignored.
+ if processor is not None:
+ processors.append(processor)
+ self._request.ws_extension_processors = processors
+
+            # List of extra headers. The extra handshake handler may add
+            # header data as name/value pairs to this list, and pywebsocket
+            # appends them to the WebSocket handshake response.
+ self._request.extra_headers = []
+
+ # Extra handshake handler may modify/remove processors.
+ self._dispatcher.do_extra_handshake(self._request)
+ processors = filter(lambda processor: processor is not None,
+ self._request.ws_extension_processors)
+
+            # Ask each processor if there are extensions on the request that
+            # cannot co-exist. When a processor decides that other processors
+            # cannot co-exist with it, it marks them (or itself) as
+            # "inactive". The first extension processor has the right to make
+            # the final call.
+ for processor in reversed(processors):
+ if processor.is_active():
+ processor.check_consistency_with_other_processors(
+ processors)
+ processors = filter(lambda processor: processor.is_active(),
+ processors)
+
+ accepted_extensions = []
+
+            # We need to take the mux extension into account here.
+            # If the mux extension exists:
+            # - Remove the processors of extensions for logical channels,
+            #   i.e. the processors located before the mux processor
+            # - Pass the extension requests for logical channels to the mux
+            #   processor
+            # - Attach the mux processor to the request. The dispatcher will
+            #   refer to it to decide whether to use the mux handler or not.
+ mux_index = -1
+ for i, processor in enumerate(processors):
+ if processor.name() == common.MUX_EXTENSION:
+ mux_index = i
+ break
+ if mux_index >= 0:
+ logical_channel_extensions = []
+ for processor in processors[:mux_index]:
+ logical_channel_extensions.append(processor.request())
+ processor.set_active(False)
+ self._request.mux_processor = processors[mux_index]
+ self._request.mux_processor.set_extensions(
+ logical_channel_extensions)
+ processors = filter(lambda processor: processor.is_active(),
+ processors)
+
+ stream_options = StreamOptions()
+
+ for index, processor in enumerate(processors):
+ if not processor.is_active():
+ continue
+
+ extension_response = processor.get_extension_response()
+ if extension_response is None:
+ # Rejected.
+ continue
+
+ accepted_extensions.append(extension_response)
+
+ processor.setup_stream_options(stream_options)
+
+ if not is_compression_extension(processor.name()):
+ continue
+
+                # Deactivate all of the following compression extensions.
+ for j in xrange(index + 1, len(processors)):
+ if is_compression_extension(processors[j].name()):
+ processors[j].set_active(False)
+
+ if len(accepted_extensions) > 0:
+ self._request.ws_extensions = accepted_extensions
+ self._logger.debug(
+ 'Extensions accepted: %r',
+ map(common.ExtensionParameter.name, accepted_extensions))
+ else:
+ self._request.ws_extensions = None
+
+ self._request.ws_stream = self._create_stream(stream_options)
+
+ if self._request.ws_requested_protocols is not None:
+ if self._request.ws_protocol is None:
+ raise HandshakeException(
+ 'do_extra_handshake must choose one subprotocol from '
+ 'ws_requested_protocols and set it to ws_protocol')
+ validate_subprotocol(self._request.ws_protocol)
+
+ self._logger.debug(
+ 'Subprotocol accepted: %r',
+ self._request.ws_protocol)
+ else:
+ if self._request.ws_protocol is not None:
+ raise HandshakeException(
+ 'ws_protocol must be None when the client didn\'t '
+ 'request any subprotocol')
+
+ self._send_handshake(accept)
+ except HandshakeException, e:
+ if not e.status:
+                # Fall back to 400 Bad Request by default.
+ e.status = common.HTTP_STATUS_BAD_REQUEST
+ raise e
+
+ def _get_origin(self):
+ origin_header = common.ORIGIN_HEADER
+ origin = self._request.headers_in.get(origin_header)
+ if origin is None:
+ self._logger.debug('Client request does not have origin header')
+ self._request.ws_origin = origin
+
+ def _check_version(self):
+ version = get_mandatory_header(self._request,
+ common.SEC_WEBSOCKET_VERSION_HEADER)
+ if version == _VERSION_LATEST_STRING:
+ return _VERSION_LATEST
+
+ if version.find(',') >= 0:
+ raise HandshakeException(
+ 'Multiple versions (%r) are not allowed for header %s' %
+ (version, common.SEC_WEBSOCKET_VERSION_HEADER),
+ status=common.HTTP_STATUS_BAD_REQUEST)
+ raise VersionException(
+ 'Unsupported version %r for header %s' %
+ (version, common.SEC_WEBSOCKET_VERSION_HEADER),
+ supported_versions=', '.join(map(str, _SUPPORTED_VERSIONS)))
+
+ def _set_protocol(self):
+ self._request.ws_protocol = None
+
+ protocol_header = self._request.headers_in.get(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER)
+
+ if protocol_header is None:
+ self._request.ws_requested_protocols = None
+ return
+
+ self._request.ws_requested_protocols = parse_token_list(
+ protocol_header)
+ self._logger.debug('Subprotocols requested: %r',
+ self._request.ws_requested_protocols)
+
+ def _parse_extensions(self):
+ extensions_header = self._request.headers_in.get(
+ common.SEC_WEBSOCKET_EXTENSIONS_HEADER)
+ if not extensions_header:
+ self._request.ws_requested_extensions = None
+ return
+
+ try:
+ self._request.ws_requested_extensions = common.parse_extensions(
+ extensions_header)
+ except common.ExtensionParsingException, e:
+ raise HandshakeException(
+ 'Failed to parse Sec-WebSocket-Extensions header: %r' % e)
+
+ self._logger.debug(
+ 'Extensions requested: %r',
+ map(common.ExtensionParameter.name,
+ self._request.ws_requested_extensions))
+
+ def _validate_key(self, key):
+ if key.find(',') >= 0:
+ raise HandshakeException('Request has multiple %s header lines or '
+ 'contains illegal character \',\': %r' %
+ (common.SEC_WEBSOCKET_KEY_HEADER, key))
+
+ # Validate
+ key_is_valid = False
+ try:
+            # Validate the key with a quick regex match before parsing it
+            # with the base64 module. Because the base64 module skips invalid
+            # characters, we have to do this in advance to make this server
+            # strictly reject illegal keys.
+ if _SEC_WEBSOCKET_KEY_REGEX.match(key):
+ decoded_key = base64.b64decode(key)
+ if len(decoded_key) == 16:
+ key_is_valid = True
+ except TypeError, e:
+ pass
+
+ if not key_is_valid:
+ raise HandshakeException(
+ 'Illegal value for header %s: %r' %
+ (common.SEC_WEBSOCKET_KEY_HEADER, key))
+
+ return decoded_key
+
+ def _get_key(self):
+ key = get_mandatory_header(
+ self._request, common.SEC_WEBSOCKET_KEY_HEADER)
+
+ decoded_key = self._validate_key(key)
+
+ self._logger.debug(
+ '%s: %r (%s)',
+ common.SEC_WEBSOCKET_KEY_HEADER,
+ key,
+ util.hexify(decoded_key))
+
+ return key
+
+ def _create_stream(self, stream_options):
+ return Stream(self._request, stream_options)
+
+ def _create_handshake_response(self, accept):
+ response = []
+
+ response.append('HTTP/1.1 101 Switching Protocols\r\n')
+
+ # WebSocket headers
+ response.append(format_header(
+ common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE))
+ response.append(format_header(
+ common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+ response.append(format_header(
+ common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))
+ if self._request.ws_protocol is not None:
+ response.append(format_header(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+ self._request.ws_protocol))
+ if (self._request.ws_extensions is not None and
+ len(self._request.ws_extensions) != 0):
+ response.append(format_header(
+ common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
+ common.format_extensions(self._request.ws_extensions)))
+
+ # Headers not specific for WebSocket
+ for name, value in self._request.extra_headers:
+ response.append(format_header(name, value))
+
+ response.append('\r\n')
+
+ return ''.join(response)
+
+ def _send_handshake(self, accept):
+ raw_response = self._create_handshake_response(accept)
+ self._request.connection.write(raw_response)
+ self._logger.debug('Sent server\'s opening handshake: %r',
+ raw_response)
+
+
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi00.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi00.py
new file mode 100644
index 0000000..8757717
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi00.py
@@ -0,0 +1,293 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides the opening handshake processor for the WebSocket
+protocol version HyBi 00.
+
+Specification:
+http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
+"""
+
+
+# Note: request.connection.write/read are used in this module, even though
+# the mod_python documentation says that they should be used only in
+# connection handlers. Unfortunately, we have no other options. For example,
+# request.write/read are not suitable because they don't allow direct raw
+# bytes writing/reading.
+
+
+import logging
+import re
+import struct
+
+from mod_pywebsocket import common
+from mod_pywebsocket.stream import StreamHixie75
+from mod_pywebsocket import util
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import check_request_line
+from mod_pywebsocket.handshake._base import format_header
+from mod_pywebsocket.handshake._base import get_default_port
+from mod_pywebsocket.handshake._base import get_mandatory_header
+from mod_pywebsocket.handshake._base import parse_host_header
+from mod_pywebsocket.handshake._base import validate_mandatory_header
+
+
+_MANDATORY_HEADERS = [
+ # key, expected value or None
+ [common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75],
+ [common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE],
+]
+
+
+def _validate_subprotocol(subprotocol):
+ """Checks if characters in subprotocol are in range between U+0020 and
+ U+007E. A value in the Sec-WebSocket-Protocol field need to satisfy this
+ requirement.
+
+ See the Section 4.1. Opening handshake of the spec.
+ """
+
+ if not subprotocol:
+ raise HandshakeException('Invalid subprotocol name: empty')
+
+ # Parameter should be in the range U+0020 to U+007E.
+ for c in subprotocol:
+ if not 0x20 <= ord(c) <= 0x7e:
+ raise HandshakeException(
+ 'Illegal character in subprotocol name: %r' % c)
+
+
+def _check_header_lines(request, mandatory_headers):
+ check_request_line(request)
+
+ # The expected field names, and the meaning of their corresponding
+ # values, are as follows.
+ # |Upgrade| and |Connection|
+ for key, expected_value in mandatory_headers:
+ validate_mandatory_header(request, key, expected_value)
+
+
+def _build_location(request):
+ """Build WebSocket location for request."""
+
+ location_parts = []
+ if request.is_https():
+ location_parts.append(common.WEB_SOCKET_SECURE_SCHEME)
+ else:
+ location_parts.append(common.WEB_SOCKET_SCHEME)
+ location_parts.append('://')
+ host, port = parse_host_header(request)
+ connection_port = request.connection.local_addr[1]
+ if port != connection_port:
+ raise HandshakeException('Header/connection port mismatch: %d/%d' %
+ (port, connection_port))
+ location_parts.append(host)
+ if (port != get_default_port(request.is_https())):
+ location_parts.append(':')
+ location_parts.append(str(port))
+ location_parts.append(request.unparsed_uri)
+ return ''.join(location_parts)
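+
+# For example (illustrative): a non-secure request with a Host header of
+# 'example.com:8080' and an unparsed URI of '/echo' yields
+# 'ws://example.com:8080/echo'. The port suffix is omitted when the port
+# equals the scheme's default.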
+
+
+class Handshaker(object):
+ """Opening handshake processor for the WebSocket protocol version HyBi 00.
+ """
+
+ def __init__(self, request, dispatcher):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ dispatcher: Dispatcher (dispatch.Dispatcher).
+
+        Handshaker will add attributes such as ws_resource while performing
+        the handshake.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+ self._dispatcher = dispatcher
+
+ def do_handshake(self):
+ """Perform WebSocket Handshake.
+
+ On _request, we set
+ ws_resource, ws_protocol, ws_location, ws_origin, ws_challenge,
+ ws_challenge_md5: WebSocket handshake information.
+ ws_stream: Frame generation/parsing class.
+ ws_version: Protocol version.
+
+ Raises:
+            HandshakeException: when any error happens while parsing the
+                opening handshake request.
+ """
+
+ # 5.1 Reading the client's opening handshake.
+ # dispatcher sets it in self._request.
+ _check_header_lines(self._request, _MANDATORY_HEADERS)
+ self._set_resource()
+ self._set_subprotocol()
+ self._set_location()
+ self._set_origin()
+ self._set_challenge_response()
+ self._set_protocol_version()
+
+ self._dispatcher.do_extra_handshake(self._request)
+
+ self._send_handshake()
+
+ def _set_resource(self):
+ self._request.ws_resource = self._request.uri
+
+ def _set_subprotocol(self):
+ # |Sec-WebSocket-Protocol|
+ subprotocol = self._request.headers_in.get(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER)
+ if subprotocol is not None:
+ _validate_subprotocol(subprotocol)
+ self._request.ws_protocol = subprotocol
+
+ def _set_location(self):
+ # |Host|
+ host = self._request.headers_in.get(common.HOST_HEADER)
+ if host is not None:
+ self._request.ws_location = _build_location(self._request)
+ # TODO(ukai): check host is this host.
+
+ def _set_origin(self):
+ # |Origin|
+ origin = self._request.headers_in.get(common.ORIGIN_HEADER)
+ if origin is not None:
+ self._request.ws_origin = origin
+
+ def _set_protocol_version(self):
+ # |Sec-WebSocket-Draft|
+ draft = self._request.headers_in.get(common.SEC_WEBSOCKET_DRAFT_HEADER)
+ if draft is not None and draft != '0':
+ raise HandshakeException('Illegal value for %s: %s' %
+ (common.SEC_WEBSOCKET_DRAFT_HEADER,
+ draft))
+
+ self._logger.debug('Protocol version is HyBi 00')
+ self._request.ws_version = common.VERSION_HYBI00
+ self._request.ws_stream = StreamHixie75(self._request, True)
+
+ def _set_challenge_response(self):
+ # 5.2 4-8.
+ self._request.ws_challenge = self._get_challenge()
+        # 5.2 9. let /response/ be the MD5 fingerprint of /challenge/
+ self._request.ws_challenge_md5 = util.md5_hash(
+ self._request.ws_challenge).digest()
+ self._logger.debug(
+ 'Challenge: %r (%s)',
+ self._request.ws_challenge,
+ util.hexify(self._request.ws_challenge))
+ self._logger.debug(
+ 'Challenge response: %r (%s)',
+ self._request.ws_challenge_md5,
+ util.hexify(self._request.ws_challenge_md5))
+
+ def _get_key_value(self, key_field):
+ key_value = get_mandatory_header(self._request, key_field)
+
+ self._logger.debug('%s: %r', key_field, key_value)
+
+ # 5.2 4. let /key-number_n/ be the digits (characters in the range
+ # U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9)) in /key_n/,
+ # interpreted as a base ten integer, ignoring all other characters
+ # in /key_n/.
+ try:
+ key_number = int(re.sub("\\D", "", key_value))
+        except ValueError:
+ raise HandshakeException('%s field contains no digit' % key_field)
+ # 5.2 5. let /spaces_n/ be the number of U+0020 SPACE characters
+ # in /key_n/.
+ spaces = re.subn(" ", "", key_value)[1]
+ if spaces == 0:
+ raise HandshakeException('%s field contains no space' % key_field)
+
+ self._logger.debug(
+ '%s: Key-number is %d and number of spaces is %d',
+ key_field, key_number, spaces)
+
+ # 5.2 6. if /key-number_n/ is not an integral multiple of /spaces_n/
+ # then abort the WebSocket connection.
+ if key_number % spaces != 0:
+ raise HandshakeException(
+ '%s: Key-number (%d) is not an integral multiple of spaces '
+ '(%d)' % (key_field, key_number, spaces))
+ # 5.2 7. let /part_n/ be /key-number_n/ divided by /spaces_n/.
+ part = key_number / spaces
+ self._logger.debug('%s: Part is %d', key_field, part)
+ return part
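+
+    # A worked example of the computation above, with a hypothetical key
+    # value '1 2  480': the digits form key_number 12480, there are 3
+    # spaces, and 12480 is divisible by 3, so part = 12480 / 3 = 4160.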
+
+ def _get_challenge(self):
+ # 5.2 4-7.
+ key1 = self._get_key_value(common.SEC_WEBSOCKET_KEY1_HEADER)
+ key2 = self._get_key_value(common.SEC_WEBSOCKET_KEY2_HEADER)
+        # 5.2 8. let /challenge/ be the concatenation of /part_1/, /part_2/,
+        # and /key_3/.
+ challenge = ''
+ challenge += struct.pack('!I', key1) # network byteorder int
+ challenge += struct.pack('!I', key2) # network byteorder int
+ challenge += self._request.connection.read(8)
+ return challenge
+
+ def _send_handshake(self):
+ response = []
+
+ # 5.2 10. send the following line.
+ response.append('HTTP/1.1 101 WebSocket Protocol Handshake\r\n')
+
+ # 5.2 11. send the following fields to the client.
+ response.append(format_header(
+ common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75))
+ response.append(format_header(
+ common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+ response.append(format_header(
+ common.SEC_WEBSOCKET_LOCATION_HEADER, self._request.ws_location))
+ response.append(format_header(
+ common.SEC_WEBSOCKET_ORIGIN_HEADER, self._request.ws_origin))
+ if self._request.ws_protocol:
+ response.append(format_header(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+ self._request.ws_protocol))
+ # 5.2 12. send two bytes 0x0D 0x0A.
+ response.append('\r\n')
+ # 5.2 13. send /response/
+ response.append(self._request.ws_challenge_md5)
+
+ raw_response = ''.join(response)
+ self._request.connection.write(raw_response)
+ self._logger.debug('Sent server\'s opening handshake: %r',
+ raw_response)
+
+
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/headerparserhandler.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/headerparserhandler.py
new file mode 100644
index 0000000..c244421
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/headerparserhandler.py
@@ -0,0 +1,254 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""PythonHeaderParserHandler for mod_pywebsocket.
+
+Apache HTTP Server and mod_python must be configured such that this
+function is called to handle WebSocket requests.
+"""
+
+
+import logging
+
+from mod_python import apache
+
+from mod_pywebsocket import common
+from mod_pywebsocket import dispatch
+from mod_pywebsocket import handshake
+from mod_pywebsocket import util
+
+
+# PythonOption to specify the handler root directory.
+_PYOPT_HANDLER_ROOT = 'mod_pywebsocket.handler_root'
+
+# PythonOption to specify the handler scan directory.
+# This must be a directory under the root directory.
+# The default is the root directory.
+_PYOPT_HANDLER_SCAN = 'mod_pywebsocket.handler_scan'
+
+# PythonOption to allow handlers whose canonical path is not under the root
+# directory. This is disallowed by default. Set this option to 'yes' to
+# allow such handlers.
+_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT = (
+ 'mod_pywebsocket.allow_handlers_outside_root_dir')
+# Map from values to their meanings. 'Yes' and 'No' are allowed just for
+# compatibility.
+_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION = {
+ 'off': False, 'no': False, 'on': True, 'yes': True}
+
+# (Obsolete option. Ignored.)
+# PythonOption to allow the handshake defined in the Hixie 75 version of the
+# protocol. The default is None (off).
+_PYOPT_ALLOW_DRAFT75 = 'mod_pywebsocket.allow_draft75'
+# Map from values to their meanings.
+_PYOPT_ALLOW_DRAFT75_DEFINITION = {'off': False, 'on': True}
+
+
+class ApacheLogHandler(logging.Handler):
+ """Wrapper logging.Handler to emit log message to apache's error.log."""
+
+ _LEVELS = {
+ logging.DEBUG: apache.APLOG_DEBUG,
+ logging.INFO: apache.APLOG_INFO,
+ logging.WARNING: apache.APLOG_WARNING,
+ logging.ERROR: apache.APLOG_ERR,
+ logging.CRITICAL: apache.APLOG_CRIT,
+ }
+
+ def __init__(self, request=None):
+ logging.Handler.__init__(self)
+ self._log_error = apache.log_error
+ if request is not None:
+ self._log_error = request.log_error
+
+ # Time and level will be printed by Apache.
+ self._formatter = logging.Formatter('%(name)s: %(message)s')
+
+ def emit(self, record):
+ apache_level = apache.APLOG_DEBUG
+ if record.levelno in ApacheLogHandler._LEVELS:
+ apache_level = ApacheLogHandler._LEVELS[record.levelno]
+
+ msg = self._formatter.format(record)
+
+ # "server" parameter must be passed to have "level" parameter work.
+ # If only "level" parameter is passed, nothing shows up on Apache's
+ # log. However, at this point, we cannot get the server object of the
+ # virtual host which will process WebSocket requests. The only server
+ # object we can get here is apache.main_server. But Wherever (server
+ # configuration context or virtual host context) we put
+ # PythonHeaderParserHandler directive, apache.main_server just points
+ # the main server instance (not any of virtual server instance). Then,
+ # Apache follows LogLevel directive in the server configuration context
+ # to filter logs. So, we need to specify LogLevel in the server
+ # configuration context. Even if we specify "LogLevel debug" in the
+ # virtual host context which actually handles WebSocket connections,
+ # DEBUG level logs never show up unless "LogLevel debug" is specified
+ # in the server configuration context.
+ #
+ # TODO(tyoshino): Provide logging methods on request object. When
+ # request is mp_request object (when used together with Apache), the
+ # methods call request.log_error indirectly. When request is
+ # _StandaloneRequest, the methods call Python's logging facility which
+ # we create in standalone.py.
+ self._log_error(msg, apache_level, apache.main_server)
+
+
+def _configure_logging():
+ logger = logging.getLogger()
+    # Logs are filtered by Apache based on the LogLevel directive in the
+    # Apache configuration file, so we just pass logs at all levels to
+    # ApacheLogHandler.
+ logger.setLevel(logging.DEBUG)
+ logger.addHandler(ApacheLogHandler())
+
+
+_configure_logging()
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def _parse_option(name, value, definition):
+ if value is None:
+ return False
+
+ meaning = definition.get(value.lower())
+ if meaning is None:
+ raise Exception('Invalid value for PythonOption %s: %r' %
+ (name, value))
+ return meaning
+
+
+def _create_dispatcher():
+ _LOGGER.info('Initializing Dispatcher')
+
+ options = apache.main_server.get_options()
+
+ handler_root = options.get(_PYOPT_HANDLER_ROOT, None)
+ if not handler_root:
+ raise Exception('PythonOption %s is not defined' % _PYOPT_HANDLER_ROOT,
+ apache.APLOG_ERR)
+
+ handler_scan = options.get(_PYOPT_HANDLER_SCAN, handler_root)
+
+ allow_handlers_outside_root = _parse_option(
+ _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT,
+ options.get(_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT),
+ _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION)
+
+ dispatcher = dispatch.Dispatcher(
+ handler_root, handler_scan, allow_handlers_outside_root)
+
+ for warning in dispatcher.source_warnings():
+ apache.log_error(
+ 'mod_pywebsocket: Warning in source loading: %s' % warning,
+ apache.APLOG_WARNING)
+
+ return dispatcher
+
+
+# Initialize
+_dispatcher = _create_dispatcher()
+
+
+def headerparserhandler(request):
+ """Handle request.
+
+ Args:
+ request: mod_python request.
+
+ This function is named headerparserhandler because it is the default
+ name for a PythonHeaderParserHandler.
+ """
+
+ handshake_is_done = False
+ try:
+        # Fall back to the default HTTP handler for request paths for which
+        # we don't have request handlers.
+ if not _dispatcher.get_handler_suite(request.uri):
+ request.log_error(
+ 'mod_pywebsocket: No handler for resource: %r' % request.uri,
+ apache.APLOG_INFO)
+ request.log_error(
+ 'mod_pywebsocket: Fallback to Apache', apache.APLOG_INFO)
+ return apache.DECLINED
+ except dispatch.DispatchException, e:
+ request.log_error(
+ 'mod_pywebsocket: Dispatch failed for error: %s' % e,
+ apache.APLOG_INFO)
+ if not handshake_is_done:
+ return e.status
+
+ try:
+ allow_draft75 = _parse_option(
+ _PYOPT_ALLOW_DRAFT75,
+ apache.main_server.get_options().get(_PYOPT_ALLOW_DRAFT75),
+ _PYOPT_ALLOW_DRAFT75_DEFINITION)
+
+ try:
+ handshake.do_handshake(
+ request, _dispatcher, allowDraft75=allow_draft75)
+ except handshake.VersionException, e:
+ request.log_error(
+ 'mod_pywebsocket: Handshake failed for version error: %s' % e,
+ apache.APLOG_INFO)
+ request.err_headers_out.add(common.SEC_WEBSOCKET_VERSION_HEADER,
+ e.supported_versions)
+ return apache.HTTP_BAD_REQUEST
+ except handshake.HandshakeException, e:
+ # Handshake for ws/wss failed.
+ # Send http response with error status.
+ request.log_error(
+ 'mod_pywebsocket: Handshake failed for error: %s' % e,
+ apache.APLOG_INFO)
+ return e.status
+
+ handshake_is_done = True
+ request._dispatcher = _dispatcher
+ _dispatcher.transfer_data(request)
+ except handshake.AbortedByUserException, e:
+ request.log_error('mod_pywebsocket: Aborted: %s' % e, apache.APLOG_INFO)
+ except Exception, e:
+        # DispatchException can also be thrown if something is wrong in the
+        # pywebsocket code itself; in that case, it's caught here.
+
+ request.log_error('mod_pywebsocket: Exception occurred: %s\n%s' %
+ (e, util.get_stack_trace()),
+ apache.APLOG_ERR)
+        # Unknown exceptions before the handshake mean Apache must handle
+        # the request with another handler.
+ if not handshake_is_done:
+ return apache.DECLINED
+ # Set assbackwards to suppress response header generation by Apache.
+ request.assbackwards = 1
+ return apache.DONE # Return DONE such that no other handlers are invoked.
+
+
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/http_header_util.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/http_header_util.py
new file mode 100644
index 0000000..b774653
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/http_header_util.py
@@ -0,0 +1,263 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Utilities for parsing and formatting headers that follow the grammar defined
+in HTTP RFC http://www.ietf.org/rfc/rfc2616.txt.
+"""
+
+
+import urlparse
+
+
+_SEPARATORS = '()<>@,;:\\"/[]?={} \t'
+
+
+def _is_char(c):
+ """Returns true iff c is in CHAR as specified in HTTP RFC."""
+
+ return ord(c) <= 127
+
+
+def _is_ctl(c):
+ """Returns true iff c is in CTL as specified in HTTP RFC."""
+
+ return ord(c) <= 31 or ord(c) == 127
+
+
+class ParsingState(object):
+
+ def __init__(self, data):
+ self.data = data
+ self.head = 0
+
+
+def peek(state, pos=0):
+ """Peeks the character at pos from the head of data."""
+
+ if state.head + pos >= len(state.data):
+ return None
+
+ return state.data[state.head + pos]
+
+
+def consume(state, amount=1):
+ """Consumes specified amount of bytes from the head and returns the
+ consumed bytes. If there's not enough bytes to consume, returns None.
+ """
+
+ if state.head + amount > len(state.data):
+ return None
+
+ result = state.data[state.head:state.head + amount]
+ state.head = state.head + amount
+ return result
+
+
+def consume_string(state, expected):
+ """Given a parsing state and a expected string, consumes the string from
+ the head. Returns True if consumed successfully. Otherwise, returns
+ False.
+ """
+
+ pos = 0
+
+ for c in expected:
+ if c != peek(state, pos):
+ return False
+ pos += 1
+
+ consume(state, pos)
+ return True
+
+
+def consume_lws(state):
+ """Consumes a LWS from the head. Returns True if any LWS is consumed.
+ Otherwise, returns False.
+
+ LWS = [CRLF] 1*( SP | HT )
+ """
+
+ original_head = state.head
+
+ consume_string(state, '\r\n')
+
+ pos = 0
+
+ while True:
+ c = peek(state, pos)
+ if c == ' ' or c == '\t':
+ pos += 1
+ else:
+ if pos == 0:
+ state.head = original_head
+ return False
+ else:
+ consume(state, pos)
+ return True
+
+
+def consume_lwses(state):
+ """Consumes *LWS from the head."""
+
+ while consume_lws(state):
+ pass
+
+
+def consume_token(state):
+ """Consumes a token from the head. Returns the token or None if no token
+ was found.
+ """
+
+ pos = 0
+
+ while True:
+ c = peek(state, pos)
+ if c is None or c in _SEPARATORS or _is_ctl(c) or not _is_char(c):
+ if pos == 0:
+ return None
+
+ return consume(state, pos)
+ else:
+ pos += 1
+
+
+def consume_token_or_quoted_string(state):
+ """Consumes a token or a quoted-string, and returns the token or unquoted
+ string. If no token or quoted-string was found, returns None.
+ """
+
+ original_head = state.head
+
+ if not consume_string(state, '"'):
+ return consume_token(state)
+
+ result = []
+
+ expect_quoted_pair = False
+
+ while True:
+ if not expect_quoted_pair and consume_lws(state):
+ result.append(' ')
+ continue
+
+ c = consume(state)
+ if c is None:
+            # The quoted-string is not closed with a double quotation mark.
+ state.head = original_head
+ return None
+ elif expect_quoted_pair:
+ expect_quoted_pair = False
+ if _is_char(c):
+ result.append(c)
+ else:
+ # Non CHAR character found in quoted-pair
+ state.head = original_head
+ return None
+ elif c == '\\':
+ expect_quoted_pair = True
+ elif c == '"':
+ return ''.join(result)
+ elif _is_ctl(c):
+            # An invalid (CTL) character was found in qdtext.
+ state.head = original_head
+ return None
+ else:
+ result.append(c)
+
+
+def quote_if_necessary(s):
+ """Quotes arbitrary string into quoted-string."""
+
+ quote = False
+ if s == '':
+ return '""'
+
+ result = []
+ for c in s:
+ if c == '"' or c in _SEPARATORS or _is_ctl(c) or not _is_char(c):
+ quote = True
+
+ if c == '"' or _is_ctl(c):
+ result.append('\\' + c)
+ else:
+ result.append(c)
+
+ if quote:
+ return '"' + ''.join(result) + '"'
+ else:
+ return ''.join(result)
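+
+
+# Illustrative behavior (not from the original source):
+#
+#   quote_if_necessary('token')     returns 'token'
+#   quote_if_necessary('two words') returns '"two words"'
+#   quote_if_necessary('a,b')       returns '"a,b"' (',' is a separator)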
+
+
+def parse_uri(uri):
+ """Parse absolute URI then return host, port and resource."""
+
+ parsed = urlparse.urlsplit(uri)
+ if parsed.scheme != 'wss' and parsed.scheme != 'ws':
+ # |uri| must be a relative URI.
+ # TODO(toyoshim): Should validate |uri|.
+ return None, None, uri
+
+ if parsed.hostname is None:
+ return None, None, None
+
+ port = None
+ try:
+ port = parsed.port
+    except ValueError:
+        # The port property raises ValueError on an invalid or empty port
+        # description like 'ws://host:/path'.
+ return None, None, None
+
+ if port is None:
+ if parsed.scheme == 'ws':
+ port = 80
+ else:
+ port = 443
+
+ path = parsed.path
+ if not path:
+ path += '/'
+ if parsed.query:
+ path += '?' + parsed.query
+ if parsed.fragment:
+ path += '#' + parsed.fragment
+
+ return parsed.hostname, port, path
+
+
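+# A hedged sketch of the expected results; default ports are derived from
+# the scheme and the example URIs are illustrative only.
+def _demo_parse_uri():
+    assert parse_uri('ws://example.com/chat?q=1') == (
+        'example.com', 80, '/chat?q=1')
+    assert parse_uri('wss://example.com') == ('example.com', 443, '/')
+    assert parse_uri('/echo') == (None, None, '/echo')
+
+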
+try:
+ urlparse.uses_netloc.index('ws')
+except ValueError:
+ # urlparse in Python2.5.1 doesn't have 'ws' and 'wss' entries.
+ urlparse.uses_netloc.append('ws')
+ urlparse.uses_netloc.append('wss')
+
+
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/memorizingfile.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/memorizingfile.py
new file mode 100644
index 0000000..4d4cd95
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/memorizingfile.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Memorizing file.
+
+A memorizing file wraps a file and memorizes lines read by readline.
+"""
+
+
+import sys
+
+
+class MemorizingFile(object):
+ """MemorizingFile wraps a file and memorizes lines read by readline.
+
+    Note that data read by other methods is not memorized. This behavior
+ is good enough for memorizing lines SimpleHTTPServer reads before
+ the control reaches WebSocketRequestHandler.
+ """
+
+ def __init__(self, file_, max_memorized_lines=sys.maxint):
+ """Construct an instance.
+
+ Args:
+ file_: the file object to wrap.
+ max_memorized_lines: the maximum number of lines to memorize.
+ Only the first max_memorized_lines are memorized.
+ Default: sys.maxint.
+ """
+
+ self._file = file_
+ self._memorized_lines = []
+ self._max_memorized_lines = max_memorized_lines
+ self._buffered = False
+ self._buffered_line = None
+
+ def __getattribute__(self, name):
+ if name in ('_file', '_memorized_lines', '_max_memorized_lines',
+ '_buffered', '_buffered_line', 'readline',
+ 'get_memorized_lines'):
+ return object.__getattribute__(self, name)
+ return self._file.__getattribute__(name)
+
+ def readline(self, size=-1):
+ """Override file.readline and memorize the line read.
+
+        Note that even if size is specified and is smaller than the actual
+        line length, the whole line will be read out from the underlying
+        file object; the remainder is buffered and returned by subsequent
+        readline calls.
+ """
+
+ if self._buffered:
+ line = self._buffered_line
+ self._buffered = False
+ else:
+ line = self._file.readline()
+ if line and len(self._memorized_lines) < self._max_memorized_lines:
+ self._memorized_lines.append(line)
+ if size >= 0 and size < len(line):
+ self._buffered = True
+ self._buffered_line = line[size:]
+ return line[:size]
+ return line
+
+ def get_memorized_lines(self):
+ """Get lines memorized so far."""
+ return self._memorized_lines
+
+
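+# A short usage sketch (assuming Python 2's StringIO module): lines read
+# by readline() are memorized, while data read otherwise is not.
+def _demo_memorizing_file():
+    import StringIO
+    memorizing_file = MemorizingFile(
+        StringIO.StringIO('GET / HTTP/1.1\r\nHost: h\r\n\r\n'))
+    memorizing_file.readline()
+    memorizing_file.readline()
+    assert memorizing_file.get_memorized_lines() == [
+        'GET / HTTP/1.1\r\n', 'Host: h\r\n']
+
+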
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/msgutil.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/msgutil.py
new file mode 100644
index 0000000..4c1a011
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/msgutil.py
@@ -0,0 +1,219 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Message related utilities.
+
+Note: request.connection.write/read are used in this module, even though
+the mod_python documentation says that they should be used only in
+connection handlers. Unfortunately, we have no other option. For example,
+request.write/read are not suitable because they don't allow writing and
+reading raw bytes directly.
+"""
+
+
+import Queue
+import threading
+
+
+# Export Exception symbols from msgutil for backward compatibility
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+
+
+# An API for handler to send/receive WebSocket messages.
+def close_connection(request):
+ """Close connection.
+
+ Args:
+ request: mod_python request.
+ """
+ request.ws_stream.close_connection()
+
+
+def send_message(request, payload_data, end=True, binary=False):
+ """Send a message (or part of a message).
+
+ Args:
+ request: mod_python request.
+ payload_data: unicode text or str binary to send.
+ end: True to terminate a message.
+            False to send payload_data as part of a message that is to be
+            terminated by a next or later send_message call with end=True.
+        binary: send payload_data as binary frame(s).
+    Raises:
+        BadOperationException: when the server has already terminated the
+            connection.
+ """
+ request.ws_stream.send_message(payload_data, end, binary)
+
+
+def receive_message(request):
+ """Receive a WebSocket frame and return its payload as a text in
+ unicode or a binary in str.
+
+ Args:
+ request: mod_python request.
+ Raises:
+        InvalidFrameException: when the client sends an invalid frame.
+        UnsupportedFrameException: when the client sends an unsupported
+                                   frame, e.g. a reserved bit is set but
+                                   no extension can recognize it.
+        InvalidUTF8Exception: when the client sends a text frame containing
+                              an invalid UTF-8 string.
+        ConnectionTerminatedException: when the connection is closed
+                                       unexpectedly.
+        BadOperationException: when the client has already terminated.
+ """
+ return request.ws_stream.receive_message()
+
+
+def send_ping(request, body=''):
+    """Send a ping frame with the given body.
+
+    Args:
+        request: mod_python request.
+        body: the ping payload (at most 125 bytes).
+    """
+    request.ws_stream.send_ping(body)
+
+
+class MessageReceiver(threading.Thread):
+ """This class receives messages from the client.
+
+ This class provides three ways to receive messages: blocking,
+ non-blocking, and via callback. Callback has the highest precedence.
+
+ Note: This class should not be used with the standalone server for wss
+ because pyOpenSSL used by the server raises a fatal error if the socket
+ is accessed from multiple threads.
+ """
+
+ def __init__(self, request, onmessage=None):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ onmessage: a function to be called when a message is received.
+ May be None. If not None, the function is called on
+ another thread. In that case, MessageReceiver.receive
+ and MessageReceiver.receive_nowait are useless
+ because they will never return any messages.
+ """
+
+ threading.Thread.__init__(self)
+ self._request = request
+ self._queue = Queue.Queue()
+ self._onmessage = onmessage
+ self._stop_requested = False
+ self.setDaemon(True)
+ self.start()
+
+ def run(self):
+ try:
+ while not self._stop_requested:
+ message = receive_message(self._request)
+ if self._onmessage:
+ self._onmessage(message)
+ else:
+ self._queue.put(message)
+ finally:
+ close_connection(self._request)
+
+ def receive(self):
+ """ Receive a message from the channel, blocking.
+
+ Returns:
+ message as a unicode string.
+ """
+ return self._queue.get()
+
+ def receive_nowait(self):
+ """ Receive a message from the channel, non-blocking.
+
+ Returns:
+ message as a unicode string if available. None otherwise.
+ """
+ try:
+ message = self._queue.get_nowait()
+ except Queue.Empty:
+ message = None
+ return message
+
+ def stop(self):
+ """Request to stop this instance.
+
+ The instance will be stopped after receiving the next message.
+ This method may not be very useful, but there is no clean way
+ in Python to forcefully stop a running thread.
+ """
+ self._stop_requested = True
+
+
+class MessageSender(threading.Thread):
+ """This class sends messages to the client.
+
+ This class provides both synchronous and asynchronous ways to send
+ messages.
+
+ Note: This class should not be used with the standalone server for wss
+ because pyOpenSSL used by the server raises a fatal error if the socket
+ is accessed from multiple threads.
+ """
+
+ def __init__(self, request):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ """
+ threading.Thread.__init__(self)
+ self._request = request
+ self._queue = Queue.Queue()
+ self.setDaemon(True)
+ self.start()
+
+ def run(self):
+ while True:
+ message, condition = self._queue.get()
+ condition.acquire()
+ send_message(self._request, message)
+ condition.notify()
+ condition.release()
+
+ def send(self, message):
+ """Send a message, blocking."""
+
+ condition = threading.Condition()
+ condition.acquire()
+ self._queue.put((message, condition))
+ condition.wait()
+
+ def send_nowait(self, message):
+ """Send a message, non-blocking."""
+
+ self._queue.put((message, threading.Condition()))
+
+
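+# A hedged sketch of a transfer-data handler built on the helpers above;
+# the framework supplies the request object, and the echo behavior is
+# illustrative only.
+def _example_transfer_data(request):
+    while True:
+        message = receive_message(request)  # blocks until a message arrives
+        if message is None:
+            return  # the client started the closing handshake
+        send_message(request, message)      # echo the message back
+
+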
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/mux.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/mux.py
new file mode 100644
index 0000000..7633468
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/mux.py
@@ -0,0 +1,1889 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides classes and helper functions for multiplexing extension.
+
+Specification:
+http://tools.ietf.org/html/draft-ietf-hybi-websocket-multiplexing-06
+"""
+
+
+import collections
+import copy
+import email
+import email.parser
+import logging
+import math
+import struct
+import threading
+import traceback
+
+from mod_pywebsocket import common
+from mod_pywebsocket import handshake
+from mod_pywebsocket import util
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_hybi import Frame
+from mod_pywebsocket._stream_hybi import Stream
+from mod_pywebsocket._stream_hybi import StreamOptions
+from mod_pywebsocket._stream_hybi import create_binary_frame
+from mod_pywebsocket._stream_hybi import create_closing_handshake_body
+from mod_pywebsocket._stream_hybi import create_header
+from mod_pywebsocket._stream_hybi import create_length_header
+from mod_pywebsocket._stream_hybi import parse_frame
+from mod_pywebsocket.handshake import hybi
+
+
+_CONTROL_CHANNEL_ID = 0
+_DEFAULT_CHANNEL_ID = 1
+
+_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
+_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
+_MUX_OPCODE_FLOW_CONTROL = 2
+_MUX_OPCODE_DROP_CHANNEL = 3
+_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
+
+_MAX_CHANNEL_ID = 2 ** 29 - 1
+
+_INITIAL_NUMBER_OF_CHANNEL_SLOTS = 64
+_INITIAL_QUOTA_FOR_CLIENT = 8 * 1024
+
+_HANDSHAKE_ENCODING_IDENTITY = 0
+_HANDSHAKE_ENCODING_DELTA = 1
+
+# We need only these status code for now.
+_HTTP_BAD_RESPONSE_MESSAGES = {
+ common.HTTP_STATUS_BAD_REQUEST: 'Bad Request',
+}
+
+# DropChannel reason code
+# TODO(bashi): Define all reason codes defined in the -05 draft.
+_DROP_CODE_NORMAL_CLOSURE = 1000
+
+_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE = 2001
+_DROP_CODE_CHANNEL_ID_TRUNCATED = 2002
+_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED = 2003
+_DROP_CODE_UNKNOWN_MUX_OPCODE = 2004
+_DROP_CODE_INVALID_MUX_CONTROL_BLOCK = 2005
+_DROP_CODE_CHANNEL_ALREADY_EXISTS = 2006
+_DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION = 2007
+_DROP_CODE_UNKNOWN_REQUEST_ENCODING = 2010
+
+_DROP_CODE_SEND_QUOTA_VIOLATION = 3005
+_DROP_CODE_SEND_QUOTA_OVERFLOW = 3006
+_DROP_CODE_ACKNOWLEDGED = 3008
+_DROP_CODE_BAD_FRAGMENTATION = 3009
+
+
+class MuxUnexpectedException(Exception):
+ """Exception in handling multiplexing extension."""
+ pass
+
+
+# Temporary
+class MuxNotImplementedException(Exception):
+ """Raised when a flow enters unimplemented code path."""
+ pass
+
+
+class LogicalConnectionClosedException(Exception):
+ """Raised when logical connection is gracefully closed."""
+ pass
+
+
+class PhysicalConnectionError(Exception):
+ """Raised when there is a physical connection error."""
+ def __init__(self, drop_code, message=''):
+ super(PhysicalConnectionError, self).__init__(
+ 'code=%d, message=%r' % (drop_code, message))
+ self.drop_code = drop_code
+ self.message = message
+
+
+class LogicalChannelError(Exception):
+ """Raised when there is a logical channel error."""
+ def __init__(self, channel_id, drop_code, message=''):
+ super(LogicalChannelError, self).__init__(
+ 'channel_id=%d, code=%d, message=%r' % (
+ channel_id, drop_code, message))
+ self.channel_id = channel_id
+ self.drop_code = drop_code
+ self.message = message
+
+
+def _encode_channel_id(channel_id):
+ if channel_id < 0:
+ raise ValueError('Channel id %d must not be negative' % channel_id)
+
+ if channel_id < 2 ** 7:
+ return chr(channel_id)
+ if channel_id < 2 ** 14:
+ return struct.pack('!H', 0x8000 + channel_id)
+ if channel_id < 2 ** 21:
+ first = chr(0xc0 + (channel_id >> 16))
+ return first + struct.pack('!H', channel_id & 0xffff)
+ if channel_id < 2 ** 29:
+ return struct.pack('!L', 0xe0000000 + channel_id)
+
+ raise ValueError('Channel id %d is too large' % channel_id)
+
+
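+# Hedged sanity checks for the variable-length channel id encoding above
+# (1, 2, 3 or 4 bytes depending on the magnitude of the id):
+def _demo_encode_channel_id():
+    assert _encode_channel_id(1) == '\x01'
+    assert _encode_channel_id(300) == '\x81\x2c'
+    assert _encode_channel_id(2 ** 20) == '\xd0\x00\x00'
+
+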
+def _encode_number(number):
+ return create_length_header(number, False)
+
+
+def _create_add_channel_response(channel_id, encoded_handshake,
+ encoding=0, rejected=False):
+ if encoding != 0 and encoding != 1:
+ raise ValueError('Invalid encoding %d' % encoding)
+
+ first_byte = ((_MUX_OPCODE_ADD_CHANNEL_RESPONSE << 5) |
+ (rejected << 4) | encoding)
+ block = (chr(first_byte) +
+ _encode_channel_id(channel_id) +
+ _encode_number(len(encoded_handshake)) +
+ encoded_handshake)
+ return block
+
+
+def _create_drop_channel(channel_id, code=None, message=''):
+ if len(message) > 0 and code is None:
+ raise ValueError('Code must be specified if message is specified')
+
+ first_byte = _MUX_OPCODE_DROP_CHANNEL << 5
+ block = chr(first_byte) + _encode_channel_id(channel_id)
+ if code is None:
+ block += _encode_number(0) # Reason size
+ else:
+ reason = struct.pack('!H', code) + message
+ reason_size = _encode_number(len(reason))
+ block += reason_size + reason
+
+ return block
+
+
+def _create_flow_control(channel_id, replenished_quota):
+ first_byte = _MUX_OPCODE_FLOW_CONTROL << 5
+ block = (chr(first_byte) +
+ _encode_channel_id(channel_id) +
+ _encode_number(replenished_quota))
+ return block
+
+
+def _create_new_channel_slot(slots, send_quota):
+ if slots < 0 or send_quota < 0:
+ raise ValueError('slots and send_quota must be non-negative.')
+ first_byte = _MUX_OPCODE_NEW_CHANNEL_SLOT << 5
+ block = (chr(first_byte) +
+ _encode_number(slots) +
+ _encode_number(send_quota))
+ return block
+
+
+def _create_fallback_new_channel_slot():
+ first_byte = (_MUX_OPCODE_NEW_CHANNEL_SLOT << 5) | 1 # Set the F flag
+ block = (chr(first_byte) + _encode_number(0) + _encode_number(0))
+ return block
+
+
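+# A hedged layout sketch: a FlowControl block is the opcode in the top
+# three bits of the first byte, then the channel id, then the quota as a
+# WebSocket-style length-encoded number (0x7e marks a 2-byte length).
+def _demo_create_flow_control():
+    block = _create_flow_control(1, 1024)
+    expected = (chr(_MUX_OPCODE_FLOW_CONTROL << 5) + '\x01' +
+                '\x7e\x04\x00')
+    assert block == expected
+
+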
+def _parse_request_text(request_text):
+ request_line, header_lines = request_text.split('\r\n', 1)
+
+ words = request_line.split(' ')
+ if len(words) != 3:
+ raise ValueError('Bad Request-Line syntax %r' % request_line)
+ [command, path, version] = words
+ if version != 'HTTP/1.1':
+ raise ValueError('Bad request version %r' % version)
+
+ # email.parser.Parser() parses RFC 2822 (RFC 822) style headers.
+    # RFC 6455 refers to RFC 2616 for handshake parsing, and RFC 2616
+    # refers to RFC 822.
+ headers = email.parser.Parser().parsestr(header_lines)
+ return command, path, version, headers
+
+
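+# An illustrative call (the request text below is an example only):
+def _demo_parse_request_text():
+    command, path, version, headers = _parse_request_text(
+        'GET /chat HTTP/1.1\r\n'
+        'Host: example.com\r\n'
+        '\r\n')
+    assert (command, path, version) == ('GET', '/chat', 'HTTP/1.1')
+    assert headers['Host'] == 'example.com'
+
+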
+class _ControlBlock(object):
+ """A structure that holds parsing result of multiplexing control block.
+ Control block specific attributes will be added by _MuxFramePayloadParser.
+ (e.g. encoded_handshake will be added for AddChannelRequest and
+ AddChannelResponse)
+ """
+
+ def __init__(self, opcode):
+ self.opcode = opcode
+
+
+class _MuxFramePayloadParser(object):
+ """A class that parses multiplexed frame payload."""
+
+ def __init__(self, payload):
+ self._data = payload
+ self._read_position = 0
+ self._logger = util.get_class_logger(self)
+
+ def read_channel_id(self):
+ """Reads channel id.
+
+ Raises:
+            ValueError: when the payload doesn't contain a valid channel
+                id.
+ """
+
+ remaining_length = len(self._data) - self._read_position
+ pos = self._read_position
+ if remaining_length == 0:
+ raise ValueError('Invalid channel id format')
+
+ channel_id = ord(self._data[pos])
+ channel_id_length = 1
+ if channel_id & 0xe0 == 0xe0:
+ if remaining_length < 4:
+ raise ValueError('Invalid channel id format')
+ channel_id = struct.unpack('!L',
+ self._data[pos:pos+4])[0] & 0x1fffffff
+ channel_id_length = 4
+ elif channel_id & 0xc0 == 0xc0:
+ if remaining_length < 3:
+ raise ValueError('Invalid channel id format')
+ channel_id = (((channel_id & 0x1f) << 16) +
+ struct.unpack('!H', self._data[pos+1:pos+3])[0])
+ channel_id_length = 3
+ elif channel_id & 0x80 == 0x80:
+ if remaining_length < 2:
+ raise ValueError('Invalid channel id format')
+ channel_id = struct.unpack('!H',
+ self._data[pos:pos+2])[0] & 0x3fff
+ channel_id_length = 2
+ self._read_position += channel_id_length
+
+ return channel_id
+
+ def read_inner_frame(self):
+ """Reads an inner frame.
+
+ Raises:
+ PhysicalConnectionError: when the inner frame is invalid.
+ """
+
+ if len(self._data) == self._read_position:
+ raise PhysicalConnectionError(
+ _DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED)
+
+ bits = ord(self._data[self._read_position])
+ self._read_position += 1
+ fin = (bits & 0x80) == 0x80
+ rsv1 = (bits & 0x40) == 0x40
+ rsv2 = (bits & 0x20) == 0x20
+ rsv3 = (bits & 0x10) == 0x10
+ opcode = bits & 0xf
+ payload = self.remaining_data()
+ # Consume rest of the message which is payload data of the original
+ # frame.
+ self._read_position = len(self._data)
+ return fin, rsv1, rsv2, rsv3, opcode, payload
+
+ def _read_number(self):
+ if self._read_position + 1 > len(self._data):
+ raise ValueError(
+ 'Cannot read the first byte of number field')
+
+ number = ord(self._data[self._read_position])
+ if number & 0x80 == 0x80:
+ raise ValueError(
+ 'The most significant bit of the first byte of number should '
+ 'be unset')
+ self._read_position += 1
+ pos = self._read_position
+ if number == 127:
+ if pos + 8 > len(self._data):
+ raise ValueError('Invalid number field')
+ self._read_position += 8
+ number = struct.unpack('!Q', self._data[pos:pos+8])[0]
+ if number > 0x7FFFFFFFFFFFFFFF:
+ raise ValueError('Encoded number(%d) >= 2^63' % number)
+ if number <= 0xFFFF:
+ raise ValueError(
+ '%d should not be encoded by 9 bytes encoding' % number)
+ return number
+ if number == 126:
+ if pos + 2 > len(self._data):
+ raise ValueError('Invalid number field')
+ self._read_position += 2
+ number = struct.unpack('!H', self._data[pos:pos+2])[0]
+ if number <= 125:
+ raise ValueError(
+ '%d should not be encoded by 3 bytes encoding' % number)
+ return number
+
+ def _read_size_and_contents(self):
+ """Reads data that consists of followings:
+ - the size of the contents encoded the same way as payload length
+ of the WebSocket Protocol with 1 bit padding at the head.
+ - the contents.
+ """
+
+ try:
+ size = self._read_number()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ str(e))
+ pos = self._read_position
+ if pos + size > len(self._data):
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Cannot read %d bytes data' % size)
+
+ self._read_position += size
+ return self._data[pos:pos+size]
+
+ def _read_add_channel_request(self, first_byte, control_block):
+ reserved = (first_byte >> 2) & 0x7
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ # Invalid encoding will be handled by MuxHandler.
+ encoding = first_byte & 0x3
+ try:
+ control_block.channel_id = self.read_channel_id()
+        except ValueError:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ control_block.encoding = encoding
+ encoded_handshake = self._read_size_and_contents()
+ control_block.encoded_handshake = encoded_handshake
+ return control_block
+
+ def _read_add_channel_response(self, first_byte, control_block):
+ reserved = (first_byte >> 2) & 0x3
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ control_block.accepted = (first_byte >> 4) & 1
+ control_block.encoding = first_byte & 0x3
+ try:
+ control_block.channel_id = self.read_channel_id()
+        except ValueError:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ control_block.encoded_handshake = self._read_size_and_contents()
+ return control_block
+
+ def _read_flow_control(self, first_byte, control_block):
+ reserved = first_byte & 0x1f
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ try:
+ control_block.channel_id = self.read_channel_id()
+ control_block.send_quota = self._read_number()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ str(e))
+
+ return control_block
+
+ def _read_drop_channel(self, first_byte, control_block):
+ reserved = first_byte & 0x1f
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ try:
+ control_block.channel_id = self.read_channel_id()
+        except ValueError:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ reason = self._read_size_and_contents()
+ if len(reason) == 0:
+ control_block.drop_code = None
+ control_block.drop_message = ''
+ elif len(reason) >= 2:
+ control_block.drop_code = struct.unpack('!H', reason[:2])[0]
+ control_block.drop_message = reason[2:]
+ else:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                'Received DropChannel that contains only a 1-byte reason')
+ return control_block
+
+ def _read_new_channel_slot(self, first_byte, control_block):
+ reserved = first_byte & 0x1e
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+ control_block.fallback = first_byte & 1
+ try:
+ control_block.slots = self._read_number()
+ control_block.send_quota = self._read_number()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ str(e))
+ return control_block
+
+ def read_control_blocks(self):
+ """Reads control block(s).
+
+ Raises:
+ PhysicalConnectionError: when the payload contains invalid control
+ block(s).
+            StopIteration: when no control blocks are left.
+ """
+
+ while self._read_position < len(self._data):
+ first_byte = ord(self._data[self._read_position])
+ self._read_position += 1
+ opcode = (first_byte >> 5) & 0x7
+ control_block = _ControlBlock(opcode=opcode)
+ if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
+ yield self._read_add_channel_request(first_byte, control_block)
+ elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
+ yield self._read_add_channel_response(
+ first_byte, control_block)
+ elif opcode == _MUX_OPCODE_FLOW_CONTROL:
+ yield self._read_flow_control(first_byte, control_block)
+ elif opcode == _MUX_OPCODE_DROP_CHANNEL:
+ yield self._read_drop_channel(first_byte, control_block)
+ elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
+ yield self._read_new_channel_slot(first_byte, control_block)
+ else:
+ raise PhysicalConnectionError(
+ _DROP_CODE_UNKNOWN_MUX_OPCODE,
+ 'Invalid opcode %d' % opcode)
+
+ assert self._read_position == len(self._data)
+ raise StopIteration
+
+ def remaining_data(self):
+ """Returns remaining data."""
+
+ return self._data[self._read_position:]
+
+
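+# A hedged decoding sketch: '\x01' is channel id 1, and '\x81' packs
+# FIN=1 with the text opcode (0x1); the rest is the inner frame payload.
+def _demo_mux_frame_payload_parser():
+    parser = _MuxFramePayloadParser('\x01\x81Hello')
+    channel_id = parser.read_channel_id()
+    fin, rsv1, rsv2, rsv3, opcode, payload = parser.read_inner_frame()
+    assert (channel_id, fin, opcode, payload) == (1, True, 0x1, 'Hello')
+
+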
+class _LogicalRequest(object):
+ """Mimics mod_python request."""
+
+ def __init__(self, channel_id, command, path, protocol, headers,
+ connection):
+ """Constructs an instance.
+
+ Args:
+ channel_id: the channel id of the logical channel.
+ command: HTTP request command.
+            path: HTTP request path.
+            protocol: HTTP request protocol (e.g. 'HTTP/1.1').
+            headers: HTTP headers.
+ connection: _LogicalConnection instance.
+ """
+
+ self.channel_id = channel_id
+ self.method = command
+ self.uri = path
+ self.protocol = protocol
+ self.headers_in = headers
+ self.connection = connection
+ self.server_terminated = False
+ self.client_terminated = False
+
+ def is_https(self):
+ """Mimics request.is_https(). Returns False because this method is
+ used only by old protocols (hixie and hybi00).
+ """
+
+ return False
+
+
+class _LogicalConnection(object):
+ """Mimics mod_python mp_conn."""
+
+ # For details, see the comment of set_read_state().
+ STATE_ACTIVE = 1
+ STATE_GRACEFULLY_CLOSED = 2
+ STATE_TERMINATED = 3
+
+ def __init__(self, mux_handler, channel_id):
+ """Constructs an instance.
+
+ Args:
+ mux_handler: _MuxHandler instance.
+ channel_id: channel id of this connection.
+ """
+
+ self._mux_handler = mux_handler
+ self._channel_id = channel_id
+ self._incoming_data = ''
+
+ # - Protects _waiting_write_completion
+ # - Signals the thread waiting for completion of write by mux handler
+ self._write_condition = threading.Condition()
+ self._waiting_write_completion = False
+
+ self._read_condition = threading.Condition()
+ self._read_state = self.STATE_ACTIVE
+
+ def get_local_addr(self):
+ """Getter to mimic mp_conn.local_addr."""
+
+ return self._mux_handler.physical_connection.get_local_addr()
+ local_addr = property(get_local_addr)
+
+ def get_remote_addr(self):
+ """Getter to mimic mp_conn.remote_addr."""
+
+ return self._mux_handler.physical_connection.get_remote_addr()
+ remote_addr = property(get_remote_addr)
+
+ def get_memorized_lines(self):
+ """Gets memorized lines. Not supported."""
+
+ raise MuxUnexpectedException('_LogicalConnection does not support '
+ 'get_memorized_lines')
+
+ def write(self, data):
+ """Writes data. mux_handler sends data asynchronously. The caller will
+        be suspended until the write is done.
+
+ Args:
+ data: data to be written.
+
+ Raises:
+ MuxUnexpectedException: when called before finishing the previous
+ write.
+ """
+
+ try:
+ self._write_condition.acquire()
+ if self._waiting_write_completion:
+ raise MuxUnexpectedException(
+                    'Logical connection %d is already waiting for the '
+                    'completion of a write' % self._channel_id)
+
+ self._waiting_write_completion = True
+ self._mux_handler.send_data(self._channel_id, data)
+ self._write_condition.wait()
+ # TODO(tyoshino): Raise an exception if woke up by on_writer_done.
+ finally:
+ self._write_condition.release()
+
+ def write_control_data(self, data):
+ """Writes data via the control channel. Don't wait finishing write
+ because this method can be called by mux dispatcher.
+
+ Args:
+ data: data to be written.
+ """
+
+ self._mux_handler.send_control_data(data)
+
+ def on_write_data_done(self):
+ """Called when sending data is completed."""
+
+ try:
+ self._write_condition.acquire()
+ if not self._waiting_write_completion:
+ raise MuxUnexpectedException(
+ 'Invalid call of on_write_data_done for logical '
+ 'connection %d' % self._channel_id)
+ self._waiting_write_completion = False
+ self._write_condition.notify()
+ finally:
+ self._write_condition.release()
+
+ def on_writer_done(self):
+ """Called by the mux handler when the writer thread has finished."""
+
+ try:
+ self._write_condition.acquire()
+ self._waiting_write_completion = False
+ self._write_condition.notify()
+ finally:
+ self._write_condition.release()
+
+ def append_frame_data(self, frame_data):
+ """Appends incoming frame data. Called when mux_handler dispatches
+ frame data to the corresponding application.
+
+ Args:
+ frame_data: incoming frame data.
+ """
+
+ self._read_condition.acquire()
+ self._incoming_data += frame_data
+ self._read_condition.notify()
+ self._read_condition.release()
+
+ def read(self, length):
+ """Reads data. Blocks until enough data has arrived via physical
+ connection.
+
+ Args:
+ length: length of data to be read.
+ Raises:
+ LogicalConnectionClosedException: when closing handshake for this
+ logical channel has been received.
+ ConnectionTerminatedException: when the physical connection has
+ closed, or an error is caused on the reader thread.
+ """
+
+ self._read_condition.acquire()
+ while (self._read_state == self.STATE_ACTIVE and
+ len(self._incoming_data) < length):
+ self._read_condition.wait()
+
+ try:
+ if self._read_state == self.STATE_GRACEFULLY_CLOSED:
+ raise LogicalConnectionClosedException(
+ 'Logical channel %d has closed.' % self._channel_id)
+ elif self._read_state == self.STATE_TERMINATED:
+ raise ConnectionTerminatedException(
+                    'Receiving %d bytes failed. Logical channel (%d) closed' %
+ (length, self._channel_id))
+
+ value = self._incoming_data[:length]
+ self._incoming_data = self._incoming_data[length:]
+ finally:
+ self._read_condition.release()
+
+ return value
+
+ def set_read_state(self, new_state):
+ """Sets the state of this connection. Called when an event for this
+ connection has occurred.
+
+ Args:
+            new_state: state to be set. It must be one of the following:
+ - STATE_GRACEFULLY_CLOSED: when closing handshake for this
+ connection has been received.
+ - STATE_TERMINATED: when the physical connection has closed or
+                    DropChannel for this connection has been received.
+ """
+
+ self._read_condition.acquire()
+ self._read_state = new_state
+ self._read_condition.notify()
+ self._read_condition.release()
+
+
+class _InnerMessage(object):
+ """Holds the result of _InnerMessageBuilder.build().
+ """
+
+ def __init__(self, opcode, payload):
+ self.opcode = opcode
+ self.payload = payload
+
+
+class _InnerMessageBuilder(object):
+ """A class that holds the context of inner message fragmentation and
+ builds a message from fragmented inner frame(s).
+ """
+
+ def __init__(self):
+ self._control_opcode = None
+ self._pending_control_fragments = []
+ self._message_opcode = None
+ self._pending_message_fragments = []
+ self._frame_handler = self._handle_first
+
+ def _handle_first(self, frame):
+ if frame.opcode == common.OPCODE_CONTINUATION:
+ raise InvalidFrameException('Sending invalid continuation opcode')
+
+ if common.is_control_opcode(frame.opcode):
+ return self._process_first_fragmented_control(frame)
+ else:
+ return self._process_first_fragmented_message(frame)
+
+ def _process_first_fragmented_control(self, frame):
+ self._control_opcode = frame.opcode
+ self._pending_control_fragments.append(frame.payload)
+ if not frame.fin:
+ self._frame_handler = self._handle_fragmented_control
+ return None
+ return self._reassemble_fragmented_control()
+
+ def _process_first_fragmented_message(self, frame):
+ self._message_opcode = frame.opcode
+ self._pending_message_fragments.append(frame.payload)
+ if not frame.fin:
+ self._frame_handler = self._handle_fragmented_message
+ return None
+ return self._reassemble_fragmented_message()
+
+ def _handle_fragmented_control(self, frame):
+ if frame.opcode != common.OPCODE_CONTINUATION:
+ raise InvalidFrameException(
+ 'Sending invalid opcode %d while sending fragmented control '
+ 'message' % frame.opcode)
+ self._pending_control_fragments.append(frame.payload)
+ if not frame.fin:
+ return None
+ return self._reassemble_fragmented_control()
+
+ def _reassemble_fragmented_control(self):
+ opcode = self._control_opcode
+ payload = ''.join(self._pending_control_fragments)
+ self._control_opcode = None
+ self._pending_control_fragments = []
+ if self._message_opcode is not None:
+ self._frame_handler = self._handle_fragmented_message
+ else:
+ self._frame_handler = self._handle_first
+ return _InnerMessage(opcode, payload)
+
+ def _handle_fragmented_message(self, frame):
+ # Sender can interleave a control message while sending fragmented
+ # messages.
+ if common.is_control_opcode(frame.opcode):
+ if self._control_opcode is not None:
+ raise MuxUnexpectedException(
+                    'Should not reach here (bug in builder)')
+ return self._process_first_fragmented_control(frame)
+
+ if frame.opcode != common.OPCODE_CONTINUATION:
+ raise InvalidFrameException(
+ 'Sending invalid opcode %d while sending fragmented message' %
+ frame.opcode)
+ self._pending_message_fragments.append(frame.payload)
+ if not frame.fin:
+ return None
+ return self._reassemble_fragmented_message()
+
+ def _reassemble_fragmented_message(self):
+ opcode = self._message_opcode
+ payload = ''.join(self._pending_message_fragments)
+ self._message_opcode = None
+ self._pending_message_fragments = []
+ self._frame_handler = self._handle_first
+ return _InnerMessage(opcode, payload)
+
+ def build(self, frame):
+ """Build an inner message. Returns an _InnerMessage instance when
+ the given frame is the last fragmented frame. Returns None otherwise.
+
+ Args:
+ frame: an inner frame.
+ Raises:
+            InvalidFrameException: when an invalid opcode is received
+                (e.g. a non-continuation data opcode is received while the
+                fin flag of the previous inner frame was not set).
+ """
+
+ return self._frame_handler(frame)
+
+
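+# A hedged reassembly sketch: a text frame with fin unset followed by a
+# continuation frame with fin set yields one complete _InnerMessage.
+def _demo_inner_message_builder():
+    builder = _InnerMessageBuilder()
+    assert builder.build(
+        Frame(fin=0, opcode=common.OPCODE_TEXT, payload='Hel')) is None
+    message = builder.build(
+        Frame(fin=1, opcode=common.OPCODE_CONTINUATION, payload='lo'))
+    assert (message.opcode, message.payload) == (
+        common.OPCODE_TEXT, 'Hello')
+
+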
+class _LogicalStream(Stream):
+ """Mimics the Stream class. This class interprets multiplexed WebSocket
+ frames.
+ """
+
+ def __init__(self, request, stream_options, send_quota, receive_quota):
+ """Constructs an instance.
+
+ Args:
+ request: _LogicalRequest instance.
+ stream_options: StreamOptions instance.
+ send_quota: Initial send quota.
+ receive_quota: Initial receive quota.
+ """
+
+ # Physical stream is responsible for masking.
+ stream_options.unmask_receive = False
+ Stream.__init__(self, request, stream_options)
+
+ self._send_closed = False
+ self._send_quota = send_quota
+ # - Protects _send_closed and _send_quota
+ # - Signals the thread waiting for send quota replenished
+ self._send_condition = threading.Condition()
+
+ # The opcode of the first frame in messages.
+ self._message_opcode = common.OPCODE_TEXT
+ # True when the last message was fragmented.
+ self._last_message_was_fragmented = False
+
+ self._receive_quota = receive_quota
+ self._write_inner_frame_semaphore = threading.Semaphore()
+
+ self._inner_message_builder = _InnerMessageBuilder()
+
+ def _create_inner_frame(self, opcode, payload, end=True):
+ frame = Frame(fin=end, opcode=opcode, payload=payload)
+ for frame_filter in self._options.outgoing_frame_filters:
+ frame_filter.filter(frame)
+
+ if len(payload) != len(frame.payload):
+ raise MuxUnexpectedException(
+                'Mux extension must not be used after extensions which '
+                'change frame boundary')
+
+ first_byte = ((frame.fin << 7) | (frame.rsv1 << 6) |
+ (frame.rsv2 << 5) | (frame.rsv3 << 4) | frame.opcode)
+ return chr(first_byte) + frame.payload
+
+ def _write_inner_frame(self, opcode, payload, end=True):
+ payload_length = len(payload)
+ write_position = 0
+
+ try:
+            # An inner frame will be fragmented if there is not enough send
+ # quota. This semaphore ensures that fragmented inner frames are
+ # sent in order on the logical channel.
+ # Note that frames that come from other logical channels or
+ # multiplexing control blocks can be inserted between fragmented
+ # inner frames on the physical channel.
+ self._write_inner_frame_semaphore.acquire()
+
+ # Consume an octet quota when this is the first fragmented frame.
+ if opcode != common.OPCODE_CONTINUATION:
+ try:
+ self._send_condition.acquire()
+ while (not self._send_closed) and self._send_quota == 0:
+ self._send_condition.wait()
+
+ if self._send_closed:
+ raise BadOperationException(
+ 'Logical connection %d is closed' %
+ self._request.channel_id)
+
+ self._send_quota -= 1
+ finally:
+ self._send_condition.release()
+
+ while write_position < payload_length:
+ try:
+ self._send_condition.acquire()
+ while (not self._send_closed) and self._send_quota == 0:
+ self._logger.debug(
+ 'No quota. Waiting FlowControl message for %d.' %
+ self._request.channel_id)
+ self._send_condition.wait()
+
+ if self._send_closed:
+ raise BadOperationException(
+ 'Logical connection %d is closed' %
+                            self._request.channel_id)
+
+ remaining = payload_length - write_position
+ write_length = min(self._send_quota, remaining)
+ inner_frame_end = (
+ end and
+ (write_position + write_length == payload_length))
+
+ inner_frame = self._create_inner_frame(
+ opcode,
+ payload[write_position:write_position+write_length],
+ inner_frame_end)
+ self._send_quota -= write_length
+ self._logger.debug('Consumed quota=%d, remaining=%d' %
+ (write_length, self._send_quota))
+ finally:
+ self._send_condition.release()
+
+ # Writing data will block the worker so we need to release
+ # _send_condition before writing.
+ self._logger.debug('Sending inner frame: %r' % inner_frame)
+ self._request.connection.write(inner_frame)
+ write_position += write_length
+
+ opcode = common.OPCODE_CONTINUATION
+
+ except ValueError, e:
+ raise BadOperationException(e)
+ finally:
+ self._write_inner_frame_semaphore.release()
+
+ def replenish_send_quota(self, send_quota):
+ """Replenish send quota."""
+
+ try:
+ self._send_condition.acquire()
+ if self._send_quota + send_quota > 0x7FFFFFFFFFFFFFFF:
+ self._send_quota = 0
+ raise LogicalChannelError(
+ self._request.channel_id, _DROP_CODE_SEND_QUOTA_OVERFLOW)
+ self._send_quota += send_quota
+ self._logger.debug('Replenished send quota for channel id %d: %d' %
+ (self._request.channel_id, self._send_quota))
+ finally:
+ self._send_condition.notify()
+ self._send_condition.release()
+
+ def consume_receive_quota(self, amount):
+ """Consumes receive quota. Returns False on failure."""
+
+ if self._receive_quota < amount:
+            self._logger.debug('Quota violation on channel id %d: %d < %d' %
+ (self._request.channel_id,
+ self._receive_quota, amount))
+ return False
+ self._receive_quota -= amount
+ return True
+
+ def send_message(self, message, end=True, binary=False):
+ """Override Stream.send_message."""
+
+ if self._request.server_terminated:
+ raise BadOperationException(
+ 'Requested send_message after sending out a closing handshake')
+
+ if binary and isinstance(message, unicode):
+ raise BadOperationException(
+ 'Message for binary frame must be instance of str')
+
+ if binary:
+ opcode = common.OPCODE_BINARY
+ else:
+ opcode = common.OPCODE_TEXT
+ message = message.encode('utf-8')
+
+ for message_filter in self._options.outgoing_message_filters:
+ message = message_filter.filter(message, end, binary)
+
+ if self._last_message_was_fragmented:
+ if opcode != self._message_opcode:
+ raise BadOperationException('Message types are different in '
+ 'frames for the same message')
+ opcode = common.OPCODE_CONTINUATION
+ else:
+ self._message_opcode = opcode
+
+ self._write_inner_frame(opcode, message, end)
+ self._last_message_was_fragmented = not end
+
+ def _receive_frame(self):
+ """Overrides Stream._receive_frame.
+
+        In addition to calling Stream._receive_frame, this method adds the
+        amount of payload to the receive quota and sends FlowControl to the
+        client.
+ We need to do it here because Stream.receive_message() handles
+ control frames internally.
+ """
+
+ opcode, payload, fin, rsv1, rsv2, rsv3 = Stream._receive_frame(self)
+ amount = len(payload)
+        # Replenish one extra octet when receiving the first fragmented frame.
+ if opcode != common.OPCODE_CONTINUATION:
+ amount += 1
+ self._receive_quota += amount
+ frame_data = _create_flow_control(self._request.channel_id,
+ amount)
+ self._logger.debug('Sending flow control for %d, replenished=%d' %
+ (self._request.channel_id, amount))
+ self._request.connection.write_control_data(frame_data)
+ return opcode, payload, fin, rsv1, rsv2, rsv3
+
+ def _get_message_from_frame(self, frame):
+ """Overrides Stream._get_message_from_frame.
+ """
+
+ try:
+ inner_message = self._inner_message_builder.build(frame)
+ except InvalidFrameException:
+ raise LogicalChannelError(
+ self._request.channel_id, _DROP_CODE_BAD_FRAGMENTATION)
+
+ if inner_message is None:
+ return None
+ self._original_opcode = inner_message.opcode
+ return inner_message.payload
+
+ def receive_message(self):
+ """Overrides Stream.receive_message."""
+
+ # Just call Stream.receive_message(), but catch
+ # LogicalConnectionClosedException, which is raised when the logical
+ # connection has closed gracefully.
+ try:
+ return Stream.receive_message(self)
+ except LogicalConnectionClosedException, e:
+ self._logger.debug('%s', e)
+ return None
+
+ def _send_closing_handshake(self, code, reason):
+ """Overrides Stream._send_closing_handshake."""
+
+ body = create_closing_handshake_body(code, reason)
+ self._logger.debug('Sending closing handshake for %d: (%r, %r)' %
+ (self._request.channel_id, code, reason))
+ self._write_inner_frame(common.OPCODE_CLOSE, body, end=True)
+
+ self._request.server_terminated = True
+
+ def send_ping(self, body=''):
+ """Overrides Stream.send_ping"""
+
+ self._logger.debug('Sending ping on logical channel %d: %r' %
+ (self._request.channel_id, body))
+ self._write_inner_frame(common.OPCODE_PING, body, end=True)
+
+ self._ping_queue.append(body)
+
+ def _send_pong(self, body):
+ """Overrides Stream._send_pong"""
+
+ self._logger.debug('Sending pong on logical channel %d: %r' %
+ (self._request.channel_id, body))
+ self._write_inner_frame(common.OPCODE_PONG, body, end=True)
+
+ def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
+ """Overrides Stream.close_connection."""
+
+ # TODO(bashi): Implement
+ self._logger.debug('Closing logical connection %d' %
+ self._request.channel_id)
+ self._request.server_terminated = True
+
+ def stop_sending(self):
+ """Stops accepting new send operation (_write_inner_frame)."""
+
+ self._send_condition.acquire()
+ self._send_closed = True
+ self._send_condition.notify()
+ self._send_condition.release()
+
+
+class _OutgoingData(object):
+ """A structure that holds data to be sent via physical connection and
+ origin of the data.
+ """
+
+ def __init__(self, channel_id, data):
+ self.channel_id = channel_id
+ self.data = data
+
+
+class _PhysicalConnectionWriter(threading.Thread):
+ """A thread that is responsible for writing data to physical connection.
+
+ TODO(bashi): Make sure there is no thread-safety problem when the reader
+ thread reads data from the same socket at a time.
+ """
+
+ def __init__(self, mux_handler):
+ """Constructs an instance.
+
+ Args:
+ mux_handler: _MuxHandler instance.
+ """
+
+ threading.Thread.__init__(self)
+ self._logger = util.get_class_logger(self)
+ self._mux_handler = mux_handler
+ self.setDaemon(True)
+
+ # When set, make this thread stop accepting new data, flush pending
+ # data and exit.
+ self._stop_requested = False
+ # The close code of the physical connection.
+ self._close_code = common.STATUS_NORMAL_CLOSURE
+ # Deque for passing write data. It's protected by _deque_condition
+ # until _stop_requested is set.
+ self._deque = collections.deque()
+ # - Protects _deque, _stop_requested and _close_code
+ # - Signals threads waiting for them to be available
+ self._deque_condition = threading.Condition()
+
+ def put_outgoing_data(self, data):
+ """Puts outgoing data.
+
+ Args:
+ data: _OutgoingData instance.
+
+ Raises:
+ BadOperationException: when the thread has been requested to
+ terminate.
+ """
+
+ try:
+ self._deque_condition.acquire()
+ if self._stop_requested:
+ raise BadOperationException('Cannot write data anymore')
+
+ self._deque.append(data)
+ self._deque_condition.notify()
+ finally:
+ self._deque_condition.release()
+
+ def _write_data(self, outgoing_data):
+ message = (_encode_channel_id(outgoing_data.channel_id) +
+ outgoing_data.data)
+ try:
+ self._mux_handler.physical_stream.send_message(
+ message=message, end=True, binary=True)
+ except Exception, e:
+ util.prepend_message_to_exception(
+ 'Failed to send message to %r: ' %
+ (self._mux_handler.physical_connection.remote_addr,), e)
+ raise
+
+ # TODO(bashi): It would be better to block the thread that sends
+ # control data as well.
+ if outgoing_data.channel_id != _CONTROL_CHANNEL_ID:
+ self._mux_handler.notify_write_data_done(outgoing_data.channel_id)
+
+ def run(self):
+ try:
+ self._deque_condition.acquire()
+ while not self._stop_requested:
+ if len(self._deque) == 0:
+ self._deque_condition.wait()
+ continue
+
+ outgoing_data = self._deque.popleft()
+
+ self._deque_condition.release()
+ self._write_data(outgoing_data)
+ self._deque_condition.acquire()
+
+ # Flush deque.
+ #
+ # At this point, self._deque_condition is always acquired.
+ try:
+ while len(self._deque) > 0:
+ outgoing_data = self._deque.popleft()
+ self._write_data(outgoing_data)
+ finally:
+ self._deque_condition.release()
+
+ # Close physical connection.
+ try:
+            # Don't wait for the response here. The response will be read
+ # by the reader thread.
+ self._mux_handler.physical_stream.close_connection(
+ self._close_code, wait_response=False)
+ except Exception, e:
+ util.prepend_message_to_exception(
+                'Failed to close the physical connection: ', e)
+ raise
+ finally:
+ self._mux_handler.notify_writer_done()
+
+ def stop(self, close_code=common.STATUS_NORMAL_CLOSURE):
+ """Stops the writer thread."""
+
+ self._deque_condition.acquire()
+ self._stop_requested = True
+ self._close_code = close_code
+ self._deque_condition.notify()
+ self._deque_condition.release()
+
+
+class _PhysicalConnectionReader(threading.Thread):
+ """A thread that is responsible for reading data from physical connection.
+ """
+
+ def __init__(self, mux_handler):
+ """Constructs an instance.
+
+ Args:
+ mux_handler: _MuxHandler instance.
+ """
+
+ threading.Thread.__init__(self)
+ self._logger = util.get_class_logger(self)
+ self._mux_handler = mux_handler
+ self.setDaemon(True)
+
+ def run(self):
+ while True:
+ try:
+ physical_stream = self._mux_handler.physical_stream
+ message = physical_stream.receive_message()
+ if message is None:
+ break
+                # The following happens only when a data message is received.
+ opcode = physical_stream.get_last_received_opcode()
+ if opcode != common.OPCODE_BINARY:
+ self._mux_handler.fail_physical_connection(
+ _DROP_CODE_INVALID_ENCAPSULATING_MESSAGE,
+ 'Received a text message on physical connection')
+ break
+
+ except ConnectionTerminatedException, e:
+ self._logger.debug('%s', e)
+ break
+
+ try:
+ self._mux_handler.dispatch_message(message)
+ except PhysicalConnectionError, e:
+ self._mux_handler.fail_physical_connection(
+ e.drop_code, e.message)
+ break
+ except LogicalChannelError, e:
+ self._mux_handler.fail_logical_channel(
+ e.channel_id, e.drop_code, e.message)
+ except Exception, e:
+ self._logger.debug(traceback.format_exc())
+ break
+
+ self._mux_handler.notify_reader_done()
+
+
+class _Worker(threading.Thread):
+ """A thread that is responsible for running the corresponding application
+ handler.
+ """
+
+ def __init__(self, mux_handler, request):
+ """Constructs an instance.
+
+ Args:
+ mux_handler: _MuxHandler instance.
+ request: _LogicalRequest instance.
+ """
+
+ threading.Thread.__init__(self)
+ self._logger = util.get_class_logger(self)
+ self._mux_handler = mux_handler
+ self._request = request
+ self.setDaemon(True)
+
+ def run(self):
+ self._logger.debug('Logical channel worker started. (id=%d)' %
+ self._request.channel_id)
+ try:
+ # Non-critical exceptions will be handled by dispatcher.
+ self._mux_handler.dispatcher.transfer_data(self._request)
+ except LogicalChannelError, e:
+ self._mux_handler.fail_logical_channel(
+ e.channel_id, e.drop_code, e.message)
+ finally:
+ self._mux_handler.notify_worker_done(self._request.channel_id)
+
+
+class _MuxHandshaker(hybi.Handshaker):
+ """Opening handshake processor for multiplexing."""
+
+ _DUMMY_WEBSOCKET_KEY = 'dGhlIHNhbXBsZSBub25jZQ=='
+
+ def __init__(self, request, dispatcher, send_quota, receive_quota):
+ """Constructs an instance.
+
+        Args:
+ request: _LogicalRequest instance.
+ dispatcher: Dispatcher instance (dispatch.Dispatcher).
+ send_quota: Initial send quota.
+ receive_quota: Initial receive quota.
+ """
+
+ hybi.Handshaker.__init__(self, request, dispatcher)
+ self._send_quota = send_quota
+ self._receive_quota = receive_quota
+
+ # Append headers which should not be included in handshake field of
+ # AddChannelRequest.
+        # TODO(bashi): Decide whether we should raise an exception when
+        # these headers are already included.
+ request.headers_in[common.UPGRADE_HEADER] = (
+ common.WEBSOCKET_UPGRADE_TYPE)
+ request.headers_in[common.SEC_WEBSOCKET_VERSION_HEADER] = (
+ str(common.VERSION_HYBI_LATEST))
+ request.headers_in[common.SEC_WEBSOCKET_KEY_HEADER] = (
+ self._DUMMY_WEBSOCKET_KEY)
+
+ def _create_stream(self, stream_options):
+ """Override hybi.Handshaker._create_stream."""
+
+ self._logger.debug('Creating logical stream for %d' %
+ self._request.channel_id)
+ return _LogicalStream(
+ self._request, stream_options, self._send_quota,
+ self._receive_quota)
+
+ def _create_handshake_response(self, accept):
+ """Override hybi._create_handshake_response."""
+
+ response = []
+
+ response.append('HTTP/1.1 101 Switching Protocols\r\n')
+
+ # Upgrade and Sec-WebSocket-Accept should be excluded.
+ response.append('%s: %s\r\n' % (
+ common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+ if self._request.ws_protocol is not None:
+ response.append('%s: %s\r\n' % (
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+ self._request.ws_protocol))
+ if (self._request.ws_extensions is not None and
+ len(self._request.ws_extensions) != 0):
+ response.append('%s: %s\r\n' % (
+ common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
+ common.format_extensions(self._request.ws_extensions)))
+ response.append('\r\n')
+
+ return ''.join(response)
+
+ def _send_handshake(self, accept):
+ """Override hybi.Handshaker._send_handshake."""
+
+ # Don't send handshake response for the default channel
+ if self._request.channel_id == _DEFAULT_CHANNEL_ID:
+ return
+
+ handshake_response = self._create_handshake_response(accept)
+ frame_data = _create_add_channel_response(
+ self._request.channel_id,
+ handshake_response)
+ self._logger.debug('Sending handshake response for %d: %r' %
+ (self._request.channel_id, frame_data))
+ self._request.connection.write_control_data(frame_data)
+
+
+class _LogicalChannelData(object):
+ """A structure that holds information about logical channel.
+ """
+
+ def __init__(self, request, worker):
+ self.request = request
+ self.worker = worker
+ self.drop_code = _DROP_CODE_NORMAL_CLOSURE
+ self.drop_message = ''
+
+
+class _HandshakeDeltaBase(object):
+ """A class that holds information for delta-encoded handshake."""
+
+ def __init__(self, headers):
+ self._headers = headers
+
+ def create_headers(self, delta=None):
+ """Creates request headers for an AddChannelRequest that has
+ delta-encoded handshake.
+
+ Args:
+ delta: headers should be overridden.
+ """
+
+ headers = copy.copy(self._headers)
+ if delta:
+ for key, value in delta.items():
+ # The spec requires that a header with an empty value is
+ # removed from the delta base.
+ if len(value) == 0 and headers.has_key(key):
+ del headers[key]
+ else:
+ headers[key] = value
+ return headers
+
+
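+# A hedged sketch of the delta encoding: an entry with an empty value
+# removes that header from the base; other entries override or extend it.
+def _demo_handshake_delta_base():
+    base = _HandshakeDeltaBase({'Host': 'example.com', 'X-Foo': 'bar'})
+    headers = base.create_headers({'X-Foo': '', 'X-Baz': 'qux'})
+    assert headers == {'Host': 'example.com', 'X-Baz': 'qux'}
+
+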
+class _MuxHandler(object):
+ """Multiplexing handler. When a handler starts, it launches three
+    threads: the reader thread, the writer thread, and a worker thread.
+
+ The reader thread reads data from the physical stream, i.e., the
+ ws_stream object of the underlying websocket connection. The reader
+ thread interprets multiplexed frames and dispatches them to logical
+ channels. Methods of this class are mostly called by the reader thread.
+
+ The writer thread sends multiplexed frames which are created by
+ logical channels via the physical connection.
+
+ The worker thread launched at the starting point handles the
+ "Implicitly Opened Connection". If multiplexing handler receives
+ an AddChannelRequest and accepts it, the handler will launch a new worker
+ thread and dispatch the request to it.
+ """
+
+ def __init__(self, request, dispatcher):
+ """Constructs an instance.
+
+ Args:
+ request: mod_python request of the physical connection.
+ dispatcher: Dispatcher instance (dispatch.Dispatcher).
+ """
+
+ self.original_request = request
+ self.dispatcher = dispatcher
+ self.physical_connection = request.connection
+ self.physical_stream = request.ws_stream
+ self._logger = util.get_class_logger(self)
+ self._logical_channels = {}
+ self._logical_channels_condition = threading.Condition()
+ # Holds client's initial quota
+ self._channel_slots = collections.deque()
+ self._handshake_base = None
+ self._worker_done_notify_received = False
+ self._reader = None
+ self._writer = None
+
+ def start(self):
+ """Starts the handler.
+
+ Raises:
+            MuxUnexpectedException: when the handler has already started,
+                or when the opening handshake of the default channel fails.
+ """
+
+ if self._reader or self._writer:
+ raise MuxUnexpectedException('MuxHandler already started')
+
+ self._reader = _PhysicalConnectionReader(self)
+ self._writer = _PhysicalConnectionWriter(self)
+ self._reader.start()
+ self._writer.start()
+
+ # Create "Implicitly Opened Connection".
+ logical_connection = _LogicalConnection(self, _DEFAULT_CHANNEL_ID)
+ headers = copy.copy(self.original_request.headers_in)
+ # Add extensions for logical channel.
+ headers[common.SEC_WEBSOCKET_EXTENSIONS_HEADER] = (
+ common.format_extensions(
+ self.original_request.mux_processor.extensions()))
+ self._handshake_base = _HandshakeDeltaBase(headers)
+ logical_request = _LogicalRequest(
+ _DEFAULT_CHANNEL_ID,
+ self.original_request.method,
+ self.original_request.uri,
+ self.original_request.protocol,
+ self._handshake_base.create_headers(),
+ logical_connection)
+ # Client's send quota for the implicitly opened connection is zero,
+ # but we will send FlowControl later so set the initial quota to
+ # _INITIAL_QUOTA_FOR_CLIENT.
+ self._channel_slots.append(_INITIAL_QUOTA_FOR_CLIENT)
+ send_quota = self.original_request.mux_processor.quota()
+ if not self._do_handshake_for_logical_request(
+ logical_request, send_quota=send_quota):
+ raise MuxUnexpectedException(
+ 'Failed handshake on the default channel id')
+ self._add_logical_channel(logical_request)
+
+ # Send FlowControl for the implicitly opened connection.
+ frame_data = _create_flow_control(_DEFAULT_CHANNEL_ID,
+ _INITIAL_QUOTA_FOR_CLIENT)
+ logical_request.connection.write_control_data(frame_data)
+
+ def add_channel_slots(self, slots, send_quota):
+ """Adds channel slots.
+
+ Args:
+ slots: number of slots to be added.
+ send_quota: initial send quota for slots.
+ """
+
+ self._channel_slots.extend([send_quota] * slots)
+ # Send NewChannelSlot to client.
+ frame_data = _create_new_channel_slot(slots, send_quota)
+ self.send_control_data(frame_data)
+
+ def wait_until_done(self, timeout=None):
+ """Waits until all workers are done. Returns False when timeout has
+ occurred. Returns True on success.
+
+ Args:
+ timeout: timeout in sec.
+ """
+
+ self._logical_channels_condition.acquire()
+ try:
+ while len(self._logical_channels) > 0:
+ self._logger.debug('Waiting workers(%d)...' %
+ len(self._logical_channels))
+ self._worker_done_notify_received = False
+ self._logical_channels_condition.wait(timeout)
+ if not self._worker_done_notify_received:
+ self._logger.debug('Waiting worker(s) timed out')
+ return False
+ finally:
+ self._logical_channels_condition.release()
+
+ # Flush pending outgoing data
+ self._writer.stop()
+ self._writer.join()
+
+ return True
+
+ def notify_write_data_done(self, channel_id):
+ """Called by the writer thread when a write operation has done.
+
+ Args:
+ channel_id: objective channel id.
+ """
+
+ try:
+ self._logical_channels_condition.acquire()
+ if channel_id in self._logical_channels:
+ channel_data = self._logical_channels[channel_id]
+ channel_data.request.connection.on_write_data_done()
+ else:
+ self._logger.debug('Seems that logical channel for %d has gone'
+ % channel_id)
+ finally:
+ self._logical_channels_condition.release()
+
+ def send_control_data(self, data):
+ """Sends data via the control channel.
+
+ Args:
+ data: data to be sent.
+ """
+
+ self._writer.put_outgoing_data(_OutgoingData(
+ channel_id=_CONTROL_CHANNEL_ID, data=data))
+
+ def send_data(self, channel_id, data):
+ """Sends data via given logical channel. This method is called by
+ worker threads.
+
+ Args:
+ channel_id: objective channel id.
+ data: data to be sent.
+ """
+
+ self._writer.put_outgoing_data(_OutgoingData(
+ channel_id=channel_id, data=data))
+
+ def _send_drop_channel(self, channel_id, code=None, message=''):
+ frame_data = _create_drop_channel(channel_id, code, message)
+ self._logger.debug(
+ 'Sending drop channel for channel id %d' % channel_id)
+ self.send_control_data(frame_data)
+
+ def _send_error_add_channel_response(self, channel_id, status=None):
+ if status is None:
+ status = common.HTTP_STATUS_BAD_REQUEST
+
+ if status in _HTTP_BAD_RESPONSE_MESSAGES:
+ message = _HTTP_BAD_RESPONSE_MESSAGES[status]
+ else:
+ self._logger.debug('Response message for %d is not found' % status)
+ message = '???'
+
+ response = 'HTTP/1.1 %d %s\r\n\r\n' % (status, message)
+ frame_data = _create_add_channel_response(channel_id,
+ encoded_handshake=response,
+ encoding=0, rejected=True)
+ self.send_control_data(frame_data)
+
+ def _create_logical_request(self, block):
+ if block.channel_id == _CONTROL_CHANNEL_ID:
+ # TODO(bashi): Raise PhysicalConnectionError with code 2006
+ # instead of MuxUnexpectedException.
+ raise MuxUnexpectedException(
+ 'Received the control channel id (0) as objective channel '
+ 'id for AddChannel')
+
+ if block.encoding > _HANDSHAKE_ENCODING_DELTA:
+ raise PhysicalConnectionError(
+ _DROP_CODE_UNKNOWN_REQUEST_ENCODING)
+
+ method, path, version, headers = _parse_request_text(
+ block.encoded_handshake)
+ if block.encoding == _HANDSHAKE_ENCODING_DELTA:
+ headers = self._handshake_base.create_headers(headers)
+
+ connection = _LogicalConnection(self, block.channel_id)
+ request = _LogicalRequest(block.channel_id, method, path, version,
+ headers, connection)
+ return request
+
+ def _do_handshake_for_logical_request(self, request, send_quota=0):
+ try:
+ receive_quota = self._channel_slots.popleft()
+ except IndexError:
+ raise LogicalChannelError(
+ request.channel_id, _DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION)
+
+ handshaker = _MuxHandshaker(request, self.dispatcher,
+ send_quota, receive_quota)
+ try:
+ handshaker.do_handshake()
+ except handshake.VersionException, e:
+ self._logger.info('%s', e)
+ self._send_error_add_channel_response(
+ request.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
+ return False
+ except handshake.HandshakeException, e:
+ # TODO(bashi): Should we _Fail the Logical Channel_ with 3001
+ # instead?
+ self._logger.info('%s', e)
+ self._send_error_add_channel_response(request.channel_id,
+ status=e.status)
+ return False
+ except handshake.AbortedByUserException, e:
+ self._logger.info('%s', e)
+ self._send_error_add_channel_response(request.channel_id)
+ return False
+
+ return True
+
+ def _add_logical_channel(self, logical_request):
+ try:
+ self._logical_channels_condition.acquire()
+ if logical_request.channel_id in self._logical_channels:
+ self._logger.debug('Channel id %d already exists' %
+ logical_request.channel_id)
+ raise PhysicalConnectionError(
+ _DROP_CODE_CHANNEL_ALREADY_EXISTS,
+ 'Channel id %d already exists' %
+ logical_request.channel_id)
+ worker = _Worker(self, logical_request)
+ channel_data = _LogicalChannelData(logical_request, worker)
+ self._logical_channels[logical_request.channel_id] = channel_data
+ worker.start()
+ finally:
+ self._logical_channels_condition.release()
+
+ def _process_add_channel_request(self, block):
+ try:
+ logical_request = self._create_logical_request(block)
+ except ValueError, e:
+ self._logger.debug('Failed to create logical request: %r' % e)
+ self._send_error_add_channel_response(
+ block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
+ return
+ if self._do_handshake_for_logical_request(logical_request):
+ if block.encoding == _HANDSHAKE_ENCODING_IDENTITY:
+ # Update handshake base.
+ # TODO(bashi): Make sure this is the right place to update
+ # handshake base.
+ self._handshake_base = _HandshakeDeltaBase(
+ logical_request.headers_in)
+ self._add_logical_channel(logical_request)
+ else:
+ self._send_error_add_channel_response(
+ block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
+
+ def _process_flow_control(self, block):
+ try:
+ self._logical_channels_condition.acquire()
+            if block.channel_id not in self._logical_channels:
+ return
+ channel_data = self._logical_channels[block.channel_id]
+ channel_data.request.ws_stream.replenish_send_quota(
+ block.send_quota)
+ finally:
+ self._logical_channels_condition.release()
+
+ def _process_drop_channel(self, block):
+ self._logger.debug(
+ 'DropChannel received for %d: code=%r, reason=%r' %
+ (block.channel_id, block.drop_code, block.drop_message))
+ try:
+ self._logical_channels_condition.acquire()
+            if block.channel_id not in self._logical_channels:
+ return
+ channel_data = self._logical_channels[block.channel_id]
+ channel_data.drop_code = _DROP_CODE_ACKNOWLEDGED
+
+ # Close the logical channel
+ channel_data.request.connection.set_read_state(
+ _LogicalConnection.STATE_TERMINATED)
+ channel_data.request.ws_stream.stop_sending()
+ finally:
+ self._logical_channels_condition.release()
+
+ def _process_control_blocks(self, parser):
+ for control_block in parser.read_control_blocks():
+ opcode = control_block.opcode
+ self._logger.debug('control block received, opcode: %d' % opcode)
+ if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
+ self._process_add_channel_request(control_block)
+ elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Received AddChannelResponse')
+ elif opcode == _MUX_OPCODE_FLOW_CONTROL:
+ self._process_flow_control(control_block)
+ elif opcode == _MUX_OPCODE_DROP_CHANNEL:
+ self._process_drop_channel(control_block)
+ elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Received NewChannelSlot')
+ else:
+ raise MuxUnexpectedException(
+ 'Unexpected opcode %r' % opcode)
+
+ def _process_logical_frame(self, channel_id, parser):
+ self._logger.debug('Received a frame. channel id=%d' % channel_id)
+ try:
+ self._logical_channels_condition.acquire()
+            if channel_id not in self._logical_channels:
+ # We must ignore the message for an inactive channel.
+ return
+ channel_data = self._logical_channels[channel_id]
+ fin, rsv1, rsv2, rsv3, opcode, payload = parser.read_inner_frame()
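+            # Quota accounting, as implemented below: every payload byte
+            # consumes one byte of receive quota, and the first frame of each
+            # message (any non-continuation opcode) consumes one extra byte.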
+ consuming_byte = len(payload)
+ if opcode != common.OPCODE_CONTINUATION:
+ consuming_byte += 1
+ if not channel_data.request.ws_stream.consume_receive_quota(
+ consuming_byte):
+ # The client violates quota. Close logical channel.
+ raise LogicalChannelError(
+ channel_id, _DROP_CODE_SEND_QUOTA_VIOLATION)
+ header = create_header(opcode, len(payload), fin, rsv1, rsv2, rsv3,
+ mask=False)
+ frame_data = header + payload
+ channel_data.request.connection.append_frame_data(frame_data)
+ finally:
+ self._logical_channels_condition.release()
+
+ def dispatch_message(self, message):
+ """Dispatches message. The reader thread calls this method.
+
+ Args:
+ message: a message that contains encapsulated frame.
+ Raises:
+ PhysicalConnectionError: if the message contains physical
+ connection level errors.
+ LogicalChannelError: if the message contains logical channel
+ level errors.
+ """
+
+ parser = _MuxFramePayloadParser(message)
+ try:
+ channel_id = parser.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_CHANNEL_ID_TRUNCATED)
+ if channel_id == _CONTROL_CHANNEL_ID:
+ self._process_control_blocks(parser)
+ else:
+ self._process_logical_frame(channel_id, parser)
+
+ def notify_worker_done(self, channel_id):
+ """Called when a worker has finished.
+
+ Args:
+            channel_id: channel id corresponding to the worker.
+ """
+
+ self._logger.debug('Worker for channel id %d terminated' % channel_id)
+ try:
+ self._logical_channels_condition.acquire()
+            if channel_id not in self._logical_channels:
+ raise MuxUnexpectedException(
+ 'Channel id %d not found' % channel_id)
+ channel_data = self._logical_channels.pop(channel_id)
+ finally:
+ self._worker_done_notify_received = True
+ self._logical_channels_condition.notify()
+ self._logical_channels_condition.release()
+
+ if not channel_data.request.server_terminated:
+ self._send_drop_channel(
+ channel_id, code=channel_data.drop_code,
+ message=channel_data.drop_message)
+
+ def notify_reader_done(self):
+ """This method is called by the reader thread when the reader has
+ finished.
+ """
+
+ self._logger.debug(
+            'Terminating all logical connections waiting for incoming data '
+ '...')
+ self._logical_channels_condition.acquire()
+ for channel_data in self._logical_channels.values():
+ try:
+ channel_data.request.connection.set_read_state(
+ _LogicalConnection.STATE_TERMINATED)
+ except Exception:
+ self._logger.debug(traceback.format_exc())
+ self._logical_channels_condition.release()
+
+ def notify_writer_done(self):
+ """This method is called by the writer thread when the writer has
+ finished.
+ """
+
+ self._logger.debug(
+            'Terminating all logical connections waiting for write '
+ 'completion ...')
+ self._logical_channels_condition.acquire()
+ for channel_data in self._logical_channels.values():
+ try:
+ channel_data.request.connection.on_writer_done()
+ except Exception:
+ self._logger.debug(traceback.format_exc())
+ self._logical_channels_condition.release()
+
+ def fail_physical_connection(self, code, message):
+ """Fail the physical connection.
+
+ Args:
+ code: drop reason code.
+ message: drop message.
+ """
+
+ self._logger.debug('Failing the physical connection...')
+ self._send_drop_channel(_CONTROL_CHANNEL_ID, code, message)
+ self._writer.stop(common.STATUS_INTERNAL_ENDPOINT_ERROR)
+
+ def fail_logical_channel(self, channel_id, code, message):
+ """Fail a logical channel.
+
+ Args:
+ channel_id: channel id.
+ code: drop reason code.
+ message: drop message.
+ """
+
+ self._logger.debug('Failing logical channel %d...' % channel_id)
+ try:
+ self._logical_channels_condition.acquire()
+ if channel_id in self._logical_channels:
+ channel_data = self._logical_channels[channel_id]
+ # Close the logical channel. notify_worker_done() will be
+ # called later and it will send DropChannel.
+ channel_data.drop_code = code
+ channel_data.drop_message = message
+
+ channel_data.request.connection.set_read_state(
+ _LogicalConnection.STATE_TERMINATED)
+ channel_data.request.ws_stream.stop_sending()
+ else:
+ self._send_drop_channel(channel_id, code, message)
+ finally:
+ self._logical_channels_condition.release()
+
+
+def use_mux(request):
+ return hasattr(request, 'mux_processor') and (
+ request.mux_processor.is_active())
+
+
+def start(request, dispatcher):
+ mux_handler = _MuxHandler(request, dispatcher)
+ mux_handler.start()
+
+ mux_handler.add_channel_slots(_INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ _INITIAL_QUOTA_FOR_CLIENT)
+
+ mux_handler.wait_until_done()
+
+
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/standalone.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/standalone.py
new file mode 100755
index 0000000..2aaa50e
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/standalone.py
@@ -0,0 +1,1185 @@
+#!/usr/bin/env python
+#
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Standalone WebSocket server.
+
+Use this file to launch pywebsocket without Apache HTTP Server.
+
+
+BASIC USAGE
+===========
+
+Go to the src directory and run
+
+ $ python mod_pywebsocket/standalone.py [-p <ws_port>]
+ [-w <websock_handlers>]
+ [-d <document_root>]
+
+<ws_port> is the port number to use for ws:// connection.
+
+<document_root> is the path to the root directory of HTML files.
+
+<websock_handlers> is the path to the root directory of WebSocket handlers.
+If not specified, <document_root> will be used. See __init__.py (or
+run $ pydoc mod_pywebsocket) for how to write WebSocket handlers.
+
+For more detail and other options, run
+
+ $ python mod_pywebsocket/standalone.py --help
+
+or see _build_option_parser method below.
+
+For troubleshooting, adding "--log_level debug" might help you.
+
+
+TRY DEMO
+========
+
+Go to the src directory and run standalone.py with the -d option to set the
+document root to the directory containing example HTMLs and handlers like this:
+
+ $ cd src
+ $ PYTHONPATH=. python mod_pywebsocket/standalone.py -d example
+
+to launch pywebsocket with the sample handler and html on port 80. Open
+http://localhost/console.html, click the connect button, type something into
+the text box next to the send button and click the send button. If everything
+is working, you'll see the message you typed echoed by the server.
+
+
+USING TLS
+=========
+
+To run the standalone server with TLS support, run it with -t, -k, and -c
+options. When TLS is enabled, the standalone server accepts only TLS connections.
+
+Note that when the ssl module is used and the key/cert location is incorrect,
+the TLS connection silently fails, while pyOpenSSL fails on startup.
+
+Example:
+
+ $ PYTHONPATH=. python mod_pywebsocket/standalone.py \
+ -d example \
+ -p 10443 \
+ -t \
+ -c ../test/cert/cert.pem \
+ -k ../test/cert/key.pem \
+
+Note that when passing a relative path to the -c and -k options, it will be
+resolved using the document root directory as the base.
+
+
+USING CLIENT AUTHENTICATION
+===========================
+
+To run the standalone server with TLS client authentication support, run it with
+--tls-client-auth and --tls-client-ca options in addition to the options
+required for TLS support.
+
+Example:
+
+ $ PYTHONPATH=. python mod_pywebsocket/standalone.py -d example -p 10443 -t \
+ -c ../test/cert/cert.pem -k ../test/cert/key.pem \
+ --tls-client-auth \
+ --tls-client-ca=../test/cert/cacert.pem
+
+Note that when passing a relative path to the --tls-client-ca option, it will
+be resolved using the document root directory as the base.
+
+
+CONFIGURATION FILE
+==================
+
+You can also write a configuration file and use it by specifying its path with
+the --config option. Please write the configuration file following the
+documentation of the Python ConfigParser library. The name of each entry must
+be the long version of the argument name. E.g., to set the log level to debug,
+add the following line:
+
+log_level=debug
+
+For options which don't take a value, please add some fake value. E.g., for
+the --tls option, add the following line:
+
+tls=True
+
+Note that tls will be enabled even if you write tls=False, because the value
+part is ignored.
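+
+A minimal example configuration file (values are illustrative). Note that
+entries are read from the [pywebsocket] section (see _parse_args_and_config
+below):
+
+[pywebsocket]
+port=10080
+document_root=example
+log_level=debug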
+
+When both a command line argument and a configuration file entry are set for
+the same configuration item, the command line value will override the one in
+the configuration file.
+
+
+THREADING
+=========
+
+This server is derived from SocketServer.ThreadingMixIn. Hence a thread is
+used for each request.
+
+
+SECURITY WARNING
+================
+
+This uses CGIHTTPServer and CGIHTTPServer is not secure.
+It may execute arbitrary Python code or external programs. It should not be
+used outside a firewall.
+"""
+
+import BaseHTTPServer
+import CGIHTTPServer
+import SimpleHTTPServer
+import SocketServer
+import ConfigParser
+import base64
+import httplib
+import logging
+import logging.handlers
+import optparse
+import os
+import re
+import select
+import socket
+import sys
+import threading
+import time
+
+from mod_pywebsocket import common
+from mod_pywebsocket import dispatch
+from mod_pywebsocket import handshake
+from mod_pywebsocket import http_header_util
+from mod_pywebsocket import memorizingfile
+from mod_pywebsocket import util
+from mod_pywebsocket.xhr_benchmark_handler import XHRBenchmarkHandler
+
+
+_DEFAULT_LOG_MAX_BYTES = 1024 * 256
+_DEFAULT_LOG_BACKUP_COUNT = 5
+
+_DEFAULT_REQUEST_QUEUE_SIZE = 128
+
+# 1024 is practically large enough to contain WebSocket handshake lines.
+_MAX_MEMORIZED_LINES = 1024
+
+# Constants for the --tls_module flag.
+_TLS_BY_STANDARD_MODULE = 'ssl'
+_TLS_BY_PYOPENSSL = 'pyopenssl'
+
+
+class _StandaloneConnection(object):
+ """Mimic mod_python mp_conn."""
+
+ def __init__(self, request_handler):
+ """Construct an instance.
+
+ Args:
+ request_handler: A WebSocketRequestHandler instance.
+ """
+
+ self._request_handler = request_handler
+
+ def get_local_addr(self):
+ """Getter to mimic mp_conn.local_addr."""
+
+ return (self._request_handler.server.server_name,
+ self._request_handler.server.server_port)
+ local_addr = property(get_local_addr)
+
+ def get_remote_addr(self):
+ """Getter to mimic mp_conn.remote_addr.
+
+        Setting the property in __init__ won't work because the request
+        handler is not yet initialized at that point."""
+
+ return self._request_handler.client_address
+ remote_addr = property(get_remote_addr)
+
+ def write(self, data):
+ """Mimic mp_conn.write()."""
+
+ return self._request_handler.wfile.write(data)
+
+ def read(self, length):
+ """Mimic mp_conn.read()."""
+
+ return self._request_handler.rfile.read(length)
+
+ def get_memorized_lines(self):
+ """Get memorized lines."""
+
+ return self._request_handler.rfile.get_memorized_lines()
+
+
+class _StandaloneRequest(object):
+ """Mimic mod_python request."""
+
+ def __init__(self, request_handler, use_tls):
+ """Construct an instance.
+
+        Args:
+            request_handler: A WebSocketRequestHandler instance.
+            use_tls: Whether the connection uses TLS.
+        """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request_handler = request_handler
+ self.connection = _StandaloneConnection(request_handler)
+ self._use_tls = use_tls
+ self.headers_in = request_handler.headers
+
+ def get_uri(self):
+ """Getter to mimic request.uri.
+
+ This method returns the raw data at the Request-URI part of the
+ Request-Line, while the uri method on the request object of mod_python
+ returns the path portion after parsing the raw data. This behavior is
+ kept for compatibility.
+ """
+
+ return self._request_handler.path
+ uri = property(get_uri)
+
+ def get_unparsed_uri(self):
+ """Getter to mimic request.unparsed_uri."""
+
+ return self._request_handler.path
+ unparsed_uri = property(get_unparsed_uri)
+
+ def get_method(self):
+ """Getter to mimic request.method."""
+
+ return self._request_handler.command
+ method = property(get_method)
+
+ def get_protocol(self):
+ """Getter to mimic request.protocol."""
+
+ return self._request_handler.request_version
+ protocol = property(get_protocol)
+
+ def is_https(self):
+ """Mimic request.is_https()."""
+
+ return self._use_tls
+
+
+def _import_ssl():
+ global ssl
+ try:
+ import ssl
+ return True
+ except ImportError:
+ return False
+
+
+def _import_pyopenssl():
+ global OpenSSL
+ try:
+ import OpenSSL.SSL
+ return True
+ except ImportError:
+ return False
+
+
+class _StandaloneSSLConnection(object):
+ """A wrapper class for OpenSSL.SSL.Connection to
+    - provide a makefile method, which is not supported by the class
+    - tweak the shutdown method, since OpenSSL.SSL.Connection.shutdown doesn't
+ accept the "how" argument.
+ - convert SysCallError exceptions that its recv method may raise into a
+ return value of '', meaning EOF. We cannot overwrite the recv method on
+ self._connection since it's immutable.
+ """
+
+ _OVERRIDDEN_ATTRIBUTES = ['_connection', 'makefile', 'shutdown', 'recv']
+
+ def __init__(self, connection):
+ self._connection = connection
+
+ def __getattribute__(self, name):
+ if name in _StandaloneSSLConnection._OVERRIDDEN_ATTRIBUTES:
+ return object.__getattribute__(self, name)
+ return self._connection.__getattribute__(name)
+
+ def __setattr__(self, name, value):
+ if name in _StandaloneSSLConnection._OVERRIDDEN_ATTRIBUTES:
+ return object.__setattr__(self, name, value)
+ return self._connection.__setattr__(name, value)
+
+ def makefile(self, mode='r', bufsize=-1):
+ return socket._fileobject(self, mode, bufsize)
+
+ def shutdown(self, unused_how):
+ self._connection.shutdown()
+
+ def recv(self, bufsize, flags=0):
+ if flags != 0:
+ raise ValueError('Non-zero flags not allowed')
+
+ try:
+ return self._connection.recv(bufsize)
+ except OpenSSL.SSL.SysCallError, (err, message):
+ if err == -1:
+ # Suppress "unexpected EOF" exception. See the OpenSSL document
+ # for SSL_get_error.
+ return ''
+ raise
+
+
+def _alias_handlers(dispatcher, websock_handlers_map_file):
+ """Set aliases specified in websock_handler_map_file in dispatcher.
+
+ Args:
+ dispatcher: dispatch.Dispatcher instance
+ websock_handler_map_file: alias map file
+ """
+
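+    # An illustrative map file (hypothetical resource paths):
+    #
+    #   /echo_alias   /echo
+    #   /chat_alias   /chat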
+ fp = open(websock_handlers_map_file)
+ try:
+ for line in fp:
+ if line[0] == '#' or line.isspace():
+ continue
+ m = re.match('(\S+)\s+(\S+)', line)
+ if not m:
+ logging.warning('Wrong format in map file:' + line)
+ continue
+ try:
+ dispatcher.add_resource_path_alias(
+ m.group(1), m.group(2))
+ except dispatch.DispatchException, e:
+ logging.error(str(e))
+ finally:
+ fp.close()
+
+
+class WebSocketServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
+ """HTTPServer specialized for WebSocket."""
+
+ # Overrides SocketServer.ThreadingMixIn.daemon_threads
+ daemon_threads = True
+ # Overrides BaseHTTPServer.HTTPServer.allow_reuse_address
+ allow_reuse_address = True
+
+ def __init__(self, options):
+ """Override SocketServer.TCPServer.__init__ to set SSL enabled
+ socket object to self.socket before server_bind and server_activate,
+ if necessary.
+ """
+
+ # Share a Dispatcher among request handlers to save time for
+ # instantiation. Dispatcher can be shared because it is thread-safe.
+ options.dispatcher = dispatch.Dispatcher(
+ options.websock_handlers,
+ options.scan_dir,
+ options.allow_handlers_outside_root_dir)
+ if options.websock_handlers_map_file:
+ _alias_handlers(options.dispatcher,
+ options.websock_handlers_map_file)
+ warnings = options.dispatcher.source_warnings()
+ if warnings:
+ for warning in warnings:
+ logging.warning('Warning in source loading: %s' % warning)
+
+ self._logger = util.get_class_logger(self)
+
+ self.request_queue_size = options.request_queue_size
+ self.__ws_is_shut_down = threading.Event()
+ self.__ws_serving = False
+
+ SocketServer.BaseServer.__init__(
+ self, (options.server_host, options.port), WebSocketRequestHandler)
+
+        # Expose the options object to allow handler objects to access it.
+        # We name it with the websocket_ prefix to avoid conflicts.
+ self.websocket_server_options = options
+
+ self._create_sockets()
+ self.server_bind()
+ self.server_activate()
+
+ def _create_sockets(self):
+ self.server_name, self.server_port = self.server_address
+ self._sockets = []
+ if not self.server_name:
+            # On platforms that don't support IPv6, the first bind fails.
+            # On platforms that support IPv6:
+            # - If it binds both IPv4 and IPv6 on a call with AF_INET6, the
+            #   first bind succeeds and the second fails (we'll see an
+            #   'Address already in use' error).
+            # - If it binds only IPv6 on a call with AF_INET6, both calls are
+            #   expected to succeed, listening on both protocols.
+ addrinfo_array = [
+ (socket.AF_INET6, socket.SOCK_STREAM, '', '', ''),
+ (socket.AF_INET, socket.SOCK_STREAM, '', '', '')]
+ else:
+ addrinfo_array = socket.getaddrinfo(self.server_name,
+ self.server_port,
+ socket.AF_UNSPEC,
+ socket.SOCK_STREAM,
+ socket.IPPROTO_TCP)
+ for addrinfo in addrinfo_array:
+ self._logger.info('Create socket on: %r', addrinfo)
+ family, socktype, proto, canonname, sockaddr = addrinfo
+ try:
+ socket_ = socket.socket(family, socktype)
+ except Exception, e:
+ self._logger.info('Skip by failure: %r', e)
+ continue
+ server_options = self.websocket_server_options
+ if server_options.use_tls:
+ # For the case of _HAS_OPEN_SSL, we do wrapper setup after
+ # accept.
+ if server_options.tls_module == _TLS_BY_STANDARD_MODULE:
+ if server_options.tls_client_auth:
+ if server_options.tls_client_cert_optional:
+ client_cert_ = ssl.CERT_OPTIONAL
+ else:
+ client_cert_ = ssl.CERT_REQUIRED
+ else:
+ client_cert_ = ssl.CERT_NONE
+ socket_ = ssl.wrap_socket(socket_,
+ keyfile=server_options.private_key,
+ certfile=server_options.certificate,
+ ssl_version=ssl.PROTOCOL_SSLv23,
+ ca_certs=server_options.tls_client_ca,
+ cert_reqs=client_cert_,
+ do_handshake_on_connect=False)
+ self._sockets.append((socket_, addrinfo))
+
+ def server_bind(self):
+ """Override SocketServer.TCPServer.server_bind to enable multiple
+ sockets bind.
+ """
+
+ failed_sockets = []
+
+ for socketinfo in self._sockets:
+ socket_, addrinfo = socketinfo
+ self._logger.info('Bind on: %r', addrinfo)
+ if self.allow_reuse_address:
+ socket_.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ try:
+ socket_.bind(self.server_address)
+ except Exception, e:
+ self._logger.info('Skip by failure: %r', e)
+ socket_.close()
+ failed_sockets.append(socketinfo)
+ if self.server_address[1] == 0:
+                # The operating system assigns the actual port number for port
+                # number 0. In this case, the second and later sockets should
+                # use the same port number. Also self.server_port is rewritten
+                # because it is exported, and will be used by external code.
+ self.server_address = (
+ self.server_name, socket_.getsockname()[1])
+ self.server_port = self.server_address[1]
+ self._logger.info('Port %r is assigned', self.server_port)
+
+ for socketinfo in failed_sockets:
+ self._sockets.remove(socketinfo)
+
+ def server_activate(self):
+ """Override SocketServer.TCPServer.server_activate to enable multiple
+ sockets listen.
+ """
+
+ failed_sockets = []
+
+ for socketinfo in self._sockets:
+ socket_, addrinfo = socketinfo
+ self._logger.info('Listen on: %r', addrinfo)
+ try:
+ socket_.listen(self.request_queue_size)
+ except Exception, e:
+ self._logger.info('Skip by failure: %r', e)
+ socket_.close()
+ failed_sockets.append(socketinfo)
+
+ for socketinfo in failed_sockets:
+ self._sockets.remove(socketinfo)
+
+ if len(self._sockets) == 0:
+ self._logger.critical(
+ 'No sockets activated. Use info log level to see the reason.')
+
+ def server_close(self):
+ """Override SocketServer.TCPServer.server_close to enable multiple
+ sockets close.
+ """
+
+ for socketinfo in self._sockets:
+ socket_, addrinfo = socketinfo
+ self._logger.info('Close on: %r', addrinfo)
+ socket_.close()
+
+ def fileno(self):
+ """Override SocketServer.TCPServer.fileno."""
+
+ self._logger.critical('Not supported: fileno')
+ return self._sockets[0][0].fileno()
+
+ def handle_error(self, request, client_address):
+ """Override SocketServer.handle_error."""
+
+ self._logger.error(
+ 'Exception in processing request from: %r\n%s',
+ client_address,
+ util.get_stack_trace())
+ # Note: client_address is a tuple.
+
+ def get_request(self):
+ """Override TCPServer.get_request to wrap OpenSSL.SSL.Connection
+ object with _StandaloneSSLConnection to provide makefile method. We
+ cannot substitute OpenSSL.SSL.Connection.makefile since it's readonly
+ attribute.
+ """
+
+ accepted_socket, client_address = self.socket.accept()
+
+ server_options = self.websocket_server_options
+ if server_options.use_tls:
+ if server_options.tls_module == _TLS_BY_STANDARD_MODULE:
+ try:
+ accepted_socket.do_handshake()
+ except ssl.SSLError, e:
+ self._logger.debug('%r', e)
+ raise
+
+ # Print cipher in use. Handshake is done on accept.
+ self._logger.debug('Cipher: %s', accepted_socket.cipher())
+ self._logger.debug('Client cert: %r',
+ accepted_socket.getpeercert())
+ elif server_options.tls_module == _TLS_BY_PYOPENSSL:
+ # We cannot print the cipher in use. pyOpenSSL doesn't provide
+ # any method to fetch that.
+
+ ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
+ ctx.use_privatekey_file(server_options.private_key)
+ ctx.use_certificate_file(server_options.certificate)
+
+ def default_callback(conn, cert, errnum, errdepth, ok):
+ return ok == 1
+
+ # See the OpenSSL document for SSL_CTX_set_verify.
+ if server_options.tls_client_auth:
+ verify_mode = OpenSSL.SSL.VERIFY_PEER
+ if not server_options.tls_client_cert_optional:
+ verify_mode |= OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT
+ ctx.set_verify(verify_mode, default_callback)
+ ctx.load_verify_locations(server_options.tls_client_ca,
+ None)
+ else:
+ ctx.set_verify(OpenSSL.SSL.VERIFY_NONE, default_callback)
+
+ accepted_socket = OpenSSL.SSL.Connection(ctx, accepted_socket)
+ accepted_socket.set_accept_state()
+
+ # Convert SSL related error into socket.error so that
+ # SocketServer ignores them and keeps running.
+ #
+ # TODO(tyoshino): Convert all kinds of errors.
+ try:
+ accepted_socket.do_handshake()
+ except OpenSSL.SSL.Error, e:
+ # Set errno part to 1 (SSL_ERROR_SSL) like the ssl module
+ # does.
+ self._logger.debug('%r', e)
+ raise socket.error(1, '%r' % e)
+ cert = accepted_socket.get_peer_certificate()
+ if cert is not None:
+ self._logger.debug('Client cert subject: %r',
+ cert.get_subject().get_components())
+ accepted_socket = _StandaloneSSLConnection(accepted_socket)
+ else:
+ raise ValueError('No TLS support module is available')
+
+ return accepted_socket, client_address
+
+ def serve_forever(self, poll_interval=0.5):
+ """Override SocketServer.BaseServer.serve_forever."""
+
+ self.__ws_serving = True
+ self.__ws_is_shut_down.clear()
+ handle_request = self.handle_request
+ if hasattr(self, '_handle_request_noblock'):
+ handle_request = self._handle_request_noblock
+ else:
+ self._logger.warning('Fallback to blocking request handler')
+ try:
+ while self.__ws_serving:
+ r, w, e = select.select(
+ [socket_[0] for socket_ in self._sockets],
+ [], [], poll_interval)
+ for socket_ in r:
+ self.socket = socket_
+ handle_request()
+ self.socket = None
+ finally:
+ self.__ws_is_shut_down.set()
+
+ def shutdown(self):
+ """Override SocketServer.BaseServer.shutdown."""
+
+ self.__ws_serving = False
+ self.__ws_is_shut_down.wait()
+
+
+class WebSocketRequestHandler(CGIHTTPServer.CGIHTTPRequestHandler):
+ """CGIHTTPRequestHandler specialized for WebSocket."""
+
+ # Use httplib.HTTPMessage instead of mimetools.Message.
+ MessageClass = httplib.HTTPMessage
+
+ def setup(self):
+ """Override SocketServer.StreamRequestHandler.setup to wrap rfile
+ with MemorizingFile.
+
+ This method will be called by BaseRequestHandler's constructor
+ before calling BaseHTTPRequestHandler.handle.
+ BaseHTTPRequestHandler.handle will call
+ BaseHTTPRequestHandler.handle_one_request and it will call
+ WebSocketRequestHandler.parse_request.
+ """
+
+ # Call superclass's setup to prepare rfile, wfile, etc. See setup
+ # definition on the root class SocketServer.StreamRequestHandler to
+ # understand what this does.
+ CGIHTTPServer.CGIHTTPRequestHandler.setup(self)
+
+ self.rfile = memorizingfile.MemorizingFile(
+ self.rfile,
+ max_memorized_lines=_MAX_MEMORIZED_LINES)
+
+ def __init__(self, request, client_address, server):
+ self._logger = util.get_class_logger(self)
+
+ self._options = server.websocket_server_options
+
+        # Overrides CGIHTTPRequestHandler.cgi_directories.
+ self.cgi_directories = self._options.cgi_directories
+ # Replace CGIHTTPRequestHandler.is_executable method.
+ if self._options.is_executable_method is not None:
+ self.is_executable = self._options.is_executable_method
+
+ # This actually calls BaseRequestHandler.__init__.
+ CGIHTTPServer.CGIHTTPRequestHandler.__init__(
+ self, request, client_address, server)
+
+ def parse_request(self):
+ """Override BaseHTTPServer.BaseHTTPRequestHandler.parse_request.
+
+ Return True to continue processing for HTTP(S), False otherwise.
+
+ See BaseHTTPRequestHandler.handle_one_request method which calls
+ this method to understand how the return value will be handled.
+ """
+
+ # We hook parse_request method, but also call the original
+ # CGIHTTPRequestHandler.parse_request since when we return False,
+ # CGIHTTPRequestHandler.handle_one_request continues processing and
+ # it needs variables set by CGIHTTPRequestHandler.parse_request.
+ #
+ # Variables set by this method will be also used by WebSocket request
+ # handling (self.path, self.command, self.requestline, etc. See also
+ # how _StandaloneRequest's members are implemented using these
+ # attributes).
+ if not CGIHTTPServer.CGIHTTPRequestHandler.parse_request(self):
+ return False
+
+ if self._options.use_basic_auth:
+ auth = self.headers.getheader('Authorization')
+ if auth != self._options.basic_auth_credential:
+ self.send_response(401)
+ self.send_header('WWW-Authenticate',
+ 'Basic realm="Pywebsocket"')
+ self.end_headers()
+ self._logger.info('Request basic authentication')
+ return True
+
+ host, port, resource = http_header_util.parse_uri(self.path)
+
+ # Special paths for XMLHttpRequest benchmark
+ xhr_benchmark_helper_prefix = '/073be001e10950692ccbf3a2ad21c245'
+ if resource == (xhr_benchmark_helper_prefix + '_send'):
+ xhr_benchmark_handler = XHRBenchmarkHandler(
+ self.headers, self.rfile, self.wfile)
+ xhr_benchmark_handler.do_send()
+ return False
+ if resource == (xhr_benchmark_helper_prefix + '_receive'):
+ xhr_benchmark_handler = XHRBenchmarkHandler(
+ self.headers, self.rfile, self.wfile)
+ xhr_benchmark_handler.do_receive()
+ return False
+
+ if resource is None:
+ self._logger.info('Invalid URI: %r', self.path)
+ self._logger.info('Fallback to CGIHTTPRequestHandler')
+ return True
+ server_options = self.server.websocket_server_options
+ if host is not None:
+ validation_host = server_options.validation_host
+ if validation_host is not None and host != validation_host:
+ self._logger.info('Invalid host: %r (expected: %r)',
+ host,
+ validation_host)
+ self._logger.info('Fallback to CGIHTTPRequestHandler')
+ return True
+ if port is not None:
+ validation_port = server_options.validation_port
+ if validation_port is not None and port != validation_port:
+ self._logger.info('Invalid port: %r (expected: %r)',
+ port,
+ validation_port)
+ self._logger.info('Fallback to CGIHTTPRequestHandler')
+ return True
+ self.path = resource
+
+ request = _StandaloneRequest(self, self._options.use_tls)
+
+ try:
+ # Fallback to default http handler for request paths for which
+ # we don't have request handlers.
+ if not self._options.dispatcher.get_handler_suite(self.path):
+ self._logger.info('No handler for resource: %r',
+ self.path)
+ self._logger.info('Fallback to CGIHTTPRequestHandler')
+ return True
+ except dispatch.DispatchException, e:
+ self._logger.info('Dispatch failed for error: %s', e)
+ self.send_error(e.status)
+ return False
+
+        # If any exception without an except clause set up (including
+        # DispatchException) is raised below this point, it will be caught
+        # and logged by WebSocketServer.
+
+ try:
+ try:
+ handshake.do_handshake(
+ request,
+ self._options.dispatcher,
+ allowDraft75=self._options.allow_draft75,
+ strict=self._options.strict)
+ except handshake.VersionException, e:
+ self._logger.info('Handshake failed for version error: %s', e)
+ self.send_response(common.HTTP_STATUS_BAD_REQUEST)
+ self.send_header(common.SEC_WEBSOCKET_VERSION_HEADER,
+ e.supported_versions)
+ self.end_headers()
+ return False
+ except handshake.HandshakeException, e:
+ # Handshake for ws(s) failed.
+ self._logger.info('Handshake failed for error: %s', e)
+ self.send_error(e.status)
+ return False
+
+ request._dispatcher = self._options.dispatcher
+ self._options.dispatcher.transfer_data(request)
+ except handshake.AbortedByUserException, e:
+ self._logger.info('Aborted: %s', e)
+ return False
+
+ def log_request(self, code='-', size='-'):
+ """Override BaseHTTPServer.log_request."""
+
+ self._logger.info('"%s" %s %s',
+ self.requestline, str(code), str(size))
+
+ def log_error(self, *args):
+ """Override BaseHTTPServer.log_error."""
+
+        # Despite the name, this method is for warnings rather than errors.
+        # For example, the HTTP status code is logged by this method.
+ self._logger.warning('%s - %s',
+ self.address_string(),
+ args[0] % args[1:])
+
+ def is_cgi(self):
+ """Test whether self.path corresponds to a CGI script.
+
+        Adds an extra check that self.path doesn't contain '..'.
+        Also checks whether the file is executable. If the file is not
+        executable, it is handled as a static file or directory rather than
+        a CGI script.
+ """
+
+ if CGIHTTPServer.CGIHTTPRequestHandler.is_cgi(self):
+ if '..' in self.path:
+ return False
+ # strip query parameter from request path
+ resource_name = self.path.split('?', 2)[0]
+            # convert resource_name into a real path name in the filesystem.
+ scriptfile = self.translate_path(resource_name)
+ if not os.path.isfile(scriptfile):
+ return False
+ if not self.is_executable(scriptfile):
+ return False
+ return True
+ return False
+
+
+def _get_logger_from_class(c):
+ return logging.getLogger('%s.%s' % (c.__module__, c.__name__))
+
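+# For example, _get_logger_from_class(util._Deflater) returns the logger
+# named 'mod_pywebsocket.util._Deflater', as used in _configure_logging below.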
+
+def _configure_logging(options):
+ logging.addLevelName(common.LOGLEVEL_FINE, 'FINE')
+
+ logger = logging.getLogger()
+ logger.setLevel(logging.getLevelName(options.log_level.upper()))
+ if options.log_file:
+ handler = logging.handlers.RotatingFileHandler(
+ options.log_file, 'a', options.log_max, options.log_count)
+ else:
+ handler = logging.StreamHandler()
+ formatter = logging.Formatter(
+ '[%(asctime)s] [%(levelname)s] %(name)s: %(message)s')
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+
+ deflate_log_level_name = logging.getLevelName(
+ options.deflate_log_level.upper())
+ _get_logger_from_class(util._Deflater).setLevel(
+ deflate_log_level_name)
+ _get_logger_from_class(util._Inflater).setLevel(
+ deflate_log_level_name)
+
+
+def _build_option_parser():
+ parser = optparse.OptionParser()
+
+ parser.add_option('--config', dest='config_file', type='string',
+ default=None,
+ help=('Path to configuration file. See the file comment '
+ 'at the top of this file for the configuration '
+ 'file format'))
+ parser.add_option('-H', '--server-host', '--server_host',
+ dest='server_host',
+ default='',
+ help='server hostname to listen to')
+ parser.add_option('-V', '--validation-host', '--validation_host',
+ dest='validation_host',
+ default=None,
+ help='server hostname to validate in absolute path.')
+ parser.add_option('-p', '--port', dest='port', type='int',
+ default=common.DEFAULT_WEB_SOCKET_PORT,
+ help='port to listen to')
+ parser.add_option('-P', '--validation-port', '--validation_port',
+ dest='validation_port', type='int',
+ default=None,
+ help='server port to validate in absolute path.')
+ parser.add_option('-w', '--websock-handlers', '--websock_handlers',
+ dest='websock_handlers',
+ default='.',
+ help=('The root directory of WebSocket handler files. '
+ 'If the path is relative, --document-root is used '
+ 'as the base.'))
+ parser.add_option('-m', '--websock-handlers-map-file',
+ '--websock_handlers_map_file',
+ dest='websock_handlers_map_file',
+ default=None,
+ help=('WebSocket handlers map file. '
+ 'Each line consists of alias_resource_path and '
+ 'existing_resource_path, separated by spaces.'))
+ parser.add_option('-s', '--scan-dir', '--scan_dir', dest='scan_dir',
+ default=None,
+ help=('Must be a directory under --websock-handlers. '
+ 'Only handlers under this directory are scanned '
+ 'and registered to the server. '
+ 'Useful for saving scan time when the handler '
+ 'root directory contains lots of files that are '
+ 'not handler file or are handler files but you '
+ 'don\'t want them to be registered. '))
+ parser.add_option('--allow-handlers-outside-root-dir',
+ '--allow_handlers_outside_root_dir',
+ dest='allow_handlers_outside_root_dir',
+ action='store_true',
+ default=False,
+ help=('Scans WebSocket handlers even if their canonical '
+ 'path is not under --websock-handlers.'))
+ parser.add_option('-d', '--document-root', '--document_root',
+ dest='document_root', default='.',
+ help='Document root directory.')
+ parser.add_option('-x', '--cgi-paths', '--cgi_paths', dest='cgi_paths',
+ default=None,
+                      help=('CGI paths relative to document_root. '
+                            'Comma-separated. (e.g. -x /cgi,/htbin) '
+ 'Files under document_root/cgi_path are handled '
+ 'as CGI programs. Must be executable.'))
+ parser.add_option('-t', '--tls', dest='use_tls', action='store_true',
+ default=False, help='use TLS (wss://)')
+ parser.add_option('--tls-module', '--tls_module', dest='tls_module',
+ type='choice',
+                      choices=[_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL],
+ help='Use ssl module if "%s" is specified. '
+ 'Use pyOpenSSL module if "%s" is specified' %
+ (_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL))
+ parser.add_option('-k', '--private-key', '--private_key',
+ dest='private_key',
+ default='', help='TLS private key file.')
+ parser.add_option('-c', '--certificate', dest='certificate',
+ default='', help='TLS certificate file.')
+ parser.add_option('--tls-client-auth', dest='tls_client_auth',
+ action='store_true', default=False,
+ help='Requests TLS client auth on every connection.')
+ parser.add_option('--tls-client-cert-optional',
+ dest='tls_client_cert_optional',
+ action='store_true', default=False,
+ help=('Makes client certificate optional even though '
+ 'TLS client auth is enabled.'))
+ parser.add_option('--tls-client-ca', dest='tls_client_ca', default='',
+ help=('Specifies a pem file which contains a set of '
+ 'concatenated CA certificates which are used to '
+ 'validate certificates passed from clients'))
+ parser.add_option('--basic-auth', dest='use_basic_auth',
+ action='store_true', default=False,
+ help='Requires Basic authentication.')
+ parser.add_option('--basic-auth-credential',
+ dest='basic_auth_credential', default='test:test',
+ help='Specifies the credential of basic authentication '
+ 'by username:password pair (e.g. test:test).')
+ parser.add_option('-l', '--log-file', '--log_file', dest='log_file',
+ default='', help='Log file.')
+ # Custom log level:
+ # - FINE: Prints status of each frame processing step
+ parser.add_option('--log-level', '--log_level', type='choice',
+ dest='log_level', default='warn',
+ choices=['fine',
+ 'debug', 'info', 'warning', 'warn', 'error',
+ 'critical'],
+ help='Log level.')
+ parser.add_option('--deflate-log-level', '--deflate_log_level',
+ type='choice',
+ dest='deflate_log_level', default='warn',
+ choices=['debug', 'info', 'warning', 'warn', 'error',
+ 'critical'],
+ help='Log level for _Deflater and _Inflater.')
+ parser.add_option('--thread-monitor-interval-in-sec',
+ '--thread_monitor_interval_in_sec',
+ dest='thread_monitor_interval_in_sec',
+ type='int', default=-1,
+                      help=('If a positive integer is specified, run a thread '
+                            'monitor to show the status of server threads '
+                            'periodically at the specified interval in '
+                            'seconds. If a non-positive integer is specified, '
+                            'disable the thread monitor.'))
+ parser.add_option('--log-max', '--log_max', dest='log_max', type='int',
+ default=_DEFAULT_LOG_MAX_BYTES,
+ help='Log maximum bytes')
+ parser.add_option('--log-count', '--log_count', dest='log_count',
+ type='int', default=_DEFAULT_LOG_BACKUP_COUNT,
+ help='Log backup count')
+ parser.add_option('--allow-draft75', dest='allow_draft75',
+ action='store_true', default=False,
+ help='Obsolete option. Ignored.')
+ parser.add_option('--strict', dest='strict', action='store_true',
+ default=False, help='Obsolete option. Ignored.')
+ parser.add_option('-q', '--queue', dest='request_queue_size', type='int',
+ default=_DEFAULT_REQUEST_QUEUE_SIZE,
+ help='request queue size')
+
+ return parser
+
+
+class ThreadMonitor(threading.Thread):
+ daemon = True
+
+ def __init__(self, interval_in_sec):
+ threading.Thread.__init__(self, name='ThreadMonitor')
+
+ self._logger = util.get_class_logger(self)
+
+ self._interval_in_sec = interval_in_sec
+
+ def run(self):
+ while True:
+ thread_name_list = []
+ for thread in threading.enumerate():
+ thread_name_list.append(thread.name)
+ self._logger.info(
+ "%d active threads: %s",
+ threading.active_count(),
+ ', '.join(thread_name_list))
+ time.sleep(self._interval_in_sec)
+
+
+def _parse_args_and_config(args):
+ parser = _build_option_parser()
+
+ # First, parse options without configuration file.
+ temporary_options, temporary_args = parser.parse_args(args=args)
+ if temporary_args:
+ logging.critical(
+ 'Unrecognized positional arguments: %r', temporary_args)
+ sys.exit(1)
+
+ if temporary_options.config_file:
+ try:
+ config_fp = open(temporary_options.config_file, 'r')
+ except IOError, e:
+ logging.critical(
+ 'Failed to open configuration file %r: %r',
+ temporary_options.config_file,
+ e)
+ sys.exit(1)
+
+ config_parser = ConfigParser.SafeConfigParser()
+ config_parser.readfp(config_fp)
+ config_fp.close()
+
+ args_from_config = []
+ for name, value in config_parser.items('pywebsocket'):
+ args_from_config.append('--' + name)
+ args_from_config.append(value)
+ if args is None:
+ args = args_from_config
+ else:
+ args = args_from_config + args
+ return parser.parse_args(args=args)
+ else:
+ return temporary_options, temporary_args
+
+
+def _main(args=None):
+ """You can call this function from your own program, but please note that
+ this function has some side-effects that might affect your program. For
+ example, util.wrap_popen3_for_win use in this method replaces implementation
+ of os.popen3.
+ """
+
+ options, args = _parse_args_and_config(args=args)
+
+ os.chdir(options.document_root)
+
+ _configure_logging(options)
+
+ if options.allow_draft75:
+ logging.warning('--allow_draft75 option is obsolete.')
+
+ if options.strict:
+ logging.warning('--strict option is obsolete.')
+
+ # TODO(tyoshino): Clean up initialization of CGI related values. Move some
+ # of code here to WebSocketRequestHandler class if it's better.
+ options.cgi_directories = []
+ options.is_executable_method = None
+ if options.cgi_paths:
+ options.cgi_directories = options.cgi_paths.split(',')
+ if sys.platform in ('cygwin', 'win32'):
+ cygwin_path = None
+ # For Win32 Python, it is expected that CYGWIN_PATH
+ # is set to a directory of cygwin binaries.
+ # For example, websocket_server.py in Chromium sets CYGWIN_PATH to
+ # full path of third_party/cygwin/bin.
+ if 'CYGWIN_PATH' in os.environ:
+ cygwin_path = os.environ['CYGWIN_PATH']
+ util.wrap_popen3_for_win(cygwin_path)
+
+ def __check_script(scriptpath):
+ return util.get_script_interp(scriptpath, cygwin_path)
+
+ options.is_executable_method = __check_script
+
+ if options.use_tls:
+ if options.tls_module is None:
+ if _import_ssl():
+ options.tls_module = _TLS_BY_STANDARD_MODULE
+ logging.debug('Using ssl module')
+ elif _import_pyopenssl():
+ options.tls_module = _TLS_BY_PYOPENSSL
+ logging.debug('Using pyOpenSSL module')
+ else:
+ logging.critical(
+ 'TLS support requires ssl or pyOpenSSL module.')
+ sys.exit(1)
+ elif options.tls_module == _TLS_BY_STANDARD_MODULE:
+ if not _import_ssl():
+ logging.critical('ssl module is not available')
+ sys.exit(1)
+ elif options.tls_module == _TLS_BY_PYOPENSSL:
+ if not _import_pyopenssl():
+ logging.critical('pyOpenSSL module is not available')
+ sys.exit(1)
+ else:
+ logging.critical('Invalid --tls-module option: %r',
+ options.tls_module)
+ sys.exit(1)
+
+ if not options.private_key or not options.certificate:
+ logging.critical(
+ 'To use TLS, specify private_key and certificate.')
+ sys.exit(1)
+
+ if (options.tls_client_cert_optional and
+ not options.tls_client_auth):
+ logging.critical('Client authentication must be enabled to '
+ 'specify tls_client_cert_optional')
+ sys.exit(1)
+ else:
+ if options.tls_module is not None:
+ logging.critical('Use --tls-module option only together with '
+ '--use-tls option.')
+ sys.exit(1)
+
+ if options.tls_client_auth:
+ logging.critical('TLS must be enabled for client authentication.')
+ sys.exit(1)
+
+ if options.tls_client_cert_optional:
+ logging.critical('TLS must be enabled for client authentication.')
+ sys.exit(1)
+
+ if not options.scan_dir:
+ options.scan_dir = options.websock_handlers
+
+ if options.use_basic_auth:
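+        # For example, the default 'test:test' becomes 'Basic dGVzdDp0ZXN0',
+        # which parse_request compares verbatim against the client's
+        # Authorization header.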
+ options.basic_auth_credential = 'Basic ' + base64.b64encode(
+ options.basic_auth_credential)
+
+ try:
+ if options.thread_monitor_interval_in_sec > 0:
+ # Run a thread monitor to show the status of server threads for
+ # debugging.
+ ThreadMonitor(options.thread_monitor_interval_in_sec).start()
+
+ server = WebSocketServer(options)
+ server.serve_forever()
+ except Exception, e:
+ logging.critical('mod_pywebsocket: %s' % e)
+ logging.critical('mod_pywebsocket: %s' % util.get_stack_trace())
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ _main(sys.argv[1:])
+
+
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/stream.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/stream.py
new file mode 100644
index 0000000..edc5332
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/stream.py
@@ -0,0 +1,57 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file exports public symbols.
+"""
+
+
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import InvalidUTF8Exception
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+from mod_pywebsocket._stream_hixie75 import StreamHixie75
+from mod_pywebsocket._stream_hybi import Frame
+from mod_pywebsocket._stream_hybi import Stream
+from mod_pywebsocket._stream_hybi import StreamOptions
+
+# These methods are intended to be used by WebSocket client developers to have
+# their implementations receive broken data in tests.
+from mod_pywebsocket._stream_hybi import create_close_frame
+from mod_pywebsocket._stream_hybi import create_header
+from mod_pywebsocket._stream_hybi import create_length_header
+from mod_pywebsocket._stream_hybi import create_ping_frame
+from mod_pywebsocket._stream_hybi import create_pong_frame
+from mod_pywebsocket._stream_hybi import create_binary_frame
+from mod_pywebsocket._stream_hybi import create_text_frame
+from mod_pywebsocket._stream_hybi import create_closing_handshake_body
+
+
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/util.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/util.py
new file mode 100644
index 0000000..d224ae3
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/util.py
@@ -0,0 +1,416 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket utilities.
+"""
+
+
+import array
+import errno
+
+# Import hash classes from whichever module is available and recommended for
+# each Python version, and re-export those symbols. Use the sha and md5
+# modules in Python 2.4, and the hashlib module in Python 2.6.
+try:
+ import hashlib
+ md5_hash = hashlib.md5
+ sha1_hash = hashlib.sha1
+except ImportError:
+ import md5
+ import sha
+ md5_hash = md5.md5
+ sha1_hash = sha.sha
+
+import StringIO
+import logging
+import os
+import re
+import socket
+import traceback
+import zlib
+
+try:
+ from mod_pywebsocket import fast_masking
+except ImportError:
+ pass
+
+
+def get_stack_trace():
+ """Get the current stack trace as string.
+
+ This is needed to support Python 2.3.
+ TODO: Remove this when we only support Python 2.4 and above.
+ Use traceback.format_exc instead.
+ """
+
+ out = StringIO.StringIO()
+ traceback.print_exc(file=out)
+ return out.getvalue()
+
+
+def prepend_message_to_exception(message, exc):
+ """Prepend message to the exception."""
+
+ exc.args = (message + str(exc),)
+ return
+
+
+def __translate_interp(interp, cygwin_path):
+ """Translate interp program path for Win32 python to run cygwin program
+ (e.g. perl). Note that it doesn't support path that contains space,
+ which is typically true for Unix, where #!-script is written.
+ For Win32 python, cygwin_path is a directory of cygwin binaries.
+
+ Args:
+ interp: interp command line
+ cygwin_path: directory name of cygwin binary, or None
+ Returns:
+ translated interp command line.
+ """
+ if not cygwin_path:
+ return interp
+ m = re.match('^[^ ]*/([^ ]+)( .*)?', interp)
+ if m:
+ cmd = os.path.join(cygwin_path, m.group(1))
+ return cmd + m.group(2)
+ return interp
+
+
+def get_script_interp(script_path, cygwin_path=None):
+ """Gets #!-interpreter command line from the script.
+
+ It also fixes command path. When Cygwin Python is used, e.g. in WebKit,
+ it could run "/usr/bin/perl -wT hello.pl".
+ When Win32 Python is used, e.g. in Chromium, it couldn't. So, fix
+ "/usr/bin/perl" to "<cygwin_path>\perl.exe".
+
+ Args:
+ script_path: pathname of the script
+ cygwin_path: directory name of cygwin binary, or None
+ Returns:
+ #!-interpreter command line, or None if it is not #!-script.
+ """
+ fp = open(script_path)
+ line = fp.readline()
+ fp.close()
+ m = re.match('^#!(.*)', line)
+ if m:
+ return __translate_interp(m.group(1), cygwin_path)
+ return None
+
+
+def wrap_popen3_for_win(cygwin_path):
+ """Wrap popen3 to support #!-script on Windows.
+
+    Args:
+        cygwin_path: directory of the cygwin binaries, used when the command
+            path needs to be translated. None if no translation is required.
+ """
+
+ __orig_popen3 = os.popen3
+
+ def __wrap_popen3(cmd, mode='t', bufsize=-1):
+ cmdline = cmd.split(' ')
+ interp = get_script_interp(cmdline[0], cygwin_path)
+ if interp:
+ cmd = interp + ' ' + cmd
+ return __orig_popen3(cmd, mode, bufsize)
+
+ os.popen3 = __wrap_popen3
+
+
+def hexify(s):
+ return ' '.join(map(lambda x: '%02x' % ord(x), s))
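+
+# Illustrative example of hexify (each input byte becomes a two-digit hex
+# pair):
+#
+#   >>> hexify('\x00ab')
+#   '00 61 62'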
+
+
+def get_class_logger(o):
+ return logging.getLogger(
+ '%s.%s' % (o.__class__.__module__, o.__class__.__name__))
+
+
+class NoopMasker(object):
+ """A masking object that has the same interface as RepeatedXorMasker but
+ just returns the string passed in without making any change.
+ """
+
+ def __init__(self):
+ pass
+
+ def mask(self, s):
+ return s
+
+
+class RepeatedXorMasker(object):
+ """A masking object that applies XOR on the string given to mask method
+ with the masking bytes given to the constructor repeatedly. This object
+ remembers the position in the masking bytes the last mask method call
+ ended and resumes from that point on the next mask method call.
+ """
+
+ def __init__(self, masking_key):
+ self._masking_key = masking_key
+ self._masking_key_index = 0
+
+ def _mask_using_swig(self, s):
+ masked_data = fast_masking.mask(
+ s, self._masking_key, self._masking_key_index)
+ self._masking_key_index = (
+ (self._masking_key_index + len(s)) % len(self._masking_key))
+ return masked_data
+
+ def _mask_using_array(self, s):
+ result = array.array('B')
+ result.fromstring(s)
+
+        # Use temporary local variables to eliminate the cost of attribute
+        # access.
+ masking_key = map(ord, self._masking_key)
+ masking_key_size = len(masking_key)
+ masking_key_index = self._masking_key_index
+
+ for i in xrange(len(result)):
+ result[i] ^= masking_key[masking_key_index]
+ masking_key_index = (masking_key_index + 1) % masking_key_size
+
+ self._masking_key_index = masking_key_index
+
+ return result.tostring()
+
+ if 'fast_masking' in globals():
+ mask = _mask_using_swig
+ else:
+ mask = _mask_using_array
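+
+# Illustrative sketch (the key below is an arbitrary example): XOR masking is
+# an involution, so masking twice with two fresh maskers sharing the same key
+# restores the input.
+#
+#   >>> masked = RepeatedXorMasker('\x37\xfa\x21\x3d').mask('Hello')
+#   >>> RepeatedXorMasker('\x37\xfa\x21\x3d').mask(masked)
+#   'Hello'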
+
+
+# By making the wbits option negative, we suppress the CMF/FLG (2 octet) and
+# ADLER32 (4 octet) fields of zlib so that we can use the zlib module as a
+# plain deflate library. DICTID won't be added as long as we don't set a
+# dictionary. An LZ77 window of 32K will be used for both compression and
+# decompression. For decompression, 32K covers any window size. For
+# compression, we use 32K, so receivers must use 32K as well.
+#
+# The compression level is Z_DEFAULT_COMPRESSION. The level doesn't have to
+# match for decoding.
+#
+# See zconf.h, deflate.cc, inflate.cc of the zlib library, and zlibmodule.c of
+# Python. See also RFC 1950 (ZLIB 3.3).
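+#
+# A minimal sketch of the raw-deflate round trip described above, using only
+# the zlib module (illustrative, not part of the protocol code):
+#
+#   >>> import zlib
+#   >>> c = zlib.compressobj(
+#   ...     zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS)
+#   >>> raw = c.compress('hello') + c.flush(zlib.Z_SYNC_FLUSH)
+#   >>> zlib.decompressobj(-zlib.MAX_WBITS).decompress(raw)
+#   'hello'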
+
+
+class _Deflater(object):
+
+ def __init__(self, window_bits):
+ self._logger = get_class_logger(self)
+
+ self._compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -window_bits)
+
+ def compress(self, bytes):
+ compressed_bytes = self._compress.compress(bytes)
+ self._logger.debug('Compress input %r', bytes)
+ self._logger.debug('Compress result %r', compressed_bytes)
+ return compressed_bytes
+
+ def compress_and_flush(self, bytes):
+ compressed_bytes = self._compress.compress(bytes)
+ compressed_bytes += self._compress.flush(zlib.Z_SYNC_FLUSH)
+ self._logger.debug('Compress input %r', bytes)
+ self._logger.debug('Compress result %r', compressed_bytes)
+ return compressed_bytes
+
+ def compress_and_finish(self, bytes):
+ compressed_bytes = self._compress.compress(bytes)
+ compressed_bytes += self._compress.flush(zlib.Z_FINISH)
+ self._logger.debug('Compress input %r', bytes)
+ self._logger.debug('Compress result %r', compressed_bytes)
+ return compressed_bytes
+
+
+class _Inflater(object):
+
+ def __init__(self, window_bits):
+ self._logger = get_class_logger(self)
+ self._window_bits = window_bits
+
+ self._unconsumed = ''
+
+ self.reset()
+
+ def decompress(self, size):
+ if not (size == -1 or size > 0):
+ raise Exception('size must be -1 or positive')
+
+ data = ''
+
+ while True:
+ if size == -1:
+ data += self._decompress.decompress(self._unconsumed)
+                # See Python bug http://bugs.python.org/issue12050 to
+                # understand why the same code cannot be used to update
+                # self._unconsumed both here and in the else block.
+ self._unconsumed = ''
+ else:
+ data += self._decompress.decompress(
+ self._unconsumed, size - len(data))
+ self._unconsumed = self._decompress.unconsumed_tail
+ if self._decompress.unused_data:
+                # Encountered the final block (i.e. a block with BFINAL = 1)
+                # followed by a new stream (unused_data). We cannot use the
+                # same zlib.Decompress object for the new stream, so create a
+                # new Decompress object to decompress it.
+                #
+                # It's fine to ignore unconsumed_tail if unused_data is not
+                # empty.
+ self._unconsumed = self._decompress.unused_data
+ self.reset()
+ if size >= 0 and len(data) == size:
+ # data is filled. Don't call decompress again.
+ break
+ else:
+ # Re-invoke Decompress.decompress to try to decompress all
+ # available bytes before invoking read which blocks until
+ # any new byte is available.
+ continue
+ else:
+                # Here, since unused_data is empty, even if unconsumed_tail
+                # is not empty, the requested number of bytes is already in
+                # data, so we don't have to "continue" here.
+ break
+
+ if data:
+ self._logger.debug('Decompressed %r', data)
+ return data
+
+ def append(self, data):
+ self._logger.debug('Appended %r', data)
+ self._unconsumed += data
+
+ def reset(self):
+ self._logger.debug('Reset')
+ self._decompress = zlib.decompressobj(-self._window_bits)
+
+
+# Compresses/decompresses the given octets using the method introduced in
+# RFC 1979.
+
+
+class _RFC1979Deflater(object):
+ """A compressor class that applies DEFLATE to given byte sequence and
+ flushes using the algorithm described in the RFC1979 section 2.1.
+ """
+
+ def __init__(self, window_bits, no_context_takeover):
+ self._deflater = None
+ if window_bits is None:
+ window_bits = zlib.MAX_WBITS
+ self._window_bits = window_bits
+ self._no_context_takeover = no_context_takeover
+
+ def filter(self, bytes, end=True, bfinal=False):
+ if self._deflater is None:
+ self._deflater = _Deflater(self._window_bits)
+
+ if bfinal:
+ result = self._deflater.compress_and_finish(bytes)
+ # Add a padding block with BFINAL = 0 and BTYPE = 0.
+ result = result + chr(0)
+ self._deflater = None
+ return result
+
+ result = self._deflater.compress_and_flush(bytes)
+ if end:
+            # Strip the last 4 octets, which are the LEN and NLEN fields of
+            # a non-compressed block added for Z_SYNC_FLUSH.
+ result = result[:-4]
+
+ if self._no_context_takeover and end:
+ self._deflater = None
+
+ return result
+
+
+class _RFC1979Inflater(object):
+ """A decompressor class for byte sequence compressed and flushed following
+ the algorithm described in the RFC1979 section 2.1.
+ """
+
+ def __init__(self, window_bits=zlib.MAX_WBITS):
+ self._inflater = _Inflater(window_bits)
+
+ def filter(self, bytes):
+        # Restore the stripped LEN and NLEN fields of the non-compressed
+        # block added for Z_SYNC_FLUSH.
+ self._inflater.append(bytes + '\x00\x00\xff\xff')
+ return self._inflater.decompress(-1)
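+
+# Illustrative round trip through the two classes above (a sketch assuming
+# the default 32K window and context takeover):
+#
+#   >>> deflater = _RFC1979Deflater(None, False)
+#   >>> inflater = _RFC1979Inflater()
+#   >>> inflater.filter(deflater.filter('Hello'))
+#   'Hello'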
+
+
+class DeflateSocket(object):
+ """A wrapper class for socket object to intercept send and recv to perform
+ deflate compression and decompression transparently.
+ """
+
+ # Size of the buffer passed to recv to receive compressed data.
+ _RECV_SIZE = 4096
+
+ def __init__(self, socket):
+ self._socket = socket
+
+ self._logger = get_class_logger(self)
+
+ self._deflater = _Deflater(zlib.MAX_WBITS)
+ self._inflater = _Inflater(zlib.MAX_WBITS)
+
+ def recv(self, size):
+ """Receives data from the socket specified on the construction up
+ to the specified size. Once any data is available, returns it even
+ if it's smaller than the specified size.
+ """
+
+ # TODO(tyoshino): Allow call with size=0. It should block until any
+ # decompressed data is available.
+ if size <= 0:
+ raise Exception('Non-positive size passed')
+ while True:
+ data = self._inflater.decompress(size)
+ if len(data) != 0:
+ return data
+
+ read_data = self._socket.recv(DeflateSocket._RECV_SIZE)
+ if not read_data:
+ return ''
+ self._inflater.append(read_data)
+
+ def sendall(self, bytes):
+ self.send(bytes)
+
+ def send(self, bytes):
+ self._socket.sendall(self._deflater.compress_and_flush(bytes))
+ return len(bytes)
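+
+# Illustrative usage sketch; 'connected_socket' is a hypothetical socket
+# whose peer wraps its end of the connection the same way:
+#
+#   >>> deflate_socket = DeflateSocket(connected_socket)
+#   >>> deflate_socket.send('spam')
+#   4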
+
+
+# vi:sts=4 sw=4 et
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/xhr_benchmark_handler.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/xhr_benchmark_handler.py
new file mode 100644
index 0000000..7d5d35f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/xhr_benchmark_handler.py
@@ -0,0 +1,110 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-style
+# license that can be found in the COPYING file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+
+from mod_pywebsocket import util
+
+
+class XHRBenchmarkHandler(object):
+ def __init__(self, headers, rfile, wfile):
+ self._logger = util.get_class_logger(self)
+
+ self.headers = headers
+ self.rfile = rfile
+ self.wfile = wfile
+
+ def do_send(self):
+ content_length = int(self.headers.getheader('Content-Length'))
+
+ self._logger.debug('Requested to receive %s bytes', content_length)
+
+ RECEIVE_BLOCK_SIZE = 1024 * 1024
+
+ bytes_to_receive = content_length
+ while bytes_to_receive > 0:
+ bytes_to_receive_in_this_loop = bytes_to_receive
+ if bytes_to_receive_in_this_loop > RECEIVE_BLOCK_SIZE:
+ bytes_to_receive_in_this_loop = RECEIVE_BLOCK_SIZE
+ received_data = self.rfile.read(bytes_to_receive_in_this_loop)
+ for c in received_data:
+ if c != 'a':
+ self._logger.debug('Request body verification failed')
+ return
+ bytes_to_receive -= len(received_data)
+ if bytes_to_receive < 0:
+                self._logger.debug('Received %d more bytes than expected',
+                                   -bytes_to_receive)
+ return
+
+ # Return the number of received bytes back to the client.
+ response_body = '%d' % content_length
+ self.wfile.write(
+ 'HTTP/1.1 200 OK\r\n'
+ 'Content-Type: text/html\r\n'
+ 'Content-Length: %d\r\n'
+ '\r\n%s' % (len(response_body), response_body))
+ self.wfile.flush()
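+
+    # Illustrative exchange for do_send (sketch): the client POSTs a body of
+    # five 'a' characters with Content-Length: 5, and the handler replies
+    # with the body "5".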
+
+ def do_receive(self):
+ content_length = int(self.headers.getheader('Content-Length'))
+ request_body = self.rfile.read(content_length)
+
+ request_array = request_body.split(' ')
+ if len(request_array) < 2:
+ self._logger.debug('Malformed request body: %r', request_body)
+ return
+
+ # Parse the size parameter.
+ bytes_to_send = request_array[0]
+ try:
+ bytes_to_send = int(bytes_to_send)
+        except ValueError:
+ self._logger.debug('Malformed size parameter: %r', bytes_to_send)
+ return
+ self._logger.debug('Requested to send %s bytes', bytes_to_send)
+
+ # Parse the transfer encoding parameter.
+ chunked_mode = False
+ mode_parameter = request_array[1]
+ if mode_parameter == 'chunked':
+ self._logger.debug('Requested chunked transfer encoding')
+ chunked_mode = True
+ elif mode_parameter != 'none':
+ self._logger.debug('Invalid mode parameter: %r', mode_parameter)
+ return
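+
+        # For example (sketch): a request body of '65536 chunked' asks for
+        # 64 KiB of 'a' bytes with chunked transfer encoding, while
+        # '1024 none' asks for 1 KiB with a plain Content-Length response.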
+
+ # Write a header
+ response_header = (
+ 'HTTP/1.1 200 OK\r\n'
+ 'Content-Type: application/octet-stream\r\n')
+ if chunked_mode:
+ response_header += 'Transfer-Encoding: chunked\r\n\r\n'
+ else:
+ response_header += (
+ 'Content-Length: %d\r\n\r\n' % bytes_to_send)
+ self.wfile.write(response_header)
+ self.wfile.flush()
+
+ # Write a body
+ SEND_BLOCK_SIZE = 1024 * 1024
+
+ while bytes_to_send > 0:
+ bytes_to_send_in_this_loop = bytes_to_send
+ if bytes_to_send_in_this_loop > SEND_BLOCK_SIZE:
+ bytes_to_send_in_this_loop = SEND_BLOCK_SIZE
+
+ if chunked_mode:
+ self.wfile.write('%x\r\n' % bytes_to_send_in_this_loop)
+ self.wfile.write('a' * bytes_to_send_in_this_loop)
+ if chunked_mode:
+ self.wfile.write('\r\n')
+ self.wfile.flush()
+
+ bytes_to_send -= bytes_to_send_in_this_loop
+
+ if chunked_mode:
+ self.wfile.write('0\r\n\r\n')
+ self.wfile.flush()
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/pep8.py b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/pep8.py
new file mode 100755
index 0000000..f605f18
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/thirdparty/pep8.py
@@ -0,0 +1,1942 @@
+#!/usr/bin/env python
+# pep8.py - Check Python source code formatting, according to PEP 8
+# Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net>
+# Copyright (C) 2009-2014 Florent Xicluna <florent.xicluna@gmail.com>
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation files
+# (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge,
+# publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+r"""
+Check Python source code formatting, according to PEP 8.
+
+For usage and a list of options, try this:
+$ python pep8.py -h
+
+This program and its regression test suite live here:
+http://github.com/jcrocholl/pep8
+
+Groups of errors and warnings:
+E errors
+W warnings
+100 indentation
+200 whitespace
+300 blank lines
+400 imports
+500 line length
+600 deprecation
+700 statements
+900 syntax error
+"""
+from __future__ import with_statement
+
+__version__ = '1.5.7'
+
+import os
+import sys
+import re
+import time
+import inspect
+import keyword
+import tokenize
+from optparse import OptionParser
+from fnmatch import fnmatch
+try:
+ from configparser import RawConfigParser
+ from io import TextIOWrapper
+except ImportError:
+ from ConfigParser import RawConfigParser
+
+DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__'
+DEFAULT_IGNORE = 'E123,E226,E24'
+if sys.platform == 'win32':
+ DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
+else:
+ DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
+ os.path.expanduser('~/.config'), 'pep8')
+PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
+TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite')
+MAX_LINE_LENGTH = 79
+REPORT_FORMAT = {
+ 'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s',
+ 'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s',
+}
+
+PyCF_ONLY_AST = 1024
+SINGLETONS = frozenset(['False', 'None', 'True'])
+KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS
+UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-'])
+ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-'])
+WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%'])
+WS_NEEDED_OPERATORS = frozenset([
+ '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>',
+ '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '='])
+WHITESPACE = frozenset(' \t')
+NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
+SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT])
+# ERRORTOKEN is triggered by backticks in Python 3
+SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN])
+BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines']
+
+INDENT_REGEX = re.compile(r'([ \t]*)')
+RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,')
+RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,.*,\s*\w+\s*$')
+ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b')
+DOCSTRING_REGEX = re.compile(r'u?r?["\']')
+EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
+WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?: |\t)')
+COMPARE_SINGLETON_REGEX = re.compile(r'([=!]=)\s*(None|False|True)')
+COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+[^[({ ]+\s+(in|is)\s')
+COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s*type(?:s.\w+Type'
+ r'|\s*\(\s*([^)]*[^ )])\s*\))')
+KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS))
+OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)')
+LAMBDA_REGEX = re.compile(r'\blambda\b')
+HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$')
+
+# Work around Python < 2.6 behaviour, which does not generate NL after
+# a comment which is on a line by itself.
+COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n'
+
+
+##############################################################################
+# Plugins (check functions) for physical lines
+##############################################################################
+
+
+def tabs_or_spaces(physical_line, indent_char):
+ r"""Never mix tabs and spaces.
+
+ The most popular way of indenting Python is with spaces only. The
+ second-most popular way is with tabs only. Code indented with a mixture
+ of tabs and spaces should be converted to using spaces exclusively. When
+ invoking the Python command line interpreter with the -t option, it issues
+ warnings about code that illegally mixes tabs and spaces. When using -tt
+ these warnings become errors. These options are highly recommended!
+
+    Okay: if a == 0:\n        a = 1\n        b = 1
+    E101: if a == 0:\n        a = 1\n\tb = 1
+ """
+ indent = INDENT_REGEX.match(physical_line).group(1)
+ for offset, char in enumerate(indent):
+ if char != indent_char:
+ return offset, "E101 indentation contains mixed spaces and tabs"
+
+
+def tabs_obsolete(physical_line):
+ r"""For new projects, spaces-only are strongly recommended over tabs.
+
+ Okay: if True:\n return
+ W191: if True:\n\treturn
+ """
+ indent = INDENT_REGEX.match(physical_line).group(1)
+ if '\t' in indent:
+ return indent.index('\t'), "W191 indentation contains tabs"
+
+
+def trailing_whitespace(physical_line):
+ r"""Trailing whitespace is superfluous.
+
+    The warning returned varies depending on whether the line itself is
+    blank, for easier filtering by those who want to indent their blank
+    lines.
+
+ Okay: spam(1)\n#
+ W291: spam(1) \n#
+ W293: class Foo(object):\n \n bang = 12
+ """
+ physical_line = physical_line.rstrip('\n') # chr(10), newline
+ physical_line = physical_line.rstrip('\r') # chr(13), carriage return
+ physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L
+ stripped = physical_line.rstrip(' \t\v')
+ if physical_line != stripped:
+ if stripped:
+ return len(stripped), "W291 trailing whitespace"
+ else:
+ return 0, "W293 blank line contains whitespace"
+
+
+def trailing_blank_lines(physical_line, lines, line_number, total_lines):
+ r"""Trailing blank lines are superfluous.
+
+ Okay: spam(1)
+ W391: spam(1)\n
+
+    However, the last line should end with a newline (warning W292).
+ """
+ if line_number == total_lines:
+ stripped_last_line = physical_line.rstrip()
+ if not stripped_last_line:
+ return 0, "W391 blank line at end of file"
+ if stripped_last_line == physical_line:
+ return len(physical_line), "W292 no newline at end of file"
+
+
+def maximum_line_length(physical_line, max_line_length, multiline):
+ r"""Limit all lines to a maximum of 79 characters.
+
+ There are still many devices around that are limited to 80 character
+ lines; plus, limiting windows to 80 characters makes it possible to have
+ several windows side-by-side. The default wrapping on such devices looks
+ ugly. Therefore, please limit all lines to a maximum of 79 characters.
+ For flowing long blocks of text (docstrings or comments), limiting the
+ length to 72 characters is recommended.
+
+ Reports error E501.
+ """
+ line = physical_line.rstrip()
+ length = len(line)
+ if length > max_line_length and not noqa(line):
+ # Special case for long URLs in multi-line docstrings or comments,
+        # but still report the error when the first 72 chars are whitespace.
+ chunks = line.split()
+ if ((len(chunks) == 1 and multiline) or
+ (len(chunks) == 2 and chunks[0] == '#')) and \
+ len(line) - len(chunks[-1]) < max_line_length - 7:
+ return
+ if hasattr(line, 'decode'): # Python 2
+ # The line could contain multi-byte characters
+ try:
+ length = len(line.decode('utf-8'))
+ except UnicodeError:
+ pass
+ if length > max_line_length:
+ return (max_line_length, "E501 line too long "
+ "(%d > %d characters)" % (length, max_line_length))
+
+
+##############################################################################
+# Plugins (check functions) for logical lines
+##############################################################################
+
+
+def blank_lines(logical_line, blank_lines, indent_level, line_number,
+ blank_before, previous_logical, previous_indent_level):
+ r"""Separate top-level function and class definitions with two blank lines.
+
+ Method definitions inside a class are separated by a single blank line.
+
+ Extra blank lines may be used (sparingly) to separate groups of related
+ functions. Blank lines may be omitted between a bunch of related
+ one-liners (e.g. a set of dummy implementations).
+
+ Use blank lines in functions, sparingly, to indicate logical sections.
+
+    Okay: def a():\n    pass\n\n\ndef b():\n    pass
+    Okay: def a():\n    pass\n\n\n# Foo\n# Bar\n\ndef b():\n    pass
+
+    E301: class Foo:\n    b = 0\n    def bar():\n        pass
+    E302: def a():\n    pass\n\ndef b(n):\n    pass
+    E303: def a():\n    pass\n\n\n\ndef b(n):\n    pass
+    E303: def a():\n\n\n\n    pass
+    E304: @decorator\n\ndef a():\n    pass
+ """
+ if line_number < 3 and not previous_logical:
+ return # Don't expect blank lines before the first line
+ if previous_logical.startswith('@'):
+ if blank_lines:
+ yield 0, "E304 blank lines found after function decorator"
+ elif blank_lines > 2 or (indent_level and blank_lines == 2):
+ yield 0, "E303 too many blank lines (%d)" % blank_lines
+ elif logical_line.startswith(('def ', 'class ', '@')):
+ if indent_level:
+ if not (blank_before or previous_indent_level < indent_level or
+ DOCSTRING_REGEX.match(previous_logical)):
+ yield 0, "E301 expected 1 blank line, found 0"
+ elif blank_before != 2:
+ yield 0, "E302 expected 2 blank lines, found %d" % blank_before
+
+
+def extraneous_whitespace(logical_line):
+ r"""Avoid extraneous whitespace.
+
+ Avoid extraneous whitespace in these situations:
+ - Immediately inside parentheses, brackets or braces.
+ - Immediately before a comma, semicolon, or colon.
+
+ Okay: spam(ham[1], {eggs: 2})
+ E201: spam( ham[1], {eggs: 2})
+ E201: spam(ham[ 1], {eggs: 2})
+ E201: spam(ham[1], { eggs: 2})
+ E202: spam(ham[1], {eggs: 2} )
+ E202: spam(ham[1 ], {eggs: 2})
+ E202: spam(ham[1], {eggs: 2 })
+
+ E203: if x == 4: print x, y; x, y = y , x
+ E203: if x == 4: print x, y ; x, y = y, x
+ E203: if x == 4 : print x, y; x, y = y, x
+ """
+ line = logical_line
+ for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line):
+ text = match.group()
+ char = text.strip()
+ found = match.start()
+ if text == char + ' ':
+ # assert char in '([{'
+ yield found + 1, "E201 whitespace after '%s'" % char
+ elif line[found - 1] != ',':
+ code = ('E202' if char in '}])' else 'E203') # if char in ',;:'
+ yield found, "%s whitespace before '%s'" % (code, char)
+
+
+def whitespace_around_keywords(logical_line):
+ r"""Avoid extraneous whitespace around keywords.
+
+ Okay: True and False
+    E271: True and  False
+    E272: True  and False
+ E273: True and\tFalse
+ E274: True\tand False
+ """
+ for match in KEYWORD_REGEX.finditer(logical_line):
+ before, after = match.groups()
+
+ if '\t' in before:
+ yield match.start(1), "E274 tab before keyword"
+ elif len(before) > 1:
+ yield match.start(1), "E272 multiple spaces before keyword"
+
+ if '\t' in after:
+ yield match.start(2), "E273 tab after keyword"
+ elif len(after) > 1:
+ yield match.start(2), "E271 multiple spaces after keyword"
+
+
+def missing_whitespace(logical_line):
+ r"""Each comma, semicolon or colon should be followed by whitespace.
+
+ Okay: [a, b]
+ Okay: (3,)
+ Okay: a[1:4]
+ Okay: a[:4]
+ Okay: a[1:]
+ Okay: a[1:4:2]
+ E231: ['a','b']
+ E231: foo(bar,baz)
+ E231: [{'a':'b'}]
+ """
+ line = logical_line
+ for index in range(len(line) - 1):
+ char = line[index]
+ if char in ',;:' and line[index + 1] not in WHITESPACE:
+ before = line[:index]
+ if char == ':' and before.count('[') > before.count(']') and \
+ before.rfind('{') < before.rfind('['):
+ continue # Slice syntax, no space required
+ if char == ',' and line[index + 1] == ')':
+ continue # Allow tuple with only one element: (3,)
+ yield index, "E231 missing whitespace after '%s'" % char
+
+
+def indentation(logical_line, previous_logical, indent_char,
+ indent_level, previous_indent_level):
+ r"""Use 4 spaces per indentation level.
+
+ For really old code that you don't want to mess up, you can continue to
+ use 8-space tabs.
+
+ Okay: a = 1
+    Okay: if a == 0:\n    a = 1
+    E111:   a = 1
+
+    Okay: for item in items:\n    pass
+    E112: for item in items:\npass
+
+    Okay: a = 1\nb = 2
+    E113: a = 1\n    b = 2
+ """
+ if indent_char == ' ' and indent_level % 4:
+ yield 0, "E111 indentation is not a multiple of four"
+ indent_expect = previous_logical.endswith(':')
+ if indent_expect and indent_level <= previous_indent_level:
+ yield 0, "E112 expected an indented block"
+ if indent_level > previous_indent_level and not indent_expect:
+ yield 0, "E113 unexpected indentation"
+
+
+def continued_indentation(logical_line, tokens, indent_level, hang_closing,
+ indent_char, noqa, verbose):
+ r"""Continuation lines indentation.
+
+ Continuation lines should align wrapped elements either vertically
+ using Python's implicit line joining inside parentheses, brackets
+ and braces, or using a hanging indent.
+
+ When using a hanging indent these considerations should be applied:
+ - there should be no arguments on the first line, and
+ - further indentation should be used to clearly distinguish itself as a
+ continuation line.
+
+ Okay: a = (\n)
+    E123: a = (\n    )
+
+    Okay: a = (\n    42)
+    E121: a = (\n   42)
+    E122: a = (\n42)
+    E123: a = (\n    42\n    )
+    E124: a = (24,\n     42\n)
+    E125: if (\n    b):\n    pass
+    E126: a = (\n        42)
+    E127: a = (24,\n      42)
+    E128: a = (24,\n    42)
+    E129: if (a or\n    b):\n    pass
+    E131: a = (\n    42\n 24)
+ """
+ first_row = tokens[0][2][0]
+ nrows = 1 + tokens[-1][2][0] - first_row
+ if noqa or nrows == 1:
+ return
+
+ # indent_next tells us whether the next block is indented; assuming
+ # that it is indented by 4 spaces, then we should not allow 4-space
+ # indents on the final continuation line; in turn, some other
+ # indents are allowed to have an extra 4 spaces.
+ indent_next = logical_line.endswith(':')
+
+ row = depth = 0
+ valid_hangs = (4,) if indent_char != '\t' else (4, 8)
+ # remember how many brackets were opened on each line
+ parens = [0] * nrows
+ # relative indents of physical lines
+ rel_indent = [0] * nrows
+ # for each depth, collect a list of opening rows
+ open_rows = [[0]]
+ # for each depth, memorize the hanging indentation
+ hangs = [None]
+ # visual indents
+ indent_chances = {}
+ last_indent = tokens[0][2]
+ visual_indent = None
+ # for each depth, memorize the visual indent column
+ indent = [last_indent[1]]
+ if verbose >= 3:
+ print(">>> " + tokens[0][4].rstrip())
+
+ for token_type, text, start, end, line in tokens:
+
+ newline = row < start[0] - first_row
+ if newline:
+ row = start[0] - first_row
+ newline = not last_token_multiline and token_type not in NEWLINE
+
+ if newline:
+ # this is the beginning of a continuation line.
+ last_indent = start
+ if verbose >= 3:
+ print("... " + line.rstrip())
+
+ # record the initial indent.
+ rel_indent[row] = expand_indent(line) - indent_level
+
+ # identify closing bracket
+ close_bracket = (token_type == tokenize.OP and text in ']})')
+
+ # is the indent relative to an opening bracket line?
+ for open_row in reversed(open_rows[depth]):
+ hang = rel_indent[row] - rel_indent[open_row]
+ hanging_indent = hang in valid_hangs
+ if hanging_indent:
+ break
+ if hangs[depth]:
+ hanging_indent = (hang == hangs[depth])
+ # is there any chance of visual indent?
+ visual_indent = (not close_bracket and hang > 0 and
+ indent_chances.get(start[1]))
+
+ if close_bracket and indent[depth]:
+ # closing bracket for visual indent
+ if start[1] != indent[depth]:
+ yield (start, "E124 closing bracket does not match "
+ "visual indentation")
+ elif close_bracket and not hang:
+ # closing bracket matches indentation of opening bracket's line
+ if hang_closing:
+ yield start, "E133 closing bracket is missing indentation"
+ elif indent[depth] and start[1] < indent[depth]:
+ if visual_indent is not True:
+ # visual indent is broken
+ yield (start, "E128 continuation line "
+ "under-indented for visual indent")
+ elif hanging_indent or (indent_next and rel_indent[row] == 8):
+ # hanging indent is verified
+ if close_bracket and not hang_closing:
+ yield (start, "E123 closing bracket does not match "
+ "indentation of opening bracket's line")
+ hangs[depth] = hang
+ elif visual_indent is True:
+ # visual indent is verified
+ indent[depth] = start[1]
+ elif visual_indent in (text, str):
+ # ignore token lined up with matching one from a previous line
+ pass
+ else:
+ # indent is broken
+ if hang <= 0:
+ error = "E122", "missing indentation or outdented"
+ elif indent[depth]:
+ error = "E127", "over-indented for visual indent"
+ elif not close_bracket and hangs[depth]:
+ error = "E131", "unaligned for hanging indent"
+ else:
+ hangs[depth] = hang
+ if hang > 4:
+ error = "E126", "over-indented for hanging indent"
+ else:
+ error = "E121", "under-indented for hanging indent"
+ yield start, "%s continuation line %s" % error
+
+ # look for visual indenting
+ if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT)
+ and not indent[depth]):
+ indent[depth] = start[1]
+ indent_chances[start[1]] = True
+ if verbose >= 4:
+ print("bracket depth %s indent to %s" % (depth, start[1]))
+ # deal with implicit string concatenation
+ elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
+ text in ('u', 'ur', 'b', 'br')):
+ indent_chances[start[1]] = str
+ # special case for the "if" statement because len("if (") == 4
+ elif not indent_chances and not row and not depth and text == 'if':
+ indent_chances[end[1] + 1] = True
+ elif text == ':' and line[end[1]:].isspace():
+ open_rows[depth].append(row)
+
+ # keep track of bracket depth
+ if token_type == tokenize.OP:
+ if text in '([{':
+ depth += 1
+ indent.append(0)
+ hangs.append(None)
+ if len(open_rows) == depth:
+ open_rows.append([])
+ open_rows[depth].append(row)
+ parens[row] += 1
+ if verbose >= 4:
+ print("bracket depth %s seen, col %s, visual min = %s" %
+ (depth, start[1], indent[depth]))
+ elif text in ')]}' and depth > 0:
+ # parent indents should not be more than this one
+ prev_indent = indent.pop() or last_indent[1]
+ hangs.pop()
+ for d in range(depth):
+ if indent[d] > prev_indent:
+ indent[d] = 0
+ for ind in list(indent_chances):
+ if ind >= prev_indent:
+ del indent_chances[ind]
+ del open_rows[depth + 1:]
+ depth -= 1
+ if depth:
+ indent_chances[indent[depth]] = True
+ for idx in range(row, -1, -1):
+ if parens[idx]:
+ parens[idx] -= 1
+ break
+ assert len(indent) == depth + 1
+ if start[1] not in indent_chances:
+ # allow to line up tokens
+ indent_chances[start[1]] = text
+
+ last_token_multiline = (start[0] != end[0])
+ if last_token_multiline:
+ rel_indent[end[0] - first_row] = rel_indent[row]
+
+ if indent_next and expand_indent(line) == indent_level + 4:
+ pos = (start[0], indent[0] + 4)
+ if visual_indent:
+ code = "E129 visually indented line"
+ else:
+ code = "E125 continuation line"
+ yield pos, "%s with same indent as next logical line" % code
+
+
+def whitespace_before_parameters(logical_line, tokens):
+ r"""Avoid extraneous whitespace.
+
+ Avoid extraneous whitespace in the following situations:
+ - before the open parenthesis that starts the argument list of a
+ function call.
+ - before the open parenthesis that starts an indexing or slicing.
+
+ Okay: spam(1)
+ E211: spam (1)
+
+ Okay: dict['key'] = list[index]
+ E211: dict ['key'] = list[index]
+ E211: dict['key'] = list [index]
+ """
+ prev_type, prev_text, __, prev_end, __ = tokens[0]
+ for index in range(1, len(tokens)):
+ token_type, text, start, end, __ = tokens[index]
+ if (token_type == tokenize.OP and
+ text in '([' and
+ start != prev_end and
+ (prev_type == tokenize.NAME or prev_text in '}])') and
+ # Syntax "class A (B):" is allowed, but avoid it
+ (index < 2 or tokens[index - 2][1] != 'class') and
+ # Allow "return (a.foo for a in range(5))"
+ not keyword.iskeyword(prev_text)):
+ yield prev_end, "E211 whitespace before '%s'" % text
+ prev_type = token_type
+ prev_text = text
+ prev_end = end
+
+
+def whitespace_around_operator(logical_line):
+ r"""Avoid extraneous whitespace around an operator.
+
+ Okay: a = 12 + 3
+    E221: a = 4  + 5
+    E222: a = 4 +  5
+ E223: a = 4\t+ 5
+ E224: a = 4 +\t5
+ """
+ for match in OPERATOR_REGEX.finditer(logical_line):
+ before, after = match.groups()
+
+ if '\t' in before:
+ yield match.start(1), "E223 tab before operator"
+ elif len(before) > 1:
+ yield match.start(1), "E221 multiple spaces before operator"
+
+ if '\t' in after:
+ yield match.start(2), "E224 tab after operator"
+ elif len(after) > 1:
+ yield match.start(2), "E222 multiple spaces after operator"
+
+
+def missing_whitespace_around_operator(logical_line, tokens):
+ r"""Surround operators with a single space on either side.
+
+ - Always surround these binary operators with a single space on
+ either side: assignment (=), augmented assignment (+=, -= etc.),
+ comparisons (==, <, >, !=, <=, >=, in, not in, is, is not),
+ Booleans (and, or, not).
+
+ - If operators with different priorities are used, consider adding
+ whitespace around the operators with the lowest priorities.
+
+ Okay: i = i + 1
+ Okay: submitted += 1
+ Okay: x = x * 2 - 1
+ Okay: hypot2 = x * x + y * y
+ Okay: c = (a + b) * (a - b)
+ Okay: foo(bar, key='word', *args, **kwargs)
+ Okay: alpha[:-i]
+
+ E225: i=i+1
+ E225: submitted +=1
+ E225: x = x /2 - 1
+ E225: z = x **y
+ E226: c = (a+b) * (a-b)
+ E226: hypot2 = x*x + y*y
+ E227: c = a|b
+ E228: msg = fmt%(errno, errmsg)
+ """
+ parens = 0
+ need_space = False
+ prev_type = tokenize.OP
+ prev_text = prev_end = None
+ for token_type, text, start, end, line in tokens:
+ if token_type in SKIP_COMMENTS:
+ continue
+ if text in ('(', 'lambda'):
+ parens += 1
+ elif text == ')':
+ parens -= 1
+ if need_space:
+ if start != prev_end:
+ # Found a (probably) needed space
+ if need_space is not True and not need_space[1]:
+ yield (need_space[0],
+ "E225 missing whitespace around operator")
+ need_space = False
+ elif text == '>' and prev_text in ('<', '-'):
+ # Tolerate the "<>" operator, even if running Python 3
+ # Deal with Python 3's annotated return value "->"
+ pass
+ else:
+ if need_space is True or need_space[1]:
+ # A needed trailing space was not found
+ yield prev_end, "E225 missing whitespace around operator"
+ else:
+ code, optype = 'E226', 'arithmetic'
+ if prev_text == '%':
+ code, optype = 'E228', 'modulo'
+ elif prev_text not in ARITHMETIC_OP:
+ code, optype = 'E227', 'bitwise or shift'
+ yield (need_space[0], "%s missing whitespace "
+ "around %s operator" % (code, optype))
+ need_space = False
+ elif token_type == tokenize.OP and prev_end is not None:
+ if text == '=' and parens:
+ # Allow keyword args or defaults: foo(bar=None).
+ pass
+ elif text in WS_NEEDED_OPERATORS:
+ need_space = True
+ elif text in UNARY_OPERATORS:
+ # Check if the operator is being used as a binary operator
+ # Allow unary operators: -123, -x, +1.
+ # Allow argument unpacking: foo(*args, **kwargs).
+ if (prev_text in '}])' if prev_type == tokenize.OP
+ else prev_text not in KEYWORDS):
+ need_space = None
+ elif text in WS_OPTIONAL_OPERATORS:
+ need_space = None
+
+ if need_space is None:
+ # Surrounding space is optional, but ensure that
+ # trailing space matches opening space
+ need_space = (prev_end, start != prev_end)
+ elif need_space and start == prev_end:
+ # A needed opening space was not found
+ yield prev_end, "E225 missing whitespace around operator"
+ need_space = False
+ prev_type = token_type
+ prev_text = text
+ prev_end = end
+
+
+def whitespace_around_comma(logical_line):
+ r"""Avoid extraneous whitespace after a comma or a colon.
+
+ Note: these checks are disabled by default
+
+ Okay: a = (1, 2)
+    E241: a = (1,  2)
+ E242: a = (1,\t2)
+ """
+ line = logical_line
+ for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line):
+ found = m.start() + 1
+ if '\t' in m.group():
+ yield found, "E242 tab after '%s'" % m.group()[0]
+ else:
+ yield found, "E241 multiple spaces after '%s'" % m.group()[0]
+
+
+def whitespace_around_named_parameter_equals(logical_line, tokens):
+ r"""Don't use spaces around the '=' sign in function arguments.
+
+ Don't use spaces around the '=' sign when used to indicate a
+ keyword argument or a default parameter value.
+
+ Okay: def complex(real, imag=0.0):
+ Okay: return magic(r=real, i=imag)
+ Okay: boolean(a == b)
+ Okay: boolean(a != b)
+ Okay: boolean(a <= b)
+ Okay: boolean(a >= b)
+
+ E251: def complex(real, imag = 0.0):
+ E251: return magic(r = real, i = imag)
+ """
+ parens = 0
+ no_space = False
+ prev_end = None
+ message = "E251 unexpected spaces around keyword / parameter equals"
+ for token_type, text, start, end, line in tokens:
+ if token_type == tokenize.NL:
+ continue
+ if no_space:
+ no_space = False
+ if start != prev_end:
+ yield (prev_end, message)
+ elif token_type == tokenize.OP:
+ if text == '(':
+ parens += 1
+ elif text == ')':
+ parens -= 1
+ elif parens and text == '=':
+ no_space = True
+ if start != prev_end:
+ yield (prev_end, message)
+ prev_end = end
+
+
+def whitespace_before_comment(logical_line, tokens):
+ r"""Separate inline comments by at least two spaces.
+
+ An inline comment is a comment on the same line as a statement. Inline
+ comments should be separated by at least two spaces from the statement.
+ They should start with a # and a single space.
+
+ Each line of a block comment starts with a # and a single space
+ (unless it is indented text inside the comment).
+
+    Okay: x = x + 1  # Increment x
+    Okay: x = x + 1    # Increment x
+    Okay: # Block comment
+    E261: x = x + 1 # Increment x
+    E262: x = x + 1  #Increment x
+    E262: x = x + 1  #  Increment x
+ E265: #Block comment
+ """
+ prev_end = (0, 0)
+ for token_type, text, start, end, line in tokens:
+ if token_type == tokenize.COMMENT:
+ inline_comment = line[:start[1]].strip()
+ if inline_comment:
+ if prev_end[0] == start[0] and start[1] < prev_end[1] + 2:
+ yield (prev_end,
+ "E261 at least two spaces before inline comment")
+ symbol, sp, comment = text.partition(' ')
+ bad_prefix = symbol not in ('#', '#:')
+ if inline_comment:
+ if bad_prefix or comment[:1].isspace():
+ yield start, "E262 inline comment should start with '# '"
+ elif bad_prefix:
+ if text.rstrip('#') and (start[0] > 1 or symbol[1] != '!'):
+ yield start, "E265 block comment should start with '# '"
+ elif token_type != tokenize.NL:
+ prev_end = end
+
+
+def imports_on_separate_lines(logical_line):
+ r"""Imports should usually be on separate lines.
+
+ Okay: import os\nimport sys
+ E401: import sys, os
+
+ Okay: from subprocess import Popen, PIPE
+    Okay: from myclass import MyClass
+ Okay: from foo.bar.yourclass import YourClass
+ Okay: import myclass
+ Okay: import foo.bar.yourclass
+ """
+ line = logical_line
+ if line.startswith('import '):
+ found = line.find(',')
+ if -1 < found and ';' not in line[:found]:
+ yield found, "E401 multiple imports on one line"
+
+
+def compound_statements(logical_line):
+ r"""Compound statements (on the same line) are generally discouraged.
+
+ While sometimes it's okay to put an if/for/while with a small body
+ on the same line, never do this for multi-clause statements.
+ Also avoid folding such long lines!
+
+ Okay: if foo == 'blah':\n do_blah_thing()
+ Okay: do_one()
+ Okay: do_two()
+ Okay: do_three()
+
+ E701: if foo == 'blah': do_blah_thing()
+ E701: for x in lst: total += x
+ E701: while t < 10: t = delay()
+ E701: if foo == 'blah': do_blah_thing()
+ E701: else: do_non_blah_thing()
+ E701: try: something()
+ E701: finally: cleanup()
+ E701: if foo == 'blah': one(); two(); three()
+
+ E702: do_one(); do_two(); do_three()
+ E703: do_four(); # useless semicolon
+ """
+ line = logical_line
+ last_char = len(line) - 1
+ found = line.find(':')
+ while -1 < found < last_char:
+ before = line[:found]
+ if (before.count('{') <= before.count('}') and # {'a': 1} (dict)
+ before.count('[') <= before.count(']') and # [1:2] (slice)
+ before.count('(') <= before.count(')') and # (Python 3 annotation)
+ not LAMBDA_REGEX.search(before)): # lambda x: x
+ yield found, "E701 multiple statements on one line (colon)"
+ found = line.find(':', found + 1)
+ found = line.find(';')
+ while -1 < found:
+ if found < last_char:
+ yield found, "E702 multiple statements on one line (semicolon)"
+ else:
+ yield found, "E703 statement ends with a semicolon"
+ found = line.find(';', found + 1)
+
+
+def explicit_line_join(logical_line, tokens):
+ r"""Avoid explicit line join between brackets.
+
+ The preferred way of wrapping long lines is by using Python's implied line
+ continuation inside parentheses, brackets and braces. Long lines can be
+ broken over multiple lines by wrapping expressions in parentheses. These
+ should be used in preference to using a backslash for line continuation.
+
+ E502: aaa = [123, \\n 123]
+ E502: aaa = ("bbb " \\n "ccc")
+
+ Okay: aaa = [123,\n 123]
+ Okay: aaa = ("bbb "\n "ccc")
+ Okay: aaa = "bbb " \\n "ccc"
+ """
+    prev_start = prev_end = parens = 0
+    backslash = None  # set when a physical line ends with a backslash
+ for token_type, text, start, end, line in tokens:
+ if start[0] != prev_start and parens and backslash:
+ yield backslash, "E502 the backslash is redundant between brackets"
+ if end[0] != prev_end:
+ if line.rstrip('\r\n').endswith('\\'):
+ backslash = (end[0], len(line.splitlines()[-1]) - 1)
+ else:
+ backslash = None
+ prev_start = prev_end = end[0]
+ else:
+ prev_start = start[0]
+ if token_type == tokenize.OP:
+ if text in '([{':
+ parens += 1
+ elif text in ')]}':
+ parens -= 1
+
+
+def comparison_to_singleton(logical_line, noqa):
+ r"""Comparison to singletons should use "is" or "is not".
+
+ Comparisons to singletons like None should always be done
+ with "is" or "is not", never the equality operators.
+
+ Okay: if arg is not None:
+ E711: if arg != None:
+ E712: if arg == True:
+
+ Also, beware of writing if x when you really mean if x is not None --
+ e.g. when testing whether a variable or argument that defaults to None was
+ set to some other value. The other value might have a type (such as a
+ container) that could be false in a boolean context!
+ """
+ match = not noqa and COMPARE_SINGLETON_REGEX.search(logical_line)
+ if match:
+ same = (match.group(1) == '==')
+ singleton = match.group(2)
+ msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton)
+ if singleton in ('None',):
+ code = 'E711'
+ else:
+ code = 'E712'
+ nonzero = ((singleton == 'True' and same) or
+ (singleton == 'False' and not same))
+ msg += " or 'if %scond:'" % ('' if nonzero else 'not ')
+ yield match.start(1), ("%s comparison to %s should be %s" %
+ (code, singleton, msg))
+
+
+def comparison_negative(logical_line):
+ r"""Negative comparison should be done using "not in" and "is not".
+
+ Okay: if x not in y:\n pass
+ Okay: assert (X in Y or X is Z)
+ Okay: if not (X in Y):\n pass
+ Okay: zz = x is not y
+ E713: Z = not X in Y
+ E713: if not X.B in Y:\n pass
+ E714: if not X is Y:\n pass
+ E714: Z = not X.B is Y
+ """
+ match = COMPARE_NEGATIVE_REGEX.search(logical_line)
+ if match:
+ pos = match.start(1)
+ if match.group(2) == 'in':
+ yield pos, "E713 test for membership should be 'not in'"
+ else:
+ yield pos, "E714 test for object identity should be 'is not'"
+
+
+def comparison_type(logical_line):
+ r"""Object type comparisons should always use isinstance().
+
+ Do not compare types directly.
+
+ Okay: if isinstance(obj, int):
+ E721: if type(obj) is type(1):
+
+ When checking if an object is a string, keep in mind that it might be a
+ unicode string too! In Python 2.3, str and unicode have a common base
+ class, basestring, so you can do:
+
+ Okay: if isinstance(obj, basestring):
+ Okay: if type(a1) is type(b1):
+ """
+ match = COMPARE_TYPE_REGEX.search(logical_line)
+ if match:
+ inst = match.group(1)
+ if inst and isidentifier(inst) and inst not in SINGLETONS:
+ return # Allow comparison for types which are not obvious
+ yield match.start(), "E721 do not compare types, use 'isinstance()'"
+
+
+def python_3000_has_key(logical_line, noqa):
+ r"""The {}.has_key() method is removed in Python 3: use the 'in' operator.
+
+ Okay: if "alph" in d:\n print d["alph"]
+ W601: assert d.has_key('alph')
+ """
+ pos = logical_line.find('.has_key(')
+ if pos > -1 and not noqa:
+ yield pos, "W601 .has_key() is deprecated, use 'in'"
+
+
+def python_3000_raise_comma(logical_line):
+ r"""When raising an exception, use "raise ValueError('message')".
+
+ The older form is removed in Python 3.
+
+ Okay: raise DummyError("Message")
+ W602: raise DummyError, "Message"
+ """
+ match = RAISE_COMMA_REGEX.match(logical_line)
+ if match and not RERAISE_COMMA_REGEX.match(logical_line):
+ yield match.end() - 1, "W602 deprecated form of raising exception"
+
+
+def python_3000_not_equal(logical_line):
+ r"""New code should always use != instead of <>.
+
+ The older syntax is removed in Python 3.
+
+ Okay: if a != 'no':
+ W603: if a <> 'no':
+ """
+ pos = logical_line.find('<>')
+ if pos > -1:
+ yield pos, "W603 '<>' is deprecated, use '!='"
+
+
+def python_3000_backticks(logical_line):
+ r"""Backticks are removed in Python 3: use repr() instead.
+
+ Okay: val = repr(1 + 2)
+ W604: val = `1 + 2`
+ """
+ pos = logical_line.find('`')
+ if pos > -1:
+ yield pos, "W604 backticks are deprecated, use 'repr()'"
+
+
+##############################################################################
+# Helper functions
+##############################################################################
+
+
+if '' == ''.encode():
+ # Python 2: implicit encoding.
+ def readlines(filename):
+ """Read the source code."""
+ with open(filename, 'rU') as f:
+ return f.readlines()
+ isidentifier = re.compile(r'[a-zA-Z_]\w*').match
+ stdin_get_value = sys.stdin.read
+else:
+ # Python 3
+ def readlines(filename):
+ """Read the source code."""
+ try:
+ with open(filename, 'rb') as f:
+ (coding, lines) = tokenize.detect_encoding(f.readline)
+ f = TextIOWrapper(f, coding, line_buffering=True)
+ return [l.decode(coding) for l in lines] + f.readlines()
+ except (LookupError, SyntaxError, UnicodeError):
+ # Fall back if file encoding is improperly declared
+ with open(filename, encoding='latin-1') as f:
+ return f.readlines()
+ isidentifier = str.isidentifier
+
+ def stdin_get_value():
+ return TextIOWrapper(sys.stdin.buffer, errors='ignore').read()
+noqa = re.compile(r'# no(?:qa|pep8)\b', re.I).search
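+# For example, a physical line ending in "# noqa" (or "# nopep8") is skipped
+# by the checks that consult this flag, such as maximum_line_length above.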
+
+
+def expand_indent(line):
+ r"""Return the amount of indentation.
+
+ Tabs are expanded to the next multiple of 8.
+
+    >>> expand_indent('    ')
+    4
+    >>> expand_indent('\t')
+    8
+    >>> expand_indent('       \t')
+    8
+    >>> expand_indent('        \t')
+    16
+ """
+ if '\t' not in line:
+ return len(line) - len(line.lstrip())
+ result = 0
+ for char in line:
+ if char == '\t':
+ result = result // 8 * 8 + 8
+ elif char == ' ':
+ result += 1
+ else:
+ break
+ return result
+
+
+def mute_string(text):
+ """Replace contents with 'xxx' to prevent syntax matching.
+
+ >>> mute_string('"abc"')
+ '"xxx"'
+ >>> mute_string("'''abc'''")
+ "'''xxx'''"
+ >>> mute_string("r'abc'")
+ "r'xxx'"
+ """
+ # String modifiers (e.g. u or r)
+ start = text.index(text[-1]) + 1
+ end = len(text) - 1
+ # Triple quotes
+ if text[-3:] in ('"""', "'''"):
+ start += 2
+ end -= 2
+ return text[:start] + 'x' * (end - start) + text[end:]
+
+
+def parse_udiff(diff, patterns=None, parent='.'):
+ """Return a dictionary of matching lines."""
+ # For each file of the diff, the entry key is the filename,
+ # and the value is a set of row numbers to consider.
+ rv = {}
+ path = nrows = None
+ for line in diff.splitlines():
+ if nrows:
+ if line[:1] != '-':
+ nrows -= 1
+ continue
+ if line[:3] == '@@ ':
+ hunk_match = HUNK_REGEX.match(line)
+ (row, nrows) = [int(g or '1') for g in hunk_match.groups()]
+ rv[path].update(range(row, row + nrows))
+ elif line[:3] == '+++':
+ path = line[4:].split('\t', 1)[0]
+ if path[:2] == 'b/':
+ path = path[2:]
+ rv[path] = set()
+ return dict([(os.path.join(parent, path), rows)
+ for (path, rows) in rv.items()
+ if rows and filename_match(path, patterns)])
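+
+# Illustrative sketch: for a diff containing "+++ b/foo.py" followed by the
+# hunk header "@@ -10,3 +12,4 @@", parse_udiff returns
+# {'./foo.py': set([12, 13, 14, 15])} (the rows touched on the new side).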
+
+
+def normalize_paths(value, parent=os.curdir):
+ """Parse a comma-separated list of paths.
+
+ Return a list of absolute paths.
+ """
+ if not value or isinstance(value, list):
+ return value
+ paths = []
+ for path in value.split(','):
+ if '/' in path:
+ path = os.path.abspath(os.path.join(parent, path))
+ paths.append(path.rstrip('/'))
+ return paths
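+
+# Illustrative sketch (the result is cwd-dependent): with the default parent
+# of '.', normalize_paths('src/foo,bar') returns
+# [os.path.abspath('src/foo'), 'bar'], since only entries containing '/' are
+# made absolute.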
+
+
+def filename_match(filename, patterns, default=True):
+ """Check if patterns contains a pattern that matches filename.
+
+ If patterns is unspecified, this always returns True.
+ """
+ if not patterns:
+ return default
+ return any(fnmatch(filename, pattern) for pattern in patterns)
+
+
+if COMMENT_WITH_NL:
+ def _is_eol_token(token):
+ return (token[0] in NEWLINE or
+ (token[0] == tokenize.COMMENT and token[1] == token[4]))
+else:
+ def _is_eol_token(token):
+ return token[0] in NEWLINE
+
+
+##############################################################################
+# Framework to run all checks
+##############################################################################
+
+
+_checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}}
+
+
+def register_check(check, codes=None):
+ """Register a new check object."""
+ def _add_check(check, kind, codes, args):
+ if check in _checks[kind]:
+ _checks[kind][check][0].extend(codes or [])
+ else:
+ _checks[kind][check] = (codes or [''], args)
+ if inspect.isfunction(check):
+ args = inspect.getargspec(check)[0]
+ if args and args[0] in ('physical_line', 'logical_line'):
+ if codes is None:
+ codes = ERRORCODE_REGEX.findall(check.__doc__ or '')
+ _add_check(check, args[0], codes, args)
+ elif inspect.isclass(check):
+ if inspect.getargspec(check.__init__)[0][:2] == ['self', 'tree']:
+ _add_check(check, 'tree', codes, None)
+
+
+def init_checks_registry():
+ """Register all globally visible functions.
+
+ The first argument name is either 'physical_line' or 'logical_line'.
+ """
+ mod = inspect.getmodule(register_check)
+ for (name, function) in inspect.getmembers(mod, inspect.isfunction):
+ register_check(function)
+init_checks_registry()
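+
+# Illustrative sketch of the plugin contract the registry assumes: a
+# module-level function whose first parameter is named 'physical_line' or
+# 'logical_line' is registered automatically, with its codes scraped from the
+# docstring. A hypothetical physical check (not part of pep8) would look like:
+#
+#   def find_fixme(physical_line):
+#       r"""Flag leftover FIXME markers.
+#
+#       W999: x = 1  # FIXME
+#       """
+#       pos = physical_line.find('FIXME')
+#       if pos > -1:
+#           return pos, "W999 FIXME found"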
+
+
+class Checker(object):
+ """Load a Python source file, tokenize it, check coding style."""
+
+ def __init__(self, filename=None, lines=None,
+ options=None, report=None, **kwargs):
+ if options is None:
+ options = StyleGuide(kwargs).options
+ else:
+ assert not kwargs
+ self._io_error = None
+ self._physical_checks = options.physical_checks
+ self._logical_checks = options.logical_checks
+ self._ast_checks = options.ast_checks
+ self.max_line_length = options.max_line_length
+ self.multiline = False # in a multiline string?
+ self.hang_closing = options.hang_closing
+ self.verbose = options.verbose
+ self.filename = filename
+ if filename is None:
+ self.filename = 'stdin'
+ self.lines = lines or []
+ elif filename == '-':
+ self.filename = 'stdin'
+ self.lines = stdin_get_value().splitlines(True)
+ elif lines is None:
+ try:
+ self.lines = readlines(filename)
+ except IOError:
+ (exc_type, exc) = sys.exc_info()[:2]
+ self._io_error = '%s: %s' % (exc_type.__name__, exc)
+ self.lines = []
+ else:
+ self.lines = lines
+ if self.lines:
+ ord0 = ord(self.lines[0][0])
+ if ord0 in (0xef, 0xfeff): # Strip the UTF-8 BOM
+ if ord0 == 0xfeff:
+ self.lines[0] = self.lines[0][1:]
+ elif self.lines[0][:3] == '\xef\xbb\xbf':
+ self.lines[0] = self.lines[0][3:]
+ self.report = report or options.report
+ self.report_error = self.report.error
+
+ def report_invalid_syntax(self):
+ """Check if the syntax is valid."""
+ (exc_type, exc) = sys.exc_info()[:2]
+ if len(exc.args) > 1:
+ offset = exc.args[1]
+ if len(offset) > 2:
+ offset = offset[1:3]
+ else:
+ offset = (1, 0)
+ self.report_error(offset[0], offset[1] or 0,
+ 'E901 %s: %s' % (exc_type.__name__, exc.args[0]),
+ self.report_invalid_syntax)
+
+ def readline(self):
+ """Get the next line from the input buffer."""
+ if self.line_number >= self.total_lines:
+ return ''
+ line = self.lines[self.line_number]
+ self.line_number += 1
+ if self.indent_char is None and line[:1] in WHITESPACE:
+ self.indent_char = line[0]
+ return line
+
+ def run_check(self, check, argument_names):
+ """Run a check plugin."""
+ arguments = []
+ for name in argument_names:
+ arguments.append(getattr(self, name))
+ return check(*arguments)
+
+ def check_physical(self, line):
+ """Run all physical checks on a raw input line."""
+ self.physical_line = line
+ for name, check, argument_names in self._physical_checks:
+ result = self.run_check(check, argument_names)
+ if result is not None:
+ (offset, text) = result
+ self.report_error(self.line_number, offset, text, check)
+ if text[:4] == 'E101':
+ self.indent_char = line[0]
+
+ def build_tokens_line(self):
+ """Build a logical line from tokens."""
+ logical = []
+ comments = []
+ length = 0
+ prev_row = prev_col = mapping = None
+ for token_type, text, start, end, line in self.tokens:
+ if token_type in SKIP_TOKENS:
+ continue
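+            # 'mapping' pairs offsets into the logical line with (row, col)
+            # positions in the physical source; check_logical() walks it to
+            # translate a check's offset back into an error location.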
+ if not mapping:
+ mapping = [(0, start)]
+ if token_type == tokenize.COMMENT:
+ comments.append(text)
+ continue
+ if token_type == tokenize.STRING:
+ text = mute_string(text)
+ if prev_row:
+ (start_row, start_col) = start
+ if prev_row != start_row: # different row
+ prev_text = self.lines[prev_row - 1][prev_col - 1]
+ if prev_text == ',' or (prev_text not in '{[('
+ and text not in '}])'):
+ text = ' ' + text
+ elif prev_col != start_col: # different column
+ text = line[prev_col:start_col] + text
+ logical.append(text)
+ length += len(text)
+ mapping.append((length, end))
+ (prev_row, prev_col) = end
+ self.logical_line = ''.join(logical)
+ self.noqa = comments and noqa(''.join(comments))
+ return mapping
+
+ def check_logical(self):
+ """Build a line from tokens and run all logical checks on it."""
+ self.report.increment_logical_line()
+ mapping = self.build_tokens_line()
+ (start_row, start_col) = mapping[0][1]
+ start_line = self.lines[start_row - 1]
+ self.indent_level = expand_indent(start_line[:start_col])
+ if self.blank_before < self.blank_lines:
+ self.blank_before = self.blank_lines
+ if self.verbose >= 2:
+ print(self.logical_line[:80].rstrip())
+ for name, check, argument_names in self._logical_checks:
+ if self.verbose >= 4:
+ print(' ' + name)
+ for offset, text in self.run_check(check, argument_names) or ():
+ if not isinstance(offset, tuple):
+ for token_offset, pos in mapping:
+ if offset <= token_offset:
+ break
+ offset = (pos[0], pos[1] + offset - token_offset)
+ self.report_error(offset[0], offset[1], text, check)
+ if self.logical_line:
+ self.previous_indent_level = self.indent_level
+ self.previous_logical = self.logical_line
+ self.blank_lines = 0
+ self.tokens = []
+
+ def check_ast(self):
+ """Build the file's AST and run all AST checks."""
+ try:
+ tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
+ except (SyntaxError, TypeError):
+ return self.report_invalid_syntax()
+ for name, cls, __ in self._ast_checks:
+ checker = cls(tree, self.filename)
+ for lineno, offset, text, check in checker.run():
+ if not self.lines or not noqa(self.lines[lineno - 1]):
+ self.report_error(lineno, offset, text, check)
+
+ def generate_tokens(self):
+ """Tokenize the file, run physical line checks and yield tokens."""
+ if self._io_error:
+ self.report_error(1, 0, 'E902 %s' % self._io_error, readlines)
+ tokengen = tokenize.generate_tokens(self.readline)
+ try:
+ for token in tokengen:
+ if token[2][0] > self.total_lines:
+ return
+ self.maybe_check_physical(token)
+ yield token
+ except (SyntaxError, tokenize.TokenError):
+ self.report_invalid_syntax()
+
+ def maybe_check_physical(self, token):
+ """If appropriate (based on token), check current physical line(s)."""
+ # Called after every token, but act only on end of line.
+ if _is_eol_token(token):
+ # Obviously, a newline token ends a single physical line.
+ self.check_physical(token[4])
+ elif token[0] == tokenize.STRING and '\n' in token[1]:
+ # Less obviously, a string that contains newlines is a
+ # multiline string, either triple-quoted or with internal
+ # newlines backslash-escaped. Check every physical line in the
+ # string *except* for the last one: its newline is outside of
+ # the multiline string, so we consider it a regular physical
+ # line, and will check it like any other physical line.
+ #
+ # Subtleties:
+ # - we don't *completely* ignore the last line; if it contains
+ # the magical "# noqa" comment, we disable all physical
+ # checks for the entire multiline string
+ # - have to wind self.line_number back because initially it
+ # points to the last line of the string, and we want
+ # check_physical() to give accurate feedback
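+            #
+            # Worked example (illustrative): for
+            #     s = '''one
+            #     two'''
+            # token[2][0] is the row of the opening quotes; line_number is
+            # rewound to it, the first line of the string is re-checked,
+            # and the closing line ("two'''") is left to the normal
+            # per-line pass.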
+ if noqa(token[4]):
+ return
+ self.multiline = True
+ self.line_number = token[2][0]
+ for line in token[1].split('\n')[:-1]:
+ self.check_physical(line + '\n')
+ self.line_number += 1
+ self.multiline = False
+
+ def check_all(self, expected=None, line_offset=0):
+ """Run all checks on the input file."""
+ self.report.init_file(self.filename, self.lines, expected, line_offset)
+ self.total_lines = len(self.lines)
+ if self._ast_checks:
+ self.check_ast()
+ self.line_number = 0
+ self.indent_char = None
+ self.indent_level = self.previous_indent_level = 0
+ self.previous_logical = ''
+ self.tokens = []
+ self.blank_lines = self.blank_before = 0
+ parens = 0
+ for token in self.generate_tokens():
+ self.tokens.append(token)
+ token_type, text = token[0:2]
+ if self.verbose >= 3:
+ if token[2][0] == token[3][0]:
+ pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
+ else:
+ pos = 'l.%s' % token[3][0]
+ print('l.%s\t%s\t%s\t%r' %
+ (token[2][0], pos, tokenize.tok_name[token[0]], text))
+ if token_type == tokenize.OP:
+ if text in '([{':
+ parens += 1
+ elif text in '}])':
+ parens -= 1
+ elif not parens:
+ if token_type in NEWLINE:
+ if token_type == tokenize.NEWLINE:
+ self.check_logical()
+ self.blank_before = 0
+ elif len(self.tokens) == 1:
+ # The physical line contains only this token.
+ self.blank_lines += 1
+ del self.tokens[0]
+ else:
+ self.check_logical()
+ elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
+ if len(self.tokens) == 1:
+ # The comment also ends a physical line
+ token = list(token)
+ token[1] = text.rstrip('\r\n')
+ token[3] = (token[2][0], token[2][1] + len(token[1]))
+ self.tokens = [tuple(token)]
+ self.check_logical()
+ if self.tokens:
+ self.check_physical(self.lines[-1])
+ self.check_logical()
+ return self.report.get_file_results()
+
+
+class BaseReport(object):
+ """Collect the results of the checks."""
+
+ print_filename = False
+
+ def __init__(self, options):
+ self._benchmark_keys = options.benchmark_keys
+ self._ignore_code = options.ignore_code
+ # Results
+ self.elapsed = 0
+ self.total_errors = 0
+ self.counters = dict.fromkeys(self._benchmark_keys, 0)
+ self.messages = {}
+
+ def start(self):
+ """Start the timer."""
+ self._start_time = time.time()
+
+ def stop(self):
+ """Stop the timer."""
+ self.elapsed = time.time() - self._start_time
+
+ def init_file(self, filename, lines, expected, line_offset):
+ """Signal a new file."""
+ self.filename = filename
+ self.lines = lines
+ self.expected = expected or ()
+ self.line_offset = line_offset
+ self.file_errors = 0
+ self.counters['files'] += 1
+ self.counters['physical lines'] += len(lines)
+
+ def increment_logical_line(self):
+ """Signal a new logical line."""
+ self.counters['logical lines'] += 1
+
+ def error(self, line_number, offset, text, check):
+ """Report an error, according to options."""
+ code = text[:4]
+ if self._ignore_code(code):
+ return
+ if code in self.counters:
+ self.counters[code] += 1
+ else:
+ self.counters[code] = 1
+ self.messages[code] = text[5:]
+ # Don't care about expected errors or warnings
+ if code in self.expected:
+ return
+ if self.print_filename and not self.file_errors:
+ print(self.filename)
+ self.file_errors += 1
+ self.total_errors += 1
+ return code
+
+ def get_file_results(self):
+ """Return the count of errors and warnings for this file."""
+ return self.file_errors
+
+ def get_count(self, prefix=''):
+ """Return the total count of errors and warnings."""
+ return sum([self.counters[key]
+ for key in self.messages if key.startswith(prefix)])
+
+ def get_statistics(self, prefix=''):
+ """Get statistics for message codes that start with the prefix.
+
+ prefix='' matches all errors and warnings
+ prefix='E' matches all errors
+ prefix='W' matches all warnings
+ prefix='E4' matches all errors that have to do with imports
+ """
+ return ['%-7s %s %s' % (self.counters[key], key, self.messages[key])
+ for key in sorted(self.messages) if key.startswith(prefix)]
+
+ def print_statistics(self, prefix=''):
+ """Print overall statistics (number of errors and warnings)."""
+ for line in self.get_statistics(prefix):
+ print(line)
+
+ def print_benchmark(self):
+ """Print benchmark numbers."""
+ print('%-7.2f %s' % (self.elapsed, 'seconds elapsed'))
+ if self.elapsed:
+ for key in self._benchmark_keys:
+ print('%-7d %s per second (%d total)' %
+ (self.counters[key] / self.elapsed, key,
+ self.counters[key]))
+
+
+class FileReport(BaseReport):
+ """Collect the results of the checks and print only the filenames."""
+ print_filename = True
+
+
+class StandardReport(BaseReport):
+ """Collect and print the results of the checks."""
+
+ def __init__(self, options):
+ super(StandardReport, self).__init__(options)
+ self._fmt = REPORT_FORMAT.get(options.format.lower(),
+ options.format)
+ self._repeat = options.repeat
+ self._show_source = options.show_source
+ self._show_pep8 = options.show_pep8
+
+ def init_file(self, filename, lines, expected, line_offset):
+ """Signal a new file."""
+ self._deferred_print = []
+ return super(StandardReport, self).init_file(
+ filename, lines, expected, line_offset)
+
+ def error(self, line_number, offset, text, check):
+ """Report an error, according to options."""
+ code = super(StandardReport, self).error(line_number, offset,
+ text, check)
+ if code and (self.counters[code] == 1 or self._repeat):
+ self._deferred_print.append(
+ (line_number, offset, code, text[5:], check.__doc__))
+ return code
+
+ def get_file_results(self):
+ """Print the result and return the overall count for this file."""
+ self._deferred_print.sort()
+ for line_number, offset, code, text, doc in self._deferred_print:
+ print(self._fmt % {
+ 'path': self.filename,
+ 'row': self.line_offset + line_number, 'col': offset + 1,
+ 'code': code, 'text': text,
+ })
+ if self._show_source:
+ if line_number > len(self.lines):
+ line = ''
+ else:
+ line = self.lines[line_number - 1]
+ print(line.rstrip())
+ print(re.sub(r'\S', ' ', line[:offset]) + '^')
+ if self._show_pep8 and doc:
+ print(' ' + doc.strip())
+ return self.file_errors
+
+
+class DiffReport(StandardReport):
+ """Collect and print the results for the changed lines only."""
+
+ def __init__(self, options):
+ super(DiffReport, self).__init__(options)
+ self._selected = options.selected_lines
+
+ def error(self, line_number, offset, text, check):
+ if line_number not in self._selected[self.filename]:
+ return
+ return super(DiffReport, self).error(line_number, offset, text, check)
+
+
+class StyleGuide(object):
+ """Initialize a PEP-8 instance with few options."""
+
+ def __init__(self, *args, **kwargs):
+ # build options from the command line
+ self.checker_class = kwargs.pop('checker_class', Checker)
+ parse_argv = kwargs.pop('parse_argv', False)
+ config_file = kwargs.pop('config_file', None)
+ parser = kwargs.pop('parser', None)
+ # build options from dict
+ options_dict = dict(*args, **kwargs)
+ arglist = None if parse_argv else options_dict.get('paths', None)
+ options, self.paths = process_options(
+ arglist, parse_argv, config_file, parser)
+ if options_dict:
+ options.__dict__.update(options_dict)
+ if 'paths' in options_dict:
+ self.paths = options_dict['paths']
+
+ self.runner = self.input_file
+ self.options = options
+
+ if not options.reporter:
+ options.reporter = BaseReport if options.quiet else StandardReport
+
+ options.select = tuple(options.select or ())
+ if not (options.select or options.ignore or
+ options.testsuite or options.doctest) and DEFAULT_IGNORE:
+ # The default choice: ignore controversial checks
+ options.ignore = tuple(DEFAULT_IGNORE.split(','))
+ else:
+ # Ignore all checks which are not explicitly selected
+ options.ignore = ('',) if options.select else tuple(options.ignore)
+ options.benchmark_keys = BENCHMARK_KEYS[:]
+ options.ignore_code = self.ignore_code
+ options.physical_checks = self.get_checks('physical_line')
+ options.logical_checks = self.get_checks('logical_line')
+ options.ast_checks = self.get_checks('tree')
+ self.init_report()
+
+ def init_report(self, reporter=None):
+ """Initialize the report instance."""
+ self.options.report = (reporter or self.options.reporter)(self.options)
+ return self.options.report
+
+ def check_files(self, paths=None):
+ """Run all checks on the paths."""
+ if paths is None:
+ paths = self.paths
+ report = self.options.report
+ runner = self.runner
+ report.start()
+ try:
+ for path in paths:
+ if os.path.isdir(path):
+ self.input_dir(path)
+ elif not self.excluded(path):
+ runner(path)
+ except KeyboardInterrupt:
+ print('... stopped')
+ report.stop()
+ return report
+
+ def input_file(self, filename, lines=None, expected=None, line_offset=0):
+ """Run all checks on a Python source file."""
+ if self.options.verbose:
+ print('checking %s' % filename)
+ fchecker = self.checker_class(
+ filename, lines=lines, options=self.options)
+ return fchecker.check_all(expected=expected, line_offset=line_offset)
+
+ def input_dir(self, dirname):
+ """Check all files in this directory and all subdirectories."""
+ dirname = dirname.rstrip('/')
+ if self.excluded(dirname):
+ return 0
+ counters = self.options.report.counters
+ verbose = self.options.verbose
+ filepatterns = self.options.filename
+ runner = self.runner
+ for root, dirs, files in os.walk(dirname):
+ if verbose:
+ print('directory ' + root)
+ counters['directories'] += 1
+ for subdir in sorted(dirs):
+ if self.excluded(subdir, root):
+ dirs.remove(subdir)
+ for filename in sorted(files):
+                # Does the filename match one of the patterns, and is it
+                # not excluded?
+ if ((filename_match(filename, filepatterns) and
+ not self.excluded(filename, root))):
+ runner(os.path.join(root, filename))
+
+ def excluded(self, filename, parent=None):
+ """Check if the file should be excluded.
+
+ Check if 'options.exclude' contains a pattern that matches filename.
+ """
+ if not self.options.exclude:
+ return False
+ basename = os.path.basename(filename)
+ if filename_match(basename, self.options.exclude):
+ return True
+ if parent:
+ filename = os.path.join(parent, filename)
+ filename = os.path.abspath(filename)
+ return filename_match(filename, self.options.exclude)
+
+ def ignore_code(self, code):
+ """Check if the error code should be ignored.
+
+ If 'options.select' contains a prefix of the error code,
+ return False. Else, if 'options.ignore' contains a prefix of
+ the error code, return True.
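+
+        Example (illustrative): with select=('E4',) and ignore=('E',),
+        'E401' is reported because the selected prefix wins, while
+        'E101' is ignored.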
+ """
+ if len(code) < 4 and any(s.startswith(code)
+ for s in self.options.select):
+ return False
+ return (code.startswith(self.options.ignore) and
+ not code.startswith(self.options.select))
+
+ def get_checks(self, argument_name):
+ """Get all the checks for this category.
+
+ Find all globally visible functions where the first argument name
+ starts with argument_name and which contain selected tests.
+ """
+ checks = []
+ for check, attrs in _checks[argument_name].items():
+ (codes, args) = attrs
+ if any(not (code and self.ignore_code(code)) for code in codes):
+ checks.append((check.__name__, check, args))
+ return sorted(checks)
+
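+# Library usage sketch (illustrative; the file name is hypothetical):
+#
+#     report = StyleGuide(max_line_length=100).check_files(['example.py'])
+#     print(report.total_errors)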
+
+def get_parser(prog='pep8', version=__version__):
+ parser = OptionParser(prog=prog, version=version,
+ usage="%prog [options] input ...")
+ parser.config_options = [
+ 'exclude', 'filename', 'select', 'ignore', 'max-line-length',
+ 'hang-closing', 'count', 'format', 'quiet', 'show-pep8',
+ 'show-source', 'statistics', 'verbose']
+ parser.add_option('-v', '--verbose', default=0, action='count',
+ help="print status messages, or debug with -vv")
+ parser.add_option('-q', '--quiet', default=0, action='count',
+ help="report only file names, or nothing with -qq")
+ parser.add_option('-r', '--repeat', default=True, action='store_true',
+ help="(obsolete) show all occurrences of the same error")
+ parser.add_option('--first', action='store_false', dest='repeat',
+ help="show first occurrence of each error")
+ parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE,
+ help="exclude files or directories which match these "
+ "comma separated patterns (default: %default)")
+ parser.add_option('--filename', metavar='patterns', default='*.py',
+ help="when parsing directories, only check filenames "
+ "matching these comma separated patterns "
+ "(default: %default)")
+ parser.add_option('--select', metavar='errors', default='',
+ help="select errors and warnings (e.g. E,W6)")
+ parser.add_option('--ignore', metavar='errors', default='',
+ help="skip errors and warnings (e.g. E4,W)")
+ parser.add_option('--show-source', action='store_true',
+ help="show source code for each error")
+ parser.add_option('--show-pep8', action='store_true',
+ help="show text of PEP 8 for each error "
+ "(implies --first)")
+ parser.add_option('--statistics', action='store_true',
+ help="count errors and warnings")
+ parser.add_option('--count', action='store_true',
+ help="print total number of errors and warnings "
+ "to standard error and set exit code to 1 if "
+ "total is not null")
+ parser.add_option('--max-line-length', type='int', metavar='n',
+ default=MAX_LINE_LENGTH,
+ help="set maximum allowed line length "
+ "(default: %default)")
+ parser.add_option('--hang-closing', action='store_true',
+ help="hang closing bracket instead of matching "
+ "indentation of opening bracket's line")
+ parser.add_option('--format', metavar='format', default='default',
+ help="set the error format [default|pylint|<custom>]")
+ parser.add_option('--diff', action='store_true',
+ help="report only lines changed according to the "
+ "unified diff received on STDIN")
+ group = parser.add_option_group("Testing Options")
+ if os.path.exists(TESTSUITE_PATH):
+ group.add_option('--testsuite', metavar='dir',
+ help="run regression tests from dir")
+ group.add_option('--doctest', action='store_true',
+ help="run doctest on myself")
+ group.add_option('--benchmark', action='store_true',
+ help="measure processing speed")
+ return parser
+
+
+def read_config(options, args, arglist, parser):
+ """Read both user configuration and local configuration."""
+ config = RawConfigParser()
+
+ user_conf = options.config
+ if user_conf and os.path.isfile(user_conf):
+ if options.verbose:
+ print('user configuration: %s' % user_conf)
+ config.read(user_conf)
+
+ local_dir = os.curdir
+ parent = tail = args and os.path.abspath(os.path.commonprefix(args))
+ while tail:
+ if config.read([os.path.join(parent, fn) for fn in PROJECT_CONFIG]):
+ local_dir = parent
+ if options.verbose:
+ print('local configuration: in %s' % parent)
+ break
+ (parent, tail) = os.path.split(parent)
+
+ pep8_section = parser.prog
+ if config.has_section(pep8_section):
+ option_list = dict([(o.dest, o.type or o.action)
+ for o in parser.option_list])
+
+ # First, read the default values
+ (new_options, __) = parser.parse_args([])
+
+ # Second, parse the configuration
+ for opt in config.options(pep8_section):
+ if opt.replace('_', '-') not in parser.config_options:
+ print(" unknown option '%s' ignored" % opt)
+ continue
+ if options.verbose > 1:
+ print(" %s = %s" % (opt, config.get(pep8_section, opt)))
+ normalized_opt = opt.replace('-', '_')
+ opt_type = option_list[normalized_opt]
+ if opt_type in ('int', 'count'):
+ value = config.getint(pep8_section, opt)
+ elif opt_type == 'string':
+ value = config.get(pep8_section, opt)
+ if normalized_opt == 'exclude':
+ value = normalize_paths(value, local_dir)
+ else:
+ assert opt_type in ('store_true', 'store_false')
+ value = config.getboolean(pep8_section, opt)
+ setattr(new_options, normalized_opt, value)
+
+ # Third, overwrite with the command-line options
+ (options, __) = parser.parse_args(arglist, values=new_options)
+ options.doctest = options.testsuite = False
+ return options
+
+
+def process_options(arglist=None, parse_argv=False, config_file=None,
+ parser=None):
+ """Process options passed either via arglist or via command line args."""
+ if not parser:
+ parser = get_parser()
+ if not parser.has_option('--config'):
+ if config_file is True:
+ config_file = DEFAULT_CONFIG
+ group = parser.add_option_group("Configuration", description=(
+ "The project options are read from the [%s] section of the "
+ "tox.ini file or the setup.cfg file located in any parent folder "
+ "of the path(s) being processed. Allowed options are: %s." %
+ (parser.prog, ', '.join(parser.config_options))))
+ group.add_option('--config', metavar='path', default=config_file,
+ help="user config file location (default: %default)")
+ # Don't read the command line if the module is used as a library.
+ if not arglist and not parse_argv:
+ arglist = []
+ # If parse_argv is True and arglist is None, arguments are
+ # parsed from the command line (sys.argv)
+ (options, args) = parser.parse_args(arglist)
+ options.reporter = None
+
+ if options.ensure_value('testsuite', False):
+ args.append(options.testsuite)
+ elif not options.ensure_value('doctest', False):
+ if parse_argv and not args:
+ if options.diff or any(os.path.exists(name)
+ for name in PROJECT_CONFIG):
+ args = ['.']
+ else:
+ parser.error('input not specified')
+ options = read_config(options, args, arglist, parser)
+ options.reporter = parse_argv and options.quiet == 1 and FileReport
+
+ options.filename = options.filename and options.filename.split(',')
+ options.exclude = normalize_paths(options.exclude)
+ options.select = options.select and options.select.split(',')
+ options.ignore = options.ignore and options.ignore.split(',')
+
+ if options.diff:
+ options.reporter = DiffReport
+ stdin = stdin_get_value()
+ options.selected_lines = parse_udiff(stdin, options.filename, args[0])
+ args = sorted(options.selected_lines)
+
+ return options, args
+
+
+def _main():
+ """Parse options and run checks on Python source."""
+ import signal
+
+ # Handle "Broken pipe" gracefully
+ try:
+ signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1))
+ except AttributeError:
+ pass # not supported on Windows
+
+ pep8style = StyleGuide(parse_argv=True, config_file=True)
+ options = pep8style.options
+ if options.doctest or options.testsuite:
+ from testsuite.support import run_tests
+ report = run_tests(pep8style)
+ else:
+ report = pep8style.check_files()
+ if options.statistics:
+ report.print_statistics()
+ if options.benchmark:
+ report.print_benchmark()
+ if options.testsuite and not options.quiet:
+ report.print_results()
+ if report.total_errors:
+ if options.count:
+ sys.stderr.write(str(report.total_errors) + '\n')
+ sys.exit(1)
+
+if __name__ == '__main__':
+ _main()
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/bot/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/bot/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/bot/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/bot/commitannouncer.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/bot/commitannouncer.py
new file mode 100644
index 0000000..3376429
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/bot/commitannouncer.py
@@ -0,0 +1,201 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import re
+import threading
+import time
+
+from webkitpy.common.checkout.scm.git import Git
+from webkitpy.common.config.irc import server, port, channel, nickname
+from webkitpy.common.config.irc import update_wait_seconds, retry_attempts
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.thirdparty.irc.ircbot import SingleServerIRCBot
+
+_log = logging.getLogger(__name__)
+
+
+class CommitAnnouncer(SingleServerIRCBot):
+    _commit_detail_format = "%H\n%cn\n%s\n%b" # commit hash, committer name, subject, body
+
+ def __init__(self, tool, irc_password):
+ SingleServerIRCBot.__init__(self, [(server, port, irc_password)], nickname, nickname)
+ self.git = Git(cwd=tool.scm().checkout_root, filesystem=tool.filesystem, executive=tool.executive)
+ self.commands = {
+ 'help': self.help,
+ 'quit': self.stop,
+ }
+
+ def start(self):
+ if not self._update():
+ return
+ self.last_commit = self.git.latest_git_commit()
+ SingleServerIRCBot.start(self)
+
+ def post_new_commits(self):
+ if not self.connection.is_connected():
+ return
+ if not self._update(force_clean=True):
+ self.stop("Failed to update repository!")
+ return
+ new_commits = self.git.git_commits_since(self.last_commit)
+ if new_commits:
+ self.last_commit = new_commits[-1]
+ for commit in new_commits:
+ commit_detail = self._commit_detail(commit)
+ if commit_detail:
+ _log.info('%s Posting commit %s' % (self._time(), commit))
+ _log.info('%s Posted message: %s' % (self._time(), repr(commit_detail)))
+ self._post(commit_detail)
+ else:
+ _log.error('Malformed commit log for %s' % commit)
+
+ # Bot commands.
+
+ def help(self):
+ self._post('Commands available: %s' % ' '.join(self.commands.keys()))
+
+ def stop(self, message=""):
+ self.connection.execute_delayed(0, lambda: self.die(message))
+
+ # IRC event handlers.
+
+ def on_nicknameinuse(self, connection, event):
+ connection.nick('%s_' % connection.get_nickname())
+
+ def on_welcome(self, connection, event):
+ connection.join(channel)
+
+ def on_pubmsg(self, connection, event):
+ message = event.arguments()[0]
+ command = self._message_command(message)
+ if command:
+ command()
+
+ def _update(self, force_clean=False):
+ if not self.git.is_cleanly_tracking_remote_master():
+ if not force_clean:
+ confirm = raw_input('This repository has local changes, continue? (uncommitted changes will be lost) y/n: ')
+ if not confirm.lower() == 'y':
+ return False
+ try:
+ self.git.ensure_cleanly_tracking_remote_master()
+ except ScriptError, e:
+ _log.error('Failed to clean repository: %s' % e)
+ return False
+
+ attempts = 1
+ while attempts <= retry_attempts:
+ if attempts > 1:
+ # User may have sent a keyboard interrupt during the wait.
+ if not self.connection.is_connected():
+ return False
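+            # Exponential backoff: the nth retry waits
+            # update_wait_seconds << n seconds, e.g. 120s then 240s for a
+            # (hypothetical) 60-second base.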
+ wait = int(update_wait_seconds) << (attempts - 1)
+ if wait < 120:
+ _log.info('Waiting %s seconds' % wait)
+ else:
+ _log.info('Waiting %s minutes' % (wait / 60))
+ time.sleep(wait)
+ _log.info('Pull attempt %s out of %s' % (attempts, retry_attempts))
+ try:
+ self.git.pull()
+ return True
+ except ScriptError, e:
+ _log.error('Error pulling from server: %s' % e)
+ _log.error('Output: %s' % e.output)
+ attempts += 1
+ _log.error('Exceeded pull attempts')
+ _log.error('Aborting at time: %s' % self._time())
+ return False
+
+ def _time(self):
+ return time.strftime('[%x %X %Z]', time.localtime())
+
+ def _message_command(self, message):
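+        # For example, with a (hypothetical) nickname of 'commit-bot',
+        # the channel message "commit-bot: quit" resolves to self.stop.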
+ prefix = '%s:' % self.connection.get_nickname()
+ if message.startswith(prefix):
+ command_name = message[len(prefix):].strip()
+ if command_name in self.commands:
+ return self.commands[command_name]
+ return None
+
+ def _commit_detail(self, commit):
+ return self._format_commit_detail(self.git.git_commit_detail(commit, self._commit_detail_format))
+
+ def _format_commit_detail(self, commit_detail):
+ if commit_detail.count('\n') < self._commit_detail_format.count('\n'):
+ return ''
+
+ commit, email, subject, body = commit_detail.split('\n', 3)
+ review_string = 'Review URL: '
+ svn_string = 'git-svn-id: svn://svn.chromium.org/blink/trunk@'
+ red_flag_strings = ['NOTRY=true', 'TBR=']
+ review_url = ''
+ svn_revision = ''
+ red_flags = []
+
+ for line in body.split('\n'):
+ if line.startswith(review_string):
+ review_url = line[len(review_string):]
+ if line.startswith(svn_string):
+ tokens = line[len(svn_string):].split()
+ if not tokens:
+ continue
+ revision = tokens[0]
+ if not revision.isdigit():
+ continue
+ svn_revision = 'r%s' % revision
+ for red_flag_string in red_flag_strings:
+ if line.lower().startswith(red_flag_string.lower()):
+ red_flags.append(line.strip())
+
+ if review_url:
+ match = re.search(r'(?P<review_id>\d+)', review_url)
+ if match:
+ review_url = 'http://crrev.com/%s' % match.group('review_id')
+ first_url = review_url if review_url else 'https://chromium.googlesource.com/chromium/blink/+/%s' % commit[:8]
+
+ red_flag_message = '\x037%s\x03' % (' '.join(red_flags)) if red_flags else ''
+
+ return ('%s %s %s committed "%s" %s' % (svn_revision, first_url, email, subject, red_flag_message)).strip()
+
+ def _post(self, message):
+ self.connection.execute_delayed(0, lambda: self.connection.privmsg(channel, self._sanitize_string(message)))
+
+ def _sanitize_string(self, message):
+ return message.encode('ascii', 'backslashreplace')
+
+
+class CommitAnnouncerThread(threading.Thread):
+ def __init__(self, tool, irc_password):
+ threading.Thread.__init__(self)
+ self.bot = CommitAnnouncer(tool, irc_password)
+
+ def run(self):
+ self.bot.start()
+
+ def stop(self):
+ self.bot.stop()
+ self.join()
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/bot/commitannouncer_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/bot/commitannouncer_unittest.py
new file mode 100644
index 0000000..c82f2e9
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/bot/commitannouncer_unittest.py
@@ -0,0 +1,204 @@
+# Copyright (C) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.tool.bot.commitannouncer import CommitAnnouncer
+from webkitpy.tool.mocktool import MockTool
+
+
+class CommitAnnouncerTest(unittest.TestCase):
+ def test_format_commit(self):
+ tool = MockTool()
+ bot = CommitAnnouncer(tool, "test_password")
+ self.assertEqual(
+ 'r456789 http://crrev.com/123456 authorABC@chromium.org committed "Commit test subject line"',
+ bot._format_commit_detail("""\
+1234commit1234
+authorABC@chromium.org
+Commit test subject line
+Multiple
+lines
+of
+description.
+
+BUG=654321
+
+Review URL: https://codereview.chromium.org/123456
+
+git-svn-id: svn://svn.chromium.org/blink/trunk@456789 bbb929c8-8fbe-4397-9dbb-9b2b20218538
+"""))
+
+ self.assertEqual(
+ 'r456789 https://chromium.googlesource.com/chromium/blink/+/1234comm '
+ 'authorABC@chromium.org committed "Commit test subject line"',
+ bot._format_commit_detail("""\
+1234commit1234
+authorABC@chromium.org
+Commit test subject line
+Multiple
+lines
+of
+description.
+
+BUG=654321
+
+git-svn-id: svn://svn.chromium.org/blink/trunk@456789 bbb929c8-8fbe-4397-9dbb-9b2b20218538
+"""))
+
+ self.assertEqual(
+ 'http://crrev.com/123456 authorABC@chromium.org committed "Commit test subject line"',
+ bot._format_commit_detail("""\
+1234commit1234
+authorABC@chromium.org
+Commit test subject line
+Multiple
+lines
+of
+description.
+
+BUG=654321
+
+Review URL: https://codereview.chromium.org/123456
+"""))
+
+ self.assertEqual(
+ 'https://chromium.googlesource.com/chromium/blink/+/1234comm authorABC@chromium.org committed "Commit test subject line"',
+ bot._format_commit_detail("""\
+1234commit1234
+authorABC@chromium.org
+Commit test subject line
+Multiple
+lines
+of
+description.
+"""))
+
+ self.assertEqual(
+ 'r456789 http://crrev.com/123456 authorABC@chromium.org committed "Commit test subject line"',
+ bot._format_commit_detail("""\
+1234commit1234
+authorABC@chromium.org
+Commit test subject line
+Multiple
+lines
+of
+description.
+Review URL: http://fake.review.url
+git-svn-id: svn://svn.chromium.org/blink/trunk@000000 Fake-SVN-number
+
+BUG=654321
+
+Review URL: https://codereview.chromium.org/123456
+
+git-svn-id: svn://svn.chromium.org/blink/trunk@456789 bbb929c8-8fbe-4397-9dbb-9b2b20218538
+"""))
+
+ self.assertEqual(
+ 'r456789 http://crrev.com/123456 authorABC@chromium.org committed "Commit test subject line" '
+ '\x037TBR=reviewerDEF@chromium.org\x03',
+ bot._format_commit_detail("""\
+1234commit1234
+authorABC@chromium.org
+Commit test subject line
+Multiple
+lines
+of
+description.
+
+BUG=654321
+TBR=reviewerDEF@chromium.org
+
+Review URL: https://codereview.chromium.org/123456
+
+git-svn-id: svn://svn.chromium.org/blink/trunk@456789 bbb929c8-8fbe-4397-9dbb-9b2b20218538
+"""))
+
+ self.assertEqual(
+ 'r456789 http://crrev.com/123456 authorABC@chromium.org committed "Commit test subject line" '
+ '\x037NOTRY=true\x03',
+ bot._format_commit_detail("""\
+1234commit1234
+authorABC@chromium.org
+Commit test subject line
+Multiple
+lines
+of
+description.
+
+BUG=654321
+NOTRY=true
+
+Review URL: https://codereview.chromium.org/123456
+
+git-svn-id: svn://svn.chromium.org/blink/trunk@456789 bbb929c8-8fbe-4397-9dbb-9b2b20218538
+"""))
+
+ self.assertEqual(
+ 'r456789 http://crrev.com/123456 authorABC@chromium.org committed "Commit test subject line" '
+ '\x037NOTRY=true TBR=reviewerDEF@chromium.org\x03',
+ bot._format_commit_detail("""\
+1234commit1234
+authorABC@chromium.org
+Commit test subject line
+Multiple
+lines
+of
+description.
+
+NOTRY=true
+BUG=654321
+TBR=reviewerDEF@chromium.org
+
+Review URL: https://codereview.chromium.org/123456
+
+git-svn-id: svn://svn.chromium.org/blink/trunk@456789 bbb929c8-8fbe-4397-9dbb-9b2b20218538
+"""))
+
+ self.assertEqual(
+ 'r456789 http://crrev.com/123456 authorABC@chromium.org committed "Commit test subject line" '
+ '\x037tbr=reviewerDEF@chromium.org, reviewerGHI@chromium.org, reviewerJKL@chromium.org notry=TRUE\x03',
+ bot._format_commit_detail("""\
+1234commit1234
+authorABC@chromium.org
+Commit test subject line
+Multiple
+lines
+of
+description.
+
+BUG=654321
+tbr=reviewerDEF@chromium.org, reviewerGHI@chromium.org, reviewerJKL@chromium.org
+notry=TRUE
+
+Review URL: https://codereview.chromium.org/123456
+
+git-svn-id: svn://svn.chromium.org/blink/trunk@456789 bbb929c8-8fbe-4397-9dbb-9b2b20218538
+"""))
+
+ def test_sanitize_string(self):
+ bot = CommitAnnouncer(MockTool(), "test_password")
+ self.assertEqual('normal ascii', bot._sanitize_string('normal ascii'))
+ self.assertEqual('uni\\u0441ode!', bot._sanitize_string(u'uni\u0441ode!'))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/__init__.py
new file mode 100644
index 0000000..627c609
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/__init__.py
@@ -0,0 +1,9 @@
+# Required for Python to search this directory for module files
+
+from webkitpy.tool.commands.commitannouncer import CommitAnnouncerCommand
+from webkitpy.tool.commands.flakytests import FlakyTests
+from webkitpy.tool.commands.prettydiff import PrettyDiff
+from webkitpy.tool.commands.queries import *
+from webkitpy.tool.commands.rebaseline import Rebaseline
+from webkitpy.tool.commands.rebaselineserver import RebaselineServer
+from webkitpy.tool.commands.layouttestsserver import LayoutTestsServer
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/abstractlocalservercommand.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/abstractlocalservercommand.py
new file mode 100644
index 0000000..0056684
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/abstractlocalservercommand.py
@@ -0,0 +1,57 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from optparse import make_option
+import threading
+
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+
+
+class AbstractLocalServerCommand(AbstractDeclarativeCommand):
+ server = None
+ launch_path = "/"
+
+ def __init__(self):
+ options = [
+ make_option("--httpd-port", action="store", type="int", default=8127, help="Port to use for the HTTP server"),
+ make_option("--no-show-results", action="store_false", default=True, dest="show_results", help="Don't launch a browser with the rebaseline server"),
+ ]
+ AbstractDeclarativeCommand.__init__(self, options=options)
+
+ def _prepare_config(self, options, args, tool):
+ return None
+
+ def execute(self, options, args, tool):
+ config = self._prepare_config(options, args, tool)
+
+ server_url = "http://localhost:%d%s" % (options.httpd_port, self.launch_path)
+ print "Starting server at %s" % server_url
+ print "Use the 'Exit' link in the UI, %squitquitquit or Ctrl-C to stop" % server_url
+
+ if options.show_results:
+ # FIXME: This seems racy.
+ threading.Timer(0.1, lambda: self._tool.user.open_url(server_url)).start()
+
+ httpd = self.server(httpd_port=options.httpd_port, config=config) # pylint: disable=E1102
+ httpd.serve_forever()
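+
+# Subclass sketch (illustrative; all names below are hypothetical):
+#
+#   class ExampleServerCommand(AbstractLocalServerCommand):
+#       name = "example-server"
+#       help_text = "Serve a local example UI."
+#       server = ExampleHTTPServer  # must accept httpd_port= and config=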
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py
new file mode 100644
index 0000000..a092079
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.tool.commands.stepsequence import StepSequence
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+
+_log = logging.getLogger(__name__)
+
+
+class AbstractSequencedCommand(AbstractDeclarativeCommand):
+    steps = None
+
+    def __init__(self):
+ self._sequence = StepSequence(self.steps)
+ AbstractDeclarativeCommand.__init__(self, self._sequence.options())
+
+ def _prepare_state(self, options, args, tool):
+ return None
+
+ def execute(self, options, args, tool):
+ try:
+ state = self._prepare_state(options, args, tool)
+ except ScriptError, e:
+ _log.error(e.message_with_output())
+ self._exit(e.exit_code or 2)
+
+ self._sequence.run_and_handle_errors(tool, options, state)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/commandtest.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/commandtest.py
new file mode 100644
index 0000000..243291c
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/commandtest.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.mocktool import MockOptions, MockTool
+
+
+class CommandsTest(unittest.TestCase):
+ def assert_execute_outputs(self, command, args=[], expected_stdout="", expected_stderr="", expected_exception=None, expected_logs=None, options=MockOptions(), tool=MockTool()):
+ options.blocks = None
+ options.cc = 'MOCK cc'
+ options.component = 'MOCK component'
+ options.confirm = True
+ options.email = 'MOCK email'
+ options.git_commit = 'MOCK git commit'
+ options.obsolete_patches = True
+ options.open_bug = True
+ options.port = 'MOCK port'
+ options.update_changelogs = False
+ options.quiet = True
+ options.reviewer = 'MOCK reviewer'
+ command.bind_to_tool(tool)
+ OutputCapture().assert_outputs(self, command.execute, [options, args, tool], expected_stdout=expected_stdout, expected_stderr=expected_stderr, expected_exception=expected_exception, expected_logs=expected_logs)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/commitannouncer.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/commitannouncer.py
new file mode 100644
index 0000000..3beb121
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/commitannouncer.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+from optparse import make_option
+import time
+import traceback
+
+from webkitpy.common.config.irc import update_wait_seconds
+from webkitpy.tool.bot.commitannouncer import CommitAnnouncer, CommitAnnouncerThread
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+
+_log = logging.getLogger(__name__)
+
+
+class CommitAnnouncerCommand(AbstractDeclarativeCommand):
+ name = "commit-announcer"
+ help_text = "Start an IRC bot for announcing new git commits."
+ show_in_main_help = True
+
+ def __init__(self):
+ options = [
+ make_option("--irc-password", default=None, help="Specify IRC password to use."),
+ ]
+ AbstractDeclarativeCommand.__init__(self, options)
+
+ def execute(self, options, args, tool):
+ bot_thread = CommitAnnouncerThread(tool, options.irc_password)
+ bot_thread.start()
+ _log.info("Bot started")
+ try:
+ while bot_thread.is_alive():
+ bot_thread.bot.post_new_commits()
+ time.sleep(update_wait_seconds)
+ except KeyboardInterrupt:
+ _log.error("Terminated by keyboard interrupt")
+ except Exception, e:
+ _log.error("Unexpected error:")
+ _log.error(traceback.format_exc())
+
+ if bot_thread.is_alive():
+ _log.info("Disconnecting bot")
+ bot_thread.stop()
+ else:
+ _log.info("Bot offline")
+ _log.info("Done")
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/data/summary.html b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/data/summary.html
new file mode 100644
index 0000000..abf80d8
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/data/summary.html
@@ -0,0 +1,455 @@
+<!DOCTYPE html>
+<html>
+<head>
+<title>ChangeLog Analysis</title>
+<style type="text/css">
+
+body {
+    font-family: 'Helvetica', 'Segoe UI Light', sans-serif;
+ font-weight: 200;
+ padding: 20px;
+ min-width: 1200px;
+}
+
+* {
+ padding: 0px;
+ margin: 0px;
+ border: 0px;
+}
+
+h1, h2, h3 {
+ font-weight: 200;
+}
+
+h1 {
+ margin: 0 0 1em 0;
+}
+
+h2 {
+ font-size: 1.2em;
+ text-align: center;
+ margin-bottom: 1em;
+}
+
+h3 {
+ font-size: 1em;
+}
+
+.view {
+ margin: 0px;
+ width: 600px;
+ float: left;
+}
+
+.graph-container p {
+ width: 200px;
+ text-align: right;
+ margin: 20px 0 20px 0;
+ padding: 5px;
+ border-right: solid 1px black;
+}
+
+.graph-container table {
+ width: 100%;
+}
+
+.graph-container table, .graph-container td {
+ border-collapse: collapse;
+ border: none;
+}
+
+.graph-container td {
+ padding: 5px;
+    vertical-align: middle;
+}
+
+.graph-container td:first-child {
+ width: 200px;
+ text-align: right;
+ border-right: solid 1px black;
+}
+
+.graph-container .selected {
+ background: #eee;
+}
+
+#reviewers .selected td:first-child {
+ border-radius: 10px 0px 0px 10px;
+}
+
+#areas .selected td:last-child {
+ border-radius: 0px 10px 10px 0px;
+}
+
+.graph-container .bar {
+ display: inline-block;
+ min-height: 1em;
+ background: #9f6;
+ margin-right: 0.4ex;
+}
+
+.graph-container .reviewed-patches {
+ background: #3cf;
+ margin-right: 1px;
+}
+
+.graph-container .unreviewed-patches {
+ background: #f99;
+}
+
+.constrained {
+ background: #eee;
+ border-radius: 10px;
+}
+
+.constrained .vertical-bar {
+ border-right: solid 1px #eee;
+}
+
+#header {
+ border-spacing: 5px;
+}
+
+#header section {
+ display: table-cell;
+ width: 200px;
+ vertical-align: top;
+ border: solid 2px #ccc;
+ border-collapse: collapse;
+ padding: 5px;
+ font-size: 0.8em;
+}
+
+#header dt {
+ float: left;
+}
+
+#header dt:after {
+ content: ': ';
+}
+
+#header .legend {
+ width: 600px;
+}
+
+.legend .bar {
+ width: 15ex;
+ padding: 2px;
+}
+
+.legend .reviews {
+ width: 25ex;
+}
+
+.legend td:first-child {
+ width: 18ex;
+}
+
+</style>
+</head>
+<body>
+<h1>ChangeLog Analysis</h1>
+
+<section id="header">
+<section id="summary">
+<h2>Summary</h2>
+</section>
+
+<section class="legend">
+<h2>Legend</h2>
+<div class="graph-container">
+<table>
+<tbody>
+<tr><td>Contributor's name</td>
+<td><span class="bar reviews">Reviews</span> <span class="value-container">(# of reviews)</span><br>
+<span class="bar reviewed-patches">Reviewed</span><span class="bar unreviewed-patches">Unreviewed</span>
+<span class="value-container">(# of reviewed):(# of unreviewed)</span></td></tr>
+</tbody>
+</table>
+</div>
+</section>
+</section>
+
+<section id="contributors" class="view">
+<h2 id="contributors-title">Contributors</h2>
+<div class="graph-container"></div>
+</section>
+
+<section id="areas" class="view">
+<h2 id="areas-title">Areas of contributions</h2>
+<div class="graph-container"></div>
+</section>
+
+<script>
+
+// Naive implementation of element extensions discussed on public-webapps
+
+if (!Element.prototype.append) {
+ Element.prototype.append = function () {
+ for (var i = 0; i < arguments.length; i++) {
+ // FIXME: Take care of other node types
+ if (arguments[i] instanceof Element || arguments[i] instanceof CharacterData)
+ this.appendChild(arguments[i]);
+ else
+ this.appendChild(document.createTextNode(arguments[i]));
+ }
+ return this;
+ }
+}
+
+if (!Node.prototype.remove) {
+ Node.prototype.remove = function () {
+ this.parentNode.removeChild(this);
+ return this;
+ }
+}
+
+if (!Element.create) {
+ Element.create = function () {
+ if (arguments.length < 1)
+ return null;
+ var element = document.createElement(arguments[0]);
+ if (arguments.length == 1)
+ return element;
+
+ // FIXME: the second argument can be content or IDL attributes
+ var attributes = arguments[1];
+        for (var attribute in attributes)
+ element.setAttribute(attribute, attributes[attribute]);
+
+ if (arguments.length >= 3)
+ element.append.apply(element, arguments[2]);
+
+ return element;
+ }
+}
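+// Example usage (illustrative):
+//   Element.create('td', {'class': 'value-container'}, ['42'])
+// builds <td class="value-container">42</td>.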
+
+if (!Node.prototype.removeAllChildren) {
+ Node.prototype.removeAllChildren = function () {
+ while (this.firstChild)
+ this.firstChild.remove();
+ return this;
+ }
+}
+
+Element.prototype.removeClassNameFromAllElements = function (className) {
+ var elements = this.getElementsByClassName(className);
+ for (var i = 0; i < elements.length; i++)
+ elements[i].classList.remove(className);
+}
+
+function getJSON(url, callback) {
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url, true);
+ xhr.onreadystatechange = function () {
+ if (this.readyState == 4)
+ callback(JSON.parse(xhr.responseText));
+ }
+ xhr.send();
+}
+
+function GraphView(container) {
+ this._container = container;
+ this._defaultData = null;
+}
+
+GraphView.prototype.setData = function(data, constrained) {
+ if (constrained)
+ this._container.classList.add('constrained');
+ else
+ this._container.classList.remove('constrained');
+ this._clearGraph();
+ this._constructGraph(data);
+}
+
+GraphView.prototype.setDefaultData = function(data) {
+ this._defaultData = data;
+ this.setData(data);
+}
+
+GraphView.prototype.reset = function () {
+ this.setMarginTop();
+ this.setData(this._defaultData);
+}
+
+GraphView.prototype.isConstrained = function () { return this._container.classList.contains('constrained'); }
+
+GraphView.prototype.targetRow = function (node) {
+ var target = null;
+
+ while (node && node != this._container) {
+ if (node.localName == 'tr')
+ target = node;
+ node = node.parentNode;
+ }
+
+ return node && target;
+}
+
+GraphView.prototype.selectRow = function (row) {
+ this._container.removeClassNameFromAllElements('selected');
+ row.classList.add('selected');
+}
+
+GraphView.prototype.setMarginTop = function (y) { this._container.style.marginTop = y ? y + 'px' : null; }
+GraphView.prototype._graphContainer = function () { return this._container.getElementsByClassName('graph-container')[0]; }
+GraphView.prototype._clearGraph = function () { return this._graphContainer().removeAllChildren(); }
+
+GraphView.prototype._numberOfPatches = function (dataItem) {
+ return dataItem.numberOfReviewedPatches + (dataItem.numberOfUnreviewedPatches !== undefined ? dataItem.numberOfUnreviewedPatches : 0);
+}
+
+GraphView.prototype._maximumValue = function (labels, data) {
+ var numberOfPatches = this._numberOfPatches;
+ return Math.max.apply(null, labels.map(function (label) {
+ return Math.max(numberOfPatches(data[label]), data[label].numberOfReviews !== undefined ? data[label].numberOfReviews : 0);
+ }));
+}
+
+GraphView.prototype._sortLabelsByNumberOfReviewsAndReviewedPatches = function(data) {
+ var labels = Object.keys(data);
+ if (!labels.length)
+ return null;
+ var numberOfPatches = this._numberOfPatches;
+ var computeValue = function (dataItem) {
+ return numberOfPatches(dataItem) + (dataItem.numberOfReviews !== undefined ? dataItem.numberOfReviews : 0);
+ }
+ labels.sort(function (a, b) { return computeValue(data[b]) - computeValue(data[a]); });
+ return labels;
+}
+
+GraphView.prototype._constructGraph = function (data) {
+ var element = this._graphContainer();
+    var labels = this._sortLabelsByNumberOfReviewsAndReviewedPatches(data);
+ if (!labels) {
+ element.append(Element.create('p', {}, ['None']));
+ return;
+ }
+
+ var maxValue = this._maximumValue(labels, data);
+ var computeStyleForBar = function (value) { return 'width:' + (value * 85.0 / maxValue) + '%' }
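+ // Bars scale to at most 85% of the cell width: for example, maxValue 200
+ // and value 85 yield 'width:36.125%' (85 * 85.0 / 200).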
+
+ var table = Element.create('table', {}, [Element.create('tbody')]);
+ for (var i = 0; i < labels.length; i++) {
+ var label = labels[i];
+ var item = data[label];
+ var row = Element.create('tr', {}, [Element.create('td', {}, [label]), Element.create('td', {})]);
+ var valueCell = row.lastChild;
+
+ if (item.numberOfReviews !== undefined) {
+ valueCell.append(
+ Element.create('span', {'class': 'bar reviews', 'style': computeStyleForBar(item.numberOfReviews) }),
+ Element.create('span', {'class': 'value-container'}, [item.numberOfReviews]),
+ Element.create('br')
+ );
+ }
+
+ valueCell.append(Element.create('span', {'class': 'bar reviewed-patches', 'style': computeStyleForBar(item.numberOfReviewedPatches) }));
+ if (item.numberOfUnreviewedPatches !== undefined)
+ valueCell.append(Element.create('span', {'class': 'bar unreviewed-patches', 'style': computeStyleForBar(item.numberOfUnreviewedPatches) }));
+
+ valueCell.append(Element.create('span', {'class': 'value-container'},
+ [item.numberOfReviewedPatches + (item.numberOfUnreviewedPatches !== undefined ? ':' + item.numberOfUnreviewedPatches : '')]));
+
+ table.firstChild.append(row);
+ row.label = label;
+ row.data = item;
+ }
+ element.append(table);
+}
+
+var contributorsView = new GraphView(document.querySelector('#contributors'));
+var areasView = new GraphView(document.querySelector('#areas'));
+
+getJSON('summary.json',
+ function (summary) {
+ var summaryContainer = document.querySelector('#summary');
+ summaryContainer.append(Element.create('dl', {}, [
+ Element.create('dt', {}, ['Total entries (reviewed)']),
+ Element.create('dd', {}, [(summary['reviewed'] + summary['unreviewed']) + ' (' + summary['reviewed'] + ')']),
+ Element.create('dt', {}, ['Total contributors']),
+ Element.create('dd', {}, [summary['contributors']]),
+ Element.create('dt', {}, ['Contributors who reviewed']),
+ Element.create('dd', {}, [summary['contributors_with_reviews']]),
+ ]));
+ });
+
+getJSON('contributors.json',
+ function (contributors) {
+ for (var contributor in contributors) {
+ contributor = contributors[contributor];
+ contributor.numberOfReviews = contributor.reviews ? contributor.reviews.total : 0;
+ contributor.numberOfReviewedPatches = contributor.patches ? contributor.patches.reviewed : 0;
+ contributor.numberOfUnreviewedPatches = contributor.patches ? contributor.patches.unreviewed : 0;
+ }
+ contributorsView.setDefaultData(contributors);
+ });
+
+getJSON('areas.json',
+ function (areas) {
+ for (var area in areas) {
+ areas[area].numberOfReviewedPatches = areas[area].reviewed;
+ areas[area].numberOfUnreviewedPatches = areas[area].unreviewed;
+ }
+ areasView.setDefaultData(areas);
+ });
+
+function contributorAreas(contributorData) {
+ var areas = {};
+ for (var area in contributorData.reviews.areas) {
+ if (!areas[area])
+ areas[area] = {'numberOfReviewedPatches': 0};
+ areas[area].numberOfReviews = contributorData.reviews.areas[area];
+ }
+ for (var area in contributorData.patches.areas) {
+ if (!areas[area])
+ areas[area] = {'numberOfReviews': 0};
+ areas[area].numberOfReviewedPatches = contributorData.patches.areas[area];
+ }
+ return areas;
+}
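+
+// Shape sketch (field names come from the loops above; the values are made up):
+//   contributorAreas({reviews: {areas: {'DOM': 3}}, patches: {areas: {'DOM': 5}}})
+//     => {'DOM': {numberOfReviews: 3, numberOfReviewedPatches: 5}}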
+
+function areaContributors(areaData) {
+ var contributors = areaData['contributors'];
+ for (var contributor in contributors) {
+ contributor = contributors[contributor];
+ contributor.numberOfReviews = contributor.reviews;
+ contributor.numberOfReviewedPatches = contributor.reviewed;
+ contributor.numberOfUnreviewedPatches = contributor.unreviewed;
+ }
+ return contributors;
+}
+
+var mouseTimer = 0;
+window.onmouseover = function (event) {
+ clearTimeout(mouseTimer);
+
+ var row = contributorsView.targetRow(event.target);
+ if (row) {
+ if (!contributorsView.isConstrained()) {
+ contributorsView.selectRow(row);
+ areasView.setMarginTop(row.firstChild.offsetTop);
+ areasView.setData(contributorAreas(row.data), 'constrained');
+ }
+ return;
+ }
+
+ row = areasView.targetRow(event.target);
+ if (row) {
+ if (!areasView.isConstrained()) {
+ areasView.selectRow(row);
+ contributorsView.setMarginTop(row.firstChild.offsetTop);
+ contributorsView.setData(areaContributors(row.data), 'constrained');
+ }
+ return;
+ }
+
+ mouseTimer = setTimeout(function () {
+ contributorsView.reset();
+ areasView.reset();
+ }, 500);
+}
+
+</script>
+</body>
+</html>
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/flakytests.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/flakytests.py
new file mode 100644
index 0000000..0b3929a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/flakytests.py
@@ -0,0 +1,135 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import optparse
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.layout_tests.layout_package.bot_test_expectations import BotTestExpectationsFactory
+from webkitpy.layout_tests.models.test_expectations import TestExpectationParser, TestExpectationsModel, TestExpectations
+from webkitpy.layout_tests.port import builders
+from webkitpy.common.net import sheriff_calendar
+
+
+class FlakyTests(AbstractDeclarativeCommand):
+ name = "update-flaky-tests"
+ help_text = "Update FlakyTests file from the flakiness dashboard"
+ show_in_main_help = True
+
+ ALWAYS_CC = [
+ 'ojan@chromium.org',
+ 'dpranke@chromium.org',
+ 'eseidel@chromium.org',
+ ]
+
+ COMMIT_MESSAGE = (
+ 'Update FlakyTests to match current flakiness dashboard results\n\n'
+ 'Automatically generated using:\n'
+ 'webkit-patch update-flaky-tests\n\n'
+ 'R=%s\n')
+
+ FLAKY_TEST_CONTENTS = (
+ '# This file is generated by webkit-patch update-flaky-tests from the flakiness dashboard data.\n'
+ '# Manual changes will be overwritten.\n\n'
+ '%s\n')
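+ # Illustrative rendering (the expectation line is made up; execute() below
+ # fills %s with TestExpectations-style lines):
+ #   # This file is generated by webkit-patch update-flaky-tests ...
+ #   # Manual changes will be overwritten.
+ #
+ #   crbug.com/123 [ Win ] fast/dom/flaky.html [ Failure Pass ]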
+
+ def __init__(self):
+ options = [
+ optparse.make_option('--upload', action='store_true',
+ help='upload the changed FlakyTest file for review'),
+ optparse.make_option('--reviewers', action='store',
+ help='comma-separated list of reviewers, defaults to blink gardeners'),
+ ]
+ AbstractDeclarativeCommand.__init__(self, options=options)
+ # This indirection is a little silly, but it makes unit testing possible:
+ self.expectations_factory = BotTestExpectationsFactory
+
+ def _collect_expectation_lines(self, builder_names, factory):
+ all_lines = []
+ for builder_name in builder_names:
+ model = TestExpectationsModel()
+ expectations = factory.expectations_for_builder(builder_name)
+ for line in expectations.expectation_lines(only_ignore_very_flaky=True):
+ model.add_expectation_line(line)
+ # FIXME: We need an official API to get all the test names or all test lines.
+ all_lines.extend(model._test_to_expectation_line.values())
+ return all_lines
+
+ def _commit_and_upload(self, tool, options):
+ files = tool.scm().changed_files()
+ flaky_tests_path = 'LayoutTests/FlakyTests'
+ if flaky_tests_path not in files:
+ print "%s is not changed, not uploading." % flaky_tests_path
+ return 0
+
+ if options.reviewers:
+ # FIXME: Could validate these as emails. sheriff_calendar has some code for that.
+ reviewer_emails = options.reviewers.split(',')
+ else:
+ reviewer_emails = sheriff_calendar.current_gardener_emails()
+ if not reviewer_emails:
+ print "No gardener, and --reviewers not specified, not bothering."
+ return 1
+
+ commit_message = self.COMMIT_MESSAGE % ','.join(reviewer_emails)
+ git_cmd = ['git', 'commit', '-m', commit_message,
+ tool.filesystem.join(tool.scm().checkout_root, flaky_tests_path)]
+ tool.executive.run_command(git_cmd)
+
+ git_cmd = ['git', 'cl', 'upload', '--send-mail', '-f',
+ '--cc', ','.join(self.ALWAYS_CC)]
+ tool.executive.run_command(git_cmd)
+
+ def execute(self, options, args, tool):
+ factory = self.expectations_factory()
+
+ # FIXME: WebKit Linux 32 and WebKit Linux have the same specifiers;
+ # if we include both of them, we'll get duplicate lines. Ideally
+ # Linux 32 would have unique specifiers.
+ most_builders = builders.all_builder_names()
+ if 'WebKit Linux 32' in most_builders:
+ most_builders.remove('WebKit Linux 32')
+
+ lines = self._collect_expectation_lines(most_builders, factory)
+ lines.sort(key=lambda line: line.path)
+
+ port = tool.port_factory.get()
+ # Skip any tests which are mentioned in the dashboard but not in our checkout:
+ fs = tool.filesystem
+ lines = filter(lambda line: fs.exists(fs.join(port.layout_tests_dir(), line.path)), lines)
+
+ # Note: This includes all flaky tests from the dashboard, even ones mentioned
+ # in existing TestExpectations. We could certainly load existing TestExpectations
+ # and filter accordingly, or update existing TestExpectations instead of FlakyTests.
+ flaky_tests_path = fs.join(port.layout_tests_dir(), 'FlakyTests')
+ flaky_tests_contents = self.FLAKY_TEST_CONTENTS % TestExpectations.list_to_string(lines)
+ fs.write_text_file(flaky_tests_path, flaky_tests_contents)
+ print "Updated %s" % flaky_tests_path
+
+ if options.upload:
+ return self._commit_and_upload(tool, options)
+
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/flakytests_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/flakytests_unittest.py
new file mode 100644
index 0000000..fdb82d6
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/flakytests_unittest.py
@@ -0,0 +1,64 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import flakytests
+
+from webkitpy.common.checkout.scm.scm_mock import MockSCM
+from webkitpy.tool.commands.commandtest import CommandsTest
+from webkitpy.tool.mocktool import MockTool, MockOptions
+
+
+class FakeBotTestExpectations(object):
+ def expectation_lines(self, only_ignore_very_flaky=False):
+ return []
+
+
+class FakeBotTestExpectationsFactory(object):
+ def expectations_for_builder(self, builder):
+ return FakeBotTestExpectations()
+
+
+class ChangedExpectationsMockSCM(MockSCM):
+ def changed_files(self):
+ return ['LayoutTests/FlakyTests']
+
+
+class FlakyTestsTest(CommandsTest):
+ def test_simple(self):
+ command = flakytests.FlakyTests()
+ factory = FakeBotTestExpectationsFactory()
+ lines = command._collect_expectation_lines(['foo'], factory)
+ self.assertEqual(lines, [])
+
+ def test_integration(self):
+ command = flakytests.FlakyTests()
+ tool = MockTool()
+ command.expectations_factory = FakeBotTestExpectationsFactory
+ options = MockOptions(upload=True)
+ expected_stdout = """Updated /mock-checkout/third_party/WebKit/LayoutTests/FlakyTests
+LayoutTests/FlakyTests is not changed, not uploading.
+"""
+ self.assert_execute_outputs(command, options=options, tool=tool, expected_stdout=expected_stdout)
+
+ port = tool.port_factory.get()
+ self.assertEqual(tool.filesystem.read_text_file(tool.filesystem.join(port.layout_tests_dir(), 'FlakyTests')), command.FLAKY_TEST_CONTENTS % '')
+
+ def test_integration_uploads(self):
+ command = flakytests.FlakyTests()
+ tool = MockTool()
+ tool.scm = ChangedExpectationsMockSCM
+ command.expectations_factory = FakeBotTestExpectationsFactory
+ reviewer = 'foo@chromium.org'
+ options = MockOptions(upload=True, reviewers=reviewer)
+ expected_stdout = """Updated /mock-checkout/third_party/WebKit/LayoutTests/FlakyTests
+"""
+ self.assert_execute_outputs(command, options=options, tool=tool, expected_stdout=expected_stdout)
+ self.assertEqual(tool.executive.calls,
+ [
+ ['git', 'commit', '-m', command.COMMIT_MESSAGE % reviewer, '/mock-checkout/third_party/WebKit/LayoutTests/FlakyTests'],
+ ['git', 'cl', 'upload', '--send-mail', '-f', '--cc', 'ojan@chromium.org,dpranke@chromium.org,eseidel@chromium.org'],
+ ])
+
+ port = tool.port_factory.get()
+ self.assertEqual(tool.filesystem.read_text_file(tool.filesystem.join(port.layout_tests_dir(), 'FlakyTests')), command.FLAKY_TEST_CONTENTS % '')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/layouttestsserver.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/layouttestsserver.py
new file mode 100644
index 0000000..a6bb1ae
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/layouttestsserver.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Copyright (c) 2014 Samsung Electronics. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Starts a local HTTP server which can run layout tests (given a list of layout tests to be run)"""
+
+from webkitpy.tool.commands.abstractlocalservercommand import AbstractLocalServerCommand
+from webkitpy.tool.servers.layouttestsserver import LayoutTestsHTTPServer
+
+
+class LayoutTestsServer(AbstractLocalServerCommand):
+ name = 'layout-test-server'
+ help_text = __doc__
+ show_in_main_help = True
+ server = LayoutTestsHTTPServer
+
+ def _prepare_config(self, options, args, tool):
+ options.show_results = False
+ options.httpd_port = 9630
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/prettydiff.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/prettydiff.py
new file mode 100644
index 0000000..66a06a6
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/prettydiff.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
+from webkitpy.tool import steps
+
+
+class PrettyDiff(AbstractSequencedCommand):
+ name = "pretty-diff"
+ help_text = "Shows the pretty diff in the default browser"
+ show_in_main_help = True
+ steps = [
+ steps.ConfirmDiff,
+ ]
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/queries.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/queries.py
new file mode 100644
index 0000000..7d2f3f5
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/queries.py
@@ -0,0 +1,212 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+# Copyright (c) 2012 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import fnmatch
+import logging
+import re
+
+from optparse import make_option
+
+from webkitpy.common.system.crashlogs import CrashLogs
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.layout_tests.models.test_expectations import TestExpectations
+from webkitpy.layout_tests.port import platform_options
+
+_log = logging.getLogger(__name__)
+
+
+class CrashLog(AbstractDeclarativeCommand):
+ name = "crash-log"
+ help_text = "Print the newest crash log for the given process"
+ show_in_main_help = True
+ long_help = """Finds the newest crash log matching the given process name
+and PID and prints it to stdout."""
+ argument_names = "PROCESS_NAME [PID]"
+
+ def execute(self, options, args, tool):
+ crash_logs = CrashLogs(tool)
+ pid = None
+ if len(args) > 1:
+ pid = int(args[1])
+ print crash_logs.find_newest_log(args[0], pid)
+
+
+class PrintExpectations(AbstractDeclarativeCommand):
+ name = 'print-expectations'
+ help_text = 'Print the expected result for the given test(s) on the given port(s)'
+ show_in_main_help = True
+
+ def __init__(self):
+ options = [
+ make_option('--all', action='store_true', default=False,
+ help='display the expectations for *all* tests'),
+ make_option('-x', '--exclude-keyword', action='append', default=[],
+ help='limit to tests not matching the given keyword (for example, "skip", "slow", or "crash"); may be specified multiple times'),
+ make_option('-i', '--include-keyword', action='append', default=[],
+ help='limit to tests with the given keyword (for example, "skip", "slow", or "crash"); may be specified multiple times'),
+ make_option('--csv', action='store_true', default=False,
+ help='Print a CSV-style report that includes the port name, bugs, specifiers, tests, and expectations'),
+ make_option('-f', '--full', action='store_true', default=False,
+ help='Print a full TestExpectations-style line for every match'),
+ make_option('--paths', action='store_true', default=False,
+ help='display the paths for all applicable expectation files'),
+ ] + platform_options(use_globs=True)
+
+ AbstractDeclarativeCommand.__init__(self, options=options)
+ self._expectation_models = {}
+
+ def execute(self, options, args, tool):
+ if not options.paths and not args and not options.all:
+ print "You must either specify one or more test paths or --all."
+ return
+
+ if options.platform:
+ port_names = fnmatch.filter(tool.port_factory.all_port_names(), options.platform)
+ if not port_names:
+ default_port = tool.port_factory.get(options.platform)
+ if default_port:
+ port_names = [default_port.name()]
+ else:
+ print "No port names match '%s'" % options.platform
+ return
+ else:
+ default_port = tool.port_factory.get(port_names[0])
+ else:
+ default_port = tool.port_factory.get(options=options)
+ port_names = [default_port.name()]
+
+ if options.paths:
+ files = default_port.expectations_files()
+ layout_tests_dir = default_port.layout_tests_dir()
+ for file in files:
+ if file.startswith(layout_tests_dir):
+ file = file.replace(layout_tests_dir, 'LayoutTests')
+ print file
+ return
+
+ tests = set(default_port.tests(args))
+ for port_name in port_names:
+ model = self._model(options, port_name, tests)
+ tests_to_print = self._filter_tests(options, model, tests)
+ lines = [model.get_expectation_line(test) for test in sorted(tests_to_print)]
+ if port_name != port_names[0]:
+ print
+ print '\n'.join(self._format_lines(options, port_name, lines))
+
+ def _filter_tests(self, options, model, tests):
+ filtered_tests = set()
+ if options.include_keyword:
+ for keyword in options.include_keyword:
+ filtered_tests.update(model.get_test_set_for_keyword(keyword))
+ else:
+ filtered_tests = tests
+
+ for keyword in options.exclude_keyword:
+ filtered_tests.difference_update(model.get_test_set_for_keyword(keyword))
+ return filtered_tests
+
+ def _format_lines(self, options, port_name, lines):
+ output = []
+ if options.csv:
+ for line in lines:
+ output.append("%s,%s" % (port_name, line.to_csv()))
+ elif lines:
+ include_modifiers = options.full
+ include_expectations = options.full or len(options.include_keyword) != 1 or len(options.exclude_keyword)
+ output.append("// For %s" % port_name)
+ for line in lines:
+ output.append("%s" % line.to_string(None, include_modifiers, include_expectations, include_comment=False))
+ return output
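+
+ # Illustrative non-CSV output for one port (test name made up):
+ #   // For test-win-xp
+ #   failures/expected/text.html [ Failure ]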
+
+ def _model(self, options, port_name, tests):
+ port = self._tool.port_factory.get(port_name, options)
+ return TestExpectations(port, tests).model()
+
+
+class PrintBaselines(AbstractDeclarativeCommand):
+ name = 'print-baselines'
+ help_text = 'Prints the baseline locations for given test(s) on the given port(s)'
+ show_in_main_help = True
+
+ def __init__(self):
+ options = [
+ make_option('--all', action='store_true', default=False,
+ help='display the baselines for *all* tests'),
+ make_option('--csv', action='store_true', default=False,
+ help='Print a CSV-style report that includes the port name, test_name, test platform, baseline type, baseline location, and baseline platform'),
+ make_option('--include-virtual-tests', action='store_true',
+ help='Include virtual tests'),
+ ] + platform_options(use_globs=True)
+ AbstractDeclarativeCommand.__init__(self, options=options)
+ self._platform_regexp = re.compile(r'platform/([^/]+)/(.+)')
+
+ def execute(self, options, args, tool):
+ if not args and not options.all:
+ print "You must either specify one or more test paths or --all."
+ return
+
+ default_port = tool.port_factory.get()
+ if options.platform:
+ port_names = fnmatch.filter(tool.port_factory.all_port_names(), options.platform)
+ if not port_names:
+ print "No port names match '%s'" % options.platform
+ else:
+ port_names = [default_port.name()]
+
+ if options.include_virtual_tests:
+ tests = sorted(default_port.tests(args))
+ else:
+ # FIXME: make real_tests() a public method.
+ tests = sorted(default_port._real_tests(args))
+
+ for port_name in port_names:
+ if port_name != port_names[0]:
+ print
+ if not options.csv:
+ print "// For %s" % port_name
+ port = tool.port_factory.get(port_name)
+ for test_name in tests:
+ self._print_baselines(options, port_name, test_name, port.expected_baselines_by_extension(test_name))
+
+ def _print_baselines(self, options, port_name, test_name, baselines):
+ for extension in sorted(baselines.keys()):
+ baseline_location = baselines[extension]
+ if baseline_location:
+ if options.csv:
+ print "%s,%s,%s,%s,%s,%s" % (port_name, test_name, self._platform_for_path(test_name),
+ extension[1:], baseline_location, self._platform_for_path(baseline_location))
+ else:
+ print baseline_location
+
+ def _platform_for_path(self, relpath):
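+ # e.g. 'platform/mac/fast/text-expected.txt' -> 'mac'; paths outside
+ # platform/ yield None, which the CSV report prints literally.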
+ platform_matchobj = self._platform_regexp.match(relpath)
+ if platform_matchobj:
+ return platform_matchobj.group(1)
+ return None
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
new file mode 100644
index 0000000..6bce725
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
@@ -0,0 +1,159 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2012 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.port.test import TestPort
+from webkitpy.tool.commands.queries import *
+from webkitpy.tool.mocktool import MockTool, MockOptions
+
+
+class PrintExpectationsTest(unittest.TestCase):
+ def run_test(self, tests, expected_stdout, platform='test-win-xp', **args):
+ options = MockOptions(all=False, csv=False, full=False, platform=platform,
+ include_keyword=[], exclude_keyword=[], paths=False).update(**args)
+ tool = MockTool()
+ tool.port_factory.all_port_names = lambda: TestPort.ALL_BASELINE_VARIANTS
+ command = PrintExpectations()
+ command.bind_to_tool(tool)
+
+ oc = OutputCapture()
+ try:
+ oc.capture_output()
+ command.execute(options, tests, tool)
+ finally:
+ stdout, _, _ = oc.restore_output()
+ self.assertMultiLineEqual(stdout, expected_stdout)
+
+ def test_basic(self):
+ self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+ ('// For test-win-xp\n'
+ 'failures/expected/image.html [ ImageOnlyFailure ]\n'
+ 'failures/expected/text.html [ Failure ]\n'))
+
+ def test_multiple(self):
+ self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+ ('// For test-win-win7\n'
+ 'failures/expected/image.html [ ImageOnlyFailure ]\n'
+ 'failures/expected/text.html [ Failure ]\n'
+ '\n'
+ '// For test-win-xp\n'
+ 'failures/expected/image.html [ ImageOnlyFailure ]\n'
+ 'failures/expected/text.html [ Failure ]\n'),
+ platform='test-win-*')
+
+ def test_full(self):
+ self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+ ('// For test-win-xp\n'
+ 'Bug(test) failures/expected/image.html [ ImageOnlyFailure ]\n'
+ 'Bug(test) failures/expected/text.html [ Failure ]\n'),
+ full=True)
+
+ def test_exclude(self):
+ self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+ ('// For test-win-xp\n'
+ 'failures/expected/text.html [ Failure ]\n'),
+ exclude_keyword=['image'])
+
+ def test_include(self):
+ self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+ ('// For test-win-xp\n'
+ 'failures/expected/image.html\n'),
+ include_keyword=['image'])
+
+ def test_csv(self):
+ self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+ ('test-win-xp,failures/expected/image.html,Bug(test),,IMAGE\n'
+ 'test-win-xp,failures/expected/text.html,Bug(test),,FAIL\n'),
+ csv=True)
+
+ def test_paths(self):
+ self.run_test([],
+ ('/mock-checkout/LayoutTests/TestExpectations\n'
+ 'LayoutTests/platform/test/TestExpectations\n'
+ 'LayoutTests/platform/test-win-xp/TestExpectations\n'),
+ paths=True)
+
+class PrintBaselinesTest(unittest.TestCase):
+ def setUp(self):
+ self.oc = None
+ self.tool = MockTool()
+ self.test_port = self.tool.port_factory.get('test-win-xp')
+ self.tool.port_factory.get = lambda port_name=None: self.test_port
+ self.tool.port_factory.all_port_names = lambda: TestPort.ALL_BASELINE_VARIANTS
+
+ def tearDown(self):
+ if self.oc:
+ self.restore_output()
+
+ def capture_output(self):
+ self.oc = OutputCapture()
+ self.oc.capture_output()
+
+ def restore_output(self):
+ stdout, stderr, logs = self.oc.restore_output()
+ self.oc = None
+ return (stdout, stderr, logs)
+
+ def test_basic(self):
+ command = PrintBaselines()
+ command.bind_to_tool(self.tool)
+ self.capture_output()
+ command.execute(MockOptions(all=False, include_virtual_tests=False, csv=False, platform=None), ['passes/text.html'], self.tool)
+ stdout, _, _ = self.restore_output()
+ self.assertMultiLineEqual(stdout,
+ ('// For test-win-xp\n'
+ 'passes/text-expected.png\n'
+ 'passes/text-expected.txt\n'))
+
+ def test_multiple(self):
+ command = PrintBaselines()
+ command.bind_to_tool(self.tool)
+ self.capture_output()
+ command.execute(MockOptions(all=False, include_virtual_tests=False, csv=False, platform='test-win-*'), ['passes/text.html'], self.tool)
+ stdout, _, _ = self.restore_output()
+ self.assertMultiLineEqual(stdout,
+ ('// For test-win-win7\n'
+ 'passes/text-expected.png\n'
+ 'passes/text-expected.txt\n'
+ '\n'
+ '// For test-win-xp\n'
+ 'passes/text-expected.png\n'
+ 'passes/text-expected.txt\n'))
+
+ def test_csv(self):
+ command = PrintBaselines()
+ command.bind_to_tool(self.tool)
+ self.capture_output()
+ command.execute(MockOptions(all=False, platform='*xp', csv=True, include_virtual_tests=False), ['passes/text.html'], self.tool)
+ stdout, _, _ = self.restore_output()
+ self.assertMultiLineEqual(stdout,
+ ('test-win-xp,passes/text.html,None,png,passes/text-expected.png,None\n'
+ 'test-win-xp,passes/text.html,None,txt,passes/text-expected.txt,None\n'))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/rebaseline.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
new file mode 100644
index 0000000..9d238f4
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
@@ -0,0 +1,943 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import Queue
+import json
+import logging
+import optparse
+import re
+import sys
+import threading
+import time
+import traceback
+import urllib
+import urllib2
+
+from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
+from webkitpy.common.memoized import memoized
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models.test_expectations import TestExpectations, BASELINE_SUFFIX_LIST, SKIP
+from webkitpy.layout_tests.port import builders
+from webkitpy.layout_tests.port import factory
+from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+
+
+_log = logging.getLogger(__name__)
+
+
+# FIXME: Should TestResultWriter know how to compute this string?
+def _baseline_name(fs, test_name, suffix):
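+ # Assuming FILENAME_SUFFIX_EXPECTED is '-expected', this maps
+ # ('fast/dom/test.html', 'txt') to 'fast/dom/test-expected.txt'.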
+ return fs.splitext(test_name)[0] + TestResultWriter.FILENAME_SUFFIX_EXPECTED + "." + suffix
+
+
+class AbstractRebaseliningCommand(AbstractDeclarativeCommand):
+ # not overriding execute() - pylint: disable=W0223
+
+ no_optimize_option = optparse.make_option('--no-optimize', dest='optimize', action='store_false', default=True,
+ help=('Do not optimize/de-dup the expectations after rebaselining (default is to de-dup automatically). '
+ 'You can use "webkit-patch optimize-baselines" to optimize separately.'))
+
+ platform_options = factory.platform_options(use_globs=True)
+
+ results_directory_option = optparse.make_option("--results-directory", help="Local results directory to use")
+
+ suffixes_option = optparse.make_option("--suffixes", default=','.join(BASELINE_SUFFIX_LIST), action="store",
+ help="Comma-separated-list of file types to rebaseline")
+
+ def __init__(self, options=None):
+ super(AbstractRebaseliningCommand, self).__init__(options=options)
+ self._baseline_suffix_list = BASELINE_SUFFIX_LIST
+ self._scm_changes = {'add': [], 'delete': [], 'remove-lines': []}
+
+ def _add_to_scm_later(self, path):
+ self._scm_changes['add'].append(path)
+
+ def _delete_from_scm_later(self, path):
+ self._scm_changes['delete'].append(path)
+
+
+class BaseInternalRebaselineCommand(AbstractRebaseliningCommand):
+ def __init__(self):
+ super(BaseInternalRebaselineCommand, self).__init__(options=[
+ self.results_directory_option,
+ self.suffixes_option,
+ optparse.make_option("--builder", help="Builder to pull new baselines from"),
+ optparse.make_option("--test", help="Test to rebaseline"),
+ ])
+
+ def _baseline_directory(self, builder_name):
+ port = self._tool.port_factory.get_from_builder_name(builder_name)
+ override_dir = builders.rebaseline_override_dir(builder_name)
+ if override_dir:
+ return self._tool.filesystem.join(port.layout_tests_dir(), 'platform', override_dir)
+ return port.baseline_version_dir()
+
+ def _test_root(self, test_name):
+ return self._tool.filesystem.splitext(test_name)[0]
+
+ def _file_name_for_actual_result(self, test_name, suffix):
+ return "%s-actual.%s" % (self._test_root(test_name), suffix)
+
+ def _file_name_for_expected_result(self, test_name, suffix):
+ return "%s-expected.%s" % (self._test_root(test_name), suffix)
+
+
+class CopyExistingBaselinesInternal(BaseInternalRebaselineCommand):
+ name = "copy-existing-baselines-internal"
+ help_text = "Copy existing baselines down one level in the baseline order to ensure new baselines don't break existing passing platforms."
+
+ @memoized
+ def _immediate_predecessors_in_fallback(self, path_to_rebaseline):
+ port_names = self._tool.port_factory.all_port_names()
+ immediate_predecessors_in_fallback = []
+ for port_name in port_names:
+ port = self._tool.port_factory.get(port_name)
+ if not port.buildbot_archives_baselines():
+ continue
+ baseline_search_path = port.baseline_search_path()
+ try:
+ index = baseline_search_path.index(path_to_rebaseline)
+ if index:
+ immediate_predecessors_in_fallback.append(self._tool.filesystem.basename(baseline_search_path[index - 1]))
+ except ValueError:
+ # index() throws a ValueError if the item isn't in the list.
+ pass
+ return immediate_predecessors_in_fallback
+
+ def _port_for_primary_baseline(self, baseline):
+ for port in [self._tool.port_factory.get(port_name) for port_name in self._tool.port_factory.all_port_names()]:
+ if self._tool.filesystem.basename(port.baseline_version_dir()) == baseline:
+ return port
+ raise Exception("Failed to find port for primary baseline %s." % baseline)
+
+ def _copy_existing_baseline(self, builder_name, test_name, suffix):
+ baseline_directory = self._baseline_directory(builder_name)
+ ports = [self._port_for_primary_baseline(baseline) for baseline in self._immediate_predecessors_in_fallback(baseline_directory)]
+
+ old_baselines = []
+ new_baselines = []
+
+ # Need to gather all the baseline paths before modifying the filesystem since
+ # the modifications can affect the results of port.expected_filename.
+ for port in ports:
+ old_baseline = port.expected_filename(test_name, "." + suffix)
+ if not self._tool.filesystem.exists(old_baseline):
+ _log.debug("No existing baseline for %s." % test_name)
+ continue
+
+ new_baseline = self._tool.filesystem.join(port.baseline_path(), self._file_name_for_expected_result(test_name, suffix))
+ if self._tool.filesystem.exists(new_baseline):
+ _log.debug("Existing baseline at %s, not copying over it." % new_baseline)
+ continue
+
+ expectations = TestExpectations(port, [test_name])
+ if SKIP in expectations.get_expectations(test_name):
+ _log.debug("%s is skipped on %s." % (test_name, port.name()))
+ continue
+
+ old_baselines.append(old_baseline)
+ new_baselines.append(new_baseline)
+
+ for i in range(len(old_baselines)):
+ old_baseline = old_baselines[i]
+ new_baseline = new_baselines[i]
+
+ _log.debug("Copying baseline from %s to %s." % (old_baseline, new_baseline))
+ self._tool.filesystem.maybe_make_directory(self._tool.filesystem.dirname(new_baseline))
+ self._tool.filesystem.copyfile(old_baseline, new_baseline)
+ if not self._tool.scm().exists(new_baseline):
+ self._add_to_scm_later(new_baseline)
+
+ def execute(self, options, args, tool):
+ for suffix in options.suffixes.split(','):
+ self._copy_existing_baseline(options.builder, options.test, suffix)
+ print json.dumps(self._scm_changes)
+
+
+class RebaselineTest(BaseInternalRebaselineCommand):
+ name = "rebaseline-test-internal"
+ help_text = "Rebaseline a single test from a buildbot. Only intended for use by other webkit-patch commands."
+
+ def _results_url(self, builder_name):
+ return self._tool.buildbot.builder_with_name(builder_name).latest_layout_test_results_url()
+
+ def _save_baseline(self, data, target_baseline, baseline_directory, test_name, suffix):
+ if not data:
+ _log.debug("No baseline data to save.")
+ return
+
+ filesystem = self._tool.filesystem
+ filesystem.maybe_make_directory(filesystem.dirname(target_baseline))
+ filesystem.write_binary_file(target_baseline, data)
+ if not self._tool.scm().exists(target_baseline):
+ self._add_to_scm_later(target_baseline)
+
+ def _rebaseline_test(self, builder_name, test_name, suffix, results_url):
+ baseline_directory = self._baseline_directory(builder_name)
+
+ source_baseline = "%s/%s" % (results_url, self._file_name_for_actual_result(test_name, suffix))
+ target_baseline = self._tool.filesystem.join(baseline_directory, self._file_name_for_expected_result(test_name, suffix))
+
+ _log.debug("Retrieving %s." % source_baseline)
+ self._save_baseline(self._tool.web.get_binary(source_baseline, convert_404_to_None=True), target_baseline, baseline_directory, test_name, suffix)
+
+ def _rebaseline_test_and_update_expectations(self, options):
+ port = self._tool.port_factory.get_from_builder_name(options.builder)
+ if port.reference_files(options.test):
+ _log.warning("Cannot rebaseline reftest: %s", options.test)
+ return
+
+ if options.results_directory:
+ results_url = 'file://' + options.results_directory
+ else:
+ results_url = self._results_url(options.builder)
+ self._baseline_suffix_list = options.suffixes.split(',')
+
+ for suffix in self._baseline_suffix_list:
+ self._rebaseline_test(options.builder, options.test, suffix, results_url)
+ self._scm_changes['remove-lines'].append({'builder': options.builder, 'test': options.test})
+
+ def execute(self, options, args, tool):
+ self._rebaseline_test_and_update_expectations(options)
+ print json.dumps(self._scm_changes)
+
+
+class OptimizeBaselines(AbstractRebaseliningCommand):
+ name = "optimize-baselines"
+ help_text = "Reshuffles the baselines for the given tests to use as litte space on disk as possible."
+ show_in_main_help = True
+ argument_names = "TEST_NAMES"
+
+ def __init__(self):
+ super(OptimizeBaselines, self).__init__(options=[
+ self.suffixes_option,
+ optparse.make_option('--no-modify-scm', action='store_true', default=False, help='Dump SCM commands as JSON instead of modifying the SCM'),
+ ] + self.platform_options)
+
+ def _optimize_baseline(self, optimizer, test_name):
+ files_to_delete = []
+ files_to_add = []
+ for suffix in self._baseline_suffix_list:
+ baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
+ succeeded, more_files_to_delete, more_files_to_add = optimizer.optimize(baseline_name)
+ if not succeeded:
+ print "Heuristics failed to optimize %s" % baseline_name
+ files_to_delete.extend(more_files_to_delete)
+ files_to_add.extend(more_files_to_add)
+ return files_to_delete, files_to_add
+
+ def execute(self, options, args, tool):
+ self._baseline_suffix_list = options.suffixes.split(',')
+ port_names = tool.port_factory.all_port_names(options.platform)
+ if not port_names:
+ print "No port names match '%s'" % options.platform
+ return
+ port = tool.port_factory.get(port_names[0])
+ optimizer = BaselineOptimizer(tool, port, port_names, skip_scm_commands=options.no_modify_scm)
+ tests = port.tests(args)
+ for test_name in tests:
+ files_to_delete, files_to_add = self._optimize_baseline(optimizer, test_name)
+ for path in files_to_delete:
+ self._delete_from_scm_later(path)
+ for path in files_to_add:
+ self._add_to_scm_later(path)
+
+ print json.dumps(self._scm_changes)
+
+
+class AnalyzeBaselines(AbstractRebaseliningCommand):
+ name = "analyze-baselines"
+ help_text = "Analyzes the baselines for the given tests and prints results that are identical."
+ show_in_main_help = True
+ argument_names = "TEST_NAMES"
+
+ def __init__(self):
+ super(AnalyzeBaselines, self).__init__(options=[
+ self.suffixes_option,
+ optparse.make_option('--missing', action='store_true', default=False, help='show missing baselines as well'),
+ ] + self.platform_options)
+ self._optimizer_class = BaselineOptimizer # overridable for testing
+ self._baseline_optimizer = None
+ self._port = None
+
+ def _write(self, msg):
+ print msg
+
+ def _analyze_baseline(self, options, test_name):
+ for suffix in self._baseline_suffix_list:
+ baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
+ results_by_directory = self._baseline_optimizer.read_results_by_directory(baseline_name)
+ if results_by_directory:
+ self._write("%s:" % baseline_name)
+ self._baseline_optimizer.write_by_directory(results_by_directory, self._write, " ")
+ elif options.missing:
+ self._write("%s: (no baselines found)" % baseline_name)
+
+ def execute(self, options, args, tool):
+ self._baseline_suffix_list = options.suffixes.split(',')
+ port_names = tool.port_factory.all_port_names(options.platform)
+ if not port_names:
+ print "No port names match '%s'" % options.platform
+ return
+ self._port = tool.port_factory.get(port_names[0])
+ self._baseline_optimizer = self._optimizer_class(tool, self._port, port_names, skip_scm_commands=False)
+ for test_name in self._port.tests(args):
+ self._analyze_baseline(options, test_name)
+
+
+class AbstractParallelRebaselineCommand(AbstractRebaseliningCommand):
+ # not overriding execute() - pylint: disable=W0223
+
+ def __init__(self, options=None):
+ super(AbstractParallelRebaselineCommand, self).__init__(options=options)
+ self._builder_data = {}
+
+ def builder_data(self):
+ if not self._builder_data:
+ for builder_name in self._release_builders():
+ builder = self._tool.buildbot.builder_with_name(builder_name)
+ self._builder_data[builder_name] = builder.latest_layout_test_results()
+ return self._builder_data
+
+ # The release builders cycle much faster than the debug ones and cover all the platforms.
+ def _release_builders(self):
+ release_builders = []
+ for builder_name in builders.all_builder_names():
+ if 'ASAN' in builder_name:
+ continue
+ port = self._tool.port_factory.get_from_builder_name(builder_name)
+ if port.test_configuration().build_type == 'release':
+ release_builders.append(builder_name)
+ return release_builders
+
+ def _run_webkit_patch(self, args, verbose):
+ try:
+ verbose_args = ['--verbose'] if verbose else []
+ stderr = self._tool.executive.run_command([self._tool.path()] + verbose_args + args, cwd=self._tool.scm().checkout_root, return_stderr=True)
+ for line in stderr.splitlines():
+ _log.warning(line)
+ except ScriptError, e:
+ _log.error(e)
+
+ def _builders_to_fetch_from(self, builders_to_check):
+ # This routine returns the subset of builders that will cover all of the baseline search paths
+ # used in the input list. In particular, if the input list contains both Release and Debug
+ # versions of a configuration, we *only* return the Release version (since we don't save
+ # debug versions of baselines).
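+ # For example, hypothetical 'Mac10.9 Release' and 'Mac10.9 Debug' builders
+ # that share one fallback path collapse to just 'Mac10.9 Release' here.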
+ release_builders = set()
+ debug_builders = set()
+ builders_to_fallback_paths = {}
+ for builder in builders_to_check:
+ port = self._tool.port_factory.get_from_builder_name(builder)
+ if port.test_configuration().build_type == 'release':
+ release_builders.add(builder)
+ else:
+ debug_builders.add(builder)
+ for builder in list(release_builders) + list(debug_builders):
+ port = self._tool.port_factory.get_from_builder_name(builder)
+ fallback_path = port.baseline_search_path()
+ if fallback_path not in builders_to_fallback_paths.values():
+ builders_to_fallback_paths[builder] = fallback_path
+ return builders_to_fallback_paths.keys()
+
+ def _rebaseline_commands(self, test_prefix_list, options):
+ path_to_webkit_patch = self._tool.path()
+ cwd = self._tool.scm().checkout_root
+ copy_baseline_commands = []
+ rebaseline_commands = []
+ lines_to_remove = {}
+ port = self._tool.port_factory.get()
+
+ for test_prefix in test_prefix_list:
+ for test in port.tests([test_prefix]):
+ for builder in self._builders_to_fetch_from(test_prefix_list[test_prefix]):
+ actual_failures_suffixes = self._suffixes_for_actual_failures(test, builder, test_prefix_list[test_prefix][builder])
+ if not actual_failures_suffixes:
+ # If we're not going to rebaseline the test because it's passing on this
+ # builder, we still want to remove the line from TestExpectations.
+ if test not in lines_to_remove:
+ lines_to_remove[test] = []
+ lines_to_remove[test].append(builder)
+ continue
+
+ suffixes = ','.join(actual_failures_suffixes)
+ cmd_line = ['--suffixes', suffixes, '--builder', builder, '--test', test]
+ if options.results_directory:
+ cmd_line.extend(['--results-directory', options.results_directory])
+ if options.verbose:
+ cmd_line.append('--verbose')
+ copy_baseline_commands.append(tuple([[self._tool.executable, path_to_webkit_patch, 'copy-existing-baselines-internal'] + cmd_line, cwd]))
+ rebaseline_commands.append(tuple([[self._tool.executable, path_to_webkit_patch, 'rebaseline-test-internal'] + cmd_line, cwd]))
+ return copy_baseline_commands, rebaseline_commands, lines_to_remove
+
+ def _serial_commands(self, command_results):
+ files_to_add = set()
+ files_to_delete = set()
+ lines_to_remove = {}
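+ # Each child command prints one JSON object shaped like _scm_changes, e.g.
+ # (the builder name and paths here are made up):
+ #   {"add": ["platform/mac/fast/dom/test-expected.txt"], "delete": [],
+ #    "remove-lines": [{"builder": "MOCK Mac", "test": "fast/dom/test.html"}]}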
+ for output in [result[1].split('\n') for result in command_results]:
+ file_added = False
+ for line in output:
+ try:
+ if line:
+ parsed_line = json.loads(line)
+ if 'add' in parsed_line:
+ files_to_add.update(parsed_line['add'])
+ if 'delete' in parsed_line:
+ files_to_delete.update(parsed_line['delete'])
+ if 'remove-lines' in parsed_line:
+ for line_to_remove in parsed_line['remove-lines']:
+ test = line_to_remove['test']
+ builder = line_to_remove['builder']
+ if test not in lines_to_remove:
+ lines_to_remove[test] = []
+ lines_to_remove[test].append(builder)
+ file_added = True
+ except ValueError:
+ _log.debug('"%s" is not a JSON object, ignoring' % line)
+
+ if not file_added:
+ _log.debug('Could not add file based on output "%s"' % output)
+
+ return list(files_to_add), list(files_to_delete), lines_to_remove
+
+ def _optimize_baselines(self, test_prefix_list, verbose=False):
+ optimize_commands = []
+ for test in test_prefix_list:
+ all_suffixes = set()
+ for builder in self._builders_to_fetch_from(test_prefix_list[test]):
+ all_suffixes.update(self._suffixes_for_actual_failures(test, builder, test_prefix_list[test][builder]))
+
+ # FIXME: We should propagate the platform options as well.
+ cmd_line = ['--no-modify-scm', '--suffixes', ','.join(all_suffixes), test]
+ if verbose:
+ cmd_line.append('--verbose')
+
+ path_to_webkit_patch = self._tool.path()
+ cwd = self._tool.scm().checkout_root
+ optimize_commands.append(tuple([[self._tool.executable, path_to_webkit_patch, 'optimize-baselines'] + cmd_line, cwd]))
+ return optimize_commands
+
+ def _update_expectations_files(self, lines_to_remove):
+ # FIXME: This routine is way too expensive. We're creating O(n ports) TestExpectations objects.
+ # This is slow and uses a lot of memory.
+ tests = lines_to_remove.keys()
+ to_remove = []
+
+ # This is so we remove lines for builders that skip this test, e.g. Android skips most
+ # tests and we don't want to leave stray [ Android ] lines in TestExpectations.
+ # This is only necessary for "webkit-patch rebaseline" and for rebaselining expected
+ # failures from garden-o-matic. rebaseline-expectations and auto-rebaseline will always
+ # pass the exact set of ports to rebaseline.
+ for port_name in self._tool.port_factory.all_port_names():
+ port = self._tool.port_factory.get(port_name)
+ generic_expectations = TestExpectations(port, tests=tests, include_overrides=False)
+ full_expectations = TestExpectations(port, tests=tests, include_overrides=True)
+ for test in tests:
+ if self._port_skips_test(port, test, generic_expectations, full_expectations):
+ for test_configuration in port.all_test_configurations():
+ if test_configuration.version == port.test_configuration().version:
+ to_remove.append((test, test_configuration))
+
+ for test in lines_to_remove:
+ for builder in lines_to_remove[test]:
+ port = self._tool.port_factory.get_from_builder_name(builder)
+ for test_configuration in port.all_test_configurations():
+ if test_configuration.version == port.test_configuration().version:
+ to_remove.append((test, test_configuration))
+
+ port = self._tool.port_factory.get()
+ expectations = TestExpectations(port, include_overrides=False)
+ expectations_string = expectations.remove_configurations(to_remove)
+ path = port.path_to_generic_test_expectations_file()
+ self._tool.filesystem.write_text_file(path, expectations_string)
+
+ def _port_skips_test(self, port, test, generic_expectations, full_expectations):
+ fs = port.host.filesystem
+ if port.default_smoke_test_only():
+ smoke_test_filename = fs.join(port.layout_tests_dir(), 'SmokeTests')
+ if fs.exists(smoke_test_filename) and test not in fs.read_text_file(smoke_test_filename):
+ return True
+
+ return (SKIP in full_expectations.get_expectations(test) and
+ SKIP not in generic_expectations.get_expectations(test))
+
+ def _run_in_parallel_and_update_scm(self, commands):
+ command_results = self._tool.executive.run_in_parallel(commands)
+ log_output = '\n'.join(result[2] for result in command_results).replace('\n\n', '\n')
+ for line in log_output.split('\n'):
+ if line:
+ print >> sys.stderr, line # FIXME: Figure out how to log properly.
+
+ files_to_add, files_to_delete, lines_to_remove = self._serial_commands(command_results)
+ if files_to_delete:
+ self._tool.scm().delete_list(files_to_delete)
+ if files_to_add:
+ self._tool.scm().add_list(files_to_add)
+ return lines_to_remove
+
+ def _rebaseline(self, options, test_prefix_list):
+ for test, builders_to_check in sorted(test_prefix_list.items()):
+ _log.info("Rebaselining %s" % test)
+ for builder, suffixes in sorted(builders_to_check.items()):
+ _log.debug(" %s: %s" % (builder, ",".join(suffixes)))
+
+ copy_baseline_commands, rebaseline_commands, extra_lines_to_remove = self._rebaseline_commands(test_prefix_list, options)
+ lines_to_remove = {}
+
+ if copy_baseline_commands:
+ self._run_in_parallel_and_update_scm(copy_baseline_commands)
+ if rebaseline_commands:
+ lines_to_remove = self._run_in_parallel_and_update_scm(rebaseline_commands)
+
+ for test in extra_lines_to_remove:
+ if test in lines_to_remove:
+ lines_to_remove[test] = lines_to_remove[test] + extra_lines_to_remove[test]
+ else:
+ lines_to_remove[test] = extra_lines_to_remove[test]
+
+ if lines_to_remove:
+ self._update_expectations_files(lines_to_remove)
+
+ if options.optimize:
+ self._run_in_parallel_and_update_scm(self._optimize_baselines(test_prefix_list, options.verbose))
+
+ def _suffixes_for_actual_failures(self, test, builder_name, existing_suffixes):
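+ # Intersect the suffixes we were asked about with those the builder's
+ # latest results actually flag, e.g. {'txt', 'png'} & {'txt'} -> {'txt'}.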
+ actual_results = self.builder_data()[builder_name].actual_results(test)
+ if not actual_results:
+ return set()
+ return set(existing_suffixes) & TestExpectations.suffixes_for_actual_expectations_string(actual_results)
+
+
+class RebaselineJson(AbstractParallelRebaselineCommand):
+ name = "rebaseline-json"
+ help_text = "Rebaseline based off JSON passed to stdin. Intended to only be called from other scripts."
+
+ def __init__(self):
+ super(RebaselineJson, self).__init__(options=[
+ self.no_optimize_option,
+ self.results_directory_option,
+ ])
+
+ def execute(self, options, args, tool):
+ self._rebaseline(options, json.loads(sys.stdin.read()))
+
+
+class RebaselineExpectations(AbstractParallelRebaselineCommand):
+ name = "rebaseline-expectations"
+ help_text = "Rebaselines the tests indicated in TestExpectations."
+ show_in_main_help = True
+
+ def __init__(self):
+ super(RebaselineExpectations, self).__init__(options=[
+ self.no_optimize_option,
+ ] + self.platform_options)
+ self._test_prefix_list = None
+
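+ # Maps each test marked Rebaseline in this port's expectations files to
+ # the baseline suffixes implied by its expectation types, falling back to
+ # the full BASELINE_SUFFIX_LIST when none are implied.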
+ def _tests_to_rebaseline(self, port):
+ tests_to_rebaseline = {}
+ for path, value in port.expectations_dict().items():
+ expectations = TestExpectations(port, include_overrides=False, expectations_dict={path: value})
+ for test in expectations.get_rebaselining_failures():
+ suffixes = TestExpectations.suffixes_for_expectations(expectations.get_expectations(test))
+ tests_to_rebaseline[test] = suffixes or BASELINE_SUFFIX_LIST
+ return tests_to_rebaseline
+
+ def _add_tests_to_rebaseline_for_port(self, port_name):
+ builder_name = builders.builder_name_for_port_name(port_name)
+ if not builder_name:
+ return
+ tests = self._tests_to_rebaseline(self._tool.port_factory.get(port_name)).items()
+
+ if tests:
+ _log.info("Retrieving results for %s from %s." % (port_name, builder_name))
+
+ for test_name, suffixes in tests:
+ _log.info(" %s (%s)" % (test_name, ','.join(suffixes)))
+ if test_name not in self._test_prefix_list:
+ self._test_prefix_list[test_name] = {}
+ self._test_prefix_list[test_name][builder_name] = suffixes
+
+ def execute(self, options, args, tool):
+ options.results_directory = None
+ self._test_prefix_list = {}
+ port_names = tool.port_factory.all_port_names(options.platform)
+ for port_name in port_names:
+ self._add_tests_to_rebaseline_for_port(port_name)
+ if not self._test_prefix_list:
+ _log.warning("Did not find any tests marked Rebaseline.")
+ return
+
+ self._rebaseline(options, self._test_prefix_list)
+
+
+class Rebaseline(AbstractParallelRebaselineCommand):
+ name = "rebaseline"
+ help_text = "Rebaseline tests with results from the build bots. Shows the list of failing tests on the builders if no test names are provided."
+ show_in_main_help = True
+ argument_names = "[TEST_NAMES]"
+
+ def __init__(self):
+ super(Rebaseline, self).__init__(options=[
+ self.no_optimize_option,
+ # FIXME: should we support the platform options in addition to (or instead of) --builders?
+ self.suffixes_option,
+ self.results_directory_option,
+ optparse.make_option("--builders", default=None, action="append", help="Comma-separated-list of builders to pull new baselines from (can also be provided multiple times)"),
+ ])
+
+ def _builders_to_pull_from(self):
+ chosen_names = self._tool.user.prompt_with_list("Which builder(s) to pull results from:", self._release_builders(), can_choose_multiple=True)
+ return [self._builder_with_name(name) for name in chosen_names]
+
+ def _builder_with_name(self, name):
+ return self._tool.buildbot.builder_with_name(name)
+
+ def execute(self, options, args, tool):
+ if not args:
+ _log.error("Must list tests to rebaseline.")
+ return
+
+ if options.builders:
+ builders_to_check = []
+ for builder_names in options.builders:
+ builders_to_check += [self._builder_with_name(name) for name in builder_names.split(",")]
+ else:
+ builders_to_check = self._builders_to_pull_from()
+
+ test_prefix_list = {}
+ suffixes_to_update = options.suffixes.split(",")
+
+ for builder in builders_to_check:
+ for test in args:
+ if test not in test_prefix_list:
+ test_prefix_list[test] = {}
+ test_prefix_list[test][builder.name()] = suffixes_to_update
+
+ if options.verbose:
+ _log.debug("rebaseline-json: " + str(test_prefix_list))
+
+ self._rebaseline(options, test_prefix_list)
+
+
+class AutoRebaseline(AbstractParallelRebaselineCommand):
+ name = "auto-rebaseline"
+ help_text = "Rebaselines any NeedsRebaseline lines in TestExpectations that have cycled through all the bots."
+ AUTO_REBASELINE_BRANCH_NAME = "auto-rebaseline-temporary-branch"
+
+ # Rietveld uploader stinks. Limit the number of rebaselines in a given patch to keep upload from failing.
+ # FIXME: http://crbug.com/263676 Obviously we should fix the uploader here.
+ MAX_LINES_TO_REBASELINE = 200
+
+ SECONDS_BEFORE_GIVING_UP = 300
+
+ def __init__(self):
+ super(AutoRebaseline, self).__init__(options=[
+ # FIXME: Remove this option.
+ self.no_optimize_option,
+ # FIXME: Remove this option.
+ self.results_directory_option,
+ ])
+
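+ # Returns one {"builder", "revision"} dict per builder, or [] if any
+ # builder's latest run was interrupted, since an interrupted run's
+ # results cannot be trusted for rebaselining.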
+ def bot_revision_data(self):
+ revisions = []
+ for result in self.builder_data().values():
+ if result.run_was_interrupted():
+ _log.error("Can't rebaseline because the latest run on %s exited early." % result.builder_name())
+ return []
+ revisions.append({
+ "builder": result.builder_name(),
+ "revision": result.blink_revision(),
+ })
+ return revisions
+
+ def tests_to_rebaseline(self, tool, min_revision, print_revisions):
+ port = tool.port_factory.get()
+ expectations_file_path = port.path_to_generic_test_expectations_file()
+
+ tests = set()
+ revision = None
+ author = None
+ bugs = set()
+ has_any_needs_rebaseline_lines = False
+
+ for line in tool.scm().blame(expectations_file_path).split("\n"):
+ comment_index = line.find("#")
+ if comment_index == -1:
+ comment_index = len(line)
+ line_without_comments = re.sub(r"\s+", " ", line[:comment_index].strip())
+
+ if "NeedsRebaseline" not in line_without_comments:
+ continue
+
+ has_any_needs_rebaseline_lines = True
+
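+ # A blame line is assumed to look like:
+ #   624c3081c0 path/to/TestExpectations (author@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 some/test.html [ NeedsRebaseline ]
+ # so group 1 is the commit hash, group 2 the author, and group 3 the
+ # test name just before the trailing bracketed expectations.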
+ parsed_line = re.match(r"^(\S*)[^(]*\((\S*).*?([^ ]*)\ \[[^[]*$", line_without_comments)
+
+ commit_hash = parsed_line.group(1)
+ svn_revision = tool.scm().svn_revision_from_git_commit(commit_hash)
+
+ test = parsed_line.group(3)
+ if print_revisions:
+ _log.info("%s is waiting for r%s" % (test, svn_revision))
+
+ if not svn_revision or svn_revision > min_revision:
+ continue
+
+ if revision and svn_revision != revision:
+ continue
+
+ if not revision:
+ revision = svn_revision
+ author = parsed_line.group(2)
+
+ bugs.update(re.findall(r"crbug\.com/(\d+)", line_without_comments))
+ tests.add(test)
+
+ if len(tests) >= self.MAX_LINES_TO_REBASELINE:
+ _log.info("Too many tests to rebaseline in one patch. Doing the first %d." % self.MAX_LINES_TO_REBASELINE)
+ break
+
+ return tests, revision, author, bugs, has_any_needs_rebaseline_lines
+
+ def link_to_patch(self, revision):
+ return "http://src.chromium.org/viewvc/blink?view=revision&revision=" + str(revision)
+
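+ # For example, commit_message("foo@chromium.org", 1234, set(["100"]))
+ # yields something like:
+ #   Auto-rebaseline for r1234
+ #   http://src.chromium.org/viewvc/blink?view=revision&revision=1234
+ #   BUG=100
+ #   TBR=foo@chromium.org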
+ def commit_message(self, author, revision, bugs):
+ bug_string = ""
+ if bugs:
+ bug_string = "BUG=%s\n" % ",".join(bugs)
+
+ return """Auto-rebaseline for r%s
+
+%s
+
+%sTBR=%s
+""" % (revision, self.link_to_patch(revision), bug_string, author)
+
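+ # Returns a pair: test_prefix_list maps test -> {builder -> suffixes}
+ # (the shape _rebaseline() consumes) and lines_to_remove maps test -> the
+ # builders whose NeedsRebaseline lines can then be deleted.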
+ def get_test_prefix_list(self, tests):
+ test_prefix_list = {}
+ lines_to_remove = {}
+
+ for builder_name in self._release_builders():
+ port_name = builders.port_name_for_builder_name(builder_name)
+ port = self._tool.port_factory.get(port_name)
+ expectations = TestExpectations(port, include_overrides=True)
+ for test in expectations.get_needs_rebaseline_failures():
+ if test not in tests:
+ continue
+
+ if test not in test_prefix_list:
+ lines_to_remove[test] = []
+ test_prefix_list[test] = {}
+ lines_to_remove[test].append(builder_name)
+ test_prefix_list[test][builder_name] = BASELINE_SUFFIX_LIST
+
+ return test_prefix_list, lines_to_remove
+
+ def _run_git_cl_command(self, options, command):
+ subprocess_command = ['git', 'cl'] + command
+ if options.verbose:
+ subprocess_command.append('--verbose')
+
+ process = self._tool.executive.popen(subprocess_command, stdout=self._tool.executive.PIPE, stderr=self._tool.executive.STDOUT)
+ last_output_time = time.time()
+
+ # git cl sometimes completely hangs. Bail if we haven't gotten any output to stdout/stderr in a while.
+ while process.poll() is None and time.time() < last_output_time + self.SECONDS_BEFORE_GIVING_UP:
+ # FIXME: This doesn't make any sense. readline blocks, so all this code to
+ # try and bail is useless. Instead, we should do the readline calls on a
+ # subthread. Then the rest of this code would make sense.
+ out = process.stdout.readline().rstrip('\n')
+ if out:
+ last_output_time = time.time()
+ _log.info(out)
+
+ if process.poll() is None:
+ _log.error('Command hung: %s' % subprocess_command)
+ return False
+ return True
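+
+ # A minimal sketch (hypothetical, untested) of the subthread approach the
+ # FIXME above suggests: read lines on a worker thread and push them onto
+ # a Queue so the polling loop can use Queue.get(timeout=...) and actually
+ # time out instead of blocking in readline().
+ #
+ #   def _pump(stream, queue):
+ #       for line in iter(stream.readline, ''):
+ #           queue.put(line.rstrip('\n'))
+ #
+ #   reader = threading.Thread(target=_pump, args=(process.stdout, line_queue))
+ #   reader.daemon = True
+ #   reader.start()
+ #   try:
+ #       out = line_queue.get(timeout=self.SECONDS_BEFORE_GIVING_UP)
+ #   except Queue.Empty:
+ #       pass  # no output for too long; treat the command as hung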
+
+ # FIXME: Move this somewhere more general.
+ def tree_status(self):
+ blink_tree_status_url = "http://blink-status.appspot.com/status"
+ status = urllib2.urlopen(blink_tree_status_url).read().lower()
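+ # The status app is assumed to return either a numeric flag ("0" closed,
+ # "1" open) or a message containing "open"/"closed"; anything else maps
+ # to 'unknown', which execute() treats the same as open.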
+ if status.find('closed') != -1 or status == "0":
+ return 'closed'
+ elif status.find('open') != -1 or status == "1":
+ return 'open'
+ return 'unknown'
+
+ def execute(self, options, args, tool):
+ if tool.scm().executable_name == "svn":
+ _log.error("Auto rebaseline only works with a git checkout.")
+ return
+
+ if tool.scm().has_working_directory_changes():
+ _log.error("Cannot proceed with working directory changes. Clean working directory first.")
+ return
+
+ revision_data = self.bot_revision_data()
+ if not revision_data:
+ return
+
+ min_revision = min(int(item["revision"]) for item in revision_data)
+ tests, revision, author, bugs, has_any_needs_rebaseline_lines = self.tests_to_rebaseline(tool, min_revision, print_revisions=options.verbose)
+
+ if options.verbose:
+ _log.info("Min revision across all bots is %s." % min_revision)
+ for item in revision_data:
+ _log.info("%s: r%s" % (item["builder"], item["revision"]))
+
+ if not tests:
+ _log.debug('No tests to rebaseline.')
+ return
+
+ if self.tree_status() == 'closed':
+ _log.info('Cannot proceed. Tree is closed.')
+ return
+
+ _log.info('Rebaselining %s for r%s by %s.' % (list(tests), revision, author))
+
+ test_prefix_list, lines_to_remove = self.get_test_prefix_list(tests)
+
+ did_finish = False
+ try:
+ old_branch_name = tool.scm().current_branch()
+ tool.scm().delete_branch(self.AUTO_REBASELINE_BRANCH_NAME)
+ tool.scm().create_clean_branch(self.AUTO_REBASELINE_BRANCH_NAME)
+
+ # If the tests are passing everywhere, then this list will be empty. We don't need
+ # to rebaseline, but we'll still need to update TestExpectations.
+ if test_prefix_list:
+ self._rebaseline(options, test_prefix_list)
+
+ tool.scm().commit_locally_with_message(self.commit_message(author, revision, bugs))
+
+ # FIXME: It would be nice if we could dcommit the patch without uploading, but still
+ # go through all the precommit hooks. For rebaselines with lots of files, uploading
+ # takes a long time and sometimes fails, but we don't want to commit if, e.g. the
+ # tree is closed.
+ did_finish = self._run_git_cl_command(options, ['upload', '-f'])
+
+ if did_finish:
+ # Uploading can take a very long time. Do another pull to make sure TestExpectations is up to date,
+ # so the dcommit can go through.
+ # FIXME: Log the pull and dcommit stdout/stderr to the log-server.
+ tool.executive.run_command(['git', 'pull'])
+
+ self._run_git_cl_command(options, ['dcommit', '-f'])
+ except Exception as e:
+ _log.error(e)
+ finally:
+ if did_finish:
+ self._run_git_cl_command(options, ['set_close'])
+ tool.scm().ensure_cleanly_tracking_remote_master()
+ tool.scm().checkout_branch(old_branch_name)
+ tool.scm().delete_branch(self.AUTO_REBASELINE_BRANCH_NAME)
+
+
+class RebaselineOMatic(AbstractDeclarativeCommand):
+ name = "rebaseline-o-matic"
+ help_text = "Calls webkit-patch auto-rebaseline in a loop."
+ show_in_main_help = True
+
+ SLEEP_TIME_IN_SECONDS = 30
+ LOG_SERVER = 'blinkrebaseline.appspot.com'
+ QUIT_LOG = '##QUIT##'
+
+ # Uploaded log entries append to the existing entry unless the newentry
+ # flag is set, in which case the server starts a fresh entry and
+ # subsequent uploads append to that one.
+ def _log_to_server(self, log='', is_new_entry=False):
+ query = {
+ 'log': log,
+ }
+ if is_new_entry:
+ query['newentry'] = 'on'
+ try:
+ urllib2.urlopen("http://" + self.LOG_SERVER + "/updatelog", data=urllib.urlencode(query))
+ except:
+ traceback.print_exc(file=sys.stderr)
+
+ def _log_to_server_thread(self):
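+ # Block for the first message, then drain any backlog so a burst of log
+ # lines goes to the server as one request. QUIT_LOG, posted by
+ # _do_one_rebaseline(), tells this thread to exit.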
+ is_new_entry = True
+ while True:
+ messages = [self._log_queue.get()]
+ while not self._log_queue.empty():
+ messages.append(self._log_queue.get())
+ self._log_to_server('\n'.join(messages), is_new_entry=is_new_entry)
+ is_new_entry = False
+ if self.QUIT_LOG in messages:
+ return
+
+ def _post_log_to_server(self, log):
+ self._log_queue.put(log)
+
+ def _log_line(self, handle):
+ out = handle.readline().rstrip('\n')
+ if out:
+ if self._verbose:
+ print out
+ self._post_log_to_server(out)
+ return out
+
+ def _run_logged_command(self, command):
+ process = self._tool.executive.popen(command, stdout=self._tool.executive.PIPE, stderr=self._tool.executive.STDOUT)
+
+ out = self._log_line(process.stdout)
+ while out:
+ # FIXME: This should probably batch up lines if they're available and log to the server once.
+ out = self._log_line(process.stdout)
+
+ def _do_one_rebaseline(self):
+ self._log_queue = Queue.Queue(256)
+ log_thread = threading.Thread(name='LogToServer', target=self._log_to_server_thread)
+ log_thread.start()
+ try:
+ old_branch_name = self._tool.scm().current_branch()
+ self._run_logged_command(['git', 'pull'])
+ rebaseline_command = [self._tool.filesystem.join(self._tool.scm().checkout_root, 'Tools', 'Scripts', 'webkit-patch'), 'auto-rebaseline']
+ if self._verbose:
+ rebaseline_command.append('--verbose')
+ self._run_logged_command(rebaseline_command)
+ except:
+ self._log_queue.put(self.QUIT_LOG)
+ traceback.print_exc(file=sys.stderr)
+ # Sometimes git crashes and leaves us on a detached head.
+ self._tool.scm().checkout_branch(old_branch_name)
+ else:
+ self._log_queue.put(self.QUIT_LOG)
+ log_thread.join()
+
+ def execute(self, options, args, tool):
+ self._verbose = options.verbose
+ while True:
+ self._do_one_rebaseline()
+ time.sleep(self.SLEEP_TIME_IN_SECONDS)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
new file mode 100644
index 0000000..96f06c3
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
@@ -0,0 +1,1269 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
+from webkitpy.common.checkout.scm.scm_mock import MockSCM
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.net.buildbot.buildbot_mock import MockBuilder
+from webkitpy.common.net.layouttestresults import LayoutTestResults
+from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.executive_mock import MockExecutive2
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.commands.rebaseline import *
+from webkitpy.tool.mocktool import MockTool, MockOptions
+
+
+class _BaseTestCase(unittest.TestCase):
+ MOCK_WEB_RESULT = 'MOCK Web result, convert 404 to None=True'
+ WEB_PREFIX = 'http://example.com/f/builders/WebKit Mac10.7/results/layout-test-results'
+
+ command_constructor = None
+
+ def setUp(self):
+ self.tool = MockTool()
+ self.command = self.command_constructor() # lint warns that command_constructor might not be set, but this is intentional; pylint: disable=E1102
+ self.command.bind_to_tool(self.tool)
+ self.lion_port = self.tool.port_factory.get_from_builder_name("WebKit Mac10.7")
+ self.lion_expectations_path = self.lion_port.path_to_generic_test_expectations_file()
+ self.tool.filesystem.write_text_file(self.tool.filesystem.join(self.lion_port.layout_tests_dir(), "VirtualTestSuites"),
+ '[]')
+
+ # FIXME: crbug.com/279494. We should override builders._exact_matches
+ # here to point to a set of test ports and restore the value in
+ # tearDown(), and that way the individual tests wouldn't have to worry
+ # about it.
+
+ def _expand(self, path):
+ if self.tool.filesystem.isabs(path):
+ return path
+ return self.tool.filesystem.join(self.lion_port.layout_tests_dir(), path)
+
+ def _read(self, path):
+ return self.tool.filesystem.read_text_file(self._expand(path))
+
+ def _write(self, path, contents):
+ self.tool.filesystem.write_text_file(self._expand(path), contents)
+
+ def _zero_out_test_expectations(self):
+ for port_name in self.tool.port_factory.all_port_names():
+ port = self.tool.port_factory.get(port_name)
+ for path in port.expectations_files():
+ self._write(path, '')
+ self.tool.filesystem.written_files = {}
+
+ def _setup_mock_builder_data(self):
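+ # Simulates the builders' full results JSON: "expected" vs "actual" per
+ # test, from which the suffixes that actually need rebaselining are
+ # later derived (see _suffixes_for_actual_failures).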
+ data = LayoutTestResults.results_from_string("""ADD_RESULTS({
+ "tests": {
+ "userscripts": {
+ "first-test.html": {
+ "expected": "PASS",
+ "actual": "IMAGE+TEXT"
+ },
+ "second-test.html": {
+ "expected": "FAIL",
+ "actual": "IMAGE+TEXT"
+ }
+ }
+ }
+});""")
+ # FIXME: crbug.com/279494 - we shouldn't be mixing mock and real builder names.
+ for builder in ['MOCK builder', 'MOCK builder (Debug)', 'WebKit Mac10.7']:
+ self.command._builder_data[builder] = data
+
+
+class TestCopyExistingBaselinesInternal(_BaseTestCase):
+ command_constructor = CopyExistingBaselinesInternal
+
+ def setUp(self):
+ super(TestCopyExistingBaselinesInternal, self).setUp()
+
+ def test_copying_overwritten_baseline(self):
+ self.tool.executive = MockExecutive2()
+
+ # FIXME: crbug.com/279494. It's confusing that this is the test- port, and not the regular lion port. Really all of the tests should be using the test ports.
+ port = self.tool.port_factory.get('test-mac-snowleopard')
+ self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-mac-snowleopard/failures/expected/image-expected.txt'), 'original snowleopard result')
+
+ old_exact_matches = builders._exact_matches
+ oc = OutputCapture()
+ try:
+ builders._exact_matches = {
+ "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
+ "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
+ }
+
+ options = MockOptions(builder="MOCK SnowLeopard", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
+
+ oc.capture_output()
+ self.command.execute(options, [], self.tool)
+ finally:
+ out, _, _ = oc.restore_output()
+ builders._exact_matches = old_exact_matches
+
+ self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-mac-leopard/failures/expected/image-expected.txt')), 'original snowleopard result')
+ self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [], "delete": []}\n')
+
+ def test_copying_overwritten_baseline_to_multiple_locations(self):
+ self.tool.executive = MockExecutive2()
+
+ # FIXME: crbug.com/279494. It's confusing that this is the test- port, and not the regular win port. Really all of the tests should be using the test ports.
+ port = self.tool.port_factory.get('test-win-win7')
+ self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
+
+ old_exact_matches = builders._exact_matches
+ oc = OutputCapture()
+ try:
+ builders._exact_matches = {
+ "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
+ "MOCK Linux": {"port_name": "test-linux-x86_64", "specifiers": set(["mock-specifier"])},
+ "MOCK Win7": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
+ }
+
+ options = MockOptions(builder="MOCK Win7", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
+
+ oc.capture_output()
+ self.command.execute(options, [], self.tool)
+ finally:
+ out, _, _ = oc.restore_output()
+ builders._exact_matches = old_exact_matches
+
+ self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-linux-x86_64/failures/expected/image-expected.txt')), 'original win7 result')
+ self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/mac-leopard/userscripts/another-test-expected.txt')))
+ self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [], "delete": []}\n')
+
+ def test_no_copy_existing_baseline(self):
+ self.tool.executive = MockExecutive2()
+
+ # FIXME: It's confusing that this is the test- port, and not the regular win port. Really all of the tests should be using the test ports.
+ port = self.tool.port_factory.get('test-win-win7')
+ self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
+
+ old_exact_matches = builders._exact_matches
+ oc = OutputCapture()
+ try:
+ builders._exact_matches = {
+ "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
+ "MOCK Linux": {"port_name": "test-linux-x86_64", "specifiers": set(["mock-specifier"])},
+ "MOCK Win7": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
+ }
+
+ options = MockOptions(builder="MOCK Win7", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
+
+ oc.capture_output()
+ self.command.execute(options, [], self.tool)
+ finally:
+ out, _, _ = oc.restore_output()
+ builders._exact_matches = old_exact_matches
+
+ self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-linux-x86_64/failures/expected/image-expected.txt')), 'original win7 result')
+ self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt')), 'original win7 result')
+ self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/mac-leopard/userscripts/another-test-expected.txt')))
+ self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [], "delete": []}\n')
+
+ def test_no_copy_skipped_test(self):
+ self.tool.executive = MockExecutive2()
+
+ port = self.tool.port_factory.get('test-win-win7')
+ fs = self.tool.filesystem
+ self._write(fs.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
+ expectations_path = port.path_to_generic_test_expectations_file()
+ self._write(expectations_path, (
+ "[ Win ] failures/expected/image.html [ Failure ]\n"
+ "[ Linux ] failures/expected/image.html [ Skip ]\n"))
+ old_exact_matches = builders._exact_matches
+ oc = OutputCapture()
+ try:
+ builders._exact_matches = {
+ "MOCK Linux": {"port_name": "test-linux-x86_64", "specifiers": set(["mock-specifier"])},
+ "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
+ "MOCK Win7": {"port_name": "test-win-win7", "specifiers": set(["mock-specifier"])},
+ }
+
+ options = MockOptions(builder="MOCK Win7", suffixes="txt", verbose=True, test="failures/expected/image.html", results_directory=None)
+
+ oc.capture_output()
+ self.command.execute(options, [], self.tool)
+ finally:
+ out, _, _ = oc.restore_output()
+ builders._exact_matches = old_exact_matches
+
+ self.assertFalse(fs.exists(fs.join(port.layout_tests_dir(), 'platform/test-linux-x86_64/failures/expected/image-expected.txt')))
+ self.assertEqual(self._read(fs.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt')),
+ 'original win7 result')
+
+
+class TestRebaselineTest(_BaseTestCase):
+ command_constructor = RebaselineTest # AKA webkit-patch rebaseline-test-internal
+
+ def setUp(self):
+ super(TestRebaselineTest, self).setUp()
+ self.options = MockOptions(builder="WebKit Mac10.7", test="userscripts/another-test.html", suffixes="txt", results_directory=None)
+
+ def test_baseline_directory(self):
+ command = self.command
+ self.assertMultiLineEqual(command._baseline_directory("WebKit Mac10.7"), "/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-lion")
+ self.assertMultiLineEqual(command._baseline_directory("WebKit Mac10.6"), "/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-snowleopard")
+
+ def test_rebaseline_updates_expectations_file_noop(self):
+ self._zero_out_test_expectations()
+ self._write(self.lion_expectations_path, """Bug(B) [ Mac Linux XP Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]
+Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
+""")
+ self._write("fast/dom/Window/window-postmessage-clone-really-deep-array.html", "Dummy test contents")
+ self._write("fast/css/large-list-of-rules-crash.html", "Dummy test contents")
+ self._write("userscripts/another-test.html", "Dummy test contents")
+
+ self.options.suffixes = "png,wav,txt"
+ self.command._rebaseline_test_and_update_expectations(self.options)
+
+ self.assertItemsEqual(self.tool.web.urls_fetched,
+ [self.WEB_PREFIX + '/userscripts/another-test-actual.png',
+ self.WEB_PREFIX + '/userscripts/another-test-actual.wav',
+ self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
+ new_expectations = self._read(self.lion_expectations_path)
+ self.assertMultiLineEqual(new_expectations, """Bug(B) [ Mac Linux XP Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]
+Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
+""")
+
+ def test_rebaseline_test(self):
+ self.command._rebaseline_test("WebKit Linux", "userscripts/another-test.html", "txt", self.WEB_PREFIX)
+ self.assertItemsEqual(self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
+
+ def test_rebaseline_test_with_results_directory(self):
+ self._write("userscripts/another-test.html", "test data")
+ self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
+ self.options.results_directory = '/tmp'
+ self.command._rebaseline_test_and_update_expectations(self.options)
+ self.assertItemsEqual(self.tool.web.urls_fetched, ['file:///tmp/userscripts/another-test-actual.txt'])
+
+ def test_rebaseline_reftest(self):
+ self._write("userscripts/another-test.html", "test data")
+ self._write("userscripts/another-test-expected.html", "generic result")
+ OutputCapture().assert_outputs(self, self.command._rebaseline_test_and_update_expectations, args=[self.options],
+ expected_logs="Cannot rebaseline reftest: userscripts/another-test.html\n")
+ self.assertDictEqual(self.command._scm_changes, {'add': [], 'remove-lines': [], "delete": []})
+
+ def test_rebaseline_test_and_print_scm_changes(self):
+ self.command._print_scm_changes = True
+ self.command._scm_changes = {'add': [], 'delete': []}
+ self.tool._scm.exists = lambda x: False
+
+ self.command._rebaseline_test("WebKit Linux", "userscripts/another-test.html", "txt", None)
+
+ self.assertDictEqual(self.command._scm_changes, {'add': ['/mock-checkout/third_party/WebKit/LayoutTests/platform/linux/userscripts/another-test-expected.txt'], 'delete': []})
+
+ def test_rebaseline_test_internal_with_port_that_lacks_buildbot(self):
+ self.tool.executive = MockExecutive2()
+
+ # FIXME: It's confusing that this is the test- port, and not the regular win port. Really all of the tests should be using the test ports.
+ port = self.tool.port_factory.get('test-win-win7')
+ self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt'), 'original win7 result')
+
+ old_exact_matches = builders._exact_matches
+ oc = OutputCapture()
+ try:
+ builders._exact_matches = {
+ "MOCK XP": {"port_name": "test-win-xp"},
+ "MOCK Win7": {"port_name": "test-win-win7"},
+ }
+
+ options = MockOptions(optimize=True, builder="MOCK Win7", suffixes="txt",
+ verbose=True, test="failures/expected/image.html", results_directory=None)
+
+ oc.capture_output()
+ self.command.execute(options, [], self.tool)
+ finally:
+ out, _, _ = oc.restore_output()
+ builders._exact_matches = old_exact_matches
+
+ self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-win-win7/failures/expected/image-expected.txt')), 'MOCK Web result, convert 404 to None=True')
+ self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-win-xp/failures/expected/image-expected.txt')))
+ self.assertMultiLineEqual(out, '{"add": [], "remove-lines": [{"test": "failures/expected/image.html", "builder": "MOCK Win7"}], "delete": []}\n')
+
+
+class TestAbstractParallelRebaselineCommand(_BaseTestCase):
+ command_constructor = AbstractParallelRebaselineCommand
+
+ def test_builders_to_fetch_from(self):
+ old_exact_matches = builders._exact_matches
+ try:
+ builders._exact_matches = {
+ "MOCK XP": {"port_name": "test-win-xp"},
+ "MOCK Win7": {"port_name": "test-win-win7"},
+ "MOCK Win7 (dbg)(1)": {"port_name": "test-win-win7"},
+ "MOCK Win7 (dbg)(2)": {"port_name": "test-win-win7"},
+ }
+
+ builders_to_fetch = self.command._builders_to_fetch_from(["MOCK XP", "MOCK Win7 (dbg)(1)", "MOCK Win7 (dbg)(2)", "MOCK Win7"])
+ self.assertEqual(builders_to_fetch, ["MOCK XP", "MOCK Win7"])
+ finally:
+ builders._exact_matches = old_exact_matches
+
+
+class TestRebaselineJson(_BaseTestCase):
+ command_constructor = RebaselineJson
+
+ def setUp(self):
+ super(TestRebaselineJson, self).setUp()
+ self.tool.executive = MockExecutive2()
+ self.old_exact_matches = builders._exact_matches
+ builders._exact_matches = {
+ "MOCK builder": {"port_name": "test-mac-snowleopard"},
+ "MOCK builder (Debug)": {"port_name": "test-mac-snowleopard"},
+ }
+
+ def tearDown(self):
+ builders._exact_matches = self.old_exact_matches
+ super(TestRebaselineJson, self).tearDown()
+
+ def test_rebaseline_test_passes_on_all_builders(self):
+ self._setup_mock_builder_data()
+
+ def builder_data():
+ self.command._builder_data['MOCK builder'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
+ "tests": {
+ "userscripts": {
+ "first-test.html": {
+ "expected": "NEEDSREBASELINE",
+ "actual": "PASS"
+ }
+ }
+ }
+});""")
+ return self.command._builder_data
+
+ self.command.builder_data = builder_data
+
+ options = MockOptions(optimize=True, verbose=True, results_directory=None)
+
+ self._write(self.lion_expectations_path, "Bug(x) userscripts/first-test.html [ ImageOnlyFailure ]\n")
+ self._write("userscripts/first-test.html", "Dummy test contents")
+
+ self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder": ["txt", "png"]}})
+
+ # Since the test now passes on the builder, no copy or rebaseline commands run; only the optimize-baselines call remains.
+ self.assertEqual(self.tool.executive.calls,
+ [[['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', '', 'userscripts/first-test.html', '--verbose']]])
+
+ def test_rebaseline_all(self):
+ self._setup_mock_builder_data()
+
+ options = MockOptions(optimize=True, verbose=True, results_directory=None)
+ self._write("userscripts/first-test.html", "Dummy test contents")
+ self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder": ["txt", "png"]}})
+
+ # Each nested list is one run_in_parallel() batch: copy, then rebaseline, then optimize.
+ self.assertEqual(self.tool.executive.calls,
+ [[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose']],
+ [['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose']],
+ [['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'txt,png', 'userscripts/first-test.html', '--verbose']]])
+
+ def test_rebaseline_debug(self):
+ self._setup_mock_builder_data()
+
+ options = MockOptions(optimize=True, verbose=True, results_directory=None)
+ self._write("userscripts/first-test.html", "Dummy test contents")
+ self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder (Debug)": ["txt", "png"]}})
+
+ # Each nested list is one run_in_parallel() batch: copy, then rebaseline, then optimize.
+ self.assertEqual(self.tool.executive.calls,
+ [[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'userscripts/first-test.html', '--verbose']],
+ [['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'userscripts/first-test.html', '--verbose']],
+ [['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'txt,png', 'userscripts/first-test.html', '--verbose']]])
+
+ def test_no_optimize(self):
+ self._setup_mock_builder_data()
+
+ options = MockOptions(optimize=False, verbose=True, results_directory=None)
+ self._write("userscripts/first-test.html", "Dummy test contents")
+ self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder (Debug)": ["txt", "png"]}})
+
+ # With optimize=False, only the copy and rebaseline batches run.
+ self.assertEqual(self.tool.executive.calls,
+ [[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'userscripts/first-test.html', '--verbose']],
+ [['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'userscripts/first-test.html', '--verbose']]])
+
+ def test_results_directory(self):
+ self._setup_mock_builder_data()
+
+ options = MockOptions(optimize=False, verbose=True, results_directory='/tmp')
+ self._write("userscripts/first-test.html", "Dummy test contents")
+ self.command._rebaseline(options, {"userscripts/first-test.html": {"MOCK builder": ["txt", "png"]}})
+
+ # With optimize=False, only the copy and rebaseline batches run, now fetching from --results-directory.
+ self.assertEqual(self.tool.executive.calls,
+ [[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--results-directory', '/tmp', '--verbose']],
+ [['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--results-directory', '/tmp', '--verbose']]])
+
+
+class TestRebaselineJsonUpdatesExpectationsFiles(_BaseTestCase):
+ command_constructor = RebaselineJson
+
+ def setUp(self):
+ super(TestRebaselineJsonUpdatesExpectationsFiles, self).setUp()
+ self.tool.executive = MockExecutive2()
+
+ def mock_run_command(args,
+ cwd=None,
+ input=None,
+ error_handler=None,
+ return_exit_code=False,
+ return_stderr=True,
+ decode_output=False,
+ env=None):
+ return '{"add": [], "remove-lines": [{"test": "userscripts/first-test.html", "builder": "WebKit Mac10.7"}]}\n'
+ self.tool.executive.run_command = mock_run_command
+
+ def test_rebaseline_updates_expectations_file(self):
+ options = MockOptions(optimize=False, verbose=True, results_directory=None)
+
+ self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/first-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/first-test.html [ ImageOnlyFailure ]\n")
+ self._write("userscripts/first-test.html", "Dummy test contents")
+ self._setup_mock_builder_data()
+
+ self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
+
+ new_expectations = self._read(self.lion_expectations_path)
+ self.assertMultiLineEqual(new_expectations, "Bug(x) [ Mavericks MountainLion Retina SnowLeopard ] userscripts/first-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/first-test.html [ ImageOnlyFailure ]\n")
+
+ def test_rebaseline_updates_expectations_file_all_platforms(self):
+ options = MockOptions(optimize=False, verbose=True, results_directory=None)
+
+ self._write(self.lion_expectations_path, "Bug(x) userscripts/first-test.html [ ImageOnlyFailure ]\n")
+ self._write("userscripts/first-test.html", "Dummy test contents")
+ self._setup_mock_builder_data()
+
+ self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
+
+ new_expectations = self._read(self.lion_expectations_path)
+ self.assertMultiLineEqual(new_expectations, "Bug(x) [ Android Linux Mavericks MountainLion Retina SnowLeopard Win ] userscripts/first-test.html [ ImageOnlyFailure ]\n")
+
+ def test_rebaseline_handles_platform_skips(self):
+ # This test is just like test_rebaseline_updates_expectations_file_all_platforms(),
+ # except that if a particular port happens to SKIP a test in an overrides file,
+ # we count that as passing and no longer consider the test in need of rebaselining.
+ options = MockOptions(optimize=False, verbose=True, results_directory=None)
+
+ self._write(self.lion_expectations_path, "Bug(x) userscripts/first-test.html [ ImageOnlyFailure ]\n")
+ self._write("NeverFixTests", "Bug(y) [ Android ] userscripts [ Skip ]\n")
+ self._write("userscripts/first-test.html", "Dummy test contents")
+ self._setup_mock_builder_data()
+
+ self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
+
+ new_expectations = self._read(self.lion_expectations_path)
+ self.assertMultiLineEqual(new_expectations, "Bug(x) [ Linux Mavericks MountainLion Retina SnowLeopard Win ] userscripts/first-test.html [ ImageOnlyFailure ]\n")
+
+ def test_rebaseline_handles_skips_in_file(self):
+ # This test is like test_rebaseline_handles_platform_skips, except that the
+ # Skip is in the same (generic) file rather than a platform file. In this case,
+ # the Skip line should be left unmodified. Note that the first line is now
+ # qualified as "[ Linux Mac Win ]"; if it were unqualified, it would conflict with
+ # the second line.
+ options = MockOptions(optimize=False, verbose=True, results_directory=None)
+
+ self._write(self.lion_expectations_path,
+ ("Bug(x) [ Linux Mac Win ] userscripts/first-test.html [ ImageOnlyFailure ]\n"
+ "Bug(y) [ Android ] userscripts/first-test.html [ Skip ]\n"))
+ self._write("userscripts/first-test.html", "Dummy test contents")
+ self._setup_mock_builder_data()
+
+ self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
+
+ new_expectations = self._read(self.lion_expectations_path)
+ self.assertMultiLineEqual(new_expectations,
+ ("Bug(x) [ Linux Mavericks MountainLion Retina SnowLeopard Win ] userscripts/first-test.html [ ImageOnlyFailure ]\n"
+ "Bug(y) [ Android ] userscripts/first-test.html [ Skip ]\n"))
+
+ def test_rebaseline_handles_smoke_tests(self):
+ # This test is just like test_rebaseline_handles_platform_skips, except that we check for
+ # a test not being in the SmokeTests file, instead of using overrides files.
+ # If a test is not part of the smoke tests, we count that as passing on ports that only
+ # run smoke tests and no longer consider the test in need of rebaselining.
+ options = MockOptions(optimize=False, verbose=True, results_directory=None)
+
+ self._write(self.lion_expectations_path, "Bug(x) userscripts/first-test.html [ ImageOnlyFailure ]\n")
+ self._write("SmokeTests", "fast/html/article-element.html")
+ self._write("userscripts/first-test.html", "Dummy test contents")
+ self._setup_mock_builder_data()
+
+ self.command._rebaseline(options, {"userscripts/first-test.html": {"WebKit Mac10.7": ["txt", "png"]}})
+
+ new_expectations = self._read(self.lion_expectations_path)
+ self.assertMultiLineEqual(new_expectations, "Bug(x) [ Linux Mavericks MountainLion Retina SnowLeopard Win ] userscripts/first-test.html [ ImageOnlyFailure ]\n")
+
+
+class TestRebaseline(_BaseTestCase):
+ # This command shares most of its logic with RebaselineJson, so these tests just test what is different.
+
+ command_constructor = Rebaseline # AKA webkit-patch rebaseline
+
+ def test_rebaseline(self):
+ self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
+
+ self._write("userscripts/first-test.html", "test data")
+
+ self._zero_out_test_expectations()
+ self._setup_mock_builder_data()
+
+ old_exact_matches = builders._exact_matches
+ try:
+ builders._exact_matches = {
+ "MOCK builder": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
+ }
+ self.command.execute(MockOptions(results_directory=False, optimize=False, builders=None, suffixes="txt,png", verbose=True), ['userscripts/first-test.html'], self.tool)
+ finally:
+ builders._exact_matches = old_exact_matches
+
+ calls = filter(lambda x: x != ['qmake', '-v'] and x[0] != 'perl', self.tool.executive.calls)
+ self.assertEqual(calls,
+ [[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose']],
+ [['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose']]])
+
+ def test_rebaseline_directory(self):
+ self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
+
+ self._write("userscripts/first-test.html", "test data")
+ self._write("userscripts/second-test.html", "test data")
+
+ self._setup_mock_builder_data()
+
+ old_exact_matches = builders._exact_matches
+ try:
+ builders._exact_matches = {
+ "MOCK builder": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
+ }
+ self.command.execute(MockOptions(results_directory=False, optimize=False, builders=None, suffixes="txt,png", verbose=True), ['userscripts'], self.tool)
+ finally:
+ builders._exact_matches = old_exact_matches
+
+ calls = filter(lambda x: x != ['qmake', '-v'] and x[0] != 'perl', self.tool.executive.calls)
+ self.assertEqual(calls,
+ [[['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose'],
+ ['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/second-test.html', '--verbose']],
+ [['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/first-test.html', '--verbose'],
+ ['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'userscripts/second-test.html', '--verbose']]])
+
+
+class MockLineRemovingExecutive(MockExecutive):
+ def run_in_parallel(self, commands):
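+ # Runs each command serially, substituting fake rebaseline-test-internal
+ # output that requests a line removal, and regroups this batch's calls
+ # into one nested list so tests can assert on run_in_parallel() batches.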
+ assert len(commands)
+
+ num_previous_calls = len(self.calls)
+ command_outputs = []
+ for cmd_line, cwd in commands:
+ out = self.run_command(cmd_line, cwd=cwd)
+ if 'rebaseline-test-internal' in cmd_line:
+ out = '{"add": [], "remove-lines": [{"test": "%s", "builder": "%s"}], "delete": []}\n' % (cmd_line[8], cmd_line[6])
+ command_outputs.append([0, out, ''])
+
+ new_calls = self.calls[num_previous_calls:]
+ self.calls = self.calls[:num_previous_calls]
+ self.calls.append(new_calls)
+ return command_outputs
+
+
+class TestRebaselineExpectations(_BaseTestCase):
+ command_constructor = RebaselineExpectations
+
+ def setUp(self):
+ super(TestRebaselineExpectations, self).setUp()
+ self.options = MockOptions(optimize=False, builders=None, suffixes=['txt'], verbose=False, platform=None, results_directory=None)
+
+ def _write_test_file(self, port, path, contents):
+ abs_path = self.tool.filesystem.join(port.layout_tests_dir(), path)
+ self.tool.filesystem.write_text_file(abs_path, contents)
+
+ def _setup_test_port(self):
+ test_port = self.tool.port_factory.get('test')
+ original_get = self.tool.port_factory.get
+
+ def get_test_port(port_name=None, options=None, **kwargs):
+ if not port_name:
+ return test_port
+ return original_get(port_name, options, **kwargs)
+ # Need to make sure all the ports grabbed use the test checkout path instead of the mock checkout path.
+ # FIXME: crbug.com/279494 - we shouldn't be doing this.
+ self.tool.port_factory.get = get_test_port
+
+ return test_port
+
+ def test_rebaseline_expectations(self):
+ self._zero_out_test_expectations()
+
+ self.tool.executive = MockExecutive2()
+
+ def builder_data():
+ self.command._builder_data['MOCK SnowLeopard'] = self.command._builder_data['MOCK Leopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
+ "tests": {
+ "userscripts": {
+ "another-test.html": {
+ "expected": "PASS",
+ "actual": "PASS TEXT"
+ },
+ "images.svg": {
+ "expected": "FAIL",
+ "actual": "IMAGE+TEXT"
+ }
+ }
+ }
+});""")
+ return self.command._builder_data
+
+ self.command.builder_data = builder_data
+
+ self._write("userscripts/another-test.html", "Dummy test contents")
+ self._write("userscripts/images.svg", "Dummy test contents")
+ self.command._tests_to_rebaseline = lambda port: {
+ 'userscripts/another-test.html': set(['txt']),
+ 'userscripts/images.svg': set(['png']),
+ 'userscripts/not-actually-failing.html': set(['txt', 'png', 'wav']),
+ }
+
+ old_exact_matches = builders._exact_matches
+ try:
+ builders._exact_matches = {
+ "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
+ "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
+ }
+ self.command.execute(self.options, [], self.tool)
+ finally:
+ builders._exact_matches = old_exact_matches
+
+ # FIXME: change this to use the test- ports.
+ calls = filter(lambda x: x != ['qmake', '-v'], self.tool.executive.calls)
+ self.assertEqual(calls, [
+ [
+ ['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'userscripts/another-test.html'],
+ ['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'userscripts/another-test.html'],
+ ['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'png', '--builder', 'MOCK Leopard', '--test', 'userscripts/images.svg'],
+ ['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'png', '--builder', 'MOCK SnowLeopard', '--test', 'userscripts/images.svg'],
+ ],
+ [
+ ['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'userscripts/another-test.html'],
+ ['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'userscripts/another-test.html'],
+ ['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'MOCK Leopard', '--test', 'userscripts/images.svg'],
+ ['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'MOCK SnowLeopard', '--test', 'userscripts/images.svg'],
+ ],
+ ])
+
+ def test_rebaseline_expectations_noop(self):
+ self._zero_out_test_expectations()
+
+ oc = OutputCapture()
+ try:
+ oc.capture_output()
+ self.command.execute(self.options, [], self.tool)
+ finally:
+ _, _, logs = oc.restore_output()
+ self.assertEqual(self.tool.filesystem.written_files, {})
+ self.assertEqual(logs, 'Did not find any tests marked Rebaseline.\n')
+
+ def disabled_test_overrides_are_included_correctly(self):
+ # This tests that any tests marked as REBASELINE in the overrides are found, but
+ # that the overrides do not get written into the main file.
+ self._zero_out_test_expectations()
+
+ self._write(self.lion_expectations_path, '')
+ self.lion_port.expectations_dict = lambda: {
+ self.lion_expectations_path: '',
+ 'overrides': ('Bug(x) userscripts/another-test.html [ Failure Rebaseline ]\n'
+ 'Bug(y) userscripts/test.html [ Crash ]\n')}
+ self._write('/userscripts/another-test.html', '')
+
+ self.assertDictEqual(self.command._tests_to_rebaseline(self.lion_port), {'userscripts/another-test.html': set(['png', 'txt', 'wav'])})
+ self.assertEqual(self._read(self.lion_expectations_path), '')
+
+ def test_rebaseline_without_other_expectations(self):
+ self._write("userscripts/another-test.html", "Dummy test contents")
+ self._write(self.lion_expectations_path, "Bug(x) userscripts/another-test.html [ Rebaseline ]\n")
+ self.assertDictEqual(self.command._tests_to_rebaseline(self.lion_port), {'userscripts/another-test.html': ('png', 'wav', 'txt')})
+
+ def test_rebaseline_test_passes_everywhere(self):
+ test_port = self._setup_test_port()
+
+ old_builder_data = self.command.builder_data
+
+ def builder_data():
+ self.command._builder_data['MOCK Leopard'] = self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
+ "tests": {
+ "fast": {
+ "dom": {
+ "prototype-taco.html": {
+ "expected": "FAIL",
+ "actual": "PASS",
+ "is_unexpected": true
+ }
+ }
+ }
+ }
+});""")
+ return self.command._builder_data
+
+ self.command.builder_data = builder_data
+
+ self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
+Bug(foo) fast/dom/prototype-taco.html [ Rebaseline ]
+""")
+
+ self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")
+
+ self.tool.executive = MockLineRemovingExecutive()
+
+ old_exact_matches = builders._exact_matches
+ try:
+ builders._exact_matches = {
+ "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
+ "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
+ }
+
+ self.command.execute(self.options, [], self.tool)
+ self.assertEqual(self.tool.executive.calls, [])
+
+ # The mac ports should both be removed since they're the only ones in builders._exact_matches.
+ self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
+Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ Rebaseline ]
+""")
+ finally:
+ builders._exact_matches = old_exact_matches
+
+
+class _FakeOptimizer(BaselineOptimizer):
+ def read_results_by_directory(self, baseline_name):
+ if baseline_name.endswith('txt'):
+ return {'LayoutTests/passes/text.html': '123456'}
+ return {}
+
+
+class TestOptimizeBaselines(_BaseTestCase):
+ command_constructor = OptimizeBaselines
+
+ def _write_test_file(self, port, path, contents):
+ abs_path = self.tool.filesystem.join(port.layout_tests_dir(), path)
+ self.tool.filesystem.write_text_file(abs_path, contents)
+
+ def setUp(self):
+ super(TestOptimizeBaselines, self).setUp()
+
+ # FIXME: This is a hack to get the unittest and the BaselineOptimizer to both use /mock-checkout
+ # instead of one using /mock-checkout and one using /test-checkout.
+ default_port = self.tool.port_factory.get()
+ self.tool.port_factory.get = lambda port_name=None: default_port
+
+ def test_modify_scm(self):
+ test_port = self.tool.port_factory.get('test')
+ self._write_test_file(test_port, 'another/test.html', "Dummy test contents")
+ self._write_test_file(test_port, 'platform/mac-snowleopard/another/test-expected.txt', "result A")
+ self._write_test_file(test_port, 'another/test-expected.txt', "result A")
+
+ old_exact_matches = builders._exact_matches
+ try:
+ builders._exact_matches = {
+ "MOCK Leopard Debug": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
+ }
+ OutputCapture().assert_outputs(self, self.command.execute, args=[
+ MockOptions(suffixes='txt', no_modify_scm=False, platform='test-mac-snowleopard'),
+ ['another/test.html'],
+ self.tool,
+ ], expected_stdout='{"add": [], "remove-lines": [], "delete": []}\n')
+ finally:
+ builders._exact_matches = old_exact_matches
+
+ self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'platform/mac/another/test-expected.txt')))
+ self.assertTrue(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'another/test-expected.txt')))
+
+ def test_no_modify_scm(self):
+ test_port = self.tool.port_factory.get('test')
+ self._write_test_file(test_port, 'another/test.html', "Dummy test contents")
+ self._write_test_file(test_port, 'platform/mac-snowleopard/another/test-expected.txt', "result A")
+ self._write_test_file(test_port, 'another/test-expected.txt', "result A")
+
+ old_exact_matches = builders._exact_matches
+ try:
+ builders._exact_matches = {
+ "MOCK Leopard Debug": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
+ }
+ OutputCapture().assert_outputs(self, self.command.execute, args=[
+ MockOptions(suffixes='txt', no_modify_scm=True, platform='test-mac-snowleopard'),
+ ['another/test.html'],
+ self.tool,
+ ], expected_stdout='{"add": [], "remove-lines": [], "delete": ["/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-snowleopard/another/test-expected.txt"]}\n')
+ finally:
+ builders._exact_matches = old_exact_matches
+
+ self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'platform/mac/another/test-expected.txt')))
+ self.assertTrue(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'another/test-expected.txt')))
+
+ def test_optimize_all_suffixes_by_default(self):
+ test_port = self.tool.port_factory.get('test')
+ self._write_test_file(test_port, 'another/test.html', "Dummy test contents")
+ self._write_test_file(test_port, 'platform/mac-snowleopard/another/test-expected.txt', "result A")
+ self._write_test_file(test_port, 'platform/mac-snowleopard/another/test-expected.png', "result A png")
+ self._write_test_file(test_port, 'another/test-expected.txt', "result A")
+ self._write_test_file(test_port, 'another/test-expected.png', "result A png")
+
+ old_exact_matches = builders._exact_matches
+ try:
+ builders._exact_matches = {
+ "MOCK Leopard Debug": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
+ }
+ oc = OutputCapture()
+ oc.capture_output()
+ self.command.execute(MockOptions(suffixes='txt,wav,png', no_modify_scm=True, platform='test-mac-snowleopard'),
+ ['another/test.html'],
+ self.tool)
+ finally:
+ out, err, logs = oc.restore_output()
+ builders._exact_matches = old_exact_matches
+
+ self.assertEqual(out, '{"add": [], "remove-lines": [], "delete": ["/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-snowleopard/another/test-expected.txt", "/mock-checkout/third_party/WebKit/LayoutTests/platform/mac-snowleopard/another/test-expected.png"]}\n')
+ self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'platform/mac/another/test-expected.txt')))
+ self.assertFalse(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'platform/mac/another/test-expected.png')))
+ self.assertTrue(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'another/test-expected.txt')))
+ self.assertTrue(self.tool.filesystem.exists(self.tool.filesystem.join(test_port.layout_tests_dir(), 'another/test-expected.png')))
+
+
+class TestAnalyzeBaselines(_BaseTestCase):
+ command_constructor = AnalyzeBaselines
+
+ def setUp(self):
+ super(TestAnalyzeBaselines, self).setUp()
+ self.port = self.tool.port_factory.get('test')
+ self.tool.port_factory.get = (lambda port_name=None, options=None: self.port)
+ self.lines = []
+ self.command._optimizer_class = _FakeOptimizer
+ self.command._write = (lambda msg: self.lines.append(msg)) # pylint warns about the unnecessary lambda; pylint: disable=W0108
+
+ def test_default(self):
+ self.command.execute(MockOptions(suffixes='txt', missing=False, platform=None), ['passes/text.html'], self.tool)
+ self.assertEqual(self.lines,
+ ['passes/text-expected.txt:',
+ ' (generic): 123456'])
+
+ def test_missing_baselines(self):
+ self.command.execute(MockOptions(suffixes='png,txt', missing=True, platform=None), ['passes/text.html'], self.tool)
+ self.assertEqual(self.lines,
+ ['passes/text-expected.png: (no baselines found)',
+ 'passes/text-expected.txt:',
+ ' (generic): 123456'])
+
+
+class TestAutoRebaseline(_BaseTestCase):
+ command_constructor = AutoRebaseline
+
+ def _write_test_file(self, port, path, contents):
+ abs_path = self.tool.filesystem.join(port.layout_tests_dir(), path)
+ self.tool.filesystem.write_text_file(abs_path, contents)
+
+ def _setup_test_port(self):
+ test_port = self.tool.port_factory.get('test')
+ original_get = self.tool.port_factory.get
+
+ def get_test_port(port_name=None, options=None, **kwargs):
+ if not port_name:
+ return test_port
+ return original_get(port_name, options, **kwargs)
+ # Need to make sure all the ports grabbed use the test checkout path instead of the mock checkout path.
+ # FIXME: crbug.com/279494 - we shouldn't be doing this.
+ self.tool.port_factory.get = get_test_port
+
+ return test_port
+
+ def setUp(self):
+ super(TestAutoRebaseline, self).setUp()
+ self.command.latest_revision_processed_on_all_bots = lambda: 9000
+ self.command.bot_revision_data = lambda: [{"builder": "Mock builder", "revision": "9000"}]
+
+ def test_release_builders(self):
+ old_exact_matches = builders._exact_matches
+ try:
+ builders._exact_matches = {
+ "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
+ "MOCK Leopard Debug": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
+ "MOCK Leopard ASAN": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
+ }
+ self.assertEqual(self.command._release_builders(), ['MOCK Leopard'])
+ finally:
+ builders._exact_matches = old_exact_matches
+
+ def test_tests_to_rebaseline(self):
+ def blame(path):
+ return """
+624c3081c0 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/norebaseline.html [ ImageOnlyFailure ]
+624c3081c0 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 13) Bug(foo) path/to/rebaseline-without-bug-number.html [ NeedsRebaseline ]
+624c3081c0 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/rebaseline-with-modifiers.html [ NeedsRebaseline ]
+624c3081c0 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 crbug.com/234 path/to/rebaseline-without-modifiers.html [ NeedsRebaseline ]
+6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/rebaseline-new-revision.html [ NeedsRebaseline ]
+624caaaaaa path/to/TestExpectations (foo@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
+0000000000 path/to/TestExpectations (foo@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
+"""
+ self.tool.scm().blame = blame
+
+ min_revision = 9000
+ self.assertEqual(self.command.tests_to_rebaseline(self.tool, min_revision, print_revisions=False), (
+ set(['path/to/rebaseline-without-bug-number.html', 'path/to/rebaseline-with-modifiers.html', 'path/to/rebaseline-without-modifiers.html']),
+ 5678,
+ 'foobarbaz1@chromium.org',
+ set(['24182', '234']),
+ True))
+
+ def test_tests_to_rebaseline_over_limit(self):
+ def blame(path):
+ result = ""
+ for i in range(0, self.command.MAX_LINES_TO_REBASELINE + 1):
+ result += "624c3081c0 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 13) crbug.com/24182 path/to/rebaseline-%s.html [ NeedsRebaseline ]\n" % i
+ return result
+ self.tool.scm().blame = blame
+
+ expected_list_of_tests = []
+ for i in range(0, self.command.MAX_LINES_TO_REBASELINE):
+ expected_list_of_tests.append("path/to/rebaseline-%s.html" % i)
+
+ min_revision = 9000
+ self.assertEqual(self.command.tests_to_rebaseline(self.tool, min_revision, print_revisions=False), (
+ set(expected_list_of_tests),
+ 5678,
+ 'foobarbaz1@chromium.org',
+ set(['24182']),
+ True))
+
+ def test_commit_message(self):
+ author = "foo@chromium.org"
+ revision = 1234
+ bugs = set()
+ self.assertEqual(self.command.commit_message(author, revision, bugs),
+ """Auto-rebaseline for r1234
+
+http://src.chromium.org/viewvc/blink?view=revision&revision=1234
+
+TBR=foo@chromium.org
+""")
+
+ bugs = set(["234", "345"])
+ self.assertEqual(self.command.commit_message(author, revision, bugs),
+ """Auto-rebaseline for r1234
+
+http://src.chromium.org/viewvc/blink?view=revision&revision=1234
+
+BUG=234,345
+TBR=foo@chromium.org
+""")
+
+ def test_no_needs_rebaseline_lines(self):
+ def blame(path):
+ return """
+6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/norebaseline.html [ ImageOnlyFailure ]
+"""
+ self.tool.scm().blame = blame
+
+ self.command.execute(MockOptions(optimize=True, verbose=False, move_overwritten_baselines=False, results_directory=False), [], self.tool)
+ self.assertEqual(self.tool.executive.calls, [])
+
+ def test_execute(self):
+ def blame(path):
+ return """
+6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) # Test NeedsRebaseline being in a comment doesn't bork parsing.
+6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ Debug ] path/to/norebaseline.html [ ImageOnlyFailure ]
+6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
+6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-06-14 20:18:46 +0000 11) crbug.com/24182 [ SnowLeopard ] fast/dom/prototype-strawberry.html [ NeedsRebaseline ]
+6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 fast/dom/prototype-chocolate.html [ NeedsRebaseline ]
+624caaaaaa path/to/TestExpectations (foo@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
+0000000000 path/to/TestExpectations (foo@chromium.org 2013-04-28 04:52:41 +0000 12) crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
+"""
+ self.tool.scm().blame = blame
+
+ test_port = self._setup_test_port()
+
+ old_builder_data = self.command.builder_data
+
+ def builder_data():
+ old_builder_data()
+ # Have prototype-chocolate fail only on "MOCK Leopard" (it passes here on "MOCK SnowLeopard").
+ self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
+ "tests": {
+ "fast": {
+ "dom": {
+ "prototype-taco.html": {
+ "expected": "PASS",
+ "actual": "PASS TEXT",
+ "is_unexpected": true
+ },
+ "prototype-chocolate.html": {
+ "expected": "FAIL",
+ "actual": "PASS"
+ },
+ "prototype-strawberry.html": {
+ "expected": "PASS",
+ "actual": "IMAGE PASS",
+ "is_unexpected": true
+ }
+ }
+ }
+ }
+});""")
+ return self.command._builder_data
+
+ self.command.builder_data = builder_data
+
+ self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
+crbug.com/24182 [ Debug ] path/to/norebaseline.html [ Rebaseline ]
+Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
+crbug.com/24182 [ SnowLeopard ] fast/dom/prototype-strawberry.html [ NeedsRebaseline ]
+crbug.com/24182 fast/dom/prototype-chocolate.html [ NeedsRebaseline ]
+crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
+crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
+""")
+
+ self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")
+ self._write_test_file(test_port, 'fast/dom/prototype-strawberry.html', "Dummy test contents")
+ self._write_test_file(test_port, 'fast/dom/prototype-chocolate.html', "Dummy test contents")
+
+ self.tool.executive = MockLineRemovingExecutive()
+
+ old_exact_matches = builders._exact_matches
+ try:
+ builders._exact_matches = {
+ "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
+ "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
+ }
+
+ self.command.tree_status = lambda: 'closed'
+ self.command.execute(MockOptions(optimize=True, verbose=False, move_overwritten_baselines=False, results_directory=False), [], self.tool)
+ self.assertEqual(self.tool.executive.calls, [])
+
+ self.command.tree_status = lambda: 'open'
+ self.tool.executive.calls = []
+ self.command.execute(MockOptions(optimize=True, verbose=False, move_overwritten_baselines=False, results_directory=False), [], self.tool)
+
+ self.assertEqual(self.tool.executive.calls, [
+ [
+ ['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt,png', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-chocolate.html'],
+ ['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'png', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-strawberry.html'],
+ ['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-taco.html'],
+ ['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-taco.html'],
+ ],
+ [
+ ['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-chocolate.html'],
+ ['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-strawberry.html'],
+ ['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK Leopard', '--test', 'fast/dom/prototype-taco.html'],
+ ['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-taco.html'],
+ ],
+ [
+ ['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'txt,png', 'fast/dom/prototype-chocolate.html'],
+ ['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'png', 'fast/dom/prototype-strawberry.html'],
+ ['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'txt', 'fast/dom/prototype-taco.html'],
+ ],
+ ['git', 'cl', 'upload', '-f'],
+ ['git', 'pull'],
+ ['git', 'cl', 'dcommit', '-f'],
+ ['git', 'cl', 'set_close'],
+ ])
+
+ # The mac ports should both be removed since they're the only ones in builders._exact_matches.
+ self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
+crbug.com/24182 [ Debug ] path/to/norebaseline.html [ Rebaseline ]
+Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
+crbug.com/24182 [ Linux Win ] fast/dom/prototype-chocolate.html [ NeedsRebaseline ]
+crbug.com/24182 path/to/not-cycled-through-bots.html [ NeedsRebaseline ]
+crbug.com/24182 path/to/locally-changed-lined.html [ NeedsRebaseline ]
+""")
+ finally:
+ builders._exact_matches = old_exact_matches
+
+ def test_execute_git_cl_hangs(self):
+ def blame(path):
+ return """
+6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
+"""
+ self.tool.scm().blame = blame
+
+ test_port = self._setup_test_port()
+
+ old_builder_data = self.command.builder_data
+
+ def builder_data():
+ old_builder_data()
+ # Have prototype-taco fail with a text mismatch on "MOCK SnowLeopard".
+ self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
+ "tests": {
+ "fast": {
+ "dom": {
+ "prototype-taco.html": {
+ "expected": "PASS",
+ "actual": "PASS TEXT",
+ "is_unexpected": true
+ }
+ }
+ }
+ }
+});""")
+ return self.command._builder_data
+
+ self.command.builder_data = builder_data
+
+ self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
+Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
+""")
+
+ self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")
+
+ old_exact_matches = builders._exact_matches
+ try:
+ builders._exact_matches = {
+ "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
+ }
+
+ self.command.SECONDS_BEFORE_GIVING_UP = 0
+ self.command.tree_status = lambda: 'open'
+ self.tool.executive.calls = []
+ self.command.execute(MockOptions(optimize=True, verbose=False, move_overwritten_baselines=False, results_directory=False), [], self.tool)
+
+ self.assertEqual(self.tool.executive.calls, [
+ [
+ ['python', 'echo', 'copy-existing-baselines-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-taco.html'],
+ ],
+ [
+ ['python', 'echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'MOCK SnowLeopard', '--test', 'fast/dom/prototype-taco.html'],
+ ],
+ [['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', 'txt', 'fast/dom/prototype-taco.html']],
+ ['git', 'cl', 'upload', '-f'],
+ ])
+ finally:
+ builders._exact_matches = old_exact_matches
+
+ def test_execute_test_passes_everywhere(self):
+ def blame(path):
+ return """
+6469e754a1 path/to/TestExpectations (foobarbaz1@chromium.org 2013-04-28 04:52:41 +0000 13) Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
+"""
+ self.tool.scm().blame = blame
+
+ test_port = self._setup_test_port()
+
+ old_builder_data = self.command.builder_data
+
+ def builder_data():
+ self.command._builder_data['MOCK Leopard'] = self.command._builder_data['MOCK SnowLeopard'] = LayoutTestResults.results_from_string("""ADD_RESULTS({
+ "tests": {
+ "fast": {
+ "dom": {
+ "prototype-taco.html": {
+ "expected": "FAIL",
+ "actual": "PASS",
+ "is_unexpected": true
+ }
+ }
+ }
+ }
+});""")
+ return self.command._builder_data
+
+ self.command.builder_data = builder_data
+
+ self.tool.filesystem.write_text_file(test_port.path_to_generic_test_expectations_file(), """
+Bug(foo) fast/dom/prototype-taco.html [ NeedsRebaseline ]
+""")
+
+ self._write_test_file(test_port, 'fast/dom/prototype-taco.html', "Dummy test contents")
+
+ self.tool.executive = MockLineRemovingExecutive()
+
+ old_exact_matches = builders._exact_matches
+ try:
+ builders._exact_matches = {
+ "MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
+ "MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
+ }
+
+ self.command.tree_status = lambda: 'open'
+ self.command.execute(MockOptions(optimize=True, verbose=False, move_overwritten_baselines=False, results_directory=False), [], self.tool)
+ self.assertEqual(self.tool.executive.calls, [
+ [['python', 'echo', 'optimize-baselines', '--no-modify-scm', '--suffixes', '', 'fast/dom/prototype-taco.html']],
+ ['git', 'cl', 'upload', '-f'],
+ ['git', 'pull'],
+ ['git', 'cl', 'dcommit', '-f'],
+ ['git', 'cl', 'set_close'],
+ ])
+
+ # The mac ports should both be removed since they're the only ones in builders._exact_matches.
+ self.assertEqual(self.tool.filesystem.read_text_file(test_port.path_to_generic_test_expectations_file()), """
+Bug(foo) [ Linux Win ] fast/dom/prototype-taco.html [ NeedsRebaseline ]
+""")
+ finally:
+ builders._exact_matches = old_exact_matches
+
+
+class TestRebaselineOMatic(_BaseTestCase):
+ command_constructor = RebaselineOMatic
+
+ def setUp(self):
+ super(TestRebaselineOMatic, self).setUp()
+ self._logs = []
+
+ def _mock_log_to_server(self, log=''):
+ self._logs.append(log)
+
+ def test_run_logged_command(self):
+ self.command._verbose = False
+ self.command._post_log_to_server = self._mock_log_to_server
+ self.command._run_logged_command(['echo', 'foo'])
+ self.assertEqual(self.tool.executive.calls, [['echo', 'foo']])
+ self.assertEqual(self._logs, ['MOCK STDOUT'])
+
+ def test_do_one_rebaseline(self):
+ self.command._verbose = False
+ self.command._post_log_to_server = self._mock_log_to_server
+
+ oc = OutputCapture()
+ oc.capture_output()
+ self.command._do_one_rebaseline()
+ out, _, _ = oc.restore_output()
+
+ self.assertEqual(out, '')
+ self.assertEqual(self.tool.executive.calls, [
+ ['git', 'pull'],
+ ['/mock-checkout/third_party/WebKit/Tools/Scripts/webkit-patch', 'auto-rebaseline'],
+ ])
+ self.assertEqual(self._logs, ['MOCK STDOUT'])
+
+ def test_do_one_rebaseline_verbose(self):
+ self.command._verbose = True
+ self.command._post_log_to_server = self._mock_log_to_server
+
+ oc = OutputCapture()
+ oc.capture_output()
+ self.command._do_one_rebaseline()
+ out, _, _ = oc.restore_output()
+
+ self.assertEqual(out, 'MOCK STDOUT\n')
+ self.assertEqual(self.tool.executive.calls, [
+ ['git', 'pull'],
+ ['/mock-checkout/third_party/WebKit/Tools/Scripts/webkit-patch', 'auto-rebaseline', '--verbose'],
+ ])
+ self.assertEqual(self._logs, ['MOCK STDOUT'])
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/rebaselineserver.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/rebaselineserver.py
new file mode 100644
index 0000000..2075c99
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/rebaselineserver.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Starts a local HTTP server which displays layout test failures (given a test
+results directory), provides comparisons of expected and actual results (both
+images and text) and allows one-click rebaselining of tests."""
+
+from webkitpy.common import system
+from webkitpy.common.host import Host
+from webkitpy.common.net.layouttestresults import for_each_test, JSONTestResult
+from webkitpy.layout_tests.layout_package import json_results_generator
+from webkitpy.tool.commands.abstractlocalservercommand import AbstractLocalServerCommand
+from webkitpy.tool.servers.rebaselineserver import get_test_baselines, RebaselineHTTPServer, STATE_NEEDS_REBASELINE
+
+
+class TestConfig(object):
+ def __init__(self, test_port, layout_tests_directory, results_directory, platforms, host):
+ self.test_port = test_port
+ self.layout_tests_directory = layout_tests_directory
+ self.results_directory = results_directory
+ self.platforms = platforms
+ self.host = host
+ self.filesystem = host.filesystem
+ self.scm = host.scm()
+
+
+class RebaselineServer(AbstractLocalServerCommand):
+ name = "rebaseline-server"
+ help_text = __doc__
+ show_in_main_help = True
+ argument_names = "/path/to/results/directory"
+
+ server = RebaselineHTTPServer
+
+ def _gather_baselines(self, results_json):
+ # The rebaseline server and its associated JavaScript expect the tests
+ # subtree to be flat key-value pairs instead of hierarchical.
+ # FIXME: make the rebaseline server use the hierarchical tree.
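+ # For example (editor's illustration), a hierarchical subtree such as
+ #   {"fast": {"dom": {"test.html": {...}}}}
+ # is flattened here into
+ #   {"fast/dom/test.html": {...}}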
+ new_tests_subtree = {}
+
+ def gather_baselines_for_test(test_name, result_dict):
+ result = JSONTestResult(test_name, result_dict)
+ if result.did_pass_or_run_as_expected():
+ return
+ result_dict['state'] = STATE_NEEDS_REBASELINE
+ result_dict['baselines'] = get_test_baselines(test_name, self._test_config)
+ new_tests_subtree[test_name] = result_dict
+
+ for_each_test(results_json['tests'], gather_baselines_for_test)
+ results_json['tests'] = new_tests_subtree
+
+ def _prepare_config(self, options, args, tool):
+ results_directory = args[0]
+ host = Host()
+ host.initialize_scm()
+
+ print 'Parsing full_results.json...'
+ results_json_path = host.filesystem.join(results_directory, 'full_results.json')
+ results_json = json_results_generator.load_json(host.filesystem, results_json_path)
+
+ port = tool.port_factory.get()
+ layout_tests_directory = port.layout_tests_dir()
+ platforms = host.filesystem.listdir(host.filesystem.join(layout_tests_directory, 'platform'))
+ self._test_config = TestConfig(port, layout_tests_directory, results_directory, platforms, host)
+
+ print 'Gathering current baselines...'
+ self._gather_baselines(results_json)
+
+ return {
+ 'test_config': self._test_config,
+ "results_json": results_json,
+ "platforms_json": {
+ 'platforms': platforms,
+ 'defaultPlatform': port.name(),
+ },
+ }
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/stepsequence.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/stepsequence.py
new file mode 100644
index 0000000..c48c2c4
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/commands/stepsequence.py
@@ -0,0 +1,87 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import sys
+
+from webkitpy.tool import steps
+
+from webkitpy.common.system.executive import ScriptError
+
+_log = logging.getLogger(__name__)
+
+
+class StepSequenceErrorHandler():
+ @classmethod
+ def handle_script_error(cls, tool, patch, script_error):
+ raise NotImplementedError, "subclasses must implement"
+
+ @classmethod
+ def handle_checkout_needs_update(cls, tool, state, options, error):
+ raise NotImplementedError, "subclasses must implement"
+
+
+class StepSequence(object):
+ def __init__(self, steps):
+ self._steps = steps or []
+
+ def options(self):
+ collected_options = [
+ steps.Options.parent_command,
+ steps.Options.quiet,
+ ]
+ for step in self._steps:
+ collected_options = collected_options + step.options()
+ # Remove duplicates.
+ collected_options = sorted(set(collected_options))
+ return collected_options
+
+ def _run(self, tool, options, state):
+ for step in self._steps:
+ step(tool, options).run(state)
+
+ # Child processes exit with a special code so the parent queue process can detect that the error was handled.
+ handled_error_code = 2
+
+ @classmethod
+ def exit_after_handled_error(cls, error):
+ _log.error(error)
+ sys.exit(cls.handled_error_code)
+
+ def run_and_handle_errors(self, tool, options, state=None):
+ if not state:
+ state = {}
+ try:
+ self._run(tool, options, state)
+ except ScriptError, e:
+ if not options.quiet:
+ _log.error(e.message_with_output())
+ if options.parent_command:
+ command = tool.command_by_name(options.parent_command)
+ command.handle_script_error(tool, state, e)
+ self.exit_after_handled_error(e)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/grammar.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/grammar.py
new file mode 100644
index 0000000..8db9826
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/grammar.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+
+
+def plural(noun):
+ # This is a dumb plural() implementation that is just enough for our uses.
+ if re.search("h$", noun):
+ return noun + "es"
+ else:
+ return noun + "s"
+
+
+def pluralize(noun, count):
+ if count != 1:
+ noun = plural(noun)
+ return "%d %s" % (count, noun)
+
+
+def join_with_separators(list_of_strings, separator=', ', only_two_separator=" and ", last_separator=', and '):
+ if not list_of_strings:
+ return ""
+ if len(list_of_strings) == 1:
+ return list_of_strings[0]
+ if len(list_of_strings) == 2:
+ return only_two_separator.join(list_of_strings)
+ return "%s%s%s" % (separator.join(list_of_strings[:-1]), last_separator, list_of_strings[-1])
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/grammar_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/grammar_unittest.py
new file mode 100644
index 0000000..afc67db
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/grammar_unittest.py
@@ -0,0 +1,38 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.tool.grammar import join_with_separators
+
+class GrammarTest(unittest.TestCase):
+
+ def test_join_with_separators(self):
+ self.assertEqual(join_with_separators(["one"]), "one")
+ self.assertEqual(join_with_separators(["one", "two"]), "one and two")
+ self.assertEqual(join_with_separators(["one", "two", "three"]), "one, two, and three")
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/main.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/main.py
new file mode 100644
index 0000000..710a08e
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/main.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# A tool for automating dealing with bugzilla, posting patches, committing patches, etc.
+
+from optparse import make_option
+
+from webkitpy.common.host import Host
+from webkitpy.tool.multicommandtool import MultiCommandTool
+from webkitpy.tool import commands
+
+
+class WebKitPatch(MultiCommandTool, Host):
+ global_options = [
+ make_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="enable all logging"),
+ make_option("-d", "--directory", action="append", dest="patch_directories", default=[], help="Directory to look at for changed files"),
+ ]
+
+ def __init__(self, path):
+ MultiCommandTool.__init__(self)
+ Host.__init__(self)
+ self._path = path
+
+ def path(self):
+ return self._path
+
+ def should_show_in_main_help(self, command):
+ if not command.show_in_main_help:
+ return False
+ if command.requires_local_commits:
+ return self.scm().supports_local_commits()
+ return True
+
+ # FIXME: This may be unnecessary since we pass global options to all commands during execute() as well.
+ def handle_global_options(self, options):
+ self.initialize_scm(options.patch_directories)
+
+ def should_execute_command(self, command):
+ if command.requires_local_commits and not self.scm().supports_local_commits():
+ failure_reason = "%s requires local commits using %s in %s." % (command.name, self.scm().display_name(), self.scm().checkout_root)
+ return (False, failure_reason)
+ return (True, None)
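+
+# Editor's sketch (an assumption, not part of the original import): the
+# webkit-patch entry script would construct and run this tool roughly as:
+#   import sys
+#   tool = WebKitPatch(path=sys.argv[0])
+#   sys.exit(tool.main())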
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/mocktool.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/mocktool.py
new file mode 100644
index 0000000..31ada71
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/mocktool.py
@@ -0,0 +1,69 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.net.buildbot.buildbot_mock import MockBuildBot
+
+# FIXME: Old-style "Ports" need to die and be replaced by modern layout_tests.port which needs to move to common.
+from webkitpy.common.config.ports_mock import MockPort
+
+
+# FIXME: We should just replace this with optparse.Values(default=kwargs)
+class MockOptions(object):
+ """Mock implementation of optparse.Values."""
+
+ def __init__(self, **kwargs):
+ # The caller can set option values using keyword arguments. We don't
+ # set any values by default because we don't know how this
+ # object will be used. Generally speaking unit tests should
+ # subclass this or provide wrapper functions that set a common
+ # set of options.
+ self.update(**kwargs)
+
+ def update(self, **kwargs):
+ self.__dict__.update(**kwargs)
+ return self
+
+ def ensure_value(self, key, value):
+ if getattr(self, key, None) is None:
+ self.__dict__[key] = value
+ return self.__dict__[key]
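+
+ # Illustrative usage (editor's note; see mocktool_unittest.py below):
+ #   options = MockOptions(foo='bar')
+ #   options.foo                    -> 'bar'
+ #   options.ensure_value('baz', 1) -> sets and returns 1
+ #   options.missing                -> raises AttributeError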
+
+
+# FIXME: This should be renamed MockWebKitPatch.
+class MockTool(MockHost):
+ def __init__(self, *args, **kwargs):
+ MockHost.__init__(self, *args, **kwargs)
+
+ self._deprecated_port = MockPort()
+
+ def deprecated_port(self):
+ return self._deprecated_port
+
+ def path(self):
+ return "echo"
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/mocktool_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/mocktool_unittest.py
new file mode 100644
index 0000000..5117909
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/mocktool_unittest.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from mocktool import MockOptions
+
+
+class MockOptionsTest(unittest.TestCase):
+ # MockOptions() should implement the same semantics as
+ # optparse.Values.
+
+ def test_get__set(self):
+ # Test that we can still set options after we construct the
+ # object.
+ options = MockOptions()
+ options.foo = 'bar'
+ self.assertEqual(options.foo, 'bar')
+
+ def test_get__unset(self):
+ # Test that unset options raise an exception (regular Mock
+ # objects return an object and hence are different from
+ # optparse.Values()).
+ options = MockOptions()
+ self.assertRaises(AttributeError, lambda: options.foo)
+
+ def test_kwarg__set(self):
+ # Test that keyword arguments work in the constructor.
+ options = MockOptions(foo='bar')
+ self.assertEqual(options.foo, 'bar')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/multicommandtool.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/multicommandtool.py
new file mode 100644
index 0000000..3961ac1
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/multicommandtool.py
@@ -0,0 +1,319 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# MultiCommandTool provides a framework for writing svn-like/git-like tools
+# which are called with the following format:
+# tool-name [global options] command-name [command options]
+
+import logging
+import sys
+
+from optparse import OptionParser, IndentedHelpFormatter, SUPPRESS_USAGE, make_option
+
+from webkitpy.tool.grammar import pluralize
+
+_log = logging.getLogger(__name__)
+
+
+class TryAgain(Exception):
+ pass
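+
+# Editor's note: Command.execute() implementations may raise TryAgain to have
+# MultiCommandTool.main() re-run the command (see the retry loop in main() and
+# the LikesToRetry command in multicommandtool_unittest.py).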
+
+
+class Command(object):
+ name = None
+ show_in_main_help = False
+ def __init__(self, help_text, argument_names=None, options=None, long_help=None, requires_local_commits=False):
+ self.help_text = help_text
+ self.long_help = long_help
+ self.argument_names = argument_names
+ self.required_arguments = self._parse_required_arguments(argument_names)
+ self.options = options
+ self.requires_local_commits = requires_local_commits
+ self._tool = None
+ # option_parser can be overridden by the tool using set_option_parser
+ # This default parser will be used for standalone_help printing.
+ self.option_parser = HelpPrintingOptionParser(usage=SUPPRESS_USAGE, add_help_option=False, option_list=self.options)
+
+ def _exit(self, code):
+ sys.exit(code)
+
+ # This design is slightly awkward, but we need the
+ # tool to be able to create and modify the option_parser
+ # before it knows what Command to run.
+ def set_option_parser(self, option_parser):
+ self.option_parser = option_parser
+ self._add_options_to_parser()
+
+ def _add_options_to_parser(self):
+ options = self.options or []
+ for option in options:
+ self.option_parser.add_option(option)
+
+ # The tool calls bind_to_tool on each Command after adding it to its list.
+ def bind_to_tool(self, tool):
+ # Command instances can only be bound to one tool at a time.
+ if self._tool and tool != self._tool:
+ raise Exception("Command already bound to tool!")
+ self._tool = tool
+
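+ # For example (editor's illustration, mirroring multicommandtool_unittest.py):
+ #   _parse_required_arguments("ARG1 ARG2")   -> ["ARG1", "ARG2"]
+ #   _parse_required_arguments("[ARG1] ARG2") -> ["ARG2"]
+ #   _parse_required_arguments("[ARG1 ARG2]") -> raises Exception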
+ @staticmethod
+ def _parse_required_arguments(argument_names):
+ required_args = []
+ if not argument_names:
+ return required_args
+ split_args = argument_names.split(" ")
+ for argument in split_args:
+ if argument[0] == '[':
+ # For now our parser is rather dumb. Do some minimal validation that
+ # we haven't confused it.
+ if argument[-1] != ']':
+ raise Exception("Failure to parse argument string %s. Argument %s is missing ending ]" % (argument_names, argument))
+ else:
+ required_args.append(argument)
+ return required_args
+
+ def name_with_arguments(self):
+ usage_string = self.name
+ if self.options:
+ usage_string += " [options]"
+ if self.argument_names:
+ usage_string += " " + self.argument_names
+ return usage_string
+
+ def parse_args(self, args):
+ return self.option_parser.parse_args(args)
+
+ def check_arguments_and_execute(self, options, args, tool=None):
+ if len(args) < len(self.required_arguments):
+ _log.error("%s required, %s provided. Provided: %s Required: %s\nSee '%s help %s' for usage." % (
+ pluralize("argument", len(self.required_arguments)),
+ pluralize("argument", len(args)),
+ "'%s'" % " ".join(args),
+ " ".join(self.required_arguments),
+ tool.name(),
+ self.name))
+ return 1
+ return self.execute(options, args, tool) or 0
+
+ def standalone_help(self):
+ help_text = self.name_with_arguments().ljust(len(self.name_with_arguments()) + 3) + self.help_text + "\n\n"
+ if self.long_help:
+ help_text += "%s\n\n" % self.long_help
+ help_text += self.option_parser.format_option_help(IndentedHelpFormatter())
+ return help_text
+
+ def execute(self, options, args, tool):
+ raise NotImplementedError, "subclasses must implement"
+
+ # main() exists so that Commands can be turned into stand-alone scripts.
+ # Other parts of the code will likely require modification to work stand-alone.
+ def main(self, args=sys.argv):
+ (options, args) = self.parse_args(args)
+ # Some commands might require a dummy tool
+ return self.check_arguments_and_execute(options, args)
+
+
+# FIXME: This should just be rolled into Command. help_text and argument_names do not need to be instance variables.
+class AbstractDeclarativeCommand(Command):
+ help_text = None
+ argument_names = None
+ long_help = None
+ def __init__(self, options=None, **kwargs):
+ Command.__init__(self, self.help_text, self.argument_names, options=options, long_help=self.long_help, **kwargs)
+
+
+class HelpPrintingOptionParser(OptionParser):
+ def __init__(self, epilog_method=None, *args, **kwargs):
+ self.epilog_method = epilog_method
+ OptionParser.__init__(self, *args, **kwargs)
+
+ def error(self, msg):
+ self.print_usage(sys.stderr)
+ error_message = "%s: error: %s\n" % (self.get_prog_name(), msg)
+ # This method is overridden to add this one line to the output:
+ error_message += "\nType \"%s --help\" to see usage.\n" % self.get_prog_name()
+ self.exit(1, error_message)
+
+ # We override format_epilog to avoid the default formatting which would paragraph-wrap the epilog
+ # and also to allow us to compute the epilog lazily instead of in the constructor (allowing it to be context-sensitive).
+ def format_epilog(self, epilog):
+ if self.epilog_method:
+ return "\n%s\n" % self.epilog_method()
+ return ""
+
+
+class HelpCommand(AbstractDeclarativeCommand):
+ name = "help"
+ help_text = "Display information about this program or its subcommands"
+ argument_names = "[COMMAND]"
+
+ def __init__(self):
+ options = [
+ make_option("-a", "--all-commands", action="store_true", dest="show_all_commands", help="Print all available commands"),
+ ]
+ AbstractDeclarativeCommand.__init__(self, options)
+ self.show_all_commands = False # A hack used to pass --all-commands to _help_epilog even though it's called by the OptionParser.
+
+ def _help_epilog(self):
+ # Only show commands which are relevant to this checkout's SCM system. Might this be confusing to some users?
+ if self.show_all_commands:
+ epilog = "All %prog commands:\n"
+ relevant_commands = self._tool.commands[:]
+ else:
+ epilog = "Common %prog commands:\n"
+ relevant_commands = filter(self._tool.should_show_in_main_help, self._tool.commands)
+ longest_name_length = max(map(lambda command: len(command.name), relevant_commands))
+ relevant_commands.sort(lambda a, b: cmp(a.name, b.name))
+ command_help_texts = map(lambda command: " %s %s\n" % (command.name.ljust(longest_name_length), command.help_text), relevant_commands)
+ epilog += "%s\n" % "".join(command_help_texts)
+ epilog += "See '%prog help --all-commands' to list all commands.\n"
+ epilog += "See '%prog help COMMAND' for more information on a specific command.\n"
+ return epilog.replace("%prog", self._tool.name()) # Use of %prog here mimics OptionParser.expand_prog_name().
+
+ # FIXME: This is a hack so that we don't show --all-commands as a global option:
+ def _remove_help_options(self):
+ for option in self.options:
+ self.option_parser.remove_option(option.get_opt_string())
+
+ def execute(self, options, args, tool):
+ if args:
+ command = self._tool.command_by_name(args[0])
+ if command:
+ print command.standalone_help()
+ return 0
+
+ self.show_all_commands = options.show_all_commands
+ self._remove_help_options()
+ self.option_parser.print_help()
+ return 0
+
+
+class MultiCommandTool(object):
+ global_options = None
+
+ def __init__(self, name=None, commands=None):
+ self._name = name or OptionParser(prog=name).get_prog_name() # OptionParser has nice logic for fetching the name.
+ # Allow the unit tests to disable command auto-discovery.
+ self.commands = commands or [cls() for cls in self._find_all_commands() if cls.name]
+ self.help_command = self.command_by_name(HelpCommand.name)
+ # Require a help command, even if the manually-provided command list doesn't include one.
+ if not self.help_command:
+ self.help_command = HelpCommand()
+ self.commands.append(self.help_command)
+ for command in self.commands:
+ command.bind_to_tool(self)
+
+ @classmethod
+ def _add_all_subclasses(cls, class_to_crawl, seen_classes):
+ for subclass in class_to_crawl.__subclasses__():
+ if subclass not in seen_classes:
+ seen_classes.add(subclass)
+ cls._add_all_subclasses(subclass, seen_classes)
+
+ @classmethod
+ def _find_all_commands(cls):
+ commands = set()
+ cls._add_all_subclasses(Command, commands)
+ return sorted(commands)
+
+ def name(self):
+ return self._name
+
+ def _create_option_parser(self):
+ usage = "Usage: %prog [options] COMMAND [ARGS]"
+ return HelpPrintingOptionParser(epilog_method=self.help_command._help_epilog, prog=self.name(), usage=usage)
+
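+ # For example (editor's illustration; see test_split_args in the unit tests):
+ #   _split_command_name_from_args(["--global-option", "command", "--option", "arg"])
+ #     -> ("command", ["--global-option", "--option", "arg"])
+ #   _split_command_name_from_args([]) -> (None, [])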
+ @staticmethod
+ def _split_command_name_from_args(args):
+ # Assume the first argument which doesn't start with "-" is the command name.
+ command_index = 0
+ for arg in args:
+ if arg[0] != "-":
+ break
+ command_index += 1
+ else:
+ return (None, args[:])
+
+ command = args[command_index]
+ return (command, args[:command_index] + args[command_index + 1:])
+
+ def command_by_name(self, command_name):
+ for command in self.commands:
+ if command_name == command.name:
+ return command
+ return None
+
+ def path(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def command_completed(self):
+ pass
+
+ def should_show_in_main_help(self, command):
+ return command.show_in_main_help
+
+ def should_execute_command(self, command):
+ return True
+
+ def _add_global_options(self, option_parser):
+ global_options = self.global_options or []
+ for option in global_options:
+ option_parser.add_option(option)
+
+ def handle_global_options(self, options):
+ pass
+
+ def main(self, argv=sys.argv):
+ (command_name, args) = self._split_command_name_from_args(argv[1:])
+
+ option_parser = self._create_option_parser()
+ self._add_global_options(option_parser)
+
+ command = self.command_by_name(command_name) or self.help_command
+ if not command:
+ option_parser.error("%s is not a recognized command" % command_name)
+
+ command.set_option_parser(option_parser)
+ (options, args) = command.parse_args(args)
+ self.handle_global_options(options)
+
+ (should_execute, failure_reason) = self.should_execute_command(command)
+ if not should_execute:
+ _log.error(failure_reason)
+ return 0 # FIXME: Should this really be 0?
+
+ while True:
+ try:
+ result = command.check_arguments_and_execute(options, args, self)
+ break
+ except TryAgain:
+ pass
+
+ self.command_completed()
+ return result
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py
new file mode 100644
index 0000000..9200609
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py
@@ -0,0 +1,173 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import unittest
+
+from optparse import make_option
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.multicommandtool import MultiCommandTool, Command, TryAgain
+
+
+class TrivialCommand(Command):
+ name = "trivial"
+ show_in_main_help = True
+ def __init__(self, **kwargs):
+ Command.__init__(self, "help text", **kwargs)
+
+ def execute(self, options, args, tool):
+ pass
+
+
+class UncommonCommand(TrivialCommand):
+ name = "uncommon"
+ show_in_main_help = False
+
+
+class LikesToRetry(Command):
+ name = "likes-to-retry"
+ show_in_main_help = True
+
+ def __init__(self, **kwargs):
+ Command.__init__(self, "help text", **kwargs)
+ self.execute_count = 0
+
+ def execute(self, options, args, tool):
+ self.execute_count += 1
+ if self.execute_count < 2:
+ raise TryAgain()
+
+
+class CommandTest(unittest.TestCase):
+ def test_name_with_arguments(self):
+ command_with_args = TrivialCommand(argument_names="ARG1 ARG2")
+ self.assertEqual(command_with_args.name_with_arguments(), "trivial ARG1 ARG2")
+
+ command_with_args = TrivialCommand(options=[make_option("--my_option")])
+ self.assertEqual(command_with_args.name_with_arguments(), "trivial [options]")
+
+ def test_parse_required_arguments(self):
+ self.assertEqual(Command._parse_required_arguments("ARG1 ARG2"), ["ARG1", "ARG2"])
+ self.assertEqual(Command._parse_required_arguments("[ARG1] [ARG2]"), [])
+ self.assertEqual(Command._parse_required_arguments("[ARG1] ARG2"), ["ARG2"])
+ # Note: We might make our arg parsing smarter in the future and allow this type of arguments string.
+ self.assertRaises(Exception, Command._parse_required_arguments, "[ARG1 ARG2]")
+
+ def test_required_arguments(self):
+ two_required_arguments = TrivialCommand(argument_names="ARG1 ARG2 [ARG3]")
+ expected_logs = "2 arguments required, 1 argument provided. Provided: 'foo' Required: ARG1 ARG2\nSee 'trivial-tool help trivial' for usage.\n"
+ exit_code = OutputCapture().assert_outputs(self, two_required_arguments.check_arguments_and_execute, [None, ["foo"], TrivialTool()], expected_logs=expected_logs)
+ self.assertEqual(exit_code, 1)
+
+
+class TrivialTool(MultiCommandTool):
+ def __init__(self, commands=None):
+ MultiCommandTool.__init__(self, name="trivial-tool", commands=commands)
+
+ def path(self):
+ return __file__
+
+ def should_execute_command(self, command):
+ return (True, None)
+
+
+class MultiCommandToolTest(unittest.TestCase):
+ def _assert_split(self, args, expected_split):
+ self.assertEqual(MultiCommandTool._split_command_name_from_args(args), expected_split)
+
+ def test_split_args(self):
+ # MultiCommandToolTest._split_command_name_from_args returns: (command, args)
+ full_args = ["--global-option", "command", "--option", "arg"]
+ full_args_expected = ("command", ["--global-option", "--option", "arg"])
+ self._assert_split(full_args, full_args_expected)
+
+ full_args = []
+ full_args_expected = (None, [])
+ self._assert_split(full_args, full_args_expected)
+
+ full_args = ["command", "arg"]
+ full_args_expected = ("command", ["arg"])
+ self._assert_split(full_args, full_args_expected)
+
+ def test_command_by_name(self):
+ # This also tests Command auto-discovery.
+ tool = TrivialTool()
+ self.assertEqual(tool.command_by_name("trivial").name, "trivial")
+ self.assertEqual(tool.command_by_name("bar"), None)
+
+ def _assert_tool_main_outputs(self, tool, main_args, expected_stdout, expected_stderr="", expected_exit_code=0):
+ exit_code = OutputCapture().assert_outputs(self, tool.main, [main_args], expected_stdout=expected_stdout, expected_stderr=expected_stderr)
+ self.assertEqual(exit_code, expected_exit_code)
+
+ def test_retry(self):
+ likes_to_retry = LikesToRetry()
+ tool = TrivialTool(commands=[likes_to_retry])
+ tool.main(["tool", "likes-to-retry"])
+ self.assertEqual(likes_to_retry.execute_count, 2)
+
+ def test_global_help(self):
+ tool = TrivialTool(commands=[TrivialCommand(), UncommonCommand()])
+ expected_common_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS]
+
+Options:
+ -h, --help show this help message and exit
+
+Common trivial-tool commands:
+ trivial help text
+
+See 'trivial-tool help --all-commands' to list all commands.
+See 'trivial-tool help COMMAND' for more information on a specific command.
+
+"""
+ self._assert_tool_main_outputs(tool, ["tool"], expected_common_commands_help)
+ self._assert_tool_main_outputs(tool, ["tool", "help"], expected_common_commands_help)
+ expected_all_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS]
+
+Options:
+ -h, --help show this help message and exit
+
+All trivial-tool commands:
+ help Display information about this program or its subcommands
+ trivial help text
+ uncommon help text
+
+See 'trivial-tool help --all-commands' to list all commands.
+See 'trivial-tool help COMMAND' for more information on a specific command.
+
+"""
+ self._assert_tool_main_outputs(tool, ["tool", "help", "--all-commands"], expected_all_commands_help)
+ # Test that arguments can be passed before commands as well
+ self._assert_tool_main_outputs(tool, ["tool", "--all-commands", "help"], expected_all_commands_help)
+
+ def test_command_help(self):
+ command_with_options = TrivialCommand(options=[make_option("--my_option")], long_help="LONG HELP")
+ tool = TrivialTool(commands=[command_with_options])
+ expected_subcommand_help = "trivial [options] help text\n\nLONG HELP\n\nOptions:\n --my_option=MY_OPTION\n\n"
+ self._assert_tool_main_outputs(tool, ["tool", "help", "trivial"], expected_subcommand_help)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/index.html b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/index.html
new file mode 100644
index 0000000..f40a34d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/index.html
@@ -0,0 +1,182 @@
+<!DOCTYPE html>
+<!--
+ Copyright (c) 2010 Google Inc. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+<html>
+<head>
+ <title>Layout Test Rebaseline Server</title>
+ <link rel="stylesheet" href="/main.css" type="text/css">
+ <script src="/util.js"></script>
+ <script src="/loupe.js"></script>
+ <script src="/main.js"></script>
+ <script src="/queue.js"></script>
+</head>
+<body class="loading">
+
+<pre id="log" style="display: none"></pre>
+<div id="queue" style="display: none">
+ Queue:
+ <select id="queue-select" size="10"></select>
+ <button id="remove-queue-selection">Remove selection</button>
+ <button id="rebaseline-queue">Rebaseline queue</button>
+</div>
+
+<div id="header">
+ <div id="controls">
+ <!-- Add a dummy <select> node so that this lines up with the text on the left -->
+ <select style="visibility: hidden"></select>
+ <span id="toggle-sort" class="link">Sort tests by metric</span>
+ <span class="divider">|</span>
+ <span id="toggle-log" class="link">Log</span>
+ <span class="divider">|</span>
+ <a href="/quitquitquit">Exit</a>
+ </div>
+
+ <span id="selectors">
+ <label>
+ Failure type:
+ <select id="failure-type-selector"></select>
+ </label>
+
+ <label>
+ Directory:
+ <select id="directory-selector"></select>
+ </label>
+
+ <label>
+ Test:
+ <select id="test-selector"></select>
+ </label>
+ </span>
+
+ <a id="test-link" target="_blank">View test</a>
+
+ <span id="nav-buttons">
+ <button id="previous-test">«</button>
+ <span id="test-index"></span> of <span id="test-count"></span>
+ <button id="next-test">»</button>
+ </span>
+</div>
+
+<table id="test-output">
+ <thead id="labels">
+ <tr>
+ <th>Expected</th>
+ <th>Actual</th>
+ <th>Diff</th>
+ </tr>
+ </thead>
+ <tbody id="image-outputs" style="display: none">
+ <tr>
+ <td colspan="3"><h2>Image</h2></td>
+ </tr>
+ <tr>
+ <td><img id="expected-image"></td>
+ <td><img id="actual-image"></td>
+ <td>
+ <canvas id="diff-canvas" width="800" height="600"></canvas>
+ <div id="diff-checksum" style="display: none">
+ <h3>Checksum mismatch</h3>
+ Expected: <span id="expected-checksum"></span><br>
+ Actual: <span id="actual-checksum"></span>
+ </div>
+ </td>
+ </tr>
+ </tbody>
+ <tbody id="text-outputs" style="display: none">
+ <tr>
+ <td colspan="3"><h2>Text</h2></td>
+ </tr>
+ <tr>
+ <td><pre id="expected-text" class="text-output"></pre></td>
+ <td><pre id="actual-text" class="text-output"></pre></td>
+ <td><div id="diff-text-pretty" class="text-output"></div></td>
+ </tr>
+ </tbody>
+</table>
+
+<div id="footer">
+ <label>State: <span id="state"></span></label>
+ <label>Existing baselines: <span id="current-baselines"></span></label>
+ <label>
+ Baseline target:
+ <select id="baseline-target"></select>
+ </label>
+ <label>
+ Move current baselines to:
+ <select id="baseline-move-to">
+ <option value="none">Nowhere (replace)</option>
+ </select>
+ </label>
+
+ <!-- Add a dummy <button> node so that this lines up with the text on the right -->
+ <button style="visibility: hidden; padding-left: 0; padding-right: 0;"></button>
+
+ <div id="action-buttons">
+ <span id="toggle-queue" class="link">Queue</span>
+ <button id="add-to-rebaseline-queue">Add to rebaseline queue</button>
+ </div>
+</div>
+
+<table id="loupe" style="display: none">
+ <tr>
+ <td colspan="3" id="loupe-info">
+ <span id="loupe-close" class="link">Close</span>
+ <label>Coordinate: <span id="loupe-coordinate"></span></label>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ <div class="loupe-container">
+ <canvas id="expected-loupe" width="210" height="210"></canvas>
+ <div class="center-highlight"></div>
+ </div>
+ </td>
+ <td>
+ <div class="loupe-container">
+ <canvas id="actual-loupe" width="210" height="210"></canvas>
+ <div class="center-highlight"></div>
+ </div>
+ </td>
+ <td>
+ <div class="loupe-container">
+ <canvas id="diff-loupe" width="210" height="210"></canvas>
+ <div class="center-highlight"></div>
+ </div>
+ </td>
+ </tr>
+ <tr id="loupe-colors">
+ <td><label>Exp. color: <span id="expected-loupe-color"></span></label></td>
+ <td><label>Actual color: <span id="actual-loupe-color"></span></label></td>
+ <td><label>Diff color: <span id="diff-loupe-color"></span></label></td>
+ </tr>
+</table>
+
+</body>
+</html>
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/loupe.js b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/loupe.js
new file mode 100644
index 0000000..41f977a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/loupe.js
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+var LOUPE_MAGNIFICATION_FACTOR = 10;
+
+function Loupe()
+{
+ this._node = $('loupe');
+ this._currentCornerX = -1;
+ this._currentCornerY = -1;
+
+ var self = this;
+
+ function handleOutputClick(event) { self._handleOutputClick(event); }
+ $('expected-image').addEventListener('click', handleOutputClick);
+ $('actual-image').addEventListener('click', handleOutputClick);
+ $('diff-canvas').addEventListener('click', handleOutputClick);
+
+ function handleLoupeClick(event) { self._handleLoupeClick(event); }
+ $('expected-loupe').addEventListener('click', handleLoupeClick);
+ $('actual-loupe').addEventListener('click', handleLoupeClick);
+ $('diff-loupe').addEventListener('click', handleLoupeClick);
+
+ function hide(event) { self.hide(); }
+ $('loupe-close').addEventListener('click', hide);
+}
+
+Loupe.prototype._handleOutputClick = function(event)
+{
+ // The -1 compensates for the border around the image/canvas.
+ this._showFor(event.offsetX - 1, event.offsetY - 1);
+};
+
+Loupe.prototype._handleLoupeClick = function(event)
+{
+ var deltaX = Math.floor(event.offsetX/LOUPE_MAGNIFICATION_FACTOR);
+ var deltaY = Math.floor(event.offsetY/LOUPE_MAGNIFICATION_FACTOR);
+
+ this._showFor(
+ this._currentCornerX + deltaX, this._currentCornerY + deltaY);
+};
+
+Loupe.prototype.hide = function()
+{
+ this._node.style.display = 'none';
+};
+
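+// Fills all three loupe views with the pixels around (x, y) and makes the
+// loupe visible.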
+Loupe.prototype._showFor = function(x, y)
+{
+ this._fillFromImage(x, y, 'expected', $('expected-image'));
+ this._fillFromImage(x, y, 'actual', $('actual-image'));
+ this._fillFromCanvas(x, y, 'diff', $('diff-canvas'));
+
+ this._node.style.display = '';
+};
+
+Loupe.prototype._fillFromImage = function(x, y, type, sourceImage)
+{
+ var tempCanvas = document.createElement('canvas');
+ tempCanvas.width = sourceImage.width;
+ tempCanvas.height = sourceImage.height;
+ var tempContext = tempCanvas.getContext('2d');
+
+ tempContext.drawImage(sourceImage, 0, 0);
+
+ this._fillFromCanvas(x, y, type, tempCanvas);
+};
+
+Loupe.prototype._fillFromCanvas = function(x, y, type, canvas)
+{
+ var context = canvas.getContext('2d');
+ var sourceImageData =
+ context.getImageData(0, 0, canvas.width, canvas.height);
+
+ var targetCanvas = $(type + '-loupe');
+ var targetContext = targetCanvas.getContext('2d');
+ targetContext.fillStyle = 'rgba(255, 255, 255, 1)';
+ targetContext.fillRect(0, 0, targetCanvas.width, targetCanvas.height);
+
+ var sourceXOffset = (targetCanvas.width/LOUPE_MAGNIFICATION_FACTOR - 1)/2;
+ var sourceYOffset = (targetCanvas.height/LOUPE_MAGNIFICATION_FACTOR - 1)/2;
+
+ function readPixelComponent(x, y, component) {
+ var offset = (y * sourceImageData.width + x) * 4 + component;
+ return sourceImageData.data[offset];
+ }
+
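+ // Draw each captured source pixel as a LOUPE_MAGNIFICATION_FACTOR-sized
+ // square in the loupe canvas, and record the color of the center pixel.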
+ for (var i = -sourceXOffset; i <= sourceXOffset; i++) {
+ for (var j = -sourceYOffset; j <= sourceYOffset; j++) {
+ var sourceX = x + i;
+ var sourceY = y + j;
+
+ var sourceR = readPixelComponent(sourceX, sourceY, 0);
+ var sourceG = readPixelComponent(sourceX, sourceY, 1);
+ var sourceB = readPixelComponent(sourceX, sourceY, 2);
+ var sourceA = readPixelComponent(sourceX, sourceY, 3)/255;
+ sourceA = Math.round(sourceA * 10)/10;
+
+ var targetX = (i + sourceXOffset) * LOUPE_MAGNIFICATION_FACTOR;
+ var targetY = (j + sourceYOffset) * LOUPE_MAGNIFICATION_FACTOR;
+ var colorString =
+ sourceR + ', ' + sourceG + ', ' + sourceB + ', ' + sourceA;
+ targetContext.fillStyle = 'rgba(' + colorString + ')';
+ targetContext.fillRect(
+ targetX, targetY,
+ LOUPE_MAGNIFICATION_FACTOR, LOUPE_MAGNIFICATION_FACTOR);
+
+ if (i == 0 && j == 0) {
+ $('loupe-coordinate').textContent = sourceX + ', ' + sourceY;
+ $(type + '-loupe-color').textContent = colorString;
+ }
+ }
+ }
+
+ this._currentCornerX = x - sourceXOffset;
+ this._currentCornerY = y - sourceYOffset;
+};
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/main.css b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/main.css
new file mode 100644
index 0000000..280c3b2
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/main.css
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+body {
+ font-size: 12px;
+ font-family: Helvetica, Arial, sans-serif;
+ padding: 0;
+ margin: 0;
+}
+
+.loading {
+ opacity: 0.5;
+}
+
+div {
+ margin: 0;
+}
+
+a, .link {
+ color: #aaf;
+ text-decoration: underline;
+ cursor: pointer;
+}
+
+.link.selected {
+ color: #fff;
+ font-weight: bold;
+ text-decoration: none;
+}
+
+#log,
+#queue {
+ padding: .25em 0 0 .25em;
+ position: absolute;
+ right: 0;
+ height: 200px;
+ overflow: auto;
+ background: #fff;
+ -webkit-box-shadow: 1px 1px 5px rgba(0, 0, 0, .5);
+}
+
+#log {
+ top: 2em;
+ width: 500px;
+}
+
+#queue {
+ bottom: 3em;
+ width: 400px;
+}
+
+#queue-select {
+ display: block;
+ width: 390px;
+}
+
+#header,
+#footer {
+ padding: .5em 1em;
+ background: #333;
+ color: #fff;
+ -webkit-box-shadow: 0 1px 5px rgba(0, 0, 0, 0.5);
+}
+
+#header {
+ margin-bottom: 1em;
+}
+
+#header .divider,
+#footer .divider {
+ opacity: .3;
+ padding: 0 .5em;
+}
+
+#header label,
+#footer label {
+ padding-right: 1em;
+ color: #ccc;
+}
+
+#test-link {
+ margin-right: 1em;
+}
+
+#header label span,
+#footer label span {
+ color: #fff;
+ font-weight: bold;
+}
+
+#nav-buttons {
+ white-space: nowrap;
+}
+
+#nav-buttons button {
+ background: #fff;
+ border: 0;
+ border-radius: 10px;
+}
+
+#nav-buttons button:active {
+ -webkit-box-shadow: 0 0 5px #33f inset;
+ background: #aaa;
+}
+
+#nav-buttons button[disabled] {
+ opacity: .5;
+}
+
+#controls {
+ float: right;
+}
+
+.disabled-control {
+ color: #888;
+}
+
+#test-output {
+ border-spacing: 0;
+ border-collapse: collapse;
+ margin: 0 auto;
+ width: 100%;
+}
+
+#test-output td,
+#test-output th {
+ padding: 0;
+ vertical-align: top;
+}
+
+#image-outputs img,
+#image-outputs canvas,
+#image-outputs #diff-checksum {
+ width: 800px;
+ height: 600px;
+ border: solid 1px #ddd;
+ -webkit-user-select: none;
+ -webkit-user-drag: none;
+}
+
+#image-outputs img,
+#image-outputs canvas {
+ cursor: crosshair;
+}
+
+#image-outputs img.loading,
+#image-outputs canvas.loading {
+ opacity: .5;
+}
+
+#image-outputs #actual-image {
+ margin: 0 1em;
+}
+
+#test-output #labels th {
+ text-align: center;
+ color: #666;
+}
+
+#text-outputs .text-output {
+ height: 600px;
+ width: 800px;
+ overflow: auto;
+}
+
+#test-output h2 {
+ border-bottom: solid 1px #ccc;
+ font-weight: bold;
+ margin: 0;
+ background: #eee;
+}
+
+#footer {
+ position: absolute;
+ bottom: 0;
+ left: 0;
+ right: 0;
+ margin-top: 1em;
+}
+
+#state.needs_rebaseline {
+ color: yellow;
+}
+
+#state.rebaseline_failed {
+ color: red;
+}
+
+#state.rebaseline_succeeded {
+ color: green;
+}
+
+#state.in_queue {
+ color: gray;
+}
+
+#current-baselines {
+ font-weight: normal !important;
+}
+
+#current-baselines .platform {
+ font-weight: bold;
+}
+
+#current-baselines a {
+ color: #ddf;
+}
+
+#current-baselines .was-used-for-test {
+ color: #aaf;
+ font-weight: bold;
+}
+
+#action-buttons {
+ float: right;
+}
+
+#action-buttons .link {
+ margin-right: 1em;
+}
+
+#footer button {
+ padding: 1em;
+}
+
+#loupe {
+ -webkit-box-shadow: 2px 2px 5px rgba(0, 0, 0, .5);
+ position: absolute;
+ width: 634px;
+ top: 50%;
+ left: 50%;
+ margin-left: -151px;
+ margin-top: -50px;
+ background: #fff;
+ border-spacing: 0;
+ border-collapse: collapse;
+}
+
+#loupe td {
+ padding: 0;
+ border: solid 1px #ccc;
+}
+
+#loupe label {
+ color: #999;
+ padding-right: 1em;
+}
+
+#loupe span {
+ color: #000;
+ font-weight: bold;
+}
+
+#loupe canvas {
+ cursor: crosshair;
+}
+
+#loupe #loupe-close {
+ float: right;
+}
+
+#loupe #loupe-info {
+ background: #eee;
+ padding: .3em .5em;
+}
+
+#loupe #loupe-colors td {
+ text-align: center;
+}
+
+#loupe .loupe-container {
+ position: relative;
+ width: 210px;
+ height: 210px;
+}
+
+#loupe .center-highlight {
+ position: absolute;
+ width: 10px;
+ height: 10px;
+ top: 50%;
+ left: 50%;
+ margin-left: -5px;
+ margin-top: -5px;
+ outline: solid 1px #999;
+}
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/main.js b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/main.js
new file mode 100644
index 0000000..5e1fa52
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/main.js
@@ -0,0 +1,577 @@
+/*
+ * Copyright (c) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+var ALL_DIRECTORY_PATH = '[all]';
+
+var STATE_NEEDS_REBASELINE = 'needs_rebaseline';
+var STATE_REBASELINE_FAILED = 'rebaseline_failed';
+var STATE_REBASELINE_SUCCEEDED = 'rebaseline_succeeded';
+var STATE_IN_QUEUE = 'in_queue';
+var STATE_TO_DISPLAY_STATE = {};
+STATE_TO_DISPLAY_STATE[STATE_NEEDS_REBASELINE] = 'Needs rebaseline';
+STATE_TO_DISPLAY_STATE[STATE_REBASELINE_FAILED] = 'Rebaseline failed';
+STATE_TO_DISPLAY_STATE[STATE_REBASELINE_SUCCEEDED] = 'Rebaseline succeeded';
+STATE_TO_DISPLAY_STATE[STATE_IN_QUEUE] = 'In queue';
+
+var results;
+var testsByFailureType = {};
+var testsByDirectory = {};
+var selectedTests = [];
+var loupe;
+var queue;
+var shouldSortTestsByMetric = false;
+
+function main()
+{
+ $('failure-type-selector').addEventListener('change', selectFailureType);
+ $('directory-selector').addEventListener('change', selectDirectory);
+ $('test-selector').addEventListener('change', selectTest);
+ $('next-test').addEventListener('click', nextTest);
+ $('previous-test').addEventListener('click', previousTest);
+
+ $('toggle-log').addEventListener('click', function() { toggle('log'); });
+ disableSorting();
+
+ loupe = new Loupe();
+ queue = new RebaselineQueue();
+
+ document.addEventListener('keydown', function(event) {
+ if (event.altKey || event.ctrlKey || event.metaKey || event.shiftKey) {
+ return;
+ }
+
+ switch (event.keyIdentifier) {
+ case 'Left':
+ event.preventDefault();
+ previousTest();
+ break;
+ case 'Right':
+ event.preventDefault();
+ nextTest();
+ break;
+ case 'U+0051': // q
+ queue.addCurrentTest();
+ break;
+ case 'U+0058': // x
+ queue.removeCurrentTest();
+ break;
+ case 'U+0052': // r
+ queue.rebaseline();
+ break;
+ }
+ });
+
+ loadText('/platforms.json', function(text) {
+ var platforms = JSON.parse(text);
+ platforms.platforms.forEach(function(platform) {
+ var platformOption = document.createElement('option');
+ platformOption.value = platform;
+ platformOption.textContent = platform;
+
+ var targetOption = platformOption.cloneNode(true);
+ targetOption.selected = platform == platforms.defaultPlatform;
+ $('baseline-target').appendChild(targetOption);
+ $('baseline-move-to').appendChild(platformOption.cloneNode(true));
+ });
+ });
+
+ loadText('/results.json', function(text) {
+ results = JSON.parse(text);
+ displayResults();
+ });
+}
+
+/**
+ * Groups test results by failure type and populates the failure-type selector.
+ */
+function displayResults()
+{
+ var failureTypeSelector = $('failure-type-selector');
+ var failureTypes = [];
+
+ for (var testName in results.tests) {
+ var test = results.tests[testName];
+ if (test.actual == 'PASS') {
+ continue;
+ }
+ var failureType = test.actual + ' (expected ' + test.expected + ')';
+ if (!(failureType in testsByFailureType)) {
+ testsByFailureType[failureType] = [];
+ failureTypes.push(failureType);
+ }
+ testsByFailureType[failureType].push(testName);
+ }
+
+ // Sort by number of failures
+ failureTypes.sort(function(a, b) {
+ return testsByFailureType[b].length - testsByFailureType[a].length;
+ });
+
+ for (var i = 0, failureType; failureType = failureTypes[i]; i++) {
+ var failureTypeOption = document.createElement('option');
+ failureTypeOption.value = failureType;
+ failureTypeOption.textContent = failureType + ' - ' + testsByFailureType[failureType].length + ' tests';
+ failureTypeSelector.appendChild(failureTypeOption);
+ }
+
+ selectFailureType();
+
+ document.body.className = '';
+}
+
+function enableSorting()
+{
+ $('toggle-sort').onclick = function() {
+ shouldSortTestsByMetric = !shouldSortTestsByMetric;
+ // Regenerates the list of tests; this alphabetizes, and
+ // then re-sorts if we turned sorting on.
+ selectDirectory();
+ };
+ $('toggle-sort').classList.remove('disabled-control');
+}
+
+function disableSorting()
+{
+ $('toggle-sort').onclick = function() { return false; };
+ $('toggle-sort').classList.add('disabled-control');
+}
+
+/**
+ * For a given failure type, gets all the tests and groups them by directory
+ * (populating the directory selector with them).
+ */
+function selectFailureType()
+{
+ var selectedFailureType = getSelectValue('failure-type-selector');
+ var tests = testsByFailureType[selectedFailureType];
+
+ testsByDirectory = {};
+ var displayDirectoryNamesByDirectory = {};
+ var directories = [];
+
+ // Include a special option for all tests
+ testsByDirectory[ALL_DIRECTORY_PATH] = tests;
+ displayDirectoryNamesByDirectory[ALL_DIRECTORY_PATH] = 'all';
+ directories.push(ALL_DIRECTORY_PATH);
+
+ // Roll up tests by ancestor directories
+ tests.forEach(function(test) {
+ var pathPieces = test.split('/');
+ var pathDirectories = pathPieces.slice(0, pathPieces.length - 1);
+ var ancestorDirectory = '';
+
+ pathDirectories.forEach(function(pathDirectory, index) {
+ ancestorDirectory += pathDirectory + '/';
+ if (!(ancestorDirectory in testsByDirectory)) {
+ testsByDirectory[ancestorDirectory] = [];
+ var displayDirectoryName = new Array(index * 6).join(' ') + pathDirectory;
+ displayDirectoryNamesByDirectory[ancestorDirectory] = displayDirectoryName;
+ directories.push(ancestorDirectory);
+ }
+
+ testsByDirectory[ancestorDirectory].push(test);
+ });
+ });
+
+ directories.sort();
+
+ var directorySelector = $('directory-selector');
+ directorySelector.innerHTML = '';
+
+ directories.forEach(function(directory) {
+ var directoryOption = document.createElement('option');
+ directoryOption.value = directory;
+ directoryOption.innerHTML =
+ displayDirectoryNamesByDirectory[directory] + ' - ' +
+ testsByDirectory[directory].length + ' tests';
+ directorySelector.appendChild(directoryOption);
+ });
+
+ selectDirectory();
+}
+
+/**
+ * For a given failure type and directory, gets all the tests in that
+ * directory and populates the test selector with them.
+ */
+function selectDirectory()
+{
+ var previouslySelectedTest = getSelectedTest();
+
+ var selectedDirectory = getSelectValue('directory-selector');
+ selectedTests = testsByDirectory[selectedDirectory];
+ selectedTests.sort();
+
+ var testsByState = {};
+ selectedTests.forEach(function(testName) {
+ var state = results.tests[testName].state;
+ if (state == STATE_IN_QUEUE) {
+ state = STATE_NEEDS_REBASELINE;
+ }
+ if (!(state in testsByState)) {
+ testsByState[state] = [];
+ }
+ testsByState[state].push(testName);
+ });
+
+ var optionIndexByTest = {};
+
+ var testSelector = $('test-selector');
+ testSelector.innerHTML = '';
+
+ var selectedFailureType = getSelectValue('failure-type-selector');
+ var sampleSelectedTest = testsByFailureType[selectedFailureType][0];
+ var selectedTypeIsSortable = 'metric' in results.tests[sampleSelectedTest];
+ if (selectedTypeIsSortable) {
+ enableSorting();
+ if (shouldSortTestsByMetric) {
+ for (var state in testsByState) {
+ testsByState[state].sort(function(a, b) {
+ return results.tests[b].metric - results.tests[a].metric;
+ });
+ }
+ }
+ } else
+ disableSorting();
+
+ for (var state in testsByState) {
+ var stateOption = document.createElement('option');
+ stateOption.textContent = STATE_TO_DISPLAY_STATE[state];
+ stateOption.disabled = true;
+ testSelector.appendChild(stateOption);
+
+ testsByState[state].forEach(function(testName) {
+ var testOption = document.createElement('option');
+ testOption.value = testName;
+ var testDisplayName = testName;
+ if (testName.lastIndexOf(selectedDirectory) == 0) {
+ testDisplayName = testName.substring(selectedDirectory.length);
+ }
+ testOption.innerHTML = ' ' + testDisplayName;
+ optionIndexByTest[testName] = testSelector.options.length;
+ testSelector.appendChild(testOption);
+ });
+ }
+
+ if (previouslySelectedTest in optionIndexByTest) {
+ testSelector.selectedIndex = optionIndexByTest[previouslySelectedTest];
+ } else if (STATE_NEEDS_REBASELINE in testsByState) {
+ testSelector.selectedIndex =
+ optionIndexByTest[testsByState[STATE_NEEDS_REBASELINE][0]];
+ } else {
+ // Index 0 is always a disabled state-header option, so the first
+ // selectable test is at index 1.
+ testSelector.selectedIndex = 1;
+ }
+
+ selectTest();
+}
+
+function getSelectedTest()
+{
+ return getSelectValue('test-selector');
+}
+
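+/**
+ * Shows the image and/or text outputs for the currently selected test,
+ * rebuilds the list of its existing baselines and prefetches the next test.
+ */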
+function selectTest()
+{
+ var selectedTest = getSelectedTest();
+
+ if (results.tests[selectedTest].actual.indexOf('IMAGE') != -1) {
+ $('image-outputs').style.display = '';
+ displayImageResults(selectedTest);
+ } else {
+ $('image-outputs').style.display = 'none';
+ }
+
+ if (results.tests[selectedTest].actual.indexOf('TEXT') != -1) {
+ $('text-outputs').style.display = '';
+ displayTextResults(selectedTest);
+ } else {
+ $('text-outputs').style.display = 'none';
+ }
+
+ var currentBaselines = $('current-baselines');
+ currentBaselines.textContent = '';
+ var baselines = results.tests[selectedTest].baselines;
+ var testName = selectedTest.split('.').slice(0, -1).join('.');
+ getSortedKeys(baselines).forEach(function(platform, i) {
+ if (i != 0) {
+ currentBaselines.appendChild(document.createTextNode('; '));
+ }
+ var platformName = document.createElement('span');
+ platformName.className = 'platform';
+ platformName.textContent = platform;
+ currentBaselines.appendChild(platformName);
+ currentBaselines.appendChild(document.createTextNode(' ('));
+ getSortedKeys(baselines[platform]).forEach(function(extension, j) {
+ if (j != 0) {
+ currentBaselines.appendChild(document.createTextNode(', '));
+ }
+ var link = document.createElement('a');
+ var baselinePath = '';
+ if (platform != 'base') {
+ baselinePath += 'platform/' + platform + '/';
+ }
+ baselinePath += testName + '-expected' + extension;
+ link.href = getTracUrl(baselinePath);
+ if (extension == '.checksum') {
+ link.textContent = 'chk';
+ } else {
+ link.textContent = extension.substring(1);
+ }
+ link.target = '_blank';
+ if (baselines[platform][extension]) {
+ link.className = 'was-used-for-test';
+ }
+ currentBaselines.appendChild(link);
+ });
+ currentBaselines.appendChild(document.createTextNode(')'));
+ });
+
+ updateState();
+ loupe.hide();
+
+ prefetchNextImageTest();
+}
+
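+/**
+ * Starts loading the next test's images (when it has an image failure) so
+ * that advancing to it is faster.
+ */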
+function prefetchNextImageTest()
+{
+ var testSelector = $('test-selector');
+ if (testSelector.selectedIndex == testSelector.options.length - 1) {
+ return;
+ }
+ var nextTest = testSelector.options[testSelector.selectedIndex + 1].value;
+ if (results.tests[nextTest].actual.indexOf('IMAGE') != -1) {
+ new Image().src = getTestResultUrl(nextTest, 'expected-image');
+ new Image().src = getTestResultUrl(nextTest, 'actual-image');
+ }
+}
+
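+/**
+ * Refreshes the test counter, navigation buttons, test link and state label
+ * for the currently selected test.
+ */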
+function updateState()
+{
+ var testName = getSelectedTest();
+ var testIndex = selectedTests.indexOf(testName);
+ var testCount = selectedTests.length;
+ $('test-index').textContent = testIndex + 1;
+ $('test-count').textContent = testCount;
+
+ $('next-test').disabled = testIndex == testCount - 1;
+ $('previous-test').disabled = testIndex == 0;
+
+ $('test-link').href = getTracUrl(testName);
+
+ var state = results.tests[testName].state;
+ $('state').className = state;
+ $('state').innerHTML = STATE_TO_DISPLAY_STATE[state];
+
+ queue.updateState();
+}
+
+function getTestResultUrl(testName, mode)
+{
+ return '/test_result?test=' + testName + '&mode=' + mode;
+}
+
+var currentExpectedImageTest;
+var currentActualImageTest;
+
+function displayImageResults(testName)
+{
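+ // Nothing to reload if both images already show this test.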
+ if (currentExpectedImageTest == currentActualImageTest
+ && currentExpectedImageTest == testName) {
+ return;
+ }
+
+ function displayImageResult(mode, callback) {
+ var image = $(mode);
+ image.className = 'loading';
+ image.src = getTestResultUrl(testName, mode);
+ image.onload = function() {
+ image.className = '';
+ callback();
+ updateImageDiff();
+ };
+ }
+
+ displayImageResult(
+ 'expected-image',
+ function() { currentExpectedImageTest = testName; });
+ displayImageResult(
+ 'actual-image',
+ function() { currentActualImageTest = testName; });
+
+ $('diff-canvas').className = 'loading';
+ $('diff-canvas').style.display = '';
+ $('diff-checksum').style.display = 'none';
+}
+
+/**
+ * Computes a graphical diff between the expected and actual images by
+ * rendering each to a canvas, getting the image data, and comparing the RGBA
+ * components of each pixel. The output is put into the diff canvas, with
+ * identical pixels appearing at 12.5% opacity and different pixels being
+ * highlighted in red.
+ */
+function updateImageDiff() {
+ if (currentExpectedImageTest != currentActualImageTest)
+ return;
+
+ var expectedImage = $('expected-image');
+ var actualImage = $('actual-image');
+
+ function getImageData(image) {
+ var imageCanvas = document.createElement('canvas');
+ imageCanvas.width = image.width;
+ imageCanvas.height = image.height;
+ var imageCanvasContext = imageCanvas.getContext('2d');
+
+ imageCanvasContext.fillStyle = 'rgba(255, 255, 255, 1)';
+ imageCanvasContext.fillRect(
+ 0, 0, image.width, image.height);
+
+ imageCanvasContext.drawImage(image, 0, 0);
+ return imageCanvasContext.getImageData(
+ 0, 0, image.width, image.height);
+ }
+
+ var expectedImageData = getImageData(expectedImage);
+ var actualImageData = getImageData(actualImage);
+
+ var diffCanvas = $('diff-canvas');
+ var diffCanvasContext = diffCanvas.getContext('2d');
+ var diffImageData =
+ diffCanvasContext.createImageData(diffCanvas.width, diffCanvas.height);
+
+ // Avoiding property lookups for all these during the per-pixel loop below
+ // provides a significant performance benefit.
+ var expectedWidth = expectedImage.width;
+ var expectedHeight = expectedImage.height;
+ var expected = expectedImageData.data;
+
+ var actualWidth = actualImage.width;
+ var actual = actualImageData.data;
+
+ var diffWidth = diffImageData.width;
+ var diff = diffImageData.data;
+
+ var hadDiff = false;
+ for (var x = 0; x < expectedWidth; x++) {
+ for (var y = 0; y < expectedHeight; y++) {
+ var expectedOffset = (y * expectedWidth + x) * 4;
+ var actualOffset = (y * actualWidth + x) * 4;
+ var diffOffset = (y * diffWidth + x) * 4;
+ if (expected[expectedOffset] != actual[actualOffset] ||
+ expected[expectedOffset + 1] != actual[actualOffset + 1] ||
+ expected[expectedOffset + 2] != actual[actualOffset + 2] ||
+ expected[expectedOffset + 3] != actual[actualOffset + 3]) {
+ hadDiff = true;
+ diff[diffOffset] = 255;
+ diff[diffOffset + 1] = 0;
+ diff[diffOffset + 2] = 0;
+ diff[diffOffset + 3] = 255;
+ } else {
+ diff[diffOffset] = expected[expectedOffset];
+ diff[diffOffset + 1] = expected[expectedOffset + 1];
+ diff[diffOffset + 2] = expected[expectedOffset + 2];
+ diff[diffOffset + 3] = 32;
+ }
+ }
+ }
+
+ diffCanvasContext.putImageData(
+ diffImageData,
+ 0, 0,
+ 0, 0,
+ diffImageData.width, diffImageData.height);
+ diffCanvas.className = '';
+
+ if (!hadDiff) {
+ diffCanvas.style.display = 'none';
+ $('diff-checksum').style.display = '';
+ loadTextResult(currentExpectedImageTest, 'expected-checksum');
+ loadTextResult(currentExpectedImageTest, 'actual-checksum');
+ }
+}
+
+function loadTextResult(testName, mode, responseIsHtml)
+{
+ loadText(getTestResultUrl(testName, mode), function(text) {
+ if (responseIsHtml) {
+ $(mode).innerHTML = text;
+ } else {
+ $(mode).textContent = text;
+ }
+ });
+}
+
+function displayTextResults(testName)
+{
+ loadTextResult(testName, 'expected-text');
+ loadTextResult(testName, 'actual-text');
+ loadTextResult(testName, 'diff-text-pretty', true);
+}
+
+function nextTest()
+{
+ var testSelector = $('test-selector');
+ var nextTestIndex = testSelector.selectedIndex + 1;
+ while (true) {
+ if (nextTestIndex == testSelector.options.length) {
+ return;
+ }
+ if (testSelector.options[nextTestIndex].disabled) {
+ nextTestIndex++;
+ } else {
+ testSelector.selectedIndex = nextTestIndex;
+ selectTest();
+ return;
+ }
+ }
+}
+
+function previousTest()
+{
+ var testSelector = $('test-selector');
+ var previousTestIndex = testSelector.selectedIndex - 1;
+ while (true) {
+ if (previousTestIndex == -1) {
+ return;
+ }
+ if (testSelector.options[previousTestIndex].disabled) {
+ previousTestIndex--;
+ } else {
+ testSelector.selectedIndex = previousTestIndex;
+ selectTest();
+ return;
+ }
+ }
+}
+
+window.addEventListener('DOMContentLoaded', main);
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/queue.js b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/queue.js
new file mode 100644
index 0000000..338e28f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/queue.js
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+function RebaselineQueue()
+{
+ this._selectNode = $('queue-select');
+ this._rebaselineButtonNode = $('rebaseline-queue');
+ this._toggleNode = $('toggle-queue');
+ this._removeSelectionButtonNode = $('remove-queue-selection');
+
+ this._inProgressRebaselineCount = 0;
+
+ var self = this;
+ $('add-to-rebaseline-queue').addEventListener(
+ 'click', function() { self.addCurrentTest(); });
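+ // 'updateState' here is the global helper from main.js, which in turn
+ // calls this queue's updateState() method.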
+ this._selectNode.addEventListener('change', updateState);
+ this._removeSelectionButtonNode.addEventListener(
+ 'click', function() { self._removeSelection(); });
+ this._rebaselineButtonNode.addEventListener(
+ 'click', function() { self.rebaseline(); });
+ this._toggleNode.addEventListener(
+ 'click', function() { toggle('queue'); });
+}
+
+RebaselineQueue.prototype.updateState = function()
+{
+ var testName = getSelectedTest();
+
+ var state = results.tests[testName].state;
+ $('add-to-rebaseline-queue').disabled = state != STATE_NEEDS_REBASELINE;
+
+ var queueLength = this._selectNode.options.length;
+ if (this._inProgressRebaselineCount > 0) {
+ this._rebaselineButtonNode.disabled = true;
+ this._rebaselineButtonNode.textContent =
+ 'Rebaseline in progress (' + this._inProgressRebaselineCount +
+ ' tests left)';
+ } else if (queueLength == 0) {
+ this._rebaselineButtonNode.disabled = true;
+ this._rebaselineButtonNode.textContent = 'Rebaseline queue';
+ this._toggleNode.textContent = 'Queue';
+ } else {
+ this._rebaselineButtonNode.disabled = false;
+ this._rebaselineButtonNode.textContent =
+ 'Rebaseline queue (' + queueLength + ' tests)';
+ this._toggleNode.textContent = 'Queue (' + queueLength + ' tests)';
+ }
+ this._removeSelectionButtonNode.disabled =
+ this._selectNode.selectedIndex == -1;
+};
+
+RebaselineQueue.prototype.addCurrentTest = function()
+{
+ var testName = getSelectedTest();
+ var test = results.tests[testName];
+
+ if (test.state != STATE_NEEDS_REBASELINE) {
+ log('Cannot add test with state "' + test.state + '" to queue.',
+ log.WARNING);
+ return;
+ }
+
+ var queueOption = document.createElement('option');
+ queueOption.value = testName;
+ queueOption.textContent = testName;
+ this._selectNode.appendChild(queueOption);
+ test.state = STATE_IN_QUEUE;
+ updateState();
+};
+
+RebaselineQueue.prototype.removeCurrentTest = function()
+{
+ this._removeTest(getSelectedTest());
+};
+
+RebaselineQueue.prototype._removeSelection = function()
+{
+ if (this._selectNode.selectedIndex == -1)
+ return;
+
+ this._removeTest(
+ this._selectNode.options[this._selectNode.selectedIndex].value);
+};
+
+RebaselineQueue.prototype._removeTest = function(testName)
+{
+ var queueOption = this._selectNode.firstChild;
+
+ while (queueOption && queueOption.value != testName) {
+ queueOption = queueOption.nextSibling;
+ }
+
+ if (!queueOption)
+ return;
+
+ this._selectNode.removeChild(queueOption);
+ var test = results.tests[testName];
+ test.state = STATE_NEEDS_REBASELINE;
+ updateState();
+};
+
+RebaselineQueue.prototype.rebaseline = function()
+{
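+ // Snapshot the queued test names first; _rebaselineTest removes each
+ // entry from the queue as its request completes.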
+ var testNames = [];
+ for (var queueOption = this._selectNode.firstChild;
+ queueOption;
+ queueOption = queueOption.nextSibling) {
+ testNames.push(queueOption.value);
+ }
+
+ this._inProgressRebaselineCount = testNames.length;
+ updateState();
+
+ testNames.forEach(this._rebaselineTest, this);
+};
+
+RebaselineQueue.prototype._rebaselineTest = function(testName)
+{
+ var baselineTarget = getSelectValue('baseline-target');
+ var baselineMoveTo = getSelectValue('baseline-move-to');
+
+ var xhr = new XMLHttpRequest();
+ xhr.open('POST',
+ '/rebaseline?test=' + encodeURIComponent(testName) +
+ '&baseline-target=' + encodeURIComponent(baselineTarget) +
+ '&baseline-move-to=' + encodeURIComponent(baselineMoveTo));
+
+ var self = this;
+ function handleResponse(logType, newState) {
+ log(xhr.responseText, logType);
+ self._removeTest(testName);
+ self._inProgressRebaselineCount--;
+ results.tests[testName].state = newState;
+ updateState();
+ // If we're done with a set of rebaselines, regenerate the test menu
+ // (which is grouped by state) since test states have changed.
+ if (self._inProgressRebaselineCount == 0) {
+ selectDirectory();
+ }
+ }
+
+ function handleSuccess() {
+ handleResponse(log.SUCCESS, STATE_REBASELINE_SUCCEEDED);
+ }
+ function handleFailure() {
+ handleResponse(log.ERROR, STATE_REBASELINE_FAILED);
+ }
+
+ xhr.addEventListener('load', function() {
+ if (xhr.status < 400) {
+ handleSuccess();
+ } else {
+ handleFailure();
+ }
+ });
+ xhr.addEventListener('error', handleFailure);
+
+ xhr.send();
+};
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/util.js b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/util.js
new file mode 100644
index 0000000..5ad7612
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/data/rebaselineserver/util.js
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+var results;
+var testsByFailureType = {};
+var testsByDirectory = {};
+var selectedTests = [];
+
+function $(id)
+{
+ return document.getElementById(id);
+}
+
+function getSelectValue(id)
+{
+ var select = $(id);
+ if (select.selectedIndex == -1) {
+ return null;
+ } else {
+ return select.options[select.selectedIndex].value;
+ }
+}
+
+function loadText(url, callback)
+{
+ var xhr = new XMLHttpRequest();
+ xhr.open('GET', url);
+ xhr.addEventListener('load', function() { callback(xhr.responseText); });
+ xhr.send();
+}
+
+function log(text, type)
+{
+ var node = $('log');
+
+ if (type) {
+ var typeNode = document.createElement('span');
+ typeNode.textContent = type.text;
+ typeNode.style.color = type.color;
+ node.appendChild(typeNode);
+ }
+
+ node.appendChild(document.createTextNode(text + '\n'));
+ node.scrollTop = node.scrollHeight;
+}
+
+log.WARNING = {text: 'Warning: ', color: '#aa3'};
+log.SUCCESS = {text: 'Success: ', color: 'green'};
+log.ERROR = {text: 'Error: ', color: 'red'};
+
+function toggle(id)
+{
+ var element = $(id);
+ var toggler = $('toggle-' + id);
+ if (element.style.display == 'none') {
+ element.style.display = '';
+ toggler.className = 'link selected';
+ } else {
+ element.style.display = 'none';
+ toggler.className = 'link';
+ }
+}
+
+function getTracUrl(layoutTestPath)
+{
+ return 'http://trac.webkit.org/browser/trunk/LayoutTests/' + layoutTestPath;
+}
+
+function getSortedKeys(obj)
+{
+ var keys = [];
+ for (var key in obj) {
+ keys.push(key);
+ }
+ keys.sort();
+ return keys;
+}
\ No newline at end of file
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/layouttestsserver.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/layouttestsserver.py
new file mode 100644
index 0000000..71c207f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/layouttestsserver.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2014 Samsung Electronics. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import time
+import json
+import BaseHTTPServer
+import subprocess
+from subprocess import STDOUT
+
+from webkitpy.common.webkit_finder import WebKitFinder
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.tool.servers.reflectionhandler import ReflectionHandler
+
+
+class LayoutTestsHTTPServer(BaseHTTPServer.HTTPServer):
+ def __init__(self, httpd_port, config):
+ server_name = ""
+ server_address = ("", httpd_port)
+ BaseHTTPServer.HTTPServer.__init__(self, server_address, LayoutTestsServerHTTPRequestHandler)
+
+
+class LayoutTestsServerHTTPRequestHandler(ReflectionHandler):
+
+ def do_POST(self):
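+ # Runs run-webkit-tests for the tests listed in the POSTed JSON and
+ # streams the script's output back to the client as HTML lines.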
+ json_raw_data = self.rfile.read(int(self.headers.getheader('content-length')))
+ json_data = json.loads(json_raw_data)
+ test_list = ' '.join(json_data['tests'])
+ filesystem = FileSystem()
+ webkit_finder = WebKitFinder(filesystem)
+ script_dir = webkit_finder.path_from_webkit_base('Tools', 'Scripts')
+ executable_path = script_dir + "/run-webkit-tests"
+ cmd = "python " + executable_path + " --no-show-results "
+ cmd += test_list
+ process = subprocess.Popen(cmd, shell=True, cwd=script_dir, env=None, stdout=subprocess.PIPE, stderr=STDOUT)
+ self.send_response(200)
+ self.send_header('Access-Control-Allow-Origin', '*')
+ self.send_header("Content-type", "text/html")
+ self.end_headers()
+ while process.poll() is None:
+ html_output = '<br>' + str(process.stdout.readline())
+ self.wfile.write(html_output)
+ self.wfile.flush()
+ time.sleep(0.05)
+ process.wait()
+
+ def do_OPTIONS(self):
+ self.send_response(200, "ok")
+ self.send_header('Access-Control-Allow-Origin', '*')
+ self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
+ self.send_header("Access-Control-Allow-Headers", "Content-type")
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/rebaselineserver.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/rebaselineserver.py
new file mode 100644
index 0000000..cdbb759
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/rebaselineserver.py
@@ -0,0 +1,283 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import fnmatch
+import os
+import os.path
+import BaseHTTPServer
+
+from webkitpy.layout_tests.port.base import Port
+from webkitpy.tool.servers.reflectionhandler import ReflectionHandler
+
+
+STATE_NEEDS_REBASELINE = 'needs_rebaseline'
+STATE_REBASELINE_FAILED = 'rebaseline_failed'
+STATE_REBASELINE_SUCCEEDED = 'rebaseline_succeeded'
+
+
+def _get_actual_result_files(test_file, test_config):
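+ # Collect the '<test>-actual.*' files that the test run produced for this
+ # test, sorted so callers see a deterministic order.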
+ test_name, _ = os.path.splitext(test_file)
+ test_directory = os.path.dirname(test_file)
+
+ test_results_directory = test_config.filesystem.join(
+ test_config.results_directory, test_directory)
+ actual_pattern = os.path.basename(test_name) + '-actual.*'
+ actual_files = []
+ for filename in test_config.filesystem.listdir(test_results_directory):
+ if fnmatch.fnmatch(filename, actual_pattern):
+ actual_files.append(filename)
+ actual_files.sort()
+ return tuple(actual_files)
+
+
+def _rebaseline_test(test_file, baseline_target, baseline_move_to, test_config, log):
+ test_name, _ = os.path.splitext(test_file)
+ test_directory = os.path.dirname(test_name)
+
+ log('Rebaselining %s...' % test_name)
+
+ actual_result_files = _get_actual_result_files(test_file, test_config)
+ filesystem = test_config.filesystem
+ scm = test_config.scm
+ layout_tests_directory = test_config.layout_tests_directory
+ target_expectations_directory = filesystem.join(
+ layout_tests_directory, 'platform', baseline_target, test_directory)
+ test_results_directory = test_config.filesystem.join(
+ test_config.results_directory, test_directory)
+
+ # If requested, move current baselines out
+ current_baselines = get_test_baselines(test_file, test_config)
+ if baseline_target in current_baselines and baseline_move_to != 'none':
+ log(' Moving current %s baselines to %s' %
+ (baseline_target, baseline_move_to))
+
+ # See which ones we need to move (only those that are about to be
+ # updated), and make sure we're not clobbering any files in the
+ # destination.
+ current_extensions = set(current_baselines[baseline_target].keys())
+ actual_result_extensions = [
+ os.path.splitext(f)[1] for f in actual_result_files]
+ extensions_to_move = current_extensions.intersection(
+ actual_result_extensions)
+
+ if extensions_to_move.intersection(
+ current_baselines.get(baseline_move_to, {}).keys()):
+ log(' Already had baselines in %s, could not move existing '
+ '%s ones' % (baseline_move_to, baseline_target))
+ return False
+
+ # Do the actual move.
+ if extensions_to_move:
+ if not _move_test_baselines(
+ test_file,
+ list(extensions_to_move),
+ baseline_target,
+ baseline_move_to,
+ test_config,
+ log):
+ return False
+ else:
+ log(' No current baselines to move')
+
+ log(' Updating baselines for %s' % baseline_target)
+ filesystem.maybe_make_directory(target_expectations_directory)
+ for source_file in actual_result_files:
+ source_path = filesystem.join(test_results_directory, source_file)
+ destination_file = source_file.replace('-actual', '-expected')
+ destination_path = filesystem.join(
+ target_expectations_directory, destination_file)
+ filesystem.copyfile(source_path, destination_path)
+ exit_code = scm.add(destination_path, return_exit_code=True)
+ if exit_code:
+ log(' Could not update %s in SCM, exit code %d' %
+ (destination_file, exit_code))
+ return False
+ else:
+ log(' Updated %s' % destination_file)
+
+ return True
+
+
+def _move_test_baselines(test_file, extensions_to_move, source_platform, destination_platform, test_config, log):
+ test_file_name = os.path.splitext(os.path.basename(test_file))[0]
+ test_directory = os.path.dirname(test_file)
+ filesystem = test_config.filesystem
+
+ # Want predictable output order for unit tests.
+ extensions_to_move.sort()
+
+ source_directory = os.path.join(
+ test_config.layout_tests_directory,
+ 'platform',
+ source_platform,
+ test_directory)
+ destination_directory = os.path.join(
+ test_config.layout_tests_directory,
+ 'platform',
+ destination_platform,
+ test_directory)
+ filesystem.maybe_make_directory(destination_directory)
+
+ for extension in extensions_to_move:
+ file_name = test_file_name + '-expected' + extension
+ source_path = filesystem.join(source_directory, file_name)
+ destination_path = filesystem.join(destination_directory, file_name)
+ filesystem.copyfile(source_path, destination_path)
+ exit_code = test_config.scm.add(destination_path, return_exit_code=True)
+ if exit_code:
+ log(' Could not update %s in SCM, exit code %d' %
+ (file_name, exit_code))
+ return False
+ else:
+ log(' Moved %s' % file_name)
+
+ return True
+
+
+def get_test_baselines(test_file, test_config):
+ # FIXME: This seems like a hack. It seems to be used only to access the Port.expected_baselines logic.
+ class AllPlatformsPort(Port):
+ def __init__(self, host):
+ super(AllPlatformsPort, self).__init__(host, 'mac')
+ self._platforms_by_directory = dict([(self._webkit_baseline_path(p), p) for p in test_config.platforms])
+
+ def baseline_search_path(self):
+ return self._platforms_by_directory.keys()
+
+ def platform_from_directory(self, directory):
+ return self._platforms_by_directory[directory]
+
+ test_path = test_config.filesystem.join(test_config.layout_tests_directory, test_file)
+ host = test_config.host
+ all_platforms_port = AllPlatformsPort(host)
+
+ all_test_baselines = {}
+ for baseline_extension in ('.txt', '.checksum', '.png'):
+ test_baselines = test_config.test_port.expected_baselines(test_file, baseline_extension)
+ baselines = all_platforms_port.expected_baselines(test_file, baseline_extension, all_baselines=True)
+ for platform_directory, expected_filename in baselines:
+ if not platform_directory:
+ continue
+ if platform_directory == test_config.layout_tests_directory:
+ platform = 'base'
+ else:
+ platform = all_platforms_port.platform_from_directory(platform_directory)
+ platform_baselines = all_test_baselines.setdefault(platform, {})
+ was_used_for_test = (platform_directory, expected_filename) in test_baselines
+ platform_baselines[baseline_extension] = was_used_for_test
+
+ return all_test_baselines
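+# Return-value sketch (this shape is asserted by GetBaselinesTest in the unit
+# tests below): a test with a base text baseline plus a mac override returns
+#   {'mac': {'.txt': True}, 'base': {'.txt': False}}
+# where the boolean records whether that baseline file is the one the
+# configured test port would actually use.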
+
+
+class RebaselineHTTPServer(BaseHTTPServer.HTTPServer):
+ def __init__(self, httpd_port, config):
+ server_name = ""
+ BaseHTTPServer.HTTPServer.__init__(self, (server_name, httpd_port), RebaselineHTTPRequestHandler)
+ self.test_config = config['test_config']
+ self.results_json = config['results_json']
+ self.platforms_json = config['platforms_json']
+
+
+class RebaselineHTTPRequestHandler(ReflectionHandler):
+ STATIC_FILE_NAMES = frozenset([
+ "index.html",
+ "loupe.js",
+ "main.js",
+ "main.css",
+ "queue.js",
+ "util.js",
+ ])
+
+ STATIC_FILE_DIRECTORY = os.path.join(os.path.dirname(__file__), "data", "rebaselineserver")
+
+ def results_json(self):
+ self._serve_json(self.server.results_json)
+
+ def test_config(self):
+ self._serve_json(self.server.test_config)
+
+ def platforms_json(self):
+ self._serve_json(self.server.platforms_json)
+
+ def rebaseline(self):
+ test = self.query['test'][0]
+ baseline_target = self.query['baseline-target'][0]
+ baseline_move_to = self.query['baseline-move-to'][0]
+ test_json = self.server.results_json['tests'][test]
+
+ if test_json['state'] != STATE_NEEDS_REBASELINE:
+ self.send_error(400, "Test %s is in unexpected state: %s" % (test, test_json["state"]))
+ return
+
+ log = []
+ success = _rebaseline_test(
+ test,
+ baseline_target,
+ baseline_move_to,
+ self.server.test_config,
+ log=lambda l: log.append(l))
+
+ if success:
+ test_json['state'] = STATE_REBASELINE_SUCCEEDED
+ self.send_response(200)
+ else:
+ test_json['state'] = STATE_REBASELINE_FAILED
+ self.send_response(500)
+
+ self.send_header('Content-type', 'text/plain')
+ self.end_headers()
+ self.wfile.write('\n'.join(log))
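+ # Example request (assuming a running server):
+ #   GET /rebaseline?test=fast/text.html&baseline-target=mac&baseline-move-to=none
+ # Responds 200 with the accumulated log as text/plain on success, 500 on
+ # failure.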
+
+ def test_result(self):
+ test_name, _ = os.path.splitext(self.query['test'][0])
+ mode = self.query['mode'][0]
+ if mode == 'expected-image':
+ file_name = test_name + '-expected.png'
+ elif mode == 'actual-image':
+ file_name = test_name + '-actual.png'
+ elif mode == 'expected-checksum':
+ file_name = test_name + '-expected.checksum'
+ elif mode == 'actual-checksum':
+ file_name = test_name + '-actual.checksum'
+ elif mode == 'diff-image':
+ file_name = test_name + '-diff.png'
+ elif mode == 'expected-text':
+ file_name = test_name + '-expected.txt'
+ elif mode == 'actual-text':
+ file_name = test_name + '-actual.txt'
+ elif mode == 'diff-text':
+ file_name = test_name + '-diff.txt'
+ elif mode == 'diff-text-pretty':
+ file_name = test_name + '-pretty-diff.html'
+
+ file_path = os.path.join(self.server.test_config.results_directory, file_name)
+
+ # Let results be cached for 60 seconds so that they can be pre-fetched
+ # by the UI.
+ self._serve_file(file_path, cacheable_seconds=60)
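+ # Example: GET /test_result?test=fast/text.html&mode=actual-text serves
+ # fast/text-actual.txt from the results directory, using the mode-to-file
+ # mapping above.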
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py
new file mode 100644
index 0000000..8837e62
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py
@@ -0,0 +1,312 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import unittest
+
+from webkitpy.common.net import layouttestresults_unittest
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.layout_package.json_results_generator import strip_json_wrapper
+from webkitpy.layout_tests.port.base import Port
+from webkitpy.tool.commands.rebaselineserver import TestConfig, RebaselineServer
+from webkitpy.tool.servers import rebaselineserver
+
+
+class RebaselineTestTest(unittest.TestCase):
+ def test_text_rebaseline_update(self):
+ self._assertRebaseline(
+ test_files=(
+ 'fast/text-expected.txt',
+ 'platform/mac/fast/text-expected.txt',
+ ),
+ results_files=(
+ 'fast/text-actual.txt',
+ ),
+ test_name='fast/text.html',
+ baseline_target='mac',
+ baseline_move_to='none',
+ expected_success=True,
+ expected_log=[
+ 'Rebaselining fast/text...',
+ ' Updating baselines for mac',
+ ' Updated text-expected.txt',
+ ])
+
+ def test_text_rebaseline_new(self):
+ self._assertRebaseline(
+ test_files=(
+ 'fast/text-expected.txt',
+ ),
+ results_files=(
+ 'fast/text-actual.txt',
+ ),
+ test_name='fast/text.html',
+ baseline_target='mac',
+ baseline_move_to='none',
+ expected_success=True,
+ expected_log=[
+ 'Rebaselining fast/text...',
+ ' Updating baselines for mac',
+ ' Updated text-expected.txt',
+ ])
+
+ def test_text_rebaseline_move_no_op_1(self):
+ self._assertRebaseline(
+ test_files=(
+ 'fast/text-expected.txt',
+ 'platform/win/fast/text-expected.txt',
+ ),
+ results_files=(
+ 'fast/text-actual.txt',
+ ),
+ test_name='fast/text.html',
+ baseline_target='mac',
+ baseline_move_to='mac-leopard',
+ expected_success=True,
+ expected_log=[
+ 'Rebaselining fast/text...',
+ ' Updating baselines for mac',
+ ' Updated text-expected.txt',
+ ])
+
+ def test_text_rebaseline_move_no_op_2(self):
+ self._assertRebaseline(
+ test_files=(
+ 'fast/text-expected.txt',
+ 'platform/mac/fast/text-expected.checksum',
+ ),
+ results_files=(
+ 'fast/text-actual.txt',
+ ),
+ test_name='fast/text.html',
+ baseline_target='mac',
+ baseline_move_to='mac-leopard',
+ expected_success=True,
+ expected_log=[
+ 'Rebaselining fast/text...',
+ ' Moving current mac baselines to mac-leopard',
+ ' No current baselines to move',
+ ' Updating baselines for mac',
+ ' Updated text-expected.txt',
+ ])
+
+ def test_text_rebaseline_move(self):
+ self._assertRebaseline(
+ test_files=(
+ 'fast/text-expected.txt',
+ 'platform/mac/fast/text-expected.txt',
+ ),
+ results_files=(
+ 'fast/text-actual.txt',
+ ),
+ test_name='fast/text.html',
+ baseline_target='mac',
+ baseline_move_to='mac-leopard',
+ expected_success=True,
+ expected_log=[
+ 'Rebaselining fast/text...',
+ ' Moving current mac baselines to mac-leopard',
+ ' Moved text-expected.txt',
+ ' Updating baselines for mac',
+ ' Updated text-expected.txt',
+ ])
+
+ def test_text_rebaseline_move_only_images(self):
+ self._assertRebaseline(
+ test_files=(
+ 'fast/image-expected.txt',
+ 'platform/mac/fast/image-expected.txt',
+ 'platform/mac/fast/image-expected.png',
+ 'platform/mac/fast/image-expected.checksum',
+ ),
+ results_files=(
+ 'fast/image-actual.png',
+ 'fast/image-actual.checksum',
+ ),
+ test_name='fast/image.html',
+ baseline_target='mac',
+ baseline_move_to='mac-leopard',
+ expected_success=True,
+ expected_log=[
+ 'Rebaselining fast/image...',
+ ' Moving current mac baselines to mac-leopard',
+ ' Moved image-expected.checksum',
+ ' Moved image-expected.png',
+ ' Updating baselines for mac',
+ ' Updated image-expected.checksum',
+ ' Updated image-expected.png',
+ ])
+
+ def test_text_rebaseline_move_already_exist(self):
+ self._assertRebaseline(
+ test_files=(
+ 'fast/text-expected.txt',
+ 'platform/mac-leopard/fast/text-expected.txt',
+ 'platform/mac/fast/text-expected.txt',
+ ),
+ results_files=(
+ 'fast/text-actual.txt',
+ ),
+ test_name='fast/text.html',
+ baseline_target='mac',
+ baseline_move_to='mac-leopard',
+ expected_success=False,
+ expected_log=[
+ 'Rebaselining fast/text...',
+ ' Moving current mac baselines to mac-leopard',
+ ' Already had baselines in mac-leopard, could not move existing mac ones',
+ ])
+
+ def test_image_rebaseline(self):
+ self._assertRebaseline(
+ test_files=(
+ 'fast/image-expected.txt',
+ 'platform/mac/fast/image-expected.png',
+ 'platform/mac/fast/image-expected.checksum',
+ ),
+ results_files=(
+ 'fast/image-actual.png',
+ 'fast/image-actual.checksum',
+ ),
+ test_name='fast/image.html',
+ baseline_target='mac',
+ baseline_move_to='none',
+ expected_success=True,
+ expected_log=[
+ 'Rebaselining fast/image...',
+ ' Updating baselines for mac',
+ ' Updated image-expected.checksum',
+ ' Updated image-expected.png',
+ ])
+
+ def test_gather_baselines(self):
+ example_json = layouttestresults_unittest.LayoutTestResultsTest.example_full_results_json
+ results_json = json.loads(strip_json_wrapper(example_json))
+ server = RebaselineServer()
+ server._test_config = get_test_config()
+ server._gather_baselines(results_json)
+ self.assertEqual(results_json['tests']['svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html']['state'], 'needs_rebaseline')
+ self.assertNotIn('prototype-chocolate.html', results_json['tests'])
+
+ def _assertRebaseline(self, test_files, results_files, test_name, baseline_target, baseline_move_to, expected_success, expected_log):
+ log = []
+ test_config = get_test_config(test_files, results_files)
+ success = rebaselineserver._rebaseline_test(
+ test_name,
+ baseline_target,
+ baseline_move_to,
+ test_config,
+ log=lambda l: log.append(l))
+ self.assertEqual(expected_log, log)
+ self.assertEqual(expected_success, success)
+
+
+class GetActualResultFilesTest(unittest.TestCase):
+ def test(self):
+ test_config = get_test_config(result_files=(
+ 'fast/text-actual.txt',
+ 'fast2/text-actual.txt',
+ 'fast/text2-actual.txt',
+ 'fast/text-notactual.txt',
+ ))
+ self.assertItemsEqual(
+ ('text-actual.txt',),
+ rebaselineserver._get_actual_result_files(
+ 'fast/text.html', test_config))
+
+
+class GetBaselinesTest(unittest.TestCase):
+ def test_no_baselines(self):
+ self._assertBaselines(
+ test_files=(),
+ test_name='fast/missing.html',
+ expected_baselines={})
+
+ def test_text_baselines(self):
+ self._assertBaselines(
+ test_files=(
+ 'fast/text-expected.txt',
+ 'platform/mac/fast/text-expected.txt',
+ ),
+ test_name='fast/text.html',
+ expected_baselines={
+ 'mac': {'.txt': True},
+ 'base': {'.txt': False},
+ })
+
+ def test_image_and_text_baselines(self):
+ self._assertBaselines(
+ test_files=(
+ 'fast/image-expected.txt',
+ 'platform/mac/fast/image-expected.png',
+ 'platform/mac/fast/image-expected.checksum',
+ 'platform/win/fast/image-expected.png',
+ 'platform/win/fast/image-expected.checksum',
+ ),
+ test_name='fast/image.html',
+ expected_baselines={
+ 'base': {'.txt': True},
+ 'mac': {'.checksum': True, '.png': True},
+ 'win': {'.checksum': False, '.png': False},
+ })
+
+ def test_extra_baselines(self):
+ self._assertBaselines(
+ test_files=(
+ 'fast/text-expected.txt',
+ 'platform/nosuchplatform/fast/text-expected.txt',
+ ),
+ test_name='fast/text.html',
+ expected_baselines={'base': {'.txt': True}})
+
+ def _assertBaselines(self, test_files, test_name, expected_baselines):
+ actual_baselines = rebaselineserver.get_test_baselines(test_name, get_test_config(test_files))
+ self.assertEqual(expected_baselines, actual_baselines)
+
+
+def get_test_config(test_files=[], result_files=[]):
+ host = MockHost()
+ port = host.port_factory.get()
+ layout_tests_directory = port.layout_tests_dir()
+ results_directory = port.results_directory()
+
+ for file in test_files:
+ host.filesystem.write_binary_file(host.filesystem.join(layout_tests_directory, file), '')
+ for file in result_files:
+ host.filesystem.write_binary_file(host.filesystem.join(results_directory, file), '')
+
+ class TestMacPort(Port):
+ port_name = "mac"
+ FALLBACK_PATHS = {'': ['mac']}
+
+ return TestConfig(
+ TestMacPort(host, 'mac'),
+ layout_tests_directory,
+ results_directory,
+ ('mac', 'mac-leopard', 'win', 'linux'),
+ host)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/reflectionhandler.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/reflectionhandler.py
new file mode 100644
index 0000000..549b271
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/reflectionhandler.py
@@ -0,0 +1,152 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import BaseHTTPServer
+
+import cgi
+import codecs
+import datetime
+import fnmatch
+import json
+import mimetypes
+import os.path
+import shutil
+import threading
+import time
+import urlparse
+import wsgiref.handlers
+
+
+class ReflectionHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+ STATIC_FILE_EXTENSIONS = ['.js', '.css', '.html']
+ # Subclasses should override.
+ STATIC_FILE_DIRECTORY = None
+
+ # Setting this flag to True causes the server to send
+ # Access-Control-Allow-Origin: *
+ # with every response.
+ allow_cross_origin_requests = False
+
+ def do_GET(self):
+ self._handle_request()
+
+ def do_POST(self):
+ self._handle_request()
+
+ def do_HEAD(self):
+ self._handle_request()
+
+ def read_entity_body(self):
+ length = int(self.headers.getheader('content-length'))
+ return self.rfile.read(length)
+
+ def _read_entity_body_as_json(self):
+ return json.loads(self.read_entity_body())
+
+ def _handle_request(self):
+ if "?" in self.path:
+ path, query_string = self.path.split("?", 1)
+ self.query = cgi.parse_qs(query_string)
+ else:
+ path = self.path
+ self.query = {}
+ function_or_file_name = path[1:] or "index.html"
+
+ _, extension = os.path.splitext(function_or_file_name)
+ if extension in self.STATIC_FILE_EXTENSIONS:
+ self._serve_static_file(function_or_file_name)
+ return
+
+ function_name = function_or_file_name.replace(".", "_")
+ if not hasattr(self, function_name):
+ self.send_error(404, "Unknown function %s" % function_name)
+ return
+ if function_name[0] == "_":
+ self.send_error(401, "Not allowed to invoke private or protected methods")
+ return
+ function = getattr(self, function_name)
+ function()
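+ # Dispatch examples: '/main.css' has a static extension and is served
+ # from STATIC_FILE_DIRECTORY; '/results.json' maps to the method name
+ # 'results_json' on the handler subclass and is invoked directly.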
+
+ def _serve_static_file(self, static_path):
+ self._serve_file(os.path.join(self.STATIC_FILE_DIRECTORY, static_path))
+
+ def quitquitquit(self):
+ self._serve_text("Server quit.\n")
+ # Shutdown has to happen on another thread from the server's thread,
+ # otherwise there's a deadlock
+ threading.Thread(target=lambda: self.server.shutdown()).start()
+
+ def _send_access_control_header(self):
+ if self.allow_cross_origin_requests:
+ self.send_header('Access-Control-Allow-Origin', '*')
+
+ def _serve_text(self, text):
+ self.send_response(200)
+ self._send_access_control_header()
+ self.send_header("Content-type", "text/plain")
+ self.end_headers()
+ self.wfile.write(text)
+
+ def _serve_json(self, json_object):
+ self.send_response(200)
+ self._send_access_control_header()
+ self.send_header('Content-type', 'application/json')
+ self.end_headers()
+ json.dump(json_object, self.wfile)
+
+ def _serve_file(self, file_path, cacheable_seconds=0, headers_only=False):
+ if not os.path.exists(file_path):
+ self.send_error(404, "File not found")
+ return
+ with codecs.open(file_path, "rb") as static_file:
+ self.send_response(200)
+ self._send_access_control_header()
+ self.send_header("Content-Length", os.path.getsize(file_path))
+ mime_type, encoding = mimetypes.guess_type(file_path)
+ if mime_type:
+ self.send_header("Content-type", mime_type)
+
+ if cacheable_seconds:
+ expires_time = (datetime.datetime.now() +
+ datetime.timedelta(0, cacheable_seconds))
+ expires_formatted = wsgiref.handlers.format_date_time(
+ time.mktime(expires_time.timetuple()))
+ self.send_header("Expires", expires_formatted)
+ self.end_headers()
+
+ if not headers_only:
+ shutil.copyfileobj(static_file, self.wfile)
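+ # Example: _serve_file(path, cacheable_seconds=60) adds an Expires header
+ # 60 seconds in the future so clients (such as the rebaseline server UI)
+ # can pre-fetch results.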
+
+ def _serve_xml(self, xml):
+ self.send_response(200)
+ self._send_access_control_header()
+ self.send_header("Content-type", "text/xml")
+ self.end_headers()
+ xml = xml.encode('utf-8')
+ self.wfile.write(xml)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/reflectionhandler_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/reflectionhandler_unittest.py
new file mode 100644
index 0000000..fe0c738
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/servers/reflectionhandler_unittest.py
@@ -0,0 +1,99 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.tool.servers.reflectionhandler import ReflectionHandler
+
+
+class TestReflectionHandler(ReflectionHandler):
+ STATIC_FILE_DIRECTORY = "/"
+
+ def __init__(self):
+ self.static_files_served = set()
+ self.errors_sent = set()
+ self.functions_run = set()
+
+ def _serve_static_file(self, name):
+ self.static_files_served.add(name)
+
+ def send_error(self, code, description):
+ self.errors_sent.add(code)
+
+ def function_one(self):
+ self.functions_run.add("function_one")
+
+ def some_html(self):
+ self.functions_run.add("some_html")
+
+
+class WriteConvertingLogger(object):
+ def __init__(self):
+ self.data = ''
+
+ def write(self, data):
+ # If data is still a unicode string, str() will raise UnicodeEncodeError
+ # on any non-ASCII characters; callers must encode (e.g. to UTF-8) first.
+ self.data = str(data)
+
+
+class TestReflectionHandlerServeXML(ReflectionHandler):
+ def __init__(self):
+ self.requestline = False
+ self.client_address = '127.0.0.1'
+ self.request_version = '1'
+ self.wfile = WriteConvertingLogger()
+
+ def serve_xml(self, data):
+ self._serve_xml(data)
+
+ def log_message(self, _format, *_args):
+ pass
+
+
+class ReflectionHandlerTest(unittest.TestCase):
+ def assert_handler_response(self, requests, expected_static_files, expected_errors, expected_functions):
+ handler = TestReflectionHandler()
+ for request in requests:
+ handler.path = request
+ handler._handle_request()
+ self.assertEqual(handler.static_files_served, expected_static_files)
+ self.assertEqual(handler.errors_sent, expected_errors)
+ self.assertEqual(handler.functions_run, expected_functions)
+
+ def test_static_content_or_function_switch(self):
+ self.assert_handler_response(["/test.js"], set(["test.js"]), set(), set())
+ self.assert_handler_response(["/test.js", "/test.css", "/test.html"], set(["test.js", "test.html", "test.css"]), set(), set())
+ self.assert_handler_response(["/test.js", "/test.exe", "/testhtml"], set(["test.js"]), set([404]), set())
+ self.assert_handler_response(["/test.html", "/function.one"], set(["test.html"]), set(), set(['function_one']))
+ self.assert_handler_response(["/some.html"], set(["some.html"]), set(), set())
+
+ def test_svn_log_non_ascii(self):
+ xmlChangelog = u'<?xml version="1.0"?>\n<log>\n<logentry revision="1">\n<msg>Patch from John Do\xe9.</msg>\n</logentry>\n</log>'
+ handler = TestReflectionHandlerServeXML()
+ handler.serve_xml(xmlChangelog)
+ self.assertEqual(handler.wfile.data, xmlChangelog.encode('utf-8'))
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/steps/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/steps/__init__.py
new file mode 100644
index 0000000..261606d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/steps/__init__.py
@@ -0,0 +1,31 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# FIXME: Is this the right way to do this?
+from webkitpy.tool.steps.confirmdiff import ConfirmDiff
+from webkitpy.tool.steps.options import Options
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/steps/abstractstep.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/steps/abstractstep.py
new file mode 100644
index 0000000..9d7f2e0
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/steps/abstractstep.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.tool.steps.options import Options
+
+
+class AbstractStep(object):
+ def __init__(self, tool, options):
+ self._tool = tool
+ self._options = options
+
+ def _exit(self, code):
+ sys.exit(code)
+
+ @classmethod
+ def options(cls):
+ return [
+ # We need this option here because cached_lookup uses it. :(
+ Options.git_commit,
+ ]
+
+ def run(self, state):
+ raise NotImplementedError("subclasses must implement")
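+# Minimal subclass sketch (hypothetical; ConfirmDiff below is a real example):
+#
+#   class HelloStep(AbstractStep):
+#       def run(self, state):
+#           if not self._tool.user.confirm("Proceed?"):
+#               self._exit(1)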
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/steps/confirmdiff.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/steps/confirmdiff.py
new file mode 100644
index 0000000..8472da6
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/steps/confirmdiff.py
@@ -0,0 +1,78 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import urllib
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.prettypatch import PrettyPatch
+from webkitpy.common.system import logutils
+from webkitpy.common.system.executive import ScriptError
+
+
+_log = logutils.get_logger(__file__)
+
+
+class ConfirmDiff(AbstractStep):
+ @classmethod
+ def options(cls):
+ return AbstractStep.options() + [
+ Options.confirm,
+ ]
+
+ def _show_pretty_diff(self):
+ if not self._tool.user.can_open_url():
+ return None
+
+ try:
+ pretty_patch = PrettyPatch(self._tool.executive)
+ pretty_diff_file = pretty_patch.pretty_diff_file(self.diff())
+ url = "file://%s" % urllib.quote(pretty_diff_file.name)
+ self._tool.user.open_url(url)
+ # We return the pretty_diff_file here because we need to keep the
+ # file alive until the user has had a chance to confirm the diff.
+ return pretty_diff_file
+ except ScriptError:
+ _log.warning("PrettyPatch failed. :(")
+ except OSError:
+ _log.warning("PrettyPatch unavailable.")
+
+ def diff(self):
+ changed_files = self._tool.scm().changed_files(self._options.git_commit)
+ return self._tool.scm().create_patch(self._options.git_commit,
+ changed_files=changed_files)
+
+ def run(self, state):
+ if not self._options.confirm:
+ return
+ pretty_diff_file = self._show_pretty_diff()
+ if pretty_diff_file:
+ diff_correct = self._tool.user.confirm("Was that diff correct?")
+ pretty_diff_file.close()
+ if not diff_correct:
+ self._exit(1)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/tool/steps/options.py b/src/third_party/blink/Tools/Scripts/webkitpy/tool/steps/options.py
new file mode 100644
index 0000000..4cd2b7d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/tool/steps/options.py
@@ -0,0 +1,35 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from optparse import make_option
+
+class Options(object):
+ confirm = make_option("--no-confirm", action="store_false", dest="confirm", default=True, help="Skip confirmation steps.")
+ git_commit = make_option("-g", "--git-commit", action="store", dest="git_commit", help="Operate on a local commit. If a range, the commits are squashed into one. <ref>.... includes the working copy changes. UPSTREAM can be used for the upstream/tracking branch.")
+ parent_command = make_option("--parent-command", action="store", dest="parent_command", default=None, help="(Internal) The command that spawned this instance.")
+ quiet = make_option("--quiet", action="store_true", dest="quiet", default=False, help="Produce less console output.")
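+# Usage sketch (hedged; the real wiring lives in the command classes): each
+# attribute is a standard optparse option, so a command can build a parser
+# with, e.g.:
+#
+#   from optparse import OptionParser
+#   parser = OptionParser(option_list=[Options.confirm, Options.quiet])
+#   options, args = parser.parse_args()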
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/w3c/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/w3c/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/w3c/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/w3c/deps_updater.py b/src/third_party/blink/Tools/Scripts/webkitpy/w3c/deps_updater.py
new file mode 100644
index 0000000..874c2d9
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/w3c/deps_updater.py
@@ -0,0 +1,179 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Pull latest revisions of the W3C test repos and update our DEPS entries."""
+
+import argparse
+
+
+from webkitpy.common.webkit_finder import WebKitFinder
+
+
+class DepsUpdater(object):
+ def __init__(self, host):
+ self.host = host
+ self.executive = host.executive
+ self.fs = host.filesystem
+ self.finder = WebKitFinder(self.fs)
+ self.verbose = False
+ self.allow_local_blink_commits = False
+ self.keep_w3c_repos_around = False
+
+ def main(self, argv=None):
+ self.parse_args(argv)
+
+ self.cd('')
+ if not self.checkout_is_okay():
+ return 1
+
+ self.print_('## noting the current Blink commitish')
+ blink_commitish = self.run(['git', 'show-ref', 'HEAD'])[1].split()[0]
+
+ wpt_import_text = self.update('web-platform-tests',
+ 'https://chromium.googlesource.com/external/w3c/web-platform-tests.git')
+
+ css_import_text = self.update('csswg-test',
+ 'https://chromium.googlesource.com/external/w3c/csswg-test.git')
+
+ self.commit_changes_if_needed(blink_commitish, css_import_text, wpt_import_text)
+
+ return 0
+
+ def parse_args(self, argv):
+ parser = argparse.ArgumentParser()
+ parser.description = __doc__
+ parser.add_argument('-v', '--verbose', action='store_true',
+ help='log what we are doing')
+ parser.add_argument('--allow-local-blink-commits', action='store_true',
+ help='allow script to run even if we have local blink commits')
+ parser.add_argument('--keep-w3c-repos-around', action='store_true',
+ help='leave the w3c repos around that were imported previously.')
+
+ args = parser.parse_args(argv)
+ self.allow_local_blink_commits = args.allow_local_blink_commits
+ self.keep_w3c_repos_around = args.keep_w3c_repos_around
+ self.verbose = args.verbose
+
+ def checkout_is_okay(self):
+ if self.run(['git', 'diff', '--quiet', 'HEAD'], exit_on_failure=False)[0]:
+ self.print_('## blink checkout is dirty, aborting')
+ return False
+
+ local_blink_commits = self.run(['git', 'log', '--oneline', 'origin/master..HEAD'])[1]
+ if local_blink_commits and not self.allow_local_blink_commits:
+ self.print_('## blink checkout has local commits, aborting')
+ return False
+
+ if self.fs.exists(self.path_from_webkit_base('web-platform-tests')):
+ self.print_('## web-platform-tests repo exists, aborting')
+ return False
+
+ if self.fs.exists(self.path_from_webkit_base('csswg-test')):
+ self.print_('## csswg-test repo exists, aborting')
+ return False
+
+ return True
+
+ def update(self, repo, url):
+ self.print_('## cloning %s' % repo)
+ self.cd('')
+ self.run(['git', 'clone', url])
+
+ self.print_('## noting the revision we are importing')
+ master_commitish = self.run(['git', 'show-ref', 'origin/master'])[1].split()[0]
+
+ self.print_('## cleaning out tests from LayoutTests/imported/%s' % repo)
+ dest_repo = self.path_from_webkit_base('LayoutTests', 'imported', repo)
+ files_to_delete = self.fs.files_under(dest_repo, file_filter=self.is_not_baseline)
+ for subpath in files_to_delete:
+ self.remove('LayoutTests', 'imported', subpath)
+
+ self.print_('## importing the tests')
+ src_repo = self.path_from_webkit_base(repo)
+ import_path = self.path_from_webkit_base('Tools', 'Scripts', 'import-w3c-tests')
+ self.run([self.host.executable, import_path, '-d', 'imported', src_repo])
+
+ self.cd('')
+ self.run(['git', 'add', '--all', 'LayoutTests/imported/%s' % repo])
+
+ self.print_('## deleting any orphaned baselines')
+ previous_baselines = self.fs.files_under(dest_repo, file_filter=self.is_baseline)
+ for subpath in previous_baselines:
+ full_path = self.fs.join(dest_repo, subpath)
+ if self.fs.glob(full_path.replace('-expected.txt', '*')) == [full_path]:
+ self.fs.remove(full_path)
+
+ if not self.keep_w3c_repos_around:
+ self.print_('## deleting %s repo' % repo)
+ self.cd('')
+ self.rmtree(repo)
+
+ return 'imported %s@%s' % (repo, master_commitish)
+
+ def commit_changes_if_needed(self, blink_commitish, css_import_text, wpt_import_text):
+ if self.run(['git', 'diff', '--quiet', 'HEAD'], exit_on_failure=False)[0]:
+ self.print_('## committing changes')
+ commit_msg = ('update-w3c-deps import using blink %s:\n'
+ '\n'
+ '%s\n'
+ '%s\n' % (blink_commitish, css_import_text, wpt_import_text))
+ path_to_commit_msg = self.path_from_webkit_base('commit_msg')
+ if self.verbose:
+ self.print_('cat > %s <<EOF' % path_to_commit_msg)
+ self.print_(commit_msg)
+ self.print_('EOF')
+ self.fs.write_text_file(path_to_commit_msg, commit_msg)
+ self.run(['git', 'commit', '-a', '-F', path_to_commit_msg])
+ self.remove(path_to_commit_msg)
+ self.print_('## Done: changes imported and committed')
+ else:
+ self.print_('## Done: no changes to import')
+
+ def is_baseline(self, fs, dirname, basename):
+ return basename.endswith('-expected.txt')
+
+ def is_not_baseline(self, fs, dirname, basename):
+ return not self.is_baseline(fs, dirname, basename)
+
+ def run(self, cmd, exit_on_failure=True):
+ if self.verbose:
+ self.print_(' '.join(cmd))
+
+ proc = self.executive.popen(cmd, stdout=self.executive.PIPE, stderr=self.executive.PIPE)
+ out, err = proc.communicate()
+ if proc.returncode or self.verbose:
+ self.print_('# ret> %d' % proc.returncode)
+ if out:
+ for line in out.splitlines():
+ self.print_('# out> %s' % line)
+ if err:
+ for line in err.splitlines():
+ self.print_('# err> %s' % line)
+ if exit_on_failure and proc.returncode:
+ self.host.exit(proc.returncode)
+ return proc.returncode, out
+
+ def cd(self, *comps):
+ dest = self.path_from_webkit_base(*comps)
+ if self.verbose:
+ self.print_('cd %s' % dest)
+ self.fs.chdir(dest)
+
+ def remove(self, *comps):
+ dest = self.path_from_webkit_base(*comps)
+ if self.verbose:
+ self.print_('rm %s' % dest)
+ self.fs.remove(dest)
+
+ def rmtree(self, *comps):
+ dest = self.path_from_webkit_base(*comps)
+ if self.verbose:
+ self.print_('rm -fr %s' % dest)
+ self.fs.rmtree(dest)
+
+ def path_from_webkit_base(self, *comps):
+ return self.finder.path_from_webkit_base(*comps)
+
+ def print_(self, msg):
+ self.host.print_(msg)
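+# Driver sketch (hypothetical; the actual entry point is a separate
+# update-w3c-deps script, not part of this file):
+#
+#   from webkitpy.common.host import Host
+#   exit_code = DepsUpdater(Host()).main()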
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_converter.py b/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_converter.py
new file mode 100755
index 0000000..4eddf19
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_converter.py
@@ -0,0 +1,221 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import logging
+import re
+
+from webkitpy.common.host import Host
+from webkitpy.common.webkit_finder import WebKitFinder
+from HTMLParser import HTMLParser
+
+
+_log = logging.getLogger(__name__)
+
+
+def convert_for_webkit(new_path, filename, reference_support_info, host=Host()):
+ """ Converts a file's |contents| so it will function correctly in its |new_path| in Webkit.
+
+ Returns the list of modified properties and the modified text if the file was modifed, None otherwise."""
+ contents = host.filesystem.read_binary_file(filename)
+ converter = _W3CTestConverter(new_path, filename, reference_support_info, host)
+ if filename.endswith('.css'):
+ return converter.add_webkit_prefix_to_unprefixed_properties(contents)
+ else:
+ converter.feed(contents)
+ converter.close()
+ return converter.output()
+
+
+class _W3CTestConverter(HTMLParser):
+ def __init__(self, new_path, filename, reference_support_info, host=Host()):
+ HTMLParser.__init__(self)
+
+ self._host = host
+ self._filesystem = self._host.filesystem
+ self._webkit_root = WebKitFinder(self._filesystem).webkit_base()
+
+ self.converted_data = []
+ self.converted_properties = []
+ self.in_style_tag = False
+ self.style_data = []
+ self.filename = filename
+ self.reference_support_info = reference_support_info
+
+ resources_path = self.path_from_webkit_root('LayoutTests', 'resources')
+ resources_relpath = self._filesystem.relpath(resources_path, new_path)
+ self.resources_relpath = resources_relpath
+
+ # These settings might vary between WebKit and Blink
+ self._css_property_file = self.path_from_webkit_root('Source', 'core', 'css', 'CSSProperties.in')
+
+ self.prefixed_properties = self.read_webkit_prefixed_css_property_list()
+ prop_regex = r'([\s{]|^)(' + "|".join(prop.replace('-webkit-', '') for prop in self.prefixed_properties) + r')(\s+:|:)'
+ self.prop_re = re.compile(prop_regex)
+
+ def output(self):
+ return (self.converted_properties, ''.join(self.converted_data))
+
+ def path_from_webkit_root(self, *comps):
+ return self._filesystem.abspath(self._filesystem.join(self._webkit_root, *comps))
+
+ def read_webkit_prefixed_css_property_list(self):
+ prefixed_properties = []
+ unprefixed_properties = set()
+
+ contents = self._filesystem.read_text_file(self._css_property_file)
+ for line in contents.splitlines():
+ if re.match('^(#|//|$)', line):
+ # skip comments and preprocessor directives
+ continue
+ prop = line.split()[0]
+ # Find properties starting with the -webkit- prefix.
+ match = re.match('-webkit-([\w|-]*)', prop)
+ if match:
+ prefixed_properties.append(match.group(1))
+ else:
+ unprefixed_properties.add(prop.strip())
+
+ # Ignore any prefixed properties for which an unprefixed version is supported
+ return [prop for prop in prefixed_properties if prop not in unprefixed_properties]
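+ # Example (hypothetical property names): given CSSProperties.in entries
+ # '-webkit-foo', '-webkit-flex' and an unprefixed 'flex', the result is
+ # ['foo']; 'flex' is dropped because an unprefixed version is supported.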
+
+ def add_webkit_prefix_to_unprefixed_properties(self, text):
+ """ Searches |text| for instances of properties requiring the -webkit- prefix and adds the prefix to them.
+
+ Returns the set of converted properties and the modified text."""
+
+ converted_properties = set()
+ text_chunks = []
+ cur_pos = 0
+ for m in self.prop_re.finditer(text):
+ text_chunks.extend([text[cur_pos:m.start()], m.group(1), '-webkit-', m.group(2), m.group(3)])
+ converted_properties.add(m.group(2))
+ cur_pos = m.end()
+ text_chunks.append(text[cur_pos:])
+
+ for prop in converted_properties:
+ _log.info(' converting %s', prop)
+
+ # FIXME: Handle the JS versions of these properties and GetComputedStyle, too.
+ return (converted_properties, ''.join(text_chunks))
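+ # Example (assuming 'flex' is in self.prefixed_properties): the text
+ # '#b { flex: 1; }' becomes '#b { -webkit-flex: 1; }' and the returned
+ # property set is set(['flex']).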
+
+ def convert_reference_relpaths(self, text):
+ """ Searches |text| for instances of files in reference_support_info and updates the relative path to be correct for the new ref file location"""
+
+ converted = text
+ for path in self.reference_support_info['files']:
+ if text.find(path) != -1:
+ # FIXME: This doesn't handle an edge case where simply removing the relative path doesn't work.
+ # See crbug.com/421584 for details.
+ new_path = re.sub(self.reference_support_info['reference_relpath'], '', path, 1)
+ converted = re.sub(path, new_path, text)
+
+ return converted
+
+ def convert_style_data(self, data):
+ converted = self.add_webkit_prefix_to_unprefixed_properties(data)
+ if converted[0]:
+ self.converted_properties.extend(list(converted[0]))
+
+ if self.reference_support_info is None or self.reference_support_info == {}:
+ return converted[1]
+
+ return self.convert_reference_relpaths(converted[1])
+
+ def convert_attributes_if_needed(self, tag, attrs):
+ converted = self.get_starttag_text()
+ if tag in ('script', 'link'):
+ target_attr = 'src'
+ if tag != 'script':
+ target_attr = 'href'
+ for attr_name, attr_value in attrs:
+ if attr_name == target_attr:
+ new_path = re.sub('/resources/testharness',
+ self.resources_relpath + '/testharness',
+ attr_value)
+ converted = re.sub(re.escape(attr_value), new_path, converted)
+ new_path = re.sub('/common/vendor-prefix',
+ self.resources_relpath + '/vendor-prefix',
+ attr_value)
+ converted = re.sub(re.escape(attr_value), new_path, converted)
+
+ for attr_name, attr_value in attrs:
+ if attr_name == 'style':
+ new_style = self.convert_style_data(attr_value)
+ converted = re.sub(re.escape(attr_value), new_style, converted)
+ if attr_name == 'class' and 'instructions' in attr_value:
+ # Always hide instructions, they're for manual testers.
+ converted = re.sub(' style=".*?"', '', converted)
+ converted = re.sub('\>', ' style="display:none">', converted)
+
+ src_tags = ('script', 'img', 'style', 'frame', 'iframe', 'input', 'layer', 'textarea', 'video', 'audio')
+ if tag in src_tags and self.reference_support_info is not None and self.reference_support_info != {}:
+ for attr_name, attr_value in attrs:
+ if attr_name == 'src':
+ new_path = self.convert_reference_relpaths(attr_value)
+ converted = re.sub(re.escape(attr_value), new_path, converted)
+
+ self.converted_data.append(converted)
+
+ def handle_starttag(self, tag, attrs):
+ if tag == 'style':
+ self.in_style_tag = True
+ self.convert_attributes_if_needed(tag, attrs)
+
+ def handle_endtag(self, tag):
+ if tag == 'style':
+ self.converted_data.append(self.convert_style_data(''.join(self.style_data)))
+ self.in_style_tag = False
+ self.style_data = []
+ self.converted_data.extend(['</', tag, '>'])
+
+ def handle_startendtag(self, tag, attrs):
+ self.convert_attributes_if_needed(tag, attrs)
+
+ def handle_data(self, data):
+ if self.in_style_tag:
+ self.style_data.append(data)
+ else:
+ self.converted_data.append(data)
+
+ def handle_entityref(self, name):
+ self.converted_data.extend(['&', name, ';'])
+
+ def handle_charref(self, name):
+ self.converted_data.extend(['&#', name, ';'])
+
+ def handle_comment(self, data):
+ self.converted_data.extend(['<!-- ', data, ' -->'])
+
+ def handle_decl(self, decl):
+ self.converted_data.extend(['<!', decl, '>'])
+
+ def handle_pi(self, data):
+ self.converted_data.extend(['<?', data, '>'])
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_converter_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_converter_unittest.py
new file mode 100755
index 0000000..f705f84
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_converter_unittest.py
@@ -0,0 +1,425 @@
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import os
+import re
+import unittest
+
+from webkitpy.common.host import Host
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.webkit_finder import WebKitFinder
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
+from webkitpy.w3c.test_converter import _W3CTestConverter
+
+DUMMY_FILENAME = 'dummy.html'
+DUMMY_PATH = 'dummy/testharness/path'
+
+class W3CTestConverterTest(unittest.TestCase):
+
+ # FIXME: When we move to using a MockHost, this method should be removed, since
+ # then we can just pass in a dummy dir path
+ def fake_dir_path(self, dirname):
+ filesystem = Host().filesystem
+ webkit_root = WebKitFinder(filesystem).webkit_base()
+ return filesystem.abspath(filesystem.join(webkit_root, "LayoutTests", "css", dirname))
+
+ def test_read_prefixed_property_list(self):
+ """ Tests that the current list of properties requiring the -webkit- prefix load correctly """
+
+ # FIXME: We should be passing in a MockHost here ...
+ converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
+ prop_list = converter.prefixed_properties
+ self.assertTrue(prop_list, 'No prefixed properties found')
+
+ def test_convert_for_webkit_nothing_to_convert(self):
+ """ Tests convert_for_webkit() using a basic test that has nothing to convert """
+
+ test_html = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<title>CSS Test: DESCRIPTION OF TEST</title>
+<link rel="author" title="NAME_OF_AUTHOR"
+href="mailto:EMAIL OR http://CONTACT_PAGE"/>
+<link rel="help" href="RELEVANT_SPEC_SECTION"/>
+<meta name="assert" content="TEST ASSERTION"/>
+<style type="text/css"><![CDATA[
+CSS FOR TEST
+]]></style>
+</head>
+<body>
+CONTENT OF TEST
+</body>
+</html>
+"""
+ converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
+
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ converter.feed(test_html)
+ converter.close()
+ converted = converter.output()
+ finally:
+ oc.restore_output()
+
+ self.verify_no_conversion_happened(converted, test_html)
+
+ def test_convert_for_webkit_harness_only(self):
+ """ Tests convert_for_webkit() using a basic JS test that uses testharness.js only and has no prefixed properties """
+
+ test_html = """<head>
+<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
+<script src="/resources/testharness.js"></script>
+</head>
+"""
+ fake_dir_path = self.fake_dir_path("harnessonly")
+ converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
+ converter.feed(test_html)
+ converter.close()
+ converted = converter.output()
+
+ self.verify_conversion_happened(converted)
+ self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
+ self.verify_prefixed_properties(converted, [])
+
+ def test_convert_for_webkit_properties_only(self):
+ """ Tests convert_for_webkit() using a test that has 2 prefixed properties: 1 in a style block + 1 inline style """
+
+ test_html = """<html>
+<head>
+<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
+<script src="/resources/testharness.js"></script>
+<style type="text/css">
+
+#block1 { @test0@: propvalue; }
+
+</style>
+</head>
+<body>
+<div id="elem1" style="@test1@: propvalue;"></div>
+</body>
+</html>
+"""
+ fake_dir_path = self.fake_dir_path('harnessandprops')
+ converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
+ test_content = self.generate_test_content(converter.prefixed_properties, 1, test_html)
+
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ converter.feed(test_content[1])
+ converter.close()
+ converted = converter.output()
+ finally:
+ oc.restore_output()
+
+ self.verify_conversion_happened(converted)
+ self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
+ self.verify_prefixed_properties(converted, test_content[0])
+
+ def test_convert_for_webkit_harness_and_properties(self):
+ """ Tests convert_for_webkit() using a basic JS test that uses testharness.js and testharness.css and has 4 prefixed properties: 3 in a style block + 1 inline style """
+
+ test_html = """<html>
+<head>
+<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
+<script src="/resources/testharness.js"></script>
+<style type="text/css">
+
+#block1 { @test0@: propvalue; }
+#block2 { @test1@: propvalue; }
+#block3 { @test2@: propvalue; }
+
+</style>
+</head>
+<body>
+<div id="elem1" style="@test3@: propvalue;"></div>
+</body>
+</html>
+"""
+ fake_dir_path = self.fake_dir_path('harnessandprops')
+ converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
+
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ test_content = self.generate_test_content(converter.prefixed_properties, 2, test_html)
+ converter.feed(test_content[1])
+ converter.close()
+ converted = converter.output()
+ finally:
+ oc.restore_output()
+
+ self.verify_conversion_happened(converted)
+ self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
+ self.verify_prefixed_properties(converted, test_content[0])
+
+ def test_convert_test_harness_paths(self):
+ """ Tests convert_testharness_paths() with a test that uses all three testharness files """
+
+ test_html = """<head>
+<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+</head>
+"""
+ fake_dir_path = self.fake_dir_path('testharnesspaths')
+ converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
+
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ converter.feed(test_html)
+ converter.close()
+ converted = converter.output()
+ finally:
+ oc.restore_output()
+
+ self.verify_conversion_happened(converted)
+ self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 2, 1)
+
+ def test_convert_vendor_prefix_js_paths(self):
+ test_html = """<head>
+<script src="/common/vendor-prefix.js">
+</head>
+"""
+ fake_dir_path = self.fake_dir_path('adapterjspaths')
+ converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
+
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ converter.feed(test_html)
+ converter.close()
+ converted = converter.output()
+ finally:
+ oc.restore_output()
+
+ new_html = BeautifulSoup(converted[1])
+
+ # Verify the original paths are gone, and the new paths are present.
+ orig_path_pattern = re.compile('\"/common/vendor-prefix.js')
+ self.assertEqual(len(new_html.findAll(src=orig_path_pattern)), 0, 'vendor-prefix.js path was not converted')
+
+ resources_dir = converter.path_from_webkit_root("LayoutTests", "resources")
+ new_relpath = os.path.relpath(resources_dir, fake_dir_path)
+ relpath_pattern = re.compile(new_relpath)
+ self.assertEqual(len(new_html.findAll(src=relpath_pattern)), 1, 'vendor-prefix.js relative path not correct')
+
+ def test_convert_prefixed_properties(self):
+ """ Tests convert_prefixed_properties() file that has 20 properties requiring the -webkit- prefix:
+ 10 in one style block + 5 in another style
+ block + 5 inline styles, including one with multiple prefixed properties.
+ The properties in the test content are in all sorts of wack formatting.
+ """
+
+ test_html = """<html>
+<style type="text/css"><![CDATA[
+
+.block1 {
+ width: 300px;
+ height: 300px
+}
+
+.block2 {
+ @test0@: propvalue;
+}
+
+.block3{@test1@: propvalue;}
+
+.block4 { @test2@:propvalue; }
+
+.block5{ @test3@ :propvalue; }
+
+#block6 { @test4@ : propvalue; }
+
+#block7
+{
+ @test5@: propvalue;
+}
+
+#block8 { @test6@: propvalue; }
+
+#block9:pseudo
+{
+
+ @test7@: propvalue;
+ @test8@: propvalue propvalue propvalue;
+}
+
+]]></style>
+</head>
+<body>
+ <div id="elem1" style="@test9@: propvalue;"></div>
+ <div id="elem2" style="propname: propvalue; @test10@ : propvalue; propname:propvalue;"></div>
+ <div id="elem2" style="@test11@: propvalue; @test12@ : propvalue; @test13@ :propvalue;"></div>
+ <div id="elem3" style="@test14@:propvalue"></div>
+</body>
+<style type="text/css"><![CDATA[
+
+.block10{ @test15@: propvalue; }
+.block11{ @test16@: propvalue; }
+.block12{ @test17@: propvalue; }
+#block13:pseudo
+{
+ @test18@: propvalue;
+ @test19@: propvalue;
+}
+
+]]></style>
+</html>
+"""
+ converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
+ test_content = self.generate_test_content(converter.prefixed_properties, 20, test_html)
+
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ converter.feed(test_content[1])
+ converter.close()
+ converted = converter.output()
+ finally:
+ oc.restore_output()
+
+ self.verify_conversion_happened(converted)
+ self.verify_prefixed_properties(converted, test_content[0])
+
+ def test_hides_all_instructions_for_manual_testers(self):
+ test_html = """<body>
+<h1 class="instructions">Hello manual tester!</h1>
+<p class="instructions some_other_class">This is how you run this test.</p>
+<p style="willbeoverwritten" class="instructions">...</p>
+<doesntmatterwhichtagitis class="some_other_class instructions">...</p>
+<p>Legit content may contain the instructions string</p>
+</body>
+"""
+ expected_test_html = """<body>
+<h1 class="instructions" style="display:none">Hello manual tester!</h1>
+<p class="instructions some_other_class" style="display:none">This is how you run this test.</p>
+<p class="instructions" style="display:none">...</p>
+<doesntmatterwhichtagitis class="some_other_class instructions" style="display:none">...</p>
+<p>Legit content may contain the instructions string</p>
+</body>
+"""
+ converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
+
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ converter.feed(test_html)
+ converter.close()
+ converted = converter.output()
+ finally:
+ oc.restore_output()
+
+ self.assertEqual(converted[1], expected_test_html)
+
+ def test_convert_attributes_if_needed(self):
+ """ Tests convert_attributes_if_needed() using a reference file that has some relative src paths """
+
+ test_html = """<html>
+ <head>
+ <script src="../../some-script.js"></script>
+ <style src="../../../some-style.css"></style>
+ </head>
+ <body>
+ <img src="../../../../some-image.jpg">
+ </body>
+ </html>
+ """
+ test_reference_support_info = {'reference_relpath': '../', 'files': ['../../some-script.js', '../../../some-style.css', '../../../../some-image.jpg'], 'elements': ['script', 'style', 'img']}
+ converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, test_reference_support_info)
+
+ oc = OutputCapture()
+ oc.capture_output()
+
+ try:
+ converter.feed(test_html)
+ converter.close()
+ converted = converter.output()
+ finally:
+ oc.restore_output()
+
+ self.verify_conversion_happened(converted)
+ self.verify_reference_relative_paths(converted, test_reference_support_info)
+
+ def verify_conversion_happened(self, converted):
+ self.assertTrue(converted, "conversion didn't happen")
+
+ def verify_no_conversion_happened(self, converted, original):
+ self.assertEqual(converted[1], original, 'test should not have been converted')
+
+ def verify_test_harness_paths(self, converter, converted, test_path, num_src_paths, num_href_paths):
+ if isinstance(converted, basestring):
+ converted = BeautifulSoup(converted)
+
+ resources_dir = converter.path_from_webkit_root("LayoutTests", "resources")
+
+ # Verify the original paths are gone, and the new paths are present.
+ orig_path_pattern = re.compile('\"/resources/testharness')
+ self.assertEqual(len(converted.findAll(src=orig_path_pattern)), 0, 'testharness src path was not converted')
+ self.assertEqual(len(converted.findAll(href=orig_path_pattern)), 0, 'testharness href path was not converted')
+
+ new_relpath = os.path.relpath(resources_dir, test_path)
+ relpath_pattern = re.compile(new_relpath)
+ self.assertEqual(len(converted.findAll(src=relpath_pattern)), num_src_paths, 'testharness src relative path not correct')
+ self.assertEqual(len(converted.findAll(href=relpath_pattern)), num_href_paths, 'testharness href relative path not correct')
+
+ def verify_prefixed_properties(self, converted, test_properties):
+ self.assertEqual(len(set(converted[0])), len(set(test_properties)), 'Incorrect number of properties converted')
+ for test_prop in test_properties:
+ self.assertTrue((test_prop in converted[1]), 'Property ' + test_prop + ' not found in converted doc')
+
+ def verify_reference_relative_paths(self, converted, reference_support_info):
+ for path, element in zip(reference_support_info['files'], reference_support_info['elements']):
+ expected_path = re.sub(reference_support_info['reference_relpath'], '', path, 1)
+ expected_tag = '<' + element + ' src="' + expected_path + '">'
+ self.assertTrue(expected_tag in converted[1], 'relative path ' + path + ' was not converted correctly')
+
+ def generate_test_content(self, full_property_list, num_test_properties, html):
+ """Inserts properties requiring a -webkit- prefix into the content, replacing \'@testXX@\' with a property."""
+ test_properties = []
+ count = 0
+ while count < num_test_properties:
+ test_properties.append(full_property_list[count])
+ count += 1
+
+ # Replace the tokens in the testhtml with the test properties. Walk backward
+ # through the list to replace the double-digit tokens first
+ index = len(test_properties) - 1
+ while index >= 0:
+ # Use the unprefixed version
+ test_prop = test_properties[index].replace('-webkit-', '')
+ # Replace the token
+ html = html.replace('@test' + str(index) + '@', test_prop)
+ index -= 1
+
+ return (test_properties, html)
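+
+# Illustrative example of generate_test_content() (not an actual test case):
+# with a full_property_list beginning with '-webkit-flex', calling it with
+# num_test_properties=1 and html '<div style="@test0@: x;"></div>' returns
+# (['-webkit-flex'], '<div style="flex: x;"></div>'), i.e. tokens are
+# replaced with the unprefixed property names.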
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_importer.py b/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_importer.py
new file mode 100644
index 0000000..8e4a541
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_importer.py
@@ -0,0 +1,405 @@
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+"""
+ This script imports a directory of W3C tests into WebKit.
+
+ This script will import the tests into WebKit following these rules:
+
+ - By default, all tests are imported under LayoutTests/w3c/[repo-name].
+
+ - By default, only reftests and jstests are imported. This can be overridden
+ with the -a or --all argument.
+
+ - Also by default, if test files by the same name already exist in the
+ destination directory, they are overwritten, on the assumption that
+ running this script periodically refreshes the files. This can be
+ overridden with the -n or --no-overwrite flag.
+
+ - All files are converted to work in WebKit:
+ 1. Paths to testharness.js and vendor-prefix.js files are modified to
+ point to WebKit's copy of them in LayoutTests/resources, using the
+ correct relative path from the new location.
+ 2. All CSS properties requiring the -webkit- vendor prefix are prefixed
+ (the list of properties needing prefixes is read from Source/WebCore/CSS/CSSProperties.in).
+ 3. Each reftest has its own copy of its reference file following
+ the naming conventions new-run-webkit-tests expects.
+ 4. If a reference file lives outside the directory of the test that
+ uses it, it is checked for paths to support files, as it will be
+ imported into a different position relative to the test file
+ (in the same directory).
+ 5. Any tags with the class "instructions" have style="display:none" added
+ to them. Some w3c tests contain instructions for manual testers that we
+ want to hide (the test result parser only recognizes pure testharness.js
+ output and not those instructions).
+
+ - Upon completion, the script outputs the total number of tests imported,
+ broken down by test type.
+
+ - Also upon completion, if we are not importing the files in place, each
+ directory where files are imported will have a w3c-import.log file written with
+ a timestamp, the W3C Mercurial changeset if available, the list of CSS
+ properties used that require prefixes, the list of imported files, and
+ guidance for future test modification and maintenance. On subsequent
+ imports, this file is read to determine if files have been
+ removed in the newer changesets. The script removes these files
+ accordingly.
+"""
+
+# FIXME: Change this file to use the Host abstractions rather than os, sys, shutil, etc.
+
+import datetime
+import logging
+import mimetypes
+import optparse
+import os
+import shutil
+import sys
+
+from webkitpy.common.host import Host
+from webkitpy.common.webkit_finder import WebKitFinder
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.layout_tests.models.test_expectations import TestExpectationParser
+from webkitpy.w3c.test_parser import TestParser
+from webkitpy.w3c.test_converter import convert_for_webkit
+
+
+CHANGESET_NOT_AVAILABLE = 'Not Available'
+
+
+_log = logging.getLogger(__name__)
+
+
+def main(_argv, _stdout, _stderr):
+ options, args = parse_args()
+ dir_to_import = os.path.normpath(os.path.abspath(args[0]))
+ if len(args) == 1:
+ top_of_repo = dir_to_import
+ else:
+ top_of_repo = os.path.normpath(os.path.abspath(args[1]))
+
+ if not os.path.exists(dir_to_import):
+ sys.exit('Directory %s not found!' % dir_to_import)
+ if not os.path.exists(top_of_repo):
+ sys.exit('Repository directory %s not found!' % top_of_repo)
+ if top_of_repo not in dir_to_import:
+ sys.exit('Repository directory %s must be a parent of %s' % (top_of_repo, dir_to_import))
+
+ configure_logging()
+ test_importer = TestImporter(Host(), dir_to_import, top_of_repo, options)
+ test_importer.do_import()
+
+
+def configure_logging():
+ class LogHandler(logging.StreamHandler):
+
+ def format(self, record):
+ if record.levelno > logging.INFO:
+ return "%s: %s" % (record.levelname, record.getMessage())
+ return record.getMessage()
+
+ logger = logging.getLogger()
+ logger.setLevel(logging.INFO)
+ handler = LogHandler()
+ handler.setLevel(logging.INFO)
+ logger.addHandler(handler)
+ return handler
+
+
+def parse_args():
+ parser = optparse.OptionParser(usage='usage: %prog [options] [dir_to_import] [top_of_repo]')
+ parser.add_option('-n', '--no-overwrite', dest='overwrite', action='store_false', default=True,
+ help='Flag to prevent duplicate test files from overwriting existing tests. By default, they will be overwritten.')
+ parser.add_option('-a', '--all', action='store_true', default=False,
+ help='Import all tests including reftests, JS tests, and manual/pixel tests. By default, only reftests and JS tests are imported.')
+ parser.add_option('-d', '--dest-dir', dest='destination', default='w3c',
+ help='Import into a specified directory relative to the LayoutTests root. By default, files are imported under LayoutTests/w3c.')
+ parser.add_option('--ignore-expectations', action='store_true', default=False,
+ help='Ignore the W3CImportExpectations file and import everything.')
+ parser.add_option('--dry-run', action='store_true', default=False,
+ help='Dryrun only (don\'t actually write any results).')
+
+ options, args = parser.parse_args()
+ if len(args) > 2:
+ parser.error('Incorrect number of arguments')
+ elif len(args) == 0:
+ args = (os.getcwd(),)
+ return options, args
+
+
+class TestImporter(object):
+
+ def __init__(self, host, dir_to_import, top_of_repo, options):
+ self.host = host
+ self.dir_to_import = dir_to_import
+ self.top_of_repo = top_of_repo
+ self.options = options
+
+ self.filesystem = self.host.filesystem
+ self.webkit_finder = WebKitFinder(self.filesystem)
+ self._webkit_root = self.webkit_finder.webkit_base()
+ self.layout_tests_dir = self.webkit_finder.path_from_webkit_base('LayoutTests')
+ self.destination_directory = self.filesystem.normpath(self.filesystem.join(self.layout_tests_dir, options.destination,
+ self.filesystem.basename(self.top_of_repo)))
+ self.import_in_place = (self.dir_to_import == self.destination_directory)
+ self.dir_above_repo = self.filesystem.dirname(self.top_of_repo)
+
+ self.changeset = CHANGESET_NOT_AVAILABLE
+
+ self.import_list = []
+
+ def do_import(self):
+ _log.info("Importing %s into %s", self.dir_to_import, self.destination_directory)
+ self.find_importable_tests(self.dir_to_import)
+ self.load_changeset()
+ self.import_tests()
+
+ def load_changeset(self):
+ """Returns the current changeset from mercurial or "Not Available"."""
+ try:
+ self.changeset = self.host.executive.run_command(['hg', 'tip']).split('changeset:')[1]
+ except (OSError, ScriptError):
+ self.changeset = CHANGESET_NOT_AVAILABLE
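+ # Note: the first line of 'hg tip' output is typically
+ # 'changeset:   1234:abcdef012345'; everything after the first
+ # 'changeset:' label (including any subsequent output lines) is kept.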
+
+ def find_importable_tests(self, directory):
+ # FIXME: use filesystem
+ paths_to_skip = self.find_paths_to_skip()
+
+ for root, dirs, files in os.walk(directory):
+ cur_dir = root.replace(self.dir_above_repo + '/', '') + '/'
+ _log.info(' scanning ' + cur_dir + '...')
+ total_tests = 0
+ reftests = 0
+ jstests = 0
+
+ for d in ('.git', '.hg'):
+ if d in dirs:
+ dirs.remove(d)
+
+ for path in paths_to_skip:
+ path_base = path.replace(self.options.destination + '/', '')
+ path_base = path_base.replace(cur_dir, '')
+ path_full = self.filesystem.join(root, path_base)
+ if path_base in dirs:
+ dirs.remove(path_base)
+ if not self.options.dry_run and self.import_in_place:
+ _log.info(" pruning %s" % path_base)
+ self.filesystem.rmtree(path_full)
+ else:
+ _log.info(" skipping %s" % path_base)
+
+ copy_list = []
+
+ for filename in files:
+ path_full = self.filesystem.join(root, filename)
+ path_base = path_full.replace(self.layout_tests_dir + '/', '')
+ if path_base in paths_to_skip:
+ if not self.options.dry_run and self.import_in_place:
+ _log.info(" pruning %s" % path_base)
+ self.filesystem.remove(path_full)
+ continue
+ else:
+ continue
+ # FIXME: This block should really be a separate function, but the early-continues make that difficult.
+
+ if filename.startswith('.') or filename.endswith('.pl'):
+ continue # For some reason the w3c repo contains random perl scripts we don't care about.
+
+ fullpath = os.path.join(root, filename)
+
+ mimetype = mimetypes.guess_type(fullpath)
+ if 'html' not in str(mimetype[0]) and 'application/xhtml+xml' not in str(mimetype[0]) and 'application/xml' not in str(mimetype[0]):
+ copy_list.append({'src': fullpath, 'dest': filename})
+ continue
+
+ if root.endswith('resources'):
+ copy_list.append({'src': fullpath, 'dest': filename})
+ continue
+
+ test_parser = TestParser(vars(self.options), filename=fullpath)
+ test_info = test_parser.analyze_test()
+ if test_info is None:
+ continue
+
+ if 'reference' in test_info.keys():
+ reftests += 1
+ total_tests += 1
+ test_basename = os.path.basename(test_info['test'])
+
+ # Add the ref file, following WebKit style.
+ # FIXME: Ideally we'd support reading the metadata
+ # directly rather than relying on a naming convention.
+ # Using a naming convention creates duplicate copies of the
+ # reference files.
+ ref_file = os.path.splitext(test_basename)[0] + '-expected'
+ ref_file += os.path.splitext(test_basename)[1]
+
+ copy_list.append({'src': test_info['reference'], 'dest': ref_file, 'reference_support_info': test_info['reference_support_info']})
+ copy_list.append({'src': test_info['test'], 'dest': filename})
+
+ elif 'jstest' in test_info.keys():
+ jstests += 1
+ total_tests += 1
+ copy_list.append({'src': fullpath, 'dest': filename})
+ else:
+ total_tests += 1
+ copy_list.append({'src': fullpath, 'dest': filename})
+
+ if copy_list:
+ # Only add this directory to the list if there's something to import
+ self.import_list.append({'dirname': root, 'copy_list': copy_list,
+ 'reftests': reftests, 'jstests': jstests, 'total_tests': total_tests})
+
+ def find_paths_to_skip(self):
+ if self.options.ignore_expectations:
+ return set()
+
+ paths_to_skip = set()
+ port = self.host.port_factory.get()
+ w3c_import_expectations_path = self.webkit_finder.path_from_webkit_base('LayoutTests', 'W3CImportExpectations')
+ w3c_import_expectations = self.filesystem.read_text_file(w3c_import_expectations_path)
+ parser = TestExpectationParser(port, full_test_list=(), is_lint_mode=False)
+ expectation_lines = parser.parse(w3c_import_expectations_path, w3c_import_expectations)
+ for line in expectation_lines:
+ if 'SKIP' in line.expectations:
+ if line.specifiers:
+ _log.warning("W3CImportExpectations:%s should not have any specifiers" % line.line_numbers)
+ continue
+ paths_to_skip.add(line.name)
+ return paths_to_skip
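+
+ # A SKIP entry in W3CImportExpectations is a plain test-expectations line,
+ # for example (illustrative only): 'imported/csswg-test/some-dir [ Skip ]'.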
+
+ def import_tests(self):
+ total_imported_tests = 0
+ total_imported_reftests = 0
+ total_imported_jstests = 0
+ total_prefixed_properties = {}
+
+ for dir_to_copy in self.import_list:
+ total_imported_tests += dir_to_copy['total_tests']
+ total_imported_reftests += dir_to_copy['reftests']
+ total_imported_jstests += dir_to_copy['jstests']
+
+ prefixed_properties = []
+
+ if not dir_to_copy['copy_list']:
+ continue
+
+ orig_path = dir_to_copy['dirname']
+
+ subpath = os.path.relpath(orig_path, self.top_of_repo)
+ new_path = os.path.join(self.destination_directory, subpath)
+
+ if not os.path.exists(new_path):
+ os.makedirs(new_path)
+
+ copied_files = []
+
+ for file_to_copy in dir_to_copy['copy_list']:
+ # FIXME: Split this block into a separate function.
+ orig_filepath = os.path.normpath(file_to_copy['src'])
+
+ if os.path.isdir(orig_filepath):
+ # FIXME: Figure out what is triggering this and what to do about it.
+ _log.error('%s refers to a directory' % orig_filepath)
+ continue
+
+ if not os.path.exists(orig_filepath):
+ _log.warning('%s not found. Possible error in the test.', orig_filepath)
+ continue
+
+ new_filepath = os.path.join(new_path, file_to_copy['dest'])
+ if 'reference_support_info' in file_to_copy.keys() and file_to_copy['reference_support_info'] != {}:
+ reference_support_info = file_to_copy['reference_support_info']
+ else:
+ reference_support_info = None
+
+ if not os.path.exists(os.path.dirname(new_filepath)):
+ if not self.import_in_place and not self.options.dry_run:
+ os.makedirs(os.path.dirname(new_filepath))
+
+ relpath = os.path.relpath(new_filepath, self.layout_tests_dir)
+ if not self.options.overwrite and os.path.exists(new_filepath):
+ _log.info(' skipping %s' % relpath)
+ else:
+ # FIXME: Maybe doing a file diff is in order here for existing files?
+ # In other words, there's no sense in overwriting identical files, but
+ # there's no harm in copying the identical thing.
+ _log.info(' %s' % relpath)
+
+ # Only html, xml, or css should be converted
+ # FIXME: Eventually, so should js when support is added for this type of conversion
+ mimetype = mimetypes.guess_type(orig_filepath)
+ if 'html' in str(mimetype[0]) or 'xml' in str(mimetype[0]) or 'css' in str(mimetype[0]):
+ converted_file = convert_for_webkit(new_path, filename=orig_filepath, reference_support_info=reference_support_info)
+
+ if not converted_file:
+ if not self.import_in_place and not self.options.dry_run:
+ shutil.copyfile(orig_filepath, new_filepath) # The file was unmodified.
+ else:
+ for prefixed_property in converted_file[0]:
+ total_prefixed_properties.setdefault(prefixed_property, 0)
+ total_prefixed_properties[prefixed_property] += 1
+
+ prefixed_properties.extend(set(converted_file[0]) - set(prefixed_properties))
+ if not self.options.dry_run:
+ with open(new_filepath, 'wb') as outfile:
+ outfile.write(converted_file[1])
+ else:
+ if not self.import_in_place and not self.options.dry_run:
+ shutil.copyfile(orig_filepath, new_filepath)
+
+ copied_files.append(new_filepath.replace(self._webkit_root, ''))
+
+ _log.info('')
+ _log.info('Import complete')
+ _log.info('')
+ _log.info('IMPORTED %d TOTAL TESTS', total_imported_tests)
+ _log.info('Imported %d reftests', total_imported_reftests)
+ _log.info('Imported %d JS tests', total_imported_jstests)
+ _log.info('Imported %d pixel/manual tests', total_imported_tests - total_imported_jstests - total_imported_reftests)
+ _log.info('')
+
+ if total_prefixed_properties:
+ _log.info('Properties needing prefixes (by count):')
+ for prefixed_property in sorted(total_prefixed_properties, key=lambda p: total_prefixed_properties[p]):
+ _log.info(' %s: %s', prefixed_property, total_prefixed_properties[prefixed_property])
+
+ def setup_destination_directory(self):
+ """ Creates a destination directory that mirrors that of the source directory """
+
+ new_subpath = self.dir_to_import[len(self.top_of_repo):]
+
+ destination_directory = os.path.join(self.destination_directory, new_subpath)
+
+ if not os.path.exists(destination_directory):
+ os.makedirs(destination_directory)
+
+ _log.info('Tests will be imported into: %s', destination_directory)
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_importer_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_importer_unittest.py
new file mode 100644
index 0000000..ec78ff1
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_importer_unittest.py
@@ -0,0 +1,78 @@
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import optparse
+import shutil
+import tempfile
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.executive_mock import MockExecutive2, ScriptError
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.w3c.test_importer import TestImporter
+
+
+FAKE_SOURCE_DIR = '/blink/w3c'
+FAKE_REPO_DIR = '/blink'
+
+FAKE_FILES = {
+ '/blink/w3c/empty_dir/README.txt': '',
+ '/mock-checkout/third_party/WebKit/LayoutTests/w3c/README.txt': '',
+ '/mock-checkout/third_party/WebKit/LayoutTests/W3CImportExpectations': '',
+}
+
+class TestImporterTest(unittest.TestCase):
+
+ def test_import_dir_with_no_tests_and_no_hg(self):
+ host = MockHost()
+ host.executive = MockExecutive2(exception=OSError())
+ host.filesystem = MockFileSystem(files=FAKE_FILES)
+
+ importer = TestImporter(host, FAKE_SOURCE_DIR, FAKE_REPO_DIR, optparse.Values({"overwrite": False, 'destination': 'w3c', 'ignore_expectations': False}))
+
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ importer.do_import()
+ finally:
+ oc.restore_output()
+
+ def test_import_dir_with_no_tests(self):
+ host = MockHost()
+ host.executive = MockExecutive2(exception=ScriptError("abort: no repository found in '/Volumes/Source/src/wk/Tools/Scripts/webkitpy/w3c' (.hg not found)!"))
+ host.filesystem = MockFileSystem(files=FAKE_FILES)
+
+ importer = TestImporter(host, FAKE_SOURCE_DIR, FAKE_REPO_DIR, optparse.Values({"overwrite": False, 'destination': 'w3c', 'ignore_expectations': False}))
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ importer.do_import()
+ finally:
+ oc.restore_output()
+
+ # FIXME: Needs more tests.
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_parser.py b/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_parser.py
new file mode 100755
index 0000000..83cf21f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_parser.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import logging
+import re
+
+from webkitpy.common.host import Host
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup as Parser
+
+
+_log = logging.getLogger(__name__)
+
+
+class TestParser(object):
+
+ def __init__(self, options, filename):
+ self.options = options
+ self.filename = filename
+ self.host = Host()
+ self.filesystem = self.host.filesystem
+
+ self.test_doc = None
+ self.ref_doc = None
+ self.load_file(filename)
+
+ def load_file(self, filename, is_ref=False):
+ if self.filesystem.isfile(filename):
+ try:
+ doc = Parser(self.filesystem.read_binary_file(filename))
+ except:
+ # FIXME: Figure out what to do if we can't parse the file.
+ _log.error("Failed to parse %s", filename)
+ doc = None
+ else:
+ if self.filesystem.isdir(filename):
+ # FIXME: Figure out what is triggering this and what to do about it.
+ _log.error("Trying to load %s, which is a directory", filename)
+ doc = None
+
+ if is_ref:
+ self.ref_doc = doc
+ else:
+ self.test_doc = doc
+
+ def analyze_test(self, test_contents=None, ref_contents=None):
+ """ Analyzes a file to determine if it's a test, what type of test, and what reference or support files it requires. Returns all of the test info """
+
+ test_info = None
+
+ if test_contents is None and self.test_doc is None:
+ return test_info
+
+ if test_contents is not None:
+ self.test_doc = Parser(test_contents)
+
+ if ref_contents is not None:
+ self.ref_doc = Parser(ref_contents)
+
+ # First check if it's a reftest
+ matches = self.reference_links_of_type('match') + self.reference_links_of_type('mismatch')
+ if matches:
+ if len(matches) > 1:
+ # FIXME: Is this actually true? We should fix this.
+ _log.warning('Multiple references are not supported. Importing the first ref defined in %s',
+ self.filesystem.basename(self.filename))
+
+ try:
+ ref_file = self.filesystem.join(self.filesystem.dirname(self.filename), matches[0]['href'])
+ except KeyError as e:
+ # FIXME: Figure out what to do w/ invalid test files.
+ _log.error('%s has a reference link but is missing the "href"', self.filename)
+ return None
+
+ if self.ref_doc is None:
+ self.load_file(ref_file, True)
+
+ test_info = {'test': self.filename, 'reference': ref_file}
+
+ # If the ref file does not live in the same directory as the test file, check it for support files
+ test_info['reference_support_info'] = {}
+ if self.filesystem.dirname(ref_file) != self.filesystem.dirname(self.filename):
+ reference_support_files = self.support_files(self.ref_doc)
+ if len(reference_support_files) > 0:
+ reference_relpath = self.filesystem.relpath(self.filesystem.dirname(self.filename), self.filesystem.dirname(ref_file)) + self.filesystem.sep
+ test_info['reference_support_info'] = {'reference_relpath': reference_relpath, 'files': reference_support_files}
+
+ elif self.is_jstest():
+ test_info = {'test': self.filename, 'jstest': True}
+ elif self.options['all'] and '-ref' not in self.filename and 'reference' not in self.filename:
+ test_info = {'test': self.filename}
+
+ return test_info
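+
+ # For a reftest, the returned test_info looks like
+ # {'test': <test path>, 'reference': <ref path>, 'reference_support_info': {...}};
+ # a testharness.js test yields {'test': <test path>, 'jstest': True}.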
+
+ def reference_links_of_type(self, reftest_type):
+ return self.test_doc.findAll(rel=reftest_type)
+
+ def is_jstest(self):
+ """Returns whether the file appears to be a jstest, by searching for usage of W3C-style testharness paths."""
+ return bool(self.test_doc.find(src=re.compile('[\'\"/]?/resources/testharness')))
+
+ def support_files(self, doc):
+ """ Searches the file for all paths specified in url()'s or src attributes."""
+ support_files = []
+
+ if doc is None:
+ return support_files
+
+ elements_with_src_attributes = doc.findAll(src=re.compile('.*'))
+ elements_with_href_attributes = doc.findAll(href=re.compile('.*'))
+
+ url_pattern = re.compile('url\(.*\)')
+ urls = []
+ for url in doc.findAll(text=url_pattern):
+ url = re.search(url_pattern, url)
+ url = re.sub('url\([\'\"]?', '', url.group(0))
+ url = re.sub('[\'\"]?\)', '', url)
+ urls.append(url)
+
+ src_paths = [src_tag['src'] for src_tag in elements_with_src_attributes]
+ href_paths = [href_tag['href'] for href_tag in elements_with_href_attributes]
+
+ paths = src_paths + href_paths + urls
+ for path in paths:
+ if not path.startswith('http:') and not path.startswith('mailto:'):
+ uri_scheme_pattern = re.compile(r"[A-Za-z][A-Za-z+.-]*:")
+ if not uri_scheme_pattern.match(path):
+ support_files.append(path)
+
+ return support_files
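+
+ # For example, given a reference file containing
+ # <link href="support/css/ref-stylesheet.css"> and
+ # <img src="../support/black96x96.png">, support_files() returns both
+ # relative paths; http:, mailto: and other scheme-qualified URLs are
+ # filtered out.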
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_parser_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_parser_unittest.py
new file mode 100644
index 0000000..1f8a7c5
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/w3c/test_parser_unittest.py
@@ -0,0 +1,215 @@
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import os
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.w3c.test_parser import TestParser
+
+
+options = {'all': False, 'no_overwrite': False}
+
+
+class TestParserTest(unittest.TestCase):
+
+ def test_analyze_test_reftest_one_match(self):
+ test_html = """<head>
+<link rel="match" href="green-box-ref.xht" />
+</head>
+"""
+ test_path = '/some/madeup/path/'
+ parser = TestParser(options, test_path + 'somefile.html')
+ test_info = parser.analyze_test(test_contents=test_html)
+
+ self.assertNotEqual(test_info, None, 'did not find a test')
+ self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+ self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
+ self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
+ self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
+ self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
+
+ def test_analyze_test_reftest_multiple_matches(self):
+ test_html = """<head>
+<link rel="match" href="green-box-ref.xht" />
+<link rel="match" href="blue-box-ref.xht" />
+<link rel="match" href="orange-box-ref.xht" />
+</head>
+"""
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ test_path = '/some/madeup/path/'
+ parser = TestParser(options, test_path + 'somefile.html')
+ test_info = parser.analyze_test(test_contents=test_html)
+ finally:
+ _, _, logs = oc.restore_output()
+
+ self.assertNotEqual(test_info, None, 'did not find a test')
+ self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+ self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
+ self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
+ self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
+ self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
+
+ self.assertEqual(logs, 'Multiple references are not supported. Importing the first ref defined in somefile.html\n')
+
+ def test_analyze_test_reftest_match_and_mismatch(self):
+ test_html = """<head>
+<link rel="match" href="green-box-ref.xht" />
+<link rel="match" href="blue-box-ref.xht" />
+<link rel="mismatch" href="orange-box-notref.xht" />
+</head>
+"""
+ oc = OutputCapture()
+ oc.capture_output()
+
+ try:
+ test_path = '/some/madeup/path/'
+ parser = TestParser(options, test_path + 'somefile.html')
+ test_info = parser.analyze_test(test_contents=test_html)
+ finally:
+ _, _, logs = oc.restore_output()
+
+ self.assertNotEqual(test_info, None, 'did not find a test')
+ self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+ self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
+ self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
+ self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
+ self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
+
+ self.assertEqual(logs, 'Multiple references are not supported. Importing the first ref defined in somefile.html\n')
+
+ def test_analyze_test_reftest_with_ref_support_files(self):
+ """ Tests analyze_test() using a reftest that refers to a reference file outside of the test's directory, where the reference file has paths to other support files """
+
+ test_html = """<html>
+<head>
+<link rel="match" href="../reference/green-box-ref.xht" />
+</head>
+"""
+ ref_html = """<head>
+<link href="support/css/ref-stylesheet.css" rel="stylesheet" type="text/css">
+<style type="text/css">
+ background-image: url("../../support/some-image.png")
+</style>
+</head>
+<body>
+<div><img src="../support/black96x96.png" alt="Image download support must be enabled" /></div>
+</body>
+</html>
+"""
+ test_path = '/some/madeup/path/'
+ parser = TestParser(options, test_path + 'somefile.html')
+ test_info = parser.analyze_test(test_contents=test_html, ref_contents=ref_html)
+
+ self.assertNotEqual(test_info, None, 'did not find a test')
+ self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+ self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
+ self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
+ self.assertTrue('reference_support_info' in test_info.keys(), 'there should be reference_support_info for this test')
+ self.assertEqual(len(test_info['reference_support_info']['files']), 3, 'there should be 3 support files in this reference')
+ self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
+
+ def test_analyze_jstest(self):
+ """ Tests analyze_test() using a jstest """
+
+ test_html = """<head>
+<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
+<script src="/resources/testharness.js"></script>
+</head>
+"""
+ test_path = '/some/madeup/path/'
+ parser = TestParser(options, test_path + 'somefile.html')
+ test_info = parser.analyze_test(test_contents=test_html)
+
+ self.assertNotEqual(test_info, None, 'test_info is None')
+ self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+ self.assertFalse('reference' in test_info.keys(), 'should not have found a reference file')
+ self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
+ self.assertTrue('jstest' in test_info.keys(), 'test should be a jstest')
+
+ def test_analyze_pixel_test_all_true(self):
+ """ Tests analyze_test() using a test that is neither a reftest or jstest with all=False """
+
+ test_html = """<html>
+<head>
+<title>CSS Test: DESCRIPTION OF TEST</title>
+<link rel="author" title="NAME_OF_AUTHOR" />
+<style type="text/css"><![CDATA[
+CSS FOR TEST
+]]></style>
+</head>
+<body>
+CONTENT OF TEST
+</body>
+</html>
+"""
+ # Set options to 'all' so this gets found
+ options['all'] = True
+
+ test_path = '/some/madeup/path/'
+ parser = TestParser(options, test_path + 'somefile.html')
+ test_info = parser.analyze_test(test_contents=test_html)
+
+ self.assertNotEqual(test_info, None, 'test_info is None')
+ self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+ self.assertFalse('reference' in test_info.keys(), 'should not have found a reference file')
+ self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
+ self.assertFalse('jstest' in test_info.keys(), 'test should not be a jstest')
+
+ def test_analyze_pixel_test_all_false(self):
+ """ Tests analyze_test() using a test that is neither a reftest or jstest, with -all=False """
+
+ test_html = """<html>
+<head>
+<title>CSS Test: DESCRIPTION OF TEST</title>
+<link rel="author" title="NAME_OF_AUTHOR" />
+<style type="text/css"><![CDATA[
+CSS FOR TEST
+]]></style>
+</head>
+<body>
+CONTENT OF TEST
+</body>
+</html>
+"""
+ # Set all to false so this gets skipped
+ options['all'] = False
+
+ test_path = '/some/madeup/path/'
+ parser = TestParser(options, test_path + 'somefile.html')
+ test_info = parser.analyze_test(test_contents=test_html)
+
+ self.assertEqual(test_info, None, 'test should have been skipped')
+
+ def test_analyze_non_html_file(self):
+ """ Tests analyze_test() with a file that has no html"""
+ # FIXME: use a mock filesystem
+ parser = TestParser(options, os.path.join(os.path.dirname(__file__), 'test_parser.py'))
+ test_info = parser.analyze_test()
+ self.assertEqual(test_info, None, 'no tests should have been found in this file')
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/webgl/__init__.py b/src/third_party/blink/Tools/Scripts/webkitpy/webgl/__init__.py
new file mode 100644
index 0000000..c0528b7
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/webgl/__init__.py
@@ -0,0 +1,10 @@
+# Required for Python to search this directory for module files
+# This directory houses Python modules that do not yet have a proper home.
+#
+# Some of the Python modules in this directory aren't really part of webkitpy
+# in the sense that they're not classes that are meant to be used as part of
+# the webkitpy library. Instead, they're a bunch of helper code for individual
+# scripts in Tools/Scripts.
+#
+# Really, all this code should either be refactored or moved somewhere else,
+# hence the somewhat lame name for this directory.
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/webgl/update_webgl_conformance_tests.py b/src/third_party/blink/Tools/Scripts/webkitpy/webgl/update_webgl_conformance_tests.py
new file mode 100644
index 0000000..c7082c8
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/webgl/update_webgl_conformance_tests.py
@@ -0,0 +1,157 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import glob
+import logging
+import optparse
+import os
+import re
+import sys
+from webkitpy.common.checkout import scm
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.system.executive import Executive
+
+
+_log = logging.getLogger(__name__)
+
+
+def remove_first_line_comment(text):
+ return re.compile(r'^<!--.*?-->\s*', re.DOTALL).sub('', text)
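+
+# e.g. remove_first_line_comment('<!-- Khronos header -->\n<html>') returns
+# '<html>'; comments that do not start the file are left untouched.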
+
+
+def translate_includes(text):
+ # Mapping of single filename to relative path under WebKit root.
+ # Assumption: these filenames are globally unique.
+ include_mapping = {
+ "js-test-style.css": "../../js/resources",
+ "js-test-pre.js": "../../js/resources",
+ "js-test-post.js": "../../js/resources",
+ "desktop-gl-constants.js": "resources",
+ }
+
+ for filename, path in include_mapping.items():
+ search = r'(?:[^"\'= ]*/)?' + re.escape(filename)
+ # We use '/' instead of os.path.join in order to produce consistent
+ # output cross-platform.
+ replace = path + '/' + filename
+ text = re.sub(search, replace, text)
+
+ return text
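+
+# For example, '<script src="../resources/js-test-pre.js"></script>' becomes
+# '<script src="../../js/resources/js-test-pre.js"></script>'; paths not in
+# the mapping are left unchanged.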
+
+
+def translate_khronos_test(text):
+ """
+ This method translates the contents of a Khronos test to a WebKit test.
+ """
+
+ translateFuncs = [
+ remove_first_line_comment,
+ translate_includes,
+ ]
+
+ for f in translateFuncs:
+ text = f(text)
+
+ return text
+
+
+def update_file(in_filename, out_dir):
+ # FIXME: Check that in_filename exists and that out_dir exists.
+ out_filename = os.path.join(out_dir, os.path.basename(in_filename))
+
+ _log.debug("Processing " + in_filename)
+ with open(in_filename, 'r') as in_file:
+ with open(out_filename, 'w') as out_file:
+ out_file.write(translate_khronos_test(in_file.read()))
+
+
+def update_directory(in_dir, out_dir):
+ # glob already returns paths that include in_dir, so pass them through as-is.
+ for filename in glob.glob(os.path.join(in_dir, '*.html')):
+ update_file(filename, out_dir)
+
+
+def default_out_dir():
+ detector = scm.SCMDetector(FileSystem(), Executive())
+ current_scm = detector.detect_scm_system(os.path.dirname(sys.argv[0]))
+ if not current_scm:
+ return os.getcwd()
+ root_dir = current_scm.checkout_root
+ if not root_dir:
+ return os.getcwd()
+ out_dir = os.path.join(root_dir, "LayoutTests/fast/canvas/webgl")
+ if os.path.isdir(out_dir):
+ return out_dir
+ return os.getcwd()
+
+
+def configure_logging(options):
+ """Configures the logging system."""
+ log_fmt = '%(levelname)s: %(message)s'
+ log_datefmt = '%y%m%d %H:%M:%S'
+ log_level = logging.INFO
+ if options.verbose:
+ log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s '
+ '%(message)s')
+ log_level = logging.DEBUG
+ logging.basicConfig(level=log_level, format=log_fmt,
+ datefmt=log_datefmt)
+
+
+def option_parser():
+ usage = "usage: %prog [options] (input file or directory)"
+ parser = optparse.OptionParser(usage=usage)
+ parser.add_option('-v', '--verbose',
+ action='store_true',
+ default=False,
+ help='include debug-level logging')
+ parser.add_option('-o', '--output',
+ action='store',
+ type='string',
+ default=default_out_dir(),
+ metavar='DIR',
+ help='specify an output directory to place files '
+ 'in [default: %default]')
+ return parser
+
+
+def main():
+ parser = option_parser()
+ (options, args) = parser.parse_args()
+ configure_logging(options)
+
+ if len(args) == 0:
+ _log.error("Must specify an input directory or filename.")
+ parser.print_help()
+ return 1
+
+ in_name = args[0]
+ if os.path.isfile(in_name):
+ update_file(in_name, options.output)
+ elif os.path.isdir(in_name):
+ update_directory(in_name, options.output)
+ else:
+ _log.error("'%s' is not a directory or a file.", in_name)
+ return 2
+
+ return 0
diff --git a/src/third_party/blink/Tools/Scripts/webkitpy/webgl/update_webgl_conformance_tests_unittest.py b/src/third_party/blink/Tools/Scripts/webkitpy/webgl/update_webgl_conformance_tests_unittest.py
new file mode 100644
index 0000000..4820a61
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitpy/webgl/update_webgl_conformance_tests_unittest.py
@@ -0,0 +1,98 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for update_webgl_conformance_tests."""
+
+import unittest
+
+from webkitpy.webgl import update_webgl_conformance_tests as webgl
+
+
+def construct_script(name):
+ return "<script src=\"" + name + "\"></script>\n"
+
+
+def construct_style(name):
+ return "<link rel=\"stylesheet\" href=\"" + name + "\">"
+
+
+class TestTranslation(unittest.TestCase):
+ def assert_unchanged(self, text):
+ self.assertEqual(text, webgl.translate_khronos_test(text))
+
+ def assert_translate(self, input, output):
+ self.assertEqual(output, webgl.translate_khronos_test(input))
+
+ def test_simple_unchanged(self):
+ self.assert_unchanged("")
+ self.assert_unchanged("<html></html>")
+
+ def test_header_strip(self):
+ single_line_header = "<!-- single line header. -->"
+ multi_line_header = """<!-- this is a multi-line
+ header. it should all be removed too.
+ -->"""
+ text = "<html></html>"
+ self.assert_translate(single_line_header, "")
+ self.assert_translate(single_line_header + text, text)
+ self.assert_translate(multi_line_header + text, text)
+
+ def test_dont_strip_other_headers(self):
+ self.assert_unchanged("<html>\n<!-- don't remove comments on other lines. -->\n</html>")
+
+ def test_include_rewriting(self):
+ # Mappings to None are unchanged
+ styles = {
+ "../resources/js-test-style.css": "../../js/resources/js-test-style.css",
+ "fail.css": None,
+ "resources/stylesheet.css": None,
+ "../resources/style.css": None,
+ }
+ scripts = {
+ "../resources/js-test-pre.js": "../../js/resources/js-test-pre.js",
+ "../resources/js-test-post.js": "../../js/resources/js-test-post.js",
+ "../resources/desktop-gl-constants.js": "resources/desktop-gl-constants.js",
+
+ "resources/shadow-offset.js": None,
+ "../resources/js-test-post-async.js": None,
+ }
+
+ input_text = ""
+ output_text = ""
+ for input, output in styles.items():
+ input_text += construct_style(input)
+ output_text += construct_style(output if output else input)
+ for input, output in scripts.items():
+ input_text += construct_script(input)
+ output_text += construct_script(output if output else input)
+
+ head = '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">\n<html>\n<head>\n'
+ foot = '</head>\n<body>\n</body>\n</html>'
+ input_text = head + input_text + foot
+ output_text = head + output_text + foot
+ self.assert_translate(input_text, output_text)
diff --git a/src/third_party/blink/Tools/Scripts/webkitruby/PrettyPatch/PrettyPatch.rb b/src/third_party/blink/Tools/Scripts/webkitruby/PrettyPatch/PrettyPatch.rb
new file mode 100644
index 0000000..7398888
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitruby/PrettyPatch/PrettyPatch.rb
@@ -0,0 +1,1005 @@
+require 'cgi'
+require 'diff'
+require 'open3'
+require 'open-uri'
+require 'pp'
+require 'set'
+require 'tempfile'
+
+module PrettyPatch
+
+public
+
+ GIT_PATH = "git"
+
+ def self.prettify(string)
+ $last_prettify_file_count = -1
+ $last_prettify_part_count = { "remove" => 0, "add" => 0, "shared" => 0, "binary" => 0, "extract-error" => 0 }
+ string = normalize_line_ending(string)
+ str = "#{HEADER}<body>\n"
+
+ # Just look at the first line to see if it is an SVN revision number as added
+ # by webkit-patch for git checkouts.
+ $svn_revision = 0
+ string.each_line do |line|
+ match = /^Subversion\ Revision: (\d*)$/.match(line)
+ unless match.nil?
+ str << "<span class='revision'>#{match[1]}</span>\n"
+ $svn_revision = match[1].to_i;
+ end
+ break
+ end
+
+ fileDiffs = FileDiff.parse(string)
+
+ $last_prettify_file_count = fileDiffs.length
+ str << fileDiffs.collect{ |diff| diff.to_html }.join
+ str << "</body></html>"
+ end
+
+ def self.filename_from_diff_header(line)
+ DIFF_HEADER_FORMATS.each do |format|
+ match = format.match(line)
+ return match[1] unless match.nil?
+ end
+ nil
+ end
+
+ def self.diff_header?(line)
+ RELAXED_DIFF_HEADER_FORMATS.any? { |format| line =~ format }
+ end
+
+private
+ DIFF_HEADER_FORMATS = [
+ /^Index: (.*)\r?$/,
+ /^diff --git "?a\/.+"? "?b\/(.+)"?\r?$/,
+ /^\+\+\+ ([^\t]+)(\t.*)?\r?$/
+ ]
+
+ RELAXED_DIFF_HEADER_FORMATS = [
+ /^Index:/,
+ /^diff/
+ ]
+
+ BINARY_FILE_MARKER_FORMAT = /^Cannot display: file marked as a binary type.$/
+
+ IMAGE_FILE_MARKER_FORMAT = /^svn:mime-type = image\/png$/
+
+ GIT_INDEX_MARKER_FORMAT = /^index ([0-9a-f]{40})\.\.([0-9a-f]{40})/
+
+ GIT_BINARY_FILE_MARKER_FORMAT = /^GIT binary patch$/
+
+ GIT_BINARY_PATCH_FORMAT = /^(literal|delta) \d+$/
+
+ GIT_LITERAL_FORMAT = /^literal \d+$/
+
+ GIT_DELTA_FORMAT = /^delta \d+$/
+
+ START_OF_BINARY_DATA_FORMAT = /^[0-9a-zA-Z\+\/=]{20,}/ # Assume 20 chars without a space is base64 binary data.
+
+ START_OF_SECTION_FORMAT = /^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@\s*(.*)/
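+
+ # e.g. matches a hunk header such as "@@ -10,8 +12,9 @@ def foo" and captures
+ # the starting lines, line counts, and any trailing context.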
+
+ START_OF_EXTENT_STRING = "%c" % 0
+ END_OF_EXTENT_STRING = "%c" % 1
+
+ # We won't search for intra-line diffs in lines longer than this length, to avoid hangs. See <http://webkit.org/b/56109>.
+ MAXIMUM_INTRALINE_DIFF_LINE_LENGTH = 10000
+
+ SMALLEST_EQUAL_OPERATION = 3
+
+ OPENSOURCE_URL = "http://src.chromium.org/viewvc/blink/"
+
+ OPENSOURCE_DIRS = Set.new %w[
+ LayoutTests
+ PerformanceTests
+ Source
+ Tools
+ ]
+
+ IMAGE_CHECKSUM_ERROR = "INVALID: Image lacks a checksum. This will fail with a MISSING error in run-webkit-tests. Always generate new png files using run-webkit-tests."
+
+ def self.normalize_line_ending(s)
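+        # On Ruby 1.9+, the UTF-8 -> UTF-16 -> UTF-8 round trip below drops
+        # invalid byte sequences (:invalid => :replace with an empty
+        # replacement), so malformed input cannot break later regexp matching.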
+ if RUBY_VERSION >= "1.9"
+ # Transliteration table from http://stackoverflow.com/a/6609998
+ transliteration_table = { '\xc2\x82' => ',', # High code comma
+ '\xc2\x84' => ',,', # High code double comma
+                                  '\xc2\x85' => '...', # Triple dot
+                                  '\xc2\x88' => '^',   # High caret
+ '\xc2\x91' => '\x27', # Forward single quote
+ '\xc2\x92' => '\x27', # Reverse single quote
+ '\xc2\x93' => '\x22', # Forward double quote
+ '\xc2\x94' => '\x22', # Reverse double quote
+ '\xc2\x95' => ' ',
+ '\xc2\x96' => '-', # High hyphen
+ '\xc2\x97' => '--', # Double hyphen
+ '\xc2\x99' => ' ',
+ '\xc2\xa0' => ' ',
+ '\xc2\xa6' => '|', # Split vertical bar
+ '\xc2\xab' => '<<', # Double less than
+ '\xc2\xbb' => '>>', # Double greater than
+ '\xc2\xbc' => '1/4', # one quarter
+ '\xc2\xbd' => '1/2', # one half
+ '\xc2\xbe' => '3/4', # three quarters
+ '\xca\xbf' => '\x27', # c-single quote
+ '\xcc\xa8' => '', # modifier - under curve
+ '\xcc\xb1' => '' # modifier - under line
+ }
+ encoded_string = s.force_encoding('UTF-8').encode('UTF-16', :invalid => :replace, :replace => '', :fallback => transliteration_table).encode('UTF-8')
+ encoded_string.gsub /\r\n?/, "\n"
+ else
+ s.gsub /\r\n?/, "\n"
+ end
+ end
+
+ def self.find_url_and_path(file_path)
+ # Search file_path from the bottom up, at each level checking whether
+ # we've found a directory we know exists in the source tree.
+
+ dirname, basename = File.split(file_path)
+ dirname.split(/\//).reverse.inject(basename) do |path, directory|
+ path = directory + "/" + path
+
+ return [OPENSOURCE_URL, path] if OPENSOURCE_DIRS.include?(directory)
+
+ path
+ end
+
+ [nil, file_path]
+ end
+
+ def self.linkifyFilename(filename)
+ url, pathBeneathTrunk = find_url_and_path(filename)
+
+ url.nil? ? filename : "<a href='#{url}trunk/#{pathBeneathTrunk}'>#{filename}</a>"
+ end
+
+
+    HEADER = <<EOF
+<html>
+<head>
+<style>
+:link, :visited {
+ text-decoration: none;
+ border-bottom: 1px dotted;
+}
+
+:link {
+ color: #039;
+}
+
+.FileDiff {
+ background-color: #f8f8f8;
+ border: 1px solid #ddd;
+ font-family: monospace;
+ margin: 1em 0;
+ position: relative;
+}
+
+h1 {
+ color: #333;
+ font-family: sans-serif;
+ font-size: 1em;
+ margin-left: 0.5em;
+ display: table-cell;
+ width: 100%;
+ padding: 0.5em;
+}
+
+h1 :link, h1 :visited {
+ color: inherit;
+}
+
+h1 :hover {
+ color: #555;
+ background-color: #eee;
+}
+
+.DiffLinks {
+ float: right;
+}
+
+.FileDiffLinkContainer {
+ opacity: 0;
+ display: table-cell;
+ padding-right: 0.5em;
+ white-space: nowrap;
+}
+
+.DiffSection {
+ background-color: white;
+ border: solid #ddd;
+ border-width: 1px 0px;
+}
+
+.ExpansionLine, .LineContainer {
+ white-space: nowrap;
+}
+
+.sidebyside .DiffBlockPart.add:first-child {
+ float: right;
+}
+
+.LineSide,
+.sidebyside .DiffBlockPart.remove,
+.sidebyside .DiffBlockPart.add {
+ display:inline-block;
+ width: 50%;
+ vertical-align: top;
+}
+
+.sidebyside .resizeHandle {
+ width: 5px;
+ height: 100%;
+ cursor: move;
+ position: absolute;
+ top: 0;
+ left: 50%;
+}
+
+.sidebyside .resizeHandle:hover {
+ background-color: grey;
+ opacity: 0.5;
+}
+
+.sidebyside .DiffBlockPart.remove .to,
+.sidebyside .DiffBlockPart.add .from {
+ display: none;
+}
+
+.lineNumber, .expansionLineNumber {
+ border-bottom: 1px solid #998;
+ border-right: 1px solid #ddd;
+ color: #444;
+ display: inline-block;
+ padding: 1px 5px 0px 0px;
+ text-align: right;
+ vertical-align: bottom;
+ width: 3em;
+}
+
+.lineNumber {
+ background-color: #eed;
+}
+
+.expansionLineNumber {
+ background-color: #eee;
+}
+
+.text {
+ padding-left: 5px;
+ white-space: pre-wrap;
+ word-wrap: break-word;
+}
+
+.image {
+ border: 2px solid black;
+}
+
+.context, .context .lineNumber {
+ color: #849;
+ background-color: #fef;
+}
+
+.Line.add, .FileDiff .add {
+ background-color: #dfd;
+}
+
+.Line.add ins {
+ background-color: #9e9;
+ text-decoration: none;
+}
+
+.Line.remove, .FileDiff .remove {
+ background-color: #fdd;
+}
+
+.Line.remove del {
+ background-color: #e99;
+ text-decoration: none;
+}
+
+/* Support for inline comments */
+
+.author {
+ font-style: italic;
+}
+
+.comment {
+ position: relative;
+}
+
+.comment textarea {
+ height: 6em;
+}
+
+.overallComments textarea {
+ height: 2em;
+ max-width: 100%;
+ min-width: 200px;
+}
+
+.comment textarea, .overallComments textarea {
+ display: block;
+ width: 100%;
+}
+
+.overallComments .open {
+ -webkit-transition: height .2s;
+ height: 4em;
+}
+
+#statusBubbleContainer.wrap {
+ display: block;
+}
+
+#toolbar {
+ display: -webkit-flex;
+ display: -moz-flex;
+ padding: 3px;
+ left: 0;
+ right: 0;
+ border: 1px solid #ddd;
+ background-color: #eee;
+ font-family: sans-serif;
+ position: fixed;
+ bottom: 0;
+}
+
+#toolbar .actions {
+ float: right;
+}
+
+.winter {
+ position: fixed;
+ z-index: 5;
+ left: 0;
+ right: 0;
+ top: 0;
+ bottom: 0;
+ background-color: black;
+ opacity: 0.8;
+}
+
+.inactive {
+ display: none;
+}
+
+.lightbox {
+ position: fixed;
+ z-index: 6;
+ left: 10%;
+ right: 10%;
+ top: 10%;
+ bottom: 10%;
+ background: white;
+}
+
+.lightbox iframe {
+ width: 100%;
+ height: 100%;
+}
+
+.commentContext .lineNumber {
+ background-color: yellow;
+}
+
+.selected .lineNumber {
+ background-color: #69F;
+ border-bottom-color: #69F;
+ border-right-color: #69F;
+}
+
+.ExpandLinkContainer {
+ opacity: 0;
+ border-top: 1px solid #ddd;
+ border-bottom: 1px solid #ddd;
+}
+
+.ExpandArea {
+ margin: 0;
+}
+
+.ExpandText {
+ margin-left: 0.67em;
+}
+
+.LinkContainer {
+ font-family: sans-serif;
+ font-size: small;
+ font-style: normal;
+ -webkit-transition: opacity 0.5s;
+}
+
+.LinkContainer a {
+ border: 0;
+}
+
+.LinkContainer label:after,
+.LinkContainer a:after {
+ content: " | ";
+ color: black;
+}
+
+.LinkContainer a:last-of-type:after {
+ content: "";
+}
+
+.LinkContainer label {
+ color: #039;
+}
+
+.help {
+ color: gray;
+ font-style: italic;
+}
+
+#message {
+ font-size: small;
+ font-family: sans-serif;
+}
+
+.commentStatus {
+ font-style: italic;
+}
+
+.comment, .previousComment, .frozenComment {
+ background-color: #ffd;
+}
+
+.overallComments {
+ -webkit-flex: 1;
+ -moz-flex: 1;
+ margin-right: 3px;
+}
+
+.previousComment, .frozenComment {
+ border: inset 1px;
+ padding: 5px;
+ white-space: pre-wrap;
+}
+
+.comment button {
+ width: 6em;
+}
+
+div:focus {
+ outline: 1px solid blue;
+ outline-offset: -1px;
+}
+
+.statusBubble {
+ /* The width/height get set to the bubble contents via postMessage on browsers that support it. */
+ width: 450px;
+ height: 20px;
+ margin: 2px 2px 0 0;
+ border: none;
+ vertical-align: middle;
+}
+
+.revision {
+ display: none;
+}
+
+.autosave-state {
+ position: absolute;
+ right: 0;
+ top: -1.3em;
+ padding: 0 3px;
+ outline: 1px solid #DDD;
+ color: #8FDF5F;
+ font-size: small;
+ background-color: #EEE;
+}
+
+.autosave-state:empty {
+ outline: 0px;
+}
+.autosave-state.saving {
+ color: #E98080;
+}
+
+.clear_float {
+ clear: both;
+}
+</style>
+</head>
+EOF
+
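+    # Extracts the numeric revision from "(revision NNN)" or, failing that,
+    # whatever parenthesized description ends an SVN "---"/"+++" header line.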
+ def self.revisionOrDescription(string)
+ case string
+ when /\(revision \d+\)/
+ /\(revision (\d+)\)/.match(string)[1]
+ when /\(.*\)/
+ /\((.*)\)/.match(string)[1]
+ end
+ end
+
+ def self.has_image_suffix(filename)
+ filename =~ /\.(png|jpg|gif)$/
+ end
+
+ class FileDiff
+ def initialize(lines)
+ @filename = PrettyPatch.filename_from_diff_header(lines[0].chomp)
+ startOfSections = 1
+ for i in 0...lines.length
+ case lines[i]
+ when /^--- /
+ @from = PrettyPatch.revisionOrDescription(lines[i])
+ when /^\+\+\+ /
+ @filename = PrettyPatch.filename_from_diff_header(lines[i].chomp) if @filename.nil?
+ @to = PrettyPatch.revisionOrDescription(lines[i])
+ startOfSections = i + 1
+ break
+ when BINARY_FILE_MARKER_FORMAT
+ @binary = true
+ if (IMAGE_FILE_MARKER_FORMAT.match(lines[i + 1]) or PrettyPatch.has_image_suffix(@filename)) then
+ @image = true
+ startOfSections = i + 2
+ for x in startOfSections...lines.length
+ # Binary diffs often have property changes listed before the actual binary data. Skip them.
+ if START_OF_BINARY_DATA_FORMAT.match(lines[x]) then
+ startOfSections = x
+ break
+ end
+ end
+ end
+ break
+ when GIT_INDEX_MARKER_FORMAT
+ @git_indexes = [$1, $2]
+ when GIT_BINARY_FILE_MARKER_FORMAT
+ @binary = true
+ if (GIT_BINARY_PATCH_FORMAT.match(lines[i + 1]) and PrettyPatch.has_image_suffix(@filename)) then
+ @git_image = true
+ startOfSections = i + 1
+ end
+ break
+ end
+ end
+ lines_with_contents = lines[startOfSections...lines.length]
+ @sections = DiffSection.parse(lines_with_contents) unless @binary
+ if @image and not lines_with_contents.empty?
+ @image_url = "data:image/png;base64," + lines_with_contents.join
+ @image_checksum = FileDiff.read_checksum_from_png(lines_with_contents.join.unpack("m").join)
+ elsif @git_image
+ begin
+ raise "index line is missing" unless @git_indexes
+
+ chunks = nil
+ for i in 0...lines_with_contents.length
+ if lines_with_contents[i] =~ /^$/
+ chunks = [lines_with_contents[i + 1 .. -1], lines_with_contents[0 .. i]]
+ break
+ end
+ end
+
+ raise "no binary chunks" unless chunks
+
+ from_filepath = FileDiff.extract_contents_of_from_revision(@filename, chunks[0], @git_indexes[0])
+ to_filepath = FileDiff.extract_contents_of_to_revision(@filename, chunks[1], @git_indexes[1], from_filepath, @git_indexes[0])
+ filepaths = from_filepath, to_filepath
+
+ binary_contents = filepaths.collect { |filepath| File.exists?(filepath) ? File.read(filepath) : nil }
+ @image_urls = binary_contents.collect { |content| (content and not content.empty?) ? "data:image/png;base64," + [content].pack("m") : nil }
+ @image_checksums = binary_contents.collect { |content| FileDiff.read_checksum_from_png(content) }
+ rescue
+ $last_prettify_part_count["extract-error"] += 1
+ @image_error = "Exception raised during decoding git binary patch:<pre>#{CGI.escapeHTML($!.to_s + "\n" + $!.backtrace.join("\n"))}</pre>"
+ ensure
+ File.unlink(from_filepath) if (from_filepath and File.exists?(from_filepath))
+ File.unlink(to_filepath) if (to_filepath and File.exists?(to_filepath))
+ end
+ end
+ nil
+ end
+
+ def image_to_html
+ if not @image_url then
+ return "<span class='text'>Image file removed</span>"
+ end
+
+ image_checksum = ""
+ if @image_checksum
+ image_checksum = @image_checksum
+ elsif @filename.include? "-expected.png" and @image_url
+ image_checksum = IMAGE_CHECKSUM_ERROR
+ end
+
+ return "<p>" + image_checksum + "</p><img class='image' src='" + @image_url + "' />"
+ end
+
+ def to_html
+ str = "<div class='FileDiff'>\n"
+ str += "<h1>#{PrettyPatch.linkifyFilename(@filename)}</h1>\n"
+ if @image then
+ str += self.image_to_html
+ elsif @git_image then
+ if @image_error
+ str += @image_error
+ else
+ for i in (0...2)
+ image_url = @image_urls[i]
+ image_checksum = @image_checksums[i]
+
+ style = ["remove", "add"][i]
+ str += "<p class=\"#{style}\">"
+
+ if image_checksum
+ str += image_checksum
+ elsif @filename.include? "-expected.png" and image_url
+ str += IMAGE_CHECKSUM_ERROR
+ end
+
+ str += "<br>"
+
+ if image_url
+ str += "<img class='image' src='" + image_url + "' />"
+ else
+ str += ["</p>Added", "</p>Removed"][i]
+ end
+ end
+ end
+ elsif @binary then
+ $last_prettify_part_count["binary"] += 1
+ str += "<span class='text'>Binary file, nothing to see here</span>"
+ else
+ str += @sections.collect{ |section| section.to_html }.join("<br>\n") unless @sections.nil?
+ end
+
+ if @from then
+ str += "<span class='revision'>" + @from + "</span>"
+ end
+
+ str += "</div>\n"
+ end
+
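+        # Splits the patch into one group of lines per file. A new group starts
+        # at every diff header, or at a "--- " line when no header has been
+        # seen yet (for patches lacking "Index:" or "diff" lines).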
+ def self.parse(string)
+ haveSeenDiffHeader = false
+ linesForDiffs = []
+ string.each_line do |line|
+ if (PrettyPatch.diff_header?(line))
+ linesForDiffs << []
+ haveSeenDiffHeader = true
+ elsif (!haveSeenDiffHeader && line =~ /^--- /)
+ linesForDiffs << []
+ haveSeenDiffHeader = false
+ end
+ linesForDiffs.last << line unless linesForDiffs.last.nil?
+ end
+
+ linesForDiffs.collect { |lines| FileDiff.new(lines) }
+ end
+
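+        # Looks for the 32 hex digits that run-webkit-tests embeds in a PNG
+        # tEXt "checksum" chunk; returns nil when no such chunk is present.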
+ def self.read_checksum_from_png(png_bytes)
+ # Ruby 1.9 added the concept of string encodings, so to avoid treating binary data as UTF-8,
+ # we can force the encoding to binary at this point.
+ if RUBY_VERSION >= "1.9"
+ png_bytes.force_encoding('binary')
+ end
+ match = png_bytes && png_bytes.match(/tEXtchecksum\0([a-fA-F0-9]{32})/)
+ match ? match[1] : nil
+ end
+
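+        # Wraps the already-encoded chunk in a minimal git binary patch so that
+        # "git apply" can reconstruct the file contents. The trailing
+        # "literal 0" / "HcmV?d00001" lines appear to be the conventional
+        # encoding of an empty reverse hunk, which git expects to find.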
+ def self.git_new_file_binary_patch(filename, encoded_chunk, git_index)
+ return <<END
+diff --git a/#{filename} b/#{filename}
+new file mode 100644
+index 0000000000000000000000000000000000000000..#{git_index}
+GIT binary patch
+#{encoded_chunk.join("")}literal 0
+HcmV?d00001
+
+END
+ end
+
+ def self.git_changed_file_binary_patch(to_filename, from_filename, encoded_chunk, to_git_index, from_git_index)
+ return <<END
+diff --git a/#{from_filename} b/#{to_filename}
+copy from #{from_filename}
++++ b/#{to_filename}
+index #{from_git_index}..#{to_git_index}
+GIT binary patch
+#{encoded_chunk.join("")}literal 0
+HcmV?d00001
+
+END
+ end
+
+ def self.get_svn_uri(repository_path)
+ "http://src.chromium.org/blink/trunk/" + (repository_path) + "?p=" + $svn_revision.to_s
+ end
+
+ def self.get_new_temp_filepath_and_name
+ tempfile = Tempfile.new("PrettyPatch")
+ filepath = tempfile.path + '.bin'
+ filename = File.basename(filepath)
+ return filepath, filename
+ end
+
+ def self.download_from_revision_from_svn(repository_path)
+ filepath, filename = get_new_temp_filepath_and_name
+ svn_uri = get_svn_uri(repository_path)
+ open(filepath, 'wb') do |to_file|
+ to_file << open(svn_uri) { |from_file| from_file.read }
+ end
+ return filepath
+ end
+
+ def self.run_git_apply_on_patch(output_filepath, patch)
+ # Apply the git binary patch using git-apply.
+ cmd = GIT_PATH + " apply --directory=" + File.dirname(output_filepath)
+ stdin, stdout, stderr = *Open3.popen3(cmd)
+ begin
+ stdin.puts(patch)
+ stdin.close
+
+ error = stderr.read
+ if error != ""
+ error = "Error running " + cmd + "\n" + "with patch:\n" + patch[0..500] + "...\n" + error
+ end
+ raise error if error != ""
+ ensure
+ stdin.close unless stdin.closed?
+ stdout.close
+ stderr.close
+ end
+ end
+
+ def self.extract_contents_from_git_binary_literal_chunk(encoded_chunk, git_index)
+ filepath, filename = get_new_temp_filepath_and_name
+ patch = FileDiff.git_new_file_binary_patch(filename, encoded_chunk, git_index)
+ run_git_apply_on_patch(filepath, patch)
+ return filepath
+ end
+
+ def self.extract_contents_from_git_binary_delta_chunk(from_filepath, from_git_index, encoded_chunk, to_git_index)
+ to_filepath, to_filename = get_new_temp_filepath_and_name
+ from_filename = File.basename(from_filepath)
+ patch = FileDiff.git_changed_file_binary_patch(to_filename, from_filename, encoded_chunk, to_git_index, from_git_index)
+ run_git_apply_on_patch(to_filepath, patch)
+ return to_filepath
+ end
+
+ def self.extract_contents_of_from_revision(repository_path, encoded_chunk, git_index)
+ # For literal encoded, simply reconstruct.
+ if GIT_LITERAL_FORMAT.match(encoded_chunk[0])
+ return extract_contents_from_git_binary_literal_chunk(encoded_chunk, git_index)
+ end
+ # For delta encoded, download from svn.
+ if GIT_DELTA_FORMAT.match(encoded_chunk[0])
+ return download_from_revision_from_svn(repository_path)
+ end
+ raise "Error: unknown git patch encoding"
+ end
+
+ def self.extract_contents_of_to_revision(repository_path, encoded_chunk, git_index, from_filepath, from_git_index)
+ # For literal encoded, simply reconstruct.
+ if GIT_LITERAL_FORMAT.match(encoded_chunk[0])
+ return extract_contents_from_git_binary_literal_chunk(encoded_chunk, git_index)
+ end
+ # For delta encoded, reconstruct using delta and previously constructed 'from' revision.
+ if GIT_DELTA_FORMAT.match(encoded_chunk[0])
+ return extract_contents_from_git_binary_delta_chunk(from_filepath, from_git_index, encoded_chunk, git_index)
+ end
+ raise "Error: unknown git patch encoding"
+ end
+ end
+
+ class DiffBlock
+ attr_accessor :parts
+
+ def initialize(container)
+ @parts = []
+ container << self
+ end
+
+ def to_html
+ str = "<div class='DiffBlock'>\n"
+ str += @parts.collect{ |part| part.to_html }.join
+ str += "<div class='clear_float'></div></div>\n"
+ end
+ end
+
+ class DiffBlockPart
+ attr_reader :className
+ attr :lines
+
+ def initialize(className, container)
+ $last_prettify_part_count[className] += 1
+ @className = className
+ @lines = []
+ container.parts << self
+ end
+
+ def to_html
+ str = "<div class='DiffBlockPart %s'>\n" % @className
+ str += @lines.collect{ |line| line.to_html }.join
+ # Don't put white-space after this so adjacent inline-block DiffBlockParts will not wrap.
+ str += "</div>"
+ end
+ end
+
+ class DiffSection
+ def initialize(lines)
+ lines.length >= 1 or raise "DiffSection.parse only received %d lines" % lines.length
+
+ matches = START_OF_SECTION_FORMAT.match(lines[0])
+
+ if matches
+ from, to = [matches[1].to_i, matches[3].to_i]
+ if matches[2] and matches[4]
+ from_end = from + matches[2].to_i
+ to_end = to + matches[4].to_i
+ end
+ end
+
+ @blocks = []
+ diff_block = nil
+ diff_block_part = nil
+
+ for line in lines[1...lines.length]
+ startOfLine = line =~ /^[-\+ ]/ ? 1 : 0
+ text = line[startOfLine...line.length].chomp
+ case line[0]
+ when ?-
+ if (diff_block_part.nil? or diff_block_part.className != 'remove')
+ diff_block = DiffBlock.new(@blocks)
+ diff_block_part = DiffBlockPart.new('remove', diff_block)
+ end
+
+ diff_block_part.lines << CodeLine.new(from, nil, text)
+ from += 1 unless from.nil?
+ when ?+
+ if (diff_block_part.nil? or diff_block_part.className != 'add')
+ # Put add lines that immediately follow remove lines into the same DiffBlock.
+ if (diff_block.nil? or diff_block_part.className != 'remove')
+ diff_block = DiffBlock.new(@blocks)
+ end
+
+ diff_block_part = DiffBlockPart.new('add', diff_block)
+ end
+
+ diff_block_part.lines << CodeLine.new(nil, to, text)
+ to += 1 unless to.nil?
+ else
+ if (diff_block_part.nil? or diff_block_part.className != 'shared')
+ diff_block = DiffBlock.new(@blocks)
+ diff_block_part = DiffBlockPart.new('shared', diff_block)
+ end
+
+ diff_block_part.lines << CodeLine.new(from, to, text)
+ from += 1 unless from.nil?
+ to += 1 unless to.nil?
+ end
+
+ break if from_end and to_end and from == from_end and to == to_end
+ end
+
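+            # Pair each run of removed lines with the run of added lines that
+            # follows it; a shared line (one carrying both line numbers) closes
+            # the current pair. Only pairs of equal length get the intra-line
+            # diff treatment below.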
+ changes = [ [ [], [] ] ]
+ for block in @blocks
+ for block_part in block.parts
+ for line in block_part.lines
+ if (!line.fromLineNumber.nil? and !line.toLineNumber.nil?) then
+ changes << [ [], [] ]
+ next
+ end
+ changes.last.first << line if line.toLineNumber.nil?
+ changes.last.last << line if line.fromLineNumber.nil?
+ end
+ end
+ end
+
+ for change in changes
+ next unless change.first.length == change.last.length
+ for i in (0...change.first.length)
+ from_text = change.first[i].text
+ to_text = change.last[i].text
+ next if from_text.length > MAXIMUM_INTRALINE_DIFF_LINE_LENGTH or to_text.length > MAXIMUM_INTRALINE_DIFF_LINE_LENGTH
+ raw_operations = HTMLDiff::DiffBuilder.new(from_text, to_text).operations
+ operations = []
+ back = 0
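+                    # Fold :equal runs shorter than SMALLEST_EQUAL_OPERATION
+                    # into the following operation (by moving its start back),
+                    # so tiny unchanged fragments do not fragment the
+                    # <ins>/<del> highlighting.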
+ raw_operations.each_with_index do |operation, j|
+ if operation.action == :equal and j < raw_operations.length - 1
+ length = operation.end_in_new - operation.start_in_new
+ if length < SMALLEST_EQUAL_OPERATION
+ back = length
+ next
+ end
+ end
+ operation.start_in_old -= back
+ operation.start_in_new -= back
+ back = 0
+ operations << operation
+ end
+ change.first[i].operations = operations
+ change.last[i].operations = operations
+ end
+ end
+
+ @blocks.unshift(ContextLine.new(matches[5])) unless matches.nil? || matches[5].empty?
+ end
+
+ def to_html
+ str = "<div class='DiffSection'>\n"
+ str += @blocks.collect{ |block| block.to_html }.join
+ str += "</div>\n"
+ end
+
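+        # Splits a file's lines into sections, starting a new section at each
+        # "@@" hunk header; empty groups (e.g. the initial one when the first
+        # line is already a hunk header) are discarded.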
+ def self.parse(lines)
+ linesForSections = lines.inject([[]]) do |sections, line|
+ sections << [] if line =~ /^@@/
+ sections.last << line
+ sections
+ end
+
+ linesForSections.delete_if { |lines| lines.nil? or lines.empty? }
+ linesForSections.collect { |lines| DiffSection.new(lines) }
+ end
+ end
+
+ class Line
+ attr_reader :fromLineNumber
+ attr_reader :toLineNumber
+ attr_reader :text
+
+ def initialize(from, to, text)
+ @fromLineNumber = from
+ @toLineNumber = to
+ @text = text
+ end
+
+ def text_as_html
+ CGI.escapeHTML(text)
+ end
+
+ def classes
+ lineClasses = ["Line", "LineContainer"]
+ lineClasses << ["add"] unless @toLineNumber.nil? or !@fromLineNumber.nil?
+ lineClasses << ["remove"] unless @fromLineNumber.nil? or !@toLineNumber.nil?
+ lineClasses
+ end
+
+ def to_html
+ markedUpText = self.text_as_html
+ str = "<div class='%s'>\n" % self.classes.join(' ')
+ str += "<span class='from lineNumber'>%s</span><span class='to lineNumber'>%s</span>" %
+ [@fromLineNumber.nil? ? ' ' : @fromLineNumber,
+ @toLineNumber.nil? ? ' ' : @toLineNumber] unless @fromLineNumber.nil? and @toLineNumber.nil?
+ str += "<span class='text'>%s</span>\n" % markedUpText
+ str += "</div>\n"
+ end
+ end
+
+ class CodeLine < Line
+ attr :operations, true
+
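+        # Renders the line's text, wrapping the spans that the intra-line diff
+        # marked as changed in <ins> (on added lines) or <del> (on removed ones).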
+ def text_as_html
+ html = []
+ tag = @fromLineNumber.nil? ? "ins" : "del"
+ if @operations.nil? or @operations.empty?
+ return CGI.escapeHTML(@text)
+ end
+ @operations.each do |operation|
+ start = @fromLineNumber.nil? ? operation.start_in_new : operation.start_in_old
+ eend = @fromLineNumber.nil? ? operation.end_in_new : operation.end_in_old
+ escaped_text = CGI.escapeHTML(@text[start...eend])
+ if eend - start === 0 or operation.action === :equal
+ html << escaped_text
+ else
+ html << "<#{tag}>#{escaped_text}</#{tag}>"
+ end
+ end
+ html.join
+ end
+ end
+
+ class ContextLine < Line
+ def initialize(context)
+ super("@", "@", context)
+ end
+
+ def classes
+ super << "context"
+ end
+ end
+end
diff --git a/src/third_party/blink/Tools/Scripts/webkitruby/PrettyPatch/PrettyPatch_test.rb b/src/third_party/blink/Tools/Scripts/webkitruby/PrettyPatch/PrettyPatch_test.rb
new file mode 100755
index 0000000..0d5f943
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitruby/PrettyPatch/PrettyPatch_test.rb
@@ -0,0 +1,99 @@
+#!/usr/bin/ruby
+
+require 'test/unit'
+require 'open-uri'
+require 'PrettyPatch'
+
+# Note: an internet connection is needed to run this test suite.
+
+class PrettyPatch_test < Test::Unit::TestCase
+ class Info
+ TITLE = 0
+ FILE = 1
+ ADD = 2
+ REMOVE = 3
+ SHARED = 4
+ end
+
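+    # Maps a bugs.webkit.org attachment id to its expected statistics, indexed
+    # by the Info constants above: title, file count, then the number of
+    # 'add', 'remove' and 'shared' parts.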
+ PATCHES = {
+ 20510 => ["Single change", 1, 1, 0, 2],
+ 20528 => ["No 'Index' or 'diff' in patch header", 1, 4, 3, 7],
+ 21151 => ["Leading '/' in the path of files", 4, 9, 1, 16],
+        # Binary files use shared blocks; there are three in 30488.
+ 30488 => ["Quoted filenames in git diff", 23, 28, 25, 64 + 3],
+ 23920 => ["Mac line ending", 3, 3, 0, 5],
+ 39615 => ["Git signature", 2, 2, 0, 3],
+ 80852 => ["Changes one line plus ChangeLog", 2, 2, 1, 4],
+ 83127 => ["Only add stuff", 2, 2, 0, 3],
+ 85071 => ["Adds and removes from a file plus git signature", 2, 5, 3, 9],
+ 106368 => ["Images with git delta binary patch", 69, 8, 23, 10],
+ }
+
+ def get_patch_uri(id)
+ "https://bugs.webkit.org/attachment.cgi?id=" + id.to_s
+ end
+
+ def get_patch(id)
+ result = nil
+ patch_uri = get_patch_uri(id)
+ begin
+            result = open(patch_uri) { |f| f.read }
+        rescue
+            assert(false, "Failed to get patch " + patch_uri)
+ end
+ result
+ end
+
+ def check_one_patch(id, info)
+ patch = get_patch(id)
+ description = get_patch_uri(id)
+ description += " (" + info[Info::TITLE] + ")" unless info[Info::TITLE].nil?
+ puts "Testing " + description
+ pretty = nil
+ assert_nothing_raised("Crash while prettifying " + description) {
+ pretty = PrettyPatch.prettify(patch)
+ }
+ assert(pretty, "Empty result while prettifying " + description)
+ assert_equal(info[Info::FILE], $last_prettify_file_count, "Wrong number of files changed in " + description)
+ assert_equal(info[Info::ADD], $last_prettify_part_count["add"], "Wrong number of 'add' parts in " + description)
+ assert_equal(info[Info::REMOVE], $last_prettify_part_count["remove"], "Wrong number of 'remove' parts in " + description)
+ assert_equal(info[Info::SHARED], $last_prettify_part_count["shared"], "Wrong number of 'shared' parts in " + description)
+ assert_equal(0, $last_prettify_part_count["binary"], "Wrong number of 'binary' parts in " + description)
+ assert_equal(0, $last_prettify_part_count["extract-error"], "Wrong number of 'extract-error' parts in " + description)
+ return pretty
+ end
+
+ def test_patches
+ PATCHES.each { |id, info| check_one_patch(id, info) }
+ end
+
+ def test_images_without_checksum
+ pretty = check_one_patch(144064, ["Images without checksums", 10, 5, 4, 8])
+ matches = pretty.match("INVALID: Image lacks a checksum.")
+ # FIXME: This should match, but there's a bug when running the tests where the image data
+ # doesn't get properly written out to the temp files, so there is no image and we don't print
+ # the warning that the image is missing its checksum.
+ assert(!matches, "Should have invalid checksums")
+ # FIXME: This should only have 4 invalid images, but due to the above tempfile issue, there are 0.
+ assert_equal(0, pretty.scan(/INVALID\: Image lacks a checksum\./).size)
+ end
+
+ def test_new_image
+ pretty = check_one_patch(145881, ["New image", 19, 36, 19, 56])
+ matches = pretty.match("INVALID: Image lacks a checksum.")
+ assert(!matches, "Should not have invalid checksums")
+ end
+
+ def test_images_correctly_without_checksum_git
+ pretty = check_one_patch(101620, ["Images correctly without checksums git", 7, 15, 10, 26])
+ matches = pretty.match("INVALID: Image lacks a checksum.")
+        assert(!matches, "PNG legitimately lacks a checksum, so no error should be reported.")
+ end
+
+ def test_images_correctly_without_checksum_svn
+ pretty = check_one_patch(31202, ["Images correctly without checksums svn", 4, 4, 1, 4])
+ matches = pretty.match("INVALID: Image lacks a checksum.")
+        assert(!matches, "PNG legitimately lacks a checksum, so no error should be reported.")
+ end
+
+end
diff --git a/src/third_party/blink/Tools/Scripts/webkitruby/PrettyPatch/diff.rb b/src/third_party/blink/Tools/Scripts/webkitruby/PrettyPatch/diff.rb
new file mode 100644
index 0000000..e5c154a
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitruby/PrettyPatch/diff.rb
@@ -0,0 +1,164 @@
+module HTMLDiff
+
+ Match = Struct.new(:start_in_old, :start_in_new, :size)
+ class Match
+ def end_in_old
+ self.start_in_old + self.size
+ end
+
+ def end_in_new
+ self.start_in_new + self.size
+ end
+ end
+
+ Operation = Struct.new(:action, :start_in_old, :end_in_old, :start_in_new, :end_in_new)
+
+ class DiffBuilder
+
+ def initialize(old_version, new_version)
+ @old_version, @new_version = old_version, new_version
+ split_inputs_to_words
+ index_new_words
+ end
+
+ def split_inputs_to_words
+ @old_words = explode(@old_version)
+ @new_words = explode(@new_version)
+ end
+
+ def index_new_words
+ @word_indices = Hash.new { |h, word| h[word] = [] }
+ @new_words.each_with_index { |word, i| @word_indices[word] << i }
+ end
+
+ def operations
+ position_in_old = position_in_new = 0
+ operations = []
+
+ matches = matching_blocks
+ # an empty match at the end forces the loop below to handle the unmatched tails
+ # I'm sure it can be done more gracefully, but not at 23:52
+ matches << Match.new(@old_words.length, @new_words.length, 0)
+
+ matches.each_with_index do |match, i|
+ match_starts_at_current_position_in_old = (position_in_old == match.start_in_old)
+ match_starts_at_current_position_in_new = (position_in_new == match.start_in_new)
+
+ action_upto_match_positions =
+ case [match_starts_at_current_position_in_old, match_starts_at_current_position_in_new]
+ when [false, false]
+ :replace
+ when [true, false]
+ :insert
+ when [false, true]
+ :delete
+ else
+                    # this happens if the first few words are the same in both versions
+ :none
+ end
+
+ if action_upto_match_positions != :none
+ operation_upto_match_positions =
+ Operation.new(action_upto_match_positions,
+ position_in_old, match.start_in_old,
+ position_in_new, match.start_in_new)
+ operations << operation_upto_match_positions
+ end
+ if match.size != 0
+ match_operation = Operation.new(:equal,
+ match.start_in_old, match.end_in_old,
+ match.start_in_new, match.end_in_new)
+ operations << match_operation
+ end
+
+ position_in_old = match.end_in_old
+ position_in_new = match.end_in_new
+ end
+
+ operations
+ end
+
+ def matching_blocks
+ matching_blocks = []
+ recursively_find_matching_blocks(0, @old_words.size, 0, @new_words.size, matching_blocks)
+ matching_blocks
+ end
+
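+        # Classic divide and conquer: find the longest common block, then
+        # recurse into the unmatched regions before and after it.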
+ def recursively_find_matching_blocks(start_in_old, end_in_old, start_in_new, end_in_new, matching_blocks)
+ match = find_match(start_in_old, end_in_old, start_in_new, end_in_new)
+ if match
+ if start_in_old < match.start_in_old and start_in_new < match.start_in_new
+ recursively_find_matching_blocks(
+ start_in_old, match.start_in_old, start_in_new, match.start_in_new, matching_blocks)
+ end
+ matching_blocks << match
+ if match.end_in_old < end_in_old and match.end_in_new < end_in_new
+ recursively_find_matching_blocks(
+ match.end_in_old, end_in_old, match.end_in_new, end_in_new, matching_blocks)
+ end
+ end
+ end
+
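+        # Finds the longest run of words common to both ranges, using the
+        # @word_indices map and a rolling match-length table; much like the
+        # find_longest_match step of Python's difflib.SequenceMatcher.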
+ def find_match(start_in_old, end_in_old, start_in_new, end_in_new)
+
+ best_match_in_old = start_in_old
+ best_match_in_new = start_in_new
+ best_match_size = 0
+
+ match_length_at = Hash.new { |h, index| h[index] = 0 }
+
+ start_in_old.upto(end_in_old - 1) do |index_in_old|
+
+ new_match_length_at = Hash.new { |h, index| h[index] = 0 }
+
+ @word_indices[@old_words[index_in_old]].each do |index_in_new|
+ next if index_in_new < start_in_new
+ break if index_in_new >= end_in_new
+
+ new_match_length = match_length_at[index_in_new - 1] + 1
+ new_match_length_at[index_in_new] = new_match_length
+
+ if new_match_length > best_match_size
+ best_match_in_old = index_in_old - new_match_length + 1
+ best_match_in_new = index_in_new - new_match_length + 1
+ best_match_size = new_match_length
+ end
+ end
+ match_length_at = new_match_length_at
+ end
+
+# best_match_in_old, best_match_in_new, best_match_size = add_matching_words_left(
+# best_match_in_old, best_match_in_new, best_match_size, start_in_old, start_in_new)
+# best_match_in_old, best_match_in_new, match_size = add_matching_words_right(
+# best_match_in_old, best_match_in_new, best_match_size, end_in_old, end_in_new)
+
+ return (best_match_size != 0 ? Match.new(best_match_in_old, best_match_in_new, best_match_size) : nil)
+ end
+
+ def add_matching_words_left(match_in_old, match_in_new, match_size, start_in_old, start_in_new)
+ while match_in_old > start_in_old and
+ match_in_new > start_in_new and
+ @old_words[match_in_old - 1] == @new_words[match_in_new - 1]
+ match_in_old -= 1
+ match_in_new -= 1
+ match_size += 1
+ end
+ [match_in_old, match_in_new, match_size]
+ end
+
+ def add_matching_words_right(match_in_old, match_in_new, match_size, end_in_old, end_in_new)
+ while match_in_old + match_size < end_in_old and
+ match_in_new + match_size < end_in_new and
+ @old_words[match_in_old + match_size] == @new_words[match_in_new + match_size]
+ match_size += 1
+ end
+ [match_in_old, match_in_new, match_size]
+ end
+
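+        # Splits a String into individual characters, so the intra-line diff
+        # is character-granular despite the *_words variable names.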
+ def explode(sequence)
+ sequence.is_a?(String) ? sequence.split(//) : sequence
+ end
+
+ end # of class Diff Builder
+
+end
diff --git a/src/third_party/blink/Tools/Scripts/webkitruby/PrettyPatch/prettify.rb b/src/third_party/blink/Tools/Scripts/webkitruby/PrettyPatch/prettify.rb
new file mode 100755
index 0000000..a9c211f
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitruby/PrettyPatch/prettify.rb
@@ -0,0 +1,37 @@
+#!/usr/bin/env ruby
+
+require 'optparse'
+require 'pathname'
+require 'webrick/htmlutils'
+
+$LOAD_PATH << Pathname.new(__FILE__).dirname.realpath.to_s
+
+require 'PrettyPatch'
+
+BACKTRACE_SEPARATOR = "\n\tfrom "
+
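+# With --html-exceptions, errors are written to stdout as escaped HTML in a
+# <pre> block (see the rescue clause below) instead of being re-raised.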
+options = { :html_exceptions => false }
+OptionParser.new do |opts|
+ opts.banner = "Usage: #{File.basename($0)} [options] [patch-file]"
+
+ opts.separator ""
+
+ opts.on("--html-exceptions", "Print exceptions to stdout as HTML") { |h| options[:html_exceptions] = h }
+end.parse!
+
+patch_data = nil
+if ARGV.length == 0 || ARGV[0] == '-' then
+ patch_data = $stdin.read
+else
+ File.open(ARGV[0]) { |file| patch_data = file.read }
+end
+
+begin
+ puts PrettyPatch.prettify(patch_data)
+rescue => exception
+ raise unless options[:html_exceptions]
+
+ backtrace = exception.backtrace
+    backtrace[0] += ": " + exception.message + " (" + exception.class.to_s + ")"
+ print "<pre>\n", WEBrick::HTMLUtils::escape(backtrace.join(BACKTRACE_SEPARATOR)), "\n</pre>\n"
+end
diff --git a/src/third_party/blink/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/fake-data-failing-expected.txt b/src/third_party/blink/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/fake-data-failing-expected.txt
new file mode 100644
index 0000000..7d0039e
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/fake-data-failing-expected.txt
@@ -0,0 +1,11 @@
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:2' included forbidden macro 'PLATFORM' => '#if PLATFORM(MAC)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:4' included forbidden macro 'CPU' => '#if CPU(X86)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:6' included forbidden macro 'OS' => '#if OS(MACOSX)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:8' included forbidden macro 'COMPILER' => '#if COMPILER(CLANG)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:10' included forbidden macro 'ENABLE' => '#if ENABLE(FEATURE)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:12' included forbidden macro 'HAVE' => '#if HAVE(FEATURE)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:14' included forbidden macro 'USE' => '#if USE(FEATURE)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:16' included forbidden macro 'COMPILER' => '#if COMPILER_SUPPORTS(FEATURE)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:18' included forbidden macro 'COMPILER' => '#if COMPILER_QUIRK(FEATURE)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:23' included forbidden macro 'PLATFORM' => ' #if PLATFORM(X)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:28' included forbidden macro 'PLATFORM' => '#if defined(ignored) && PLATFORM(X)'
diff --git a/src/third_party/blink/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/pass-expected.txt b/src/third_party/blink/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/pass-expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/pass-expected.txt
diff --git a/src/third_party/blink/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/resources/Fake.framework/Headers/Fail.h b/src/third_party/blink/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/resources/Fake.framework/Headers/Fail.h
new file mode 100644
index 0000000..77f465d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/resources/Fake.framework/Headers/Fail.h
@@ -0,0 +1,29 @@
+// Common macros that we want to catch.
+#if PLATFORM(MAC)
+#endif
+#if CPU(X86)
+#endif
+#if OS(MACOSX)
+#endif
+#if COMPILER(CLANG)
+#endif
+#if ENABLE(FEATURE)
+#endif
+#if HAVE(FEATURE)
+#endif
+#if USE(FEATURE)
+#endif
+#if COMPILER_SUPPORTS(FEATURE)
+#endif
+#if COMPILER_QUIRK(FEATURE)
+#endif
+
+// Indented.
+#if 1
+ #if PLATFORM(X)
+ #endif
+#endif
+
+// Conditionals aren't evaluated; we just check for the existence of the macro.
+#if defined(ignored) && PLATFORM(X)
+#endif
diff --git a/src/third_party/blink/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/resources/Fake.framework/Headers/Pass.h b/src/third_party/blink/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/resources/Fake.framework/Headers/Pass.h
new file mode 100644
index 0000000..3a8a15d
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/resources/Fake.framework/Headers/Pass.h
@@ -0,0 +1,6 @@
+// A macro word in a #error should not matter; that is just a coincidence.
+#error PLATFORM
+
+// There are references to an OS2, but that is not the OS() macro.
+#if defined(__OS2__) || defined(OS2)
+#endif
diff --git a/src/third_party/blink/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/run-test.rb b/src/third_party/blink/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/run-test.rb
new file mode 100755
index 0000000..e362ba3
--- /dev/null
+++ b/src/third_party/blink/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/run-test.rb
@@ -0,0 +1,74 @@
+#!/usr/bin/env ruby
+
+# Copyright (C) 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+# Testing Tools/Scripts/check-for-macros-in-external-headers
+$test_directory = File.dirname(__FILE__)
+$tool = File.expand_path(File.join($test_directory, '..', '..', 'check-for-inappropriate-macros-in-external-headers'))
+puts "Testing: Tools/Scripts/check-for-inappropriate-macros-in-external-headers"
+
+$was_failure = false
+
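+# Strips the machine-specific path prefix in front of "<name>.framework" so the
+# tool's output can be compared against the checked-in *-expected.txt files.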
+def sanitized_output(output)
+ lines = output.split("\n").map { |line| line.sub(/\'(.*)?\/(.*)?\.framework/, "'--stripped--/\\2.framework") }
+ lines.join("\n") + (lines.empty? ? "" : "\n")
+end
+
+def run_test(config)
+ ENV['TARGET_BUILD_DIR'] = File.join($test_directory, 'resources')
+ ENV['PROJECT_NAME'] = config[:framework]
+ ENV['SHALLOW_BUNDLE'] = config[:shallow] ? 'YES' : 'NO'
+ output = sanitized_output %x{ #{$tool} #{config[:paths].join(' ')} 2>&1 }
+
+ if config[:expectedToPass] != ($?.exitstatus == 0)
+ pass = false
+ else
+ expected_output = File.read File.join($test_directory, config[:expectedOutput])
+ pass = output == expected_output
+ end
+
+ puts "#{pass ? "PASS" : "FAIL"} - #{config[:name]}"
+ $was_failure = true if !pass
+end
+
+[
+ {
+ :name => 'test_good_fake_data',
+ :framework => 'Fake',
+ :shallow => true,
+ :paths => ['Headers/Pass.h'],
+ :expectedToPass => true,
+ :expectedOutput => 'pass-expected.txt'
+ },
+ {
+ :name => 'test_bad_fake_data',
+ :framework => 'Fake',
+ :shallow => true,
+ :paths => ['Headers/Fail.h'],
+ :expectedToPass => false,
+ :expectedOutput => 'fake-data-failing-expected.txt'
+ }
+].each { |x| run_test(x) }
+
+exit 1 if $was_failure