Initial import of Cobalt 2.8885 2016-07-27
diff --git a/src/net/url_request/data_protocol_handler.cc b/src/net/url_request/data_protocol_handler.cc
new file mode 100644
index 0000000..3222f72
--- /dev/null
+++ b/src/net/url_request/data_protocol_handler.cc
@@ -0,0 +1,19 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/data_protocol_handler.h"
+
+#include "net/url_request/url_request_data_job.h"
+
+namespace net {
+
+DataProtocolHandler::DataProtocolHandler() {
+}
+
+URLRequestJob* DataProtocolHandler::MaybeCreateJob(
+    URLRequest* request, NetworkDelegate* network_delegate) const {
+  return new URLRequestDataJob(request, network_delegate);
+}
+
+}  // namespace net
diff --git a/src/net/url_request/data_protocol_handler.h b/src/net/url_request/data_protocol_handler.h
new file mode 100644
index 0000000..abb5abe
--- /dev/null
+++ b/src/net/url_request/data_protocol_handler.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_DATA_PROTOCOL_HANDLER_H_
+#define NET_URL_REQUEST_DATA_PROTOCOL_HANDLER_H_
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "net/url_request/url_request_job_factory.h"
+
+namespace net {
+
+class URLRequestJob;
+
+// Implements a ProtocolHandler for Data jobs.
+class NET_EXPORT DataProtocolHandler
+    : public URLRequestJobFactory::ProtocolHandler {
+ public:
+  DataProtocolHandler();
+  virtual URLRequestJob* MaybeCreateJob(
+      URLRequest* request, NetworkDelegate* network_delegate) const OVERRIDE;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(DataProtocolHandler);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_DATA_PROTOCOL_HANDLER_H_
diff --git a/src/net/url_request/file_protocol_handler.cc b/src/net/url_request/file_protocol_handler.cc
new file mode 100644
index 0000000..bbeac1c
--- /dev/null
+++ b/src/net/url_request/file_protocol_handler.cc
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/file_protocol_handler.h"
+
+#include "base/logging.h"
+#include "net/base/net_errors.h"
+#include "net/base/net_util.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_error_job.h"
+#include "net/url_request/url_request_file_dir_job.h"
+#include "net/url_request/url_request_file_job.h"
+
+namespace net {
+
+FileProtocolHandler::FileProtocolHandler() { }
+
+URLRequestJob* FileProtocolHandler::MaybeCreateJob(
+    URLRequest* request, NetworkDelegate* network_delegate) const {
+  FilePath file_path;
+  const bool is_file = FileURLToFilePath(request->url(), &file_path);
+
+  // Check file access permissions.
+  if (!network_delegate ||
+      !network_delegate->CanAccessFile(*request, file_path)) {
+    return new URLRequestErrorJob(request, network_delegate, ERR_ACCESS_DENIED);
+  }
+#if !defined (COBALT) // Cobalt doesn't support URLRequestFileDirJob
+  // We need to decide whether to create URLRequestFileJob for file access or
+  // URLRequestFileDirJob for directory access. To avoid accessing the
+  // filesystem, we only look at the path string here.
+  // The code in the URLRequestFileJob::Start() method discovers that a path,
+  // which doesn't end with a slash, should really be treated as a directory,
+  // and it then redirects to the URLRequestFileDirJob.
+  if (is_file &&
+      file_util::EndsWithSeparator(file_path) &&
+      file_path.IsAbsolute()) {
+    return new URLRequestFileDirJob(request, network_delegate, file_path);
+  }
+#endif
+  // Use a regular file request job for all non-directories (including invalid
+  // file names).
+  return new URLRequestFileJob(request, network_delegate, file_path);
+}
+
+}  // namespace net
diff --git a/src/net/url_request/file_protocol_handler.h b/src/net/url_request/file_protocol_handler.h
new file mode 100644
index 0000000..619227b
--- /dev/null
+++ b/src/net/url_request/file_protocol_handler.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_FILE_PROTOCOL_HANDLER_H_
+#define NET_URL_REQUEST_FILE_PROTOCOL_HANDLER_H_
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "net/url_request/url_request_job_factory.h"
+
+namespace net {
+
+class NetworkDelegate;
+class URLRequestJob;
+
+// Implements a ProtocolHandler for File jobs. If |network_delegate| is NULL,
+// then all file requests will fail with ERR_ACCESS_DENIED.
+class NET_EXPORT FileProtocolHandler :
+    public URLRequestJobFactory::ProtocolHandler {
+ public:
+  FileProtocolHandler();
+  virtual URLRequestJob* MaybeCreateJob(
+      URLRequest* request, NetworkDelegate* network_delegate) const OVERRIDE;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FileProtocolHandler);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_FILE_PROTOCOL_HANDLER_H_
diff --git a/src/net/url_request/fraudulent_certificate_reporter.h b/src/net/url_request/fraudulent_certificate_reporter.h
new file mode 100644
index 0000000..007f660
--- /dev/null
+++ b/src/net/url_request/fraudulent_certificate_reporter.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_FRAUDULENT_CERTIFICATE_REPORTER_H_
+#define NET_URL_REQUEST_FRAUDULENT_CERTIFICATE_REPORTER_H_
+
+#include <string>
+
+namespace net {
+
+class SSLInfo;
+
+// FraudulentCertificateReporter is an interface for asynchronously
+// reporting certificate chains that fail the certificate pinning
+// check.
+class FraudulentCertificateReporter {
+ public:
+  virtual ~FraudulentCertificateReporter() {}
+
+  // Sends a report to the report collection server containing the |ssl_info|
+  // associated with a connection to |hostname|. If |sni_available| is true,
+  // searches the SNI transport security metadata as well as the usual
+  // transport security metadata when determining policy for sending the report.
+  virtual void SendReport(const std::string& hostname,
+                          const SSLInfo& ssl_info,
+                          bool sni_available) = 0;
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_FRAUDULENT_CERTIFICATE_REPORTER_H_
+
diff --git a/src/net/url_request/ftp_protocol_handler.cc b/src/net/url_request/ftp_protocol_handler.cc
new file mode 100644
index 0000000..7d9ba88
--- /dev/null
+++ b/src/net/url_request/ftp_protocol_handler.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/ftp_protocol_handler.h"
+
+#include "base/logging.h"
+#include "net/base/net_errors.h"
+#include "net/base/net_util.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_error_job.h"
+#include "net/url_request/url_request_ftp_job.h"
+#include "googleurl/src/gurl.h"
+
+namespace net {
+
+FtpProtocolHandler::FtpProtocolHandler(
+    FtpTransactionFactory* ftp_transaction_factory,
+    FtpAuthCache* ftp_auth_cache)
+    : ftp_transaction_factory_(ftp_transaction_factory),
+      ftp_auth_cache_(ftp_auth_cache) {
+  DCHECK(ftp_transaction_factory_);
+  DCHECK(ftp_auth_cache_);
+}
+
+URLRequestJob* FtpProtocolHandler::MaybeCreateJob(
+    URLRequest* request, NetworkDelegate* network_delegate) const {
+  int port = request->url().IntPort();
+  if (request->url().has_port() &&
+      !IsPortAllowedByFtp(port) && !IsPortAllowedByOverride(port)) {
+    return new URLRequestErrorJob(request, network_delegate, ERR_UNSAFE_PORT);
+  }
+
+  return new URLRequestFtpJob(request,
+                              network_delegate,
+                              ftp_transaction_factory_,
+                              ftp_auth_cache_);
+}
+
+}  // namespace net
diff --git a/src/net/url_request/ftp_protocol_handler.h b/src/net/url_request/ftp_protocol_handler.h
new file mode 100644
index 0000000..871f422
--- /dev/null
+++ b/src/net/url_request/ftp_protocol_handler.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_FTP_PROTOCOL_HANDLER_H_
+#define NET_URL_REQUEST_FTP_PROTOCOL_HANDLER_H_
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "net/url_request/url_request_job_factory.h"
+
+namespace net {
+
+class FtpAuthCache;
+class FtpTransactionFactory;
+class NetworkDelegate;
+class URLRequestJob;
+
+// Implements a ProtocolHandler for FTP.
+class NET_EXPORT FtpProtocolHandler :
+    public URLRequestJobFactory::ProtocolHandler {
+ public:
+  FtpProtocolHandler(FtpTransactionFactory* ftp_transaction_factory,
+                     FtpAuthCache* ftp_auth_cache);
+  virtual URLRequestJob* MaybeCreateJob(
+      URLRequest* request, NetworkDelegate* network_delegate) const OVERRIDE;
+
+ private:
+  FtpTransactionFactory* ftp_transaction_factory_;
+  FtpAuthCache* ftp_auth_cache_;
+
+  DISALLOW_COPY_AND_ASSIGN(FtpProtocolHandler);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_FTP_PROTOCOL_HANDLER_H_
diff --git a/src/net/url_request/http_user_agent_settings.h b/src/net/url_request/http_user_agent_settings.h
new file mode 100644
index 0000000..f04f556
--- /dev/null
+++ b/src/net/url_request/http_user_agent_settings.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_HTTP_USER_AGENT_SETTINGS_H_
+#define NET_URL_REQUEST_HTTP_USER_AGENT_SETTINGS_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "net/base/net_export.h"
+
+class GURL;
+
+namespace net {
+
+// The interface used by HTTP jobs to retrieve HTTP Accept-Language,
+// Accept-Charset and User-Agent header values.
+class NET_EXPORT HttpUserAgentSettings {
+ public:
+  HttpUserAgentSettings() {}
+  virtual ~HttpUserAgentSettings() {}
+
+  // Gets the value of 'Accept-Language' header field.
+  virtual const std::string& GetAcceptLanguage() const = 0;
+
+  // Gets the value of 'Accept-Charset' header field.
+  virtual const std::string& GetAcceptCharset() const = 0;
+
+  // Gets the UA string.
+  virtual const std::string& GetUserAgent() const = 0;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HttpUserAgentSettings);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_HTTP_USER_AGENT_SETTINGS_H_
+
diff --git a/src/net/url_request/static_http_user_agent_settings.cc b/src/net/url_request/static_http_user_agent_settings.cc
new file mode 100644
index 0000000..d694326
--- /dev/null
+++ b/src/net/url_request/static_http_user_agent_settings.cc
@@ -0,0 +1,34 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/static_http_user_agent_settings.h"
+
+namespace net {
+
+StaticHttpUserAgentSettings::StaticHttpUserAgentSettings(
+    const std::string& accept_language,
+    const std::string& accept_charset,
+    const std::string& user_agent)
+    : accept_language_(accept_language),
+      accept_charset_(accept_charset),
+      user_agent_(user_agent) {
+}
+
+StaticHttpUserAgentSettings::~StaticHttpUserAgentSettings() {
+}
+
+const std::string& StaticHttpUserAgentSettings::GetAcceptLanguage() const {
+  return accept_language_;
+}
+
+const std::string& StaticHttpUserAgentSettings::GetAcceptCharset() const {
+  return accept_charset_;
+}
+
+const std::string& StaticHttpUserAgentSettings::GetUserAgent() const {
+  return user_agent_;
+}
+
+}  // namespace net
+
diff --git a/src/net/url_request/static_http_user_agent_settings.h b/src/net/url_request/static_http_user_agent_settings.h
new file mode 100644
index 0000000..0663f93
--- /dev/null
+++ b/src/net/url_request/static_http_user_agent_settings.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_STATIC_HTTP_USER_AGENT_SETTINGS_H_
+#define NET_URL_REQUEST_STATIC_HTTP_USER_AGENT_SETTINGS_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "net/base/net_export.h"
+#include "net/url_request/http_user_agent_settings.h"
+
+namespace net {
+
+// An implementation of |HttpUserAgentSettings| that always provides the
+// same constant values for the HTTP Accept-Language, Accept-Charset, and
+// User-Agent headers.
+class NET_EXPORT StaticHttpUserAgentSettings : public HttpUserAgentSettings {
+ public:
+  StaticHttpUserAgentSettings(const std::string& accept_language,
+                              const std::string& accept_charset,
+                              const std::string& user_agent);
+  virtual ~StaticHttpUserAgentSettings();
+
+  // HttpUserAgentSettings implementation
+  virtual const std::string& GetAcceptLanguage() const OVERRIDE;
+  virtual const std::string& GetAcceptCharset() const OVERRIDE;
+  virtual const std::string& GetUserAgent() const OVERRIDE;
+
+ private:
+  const std::string accept_language_;
+  const std::string accept_charset_;
+  const std::string user_agent_;
+
+  DISALLOW_COPY_AND_ASSIGN(StaticHttpUserAgentSettings);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_STATIC_HTTP_USER_AGENT_SETTINGS_H_
+
diff --git a/src/net/url_request/test_url_fetcher_factory.cc b/src/net/url_request/test_url_fetcher_factory.cc
new file mode 100644
index 0000000..4b076d2
--- /dev/null
+++ b/src/net/url_request/test_url_fetcher_factory.cc
@@ -0,0 +1,377 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/test_url_fetcher_factory.h"
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop.h"
+#include "net/base/host_port_pair.h"
+#include "net/http/http_response_headers.h"
+#include "net/url_request/url_fetcher_delegate.h"
+#include "net/url_request/url_fetcher_impl.h"
+#include "net/url_request/url_request_status.h"
+
+namespace net {
+
+ScopedURLFetcherFactory::ScopedURLFetcherFactory(
+    URLFetcherFactory* factory) {
+  DCHECK(!URLFetcherImpl::factory());
+  URLFetcherImpl::set_factory(factory);
+}
+
+ScopedURLFetcherFactory::~ScopedURLFetcherFactory() {
+  DCHECK(URLFetcherImpl::factory());
+  URLFetcherImpl::set_factory(NULL);
+}
+
+TestURLFetcher::TestURLFetcher(int id,
+                               const GURL& url,
+                               URLFetcherDelegate* d)
+    : owner_(NULL),
+      id_(id),
+      original_url_(url),
+      delegate_(d),
+      delegate_for_tests_(NULL),
+      did_receive_last_chunk_(false),
+      fake_load_flags_(0),
+      fake_response_code_(-1),
+      fake_response_destination_(STRING),
+      fake_was_fetched_via_proxy_(false),
+      fake_max_retries_(0) {
+}
+
+TestURLFetcher::~TestURLFetcher() {
+  if (delegate_for_tests_)
+    delegate_for_tests_->OnRequestEnd(id_);
+  if (owner_)
+    owner_->RemoveFetcherFromMap(id_);
+}
+
+void TestURLFetcher::SetUploadData(const std::string& upload_content_type,
+                                   const std::string& upload_content) {
+  upload_data_ = upload_content;
+}
+
+void TestURLFetcher::SetChunkedUpload(const std::string& upload_content_type) {
+}
+
+void TestURLFetcher::AppendChunkToUpload(const std::string& data,
+                                         bool is_last_chunk) {
+  DCHECK(!did_receive_last_chunk_);
+  did_receive_last_chunk_ = is_last_chunk;
+  chunks_.push_back(data);
+  if (delegate_for_tests_)
+    delegate_for_tests_->OnChunkUpload(id_);
+}
+
+void TestURLFetcher::SetLoadFlags(int load_flags) {
+  fake_load_flags_= load_flags;
+}
+
+int TestURLFetcher::GetLoadFlags() const {
+  return fake_load_flags_;
+}
+
+void TestURLFetcher::SetReferrer(const std::string& referrer) {
+}
+
+void TestURLFetcher::SetExtraRequestHeaders(
+    const std::string& extra_request_headers) {
+  fake_extra_request_headers_.Clear();
+  fake_extra_request_headers_.AddHeadersFromString(extra_request_headers);
+}
+
+void TestURLFetcher::AddExtraRequestHeader(const std::string& header_line) {
+  fake_extra_request_headers_.AddHeaderFromString(header_line);
+}
+
+void TestURLFetcher::GetExtraRequestHeaders(
+    HttpRequestHeaders* headers) const {
+  *headers = fake_extra_request_headers_;
+}
+
+void TestURLFetcher::SetRequestContext(
+    URLRequestContextGetter* request_context_getter) {
+}
+
+void TestURLFetcher::SetFirstPartyForCookies(
+    const GURL& first_party_for_cookies) {
+}
+
+void TestURLFetcher::SetURLRequestUserData(
+    const void* key,
+    const CreateDataCallback& create_data_callback) {
+}
+
+void TestURLFetcher::SetStopOnRedirect(bool stop_on_redirect) {
+}
+
+void TestURLFetcher::SetAutomaticallyRetryOn5xx(bool retry) {
+}
+
+void TestURLFetcher::SetMaxRetriesOn5xx(int max_retries) {
+  fake_max_retries_ = max_retries;
+}
+
+int TestURLFetcher::GetMaxRetriesOn5xx() const {
+  return fake_max_retries_;
+}
+
+base::TimeDelta TestURLFetcher::GetBackoffDelay() const {
+  return fake_backoff_delay_;
+}
+
+void TestURLFetcher::SetAutomaticallyRetryOnNetworkChanges(int max_retries) {
+}
+
+void TestURLFetcher::SaveResponseToFileAtPath(
+    const FilePath& file_path,
+    scoped_refptr<base::TaskRunner> file_task_runner) {
+}
+
+void TestURLFetcher::SaveResponseToTemporaryFile(
+    scoped_refptr<base::TaskRunner> file_task_runner) {
+}
+
+#if defined(COBALT)
+void TestURLFetcher::DiscardResponse() {
+}
+#endif
+
+HttpResponseHeaders* TestURLFetcher::GetResponseHeaders() const {
+  return fake_response_headers_;
+}
+
+HostPortPair TestURLFetcher::GetSocketAddress() const {
+  NOTIMPLEMENTED();
+  return HostPortPair();
+}
+
+bool TestURLFetcher::WasFetchedViaProxy() const {
+  return fake_was_fetched_via_proxy_;
+}
+
+void TestURLFetcher::Start() {
+  // Overridden to do nothing. It is assumed the caller will notify the
+  // delegate.
+  if (delegate_for_tests_)
+    delegate_for_tests_->OnRequestStart(id_);
+}
+
+const GURL& TestURLFetcher::GetOriginalURL() const {
+  return original_url_;
+}
+
+const GURL& TestURLFetcher::GetURL() const {
+  return fake_url_;
+}
+
+const URLRequestStatus& TestURLFetcher::GetStatus() const {
+  return fake_status_;
+}
+
+int TestURLFetcher::GetResponseCode() const {
+  return fake_response_code_;
+}
+
+const ResponseCookies& TestURLFetcher::GetCookies() const {
+  return fake_cookies_;
+}
+
+bool TestURLFetcher::FileErrorOccurred(
+    base::PlatformFileError* out_error_code) const {
+  NOTIMPLEMENTED();
+  return false;
+}
+
+void TestURLFetcher::ReceivedContentWasMalformed() {
+}
+
+bool TestURLFetcher::GetResponseAsString(
+    std::string* out_response_string) const {
+  if (fake_response_destination_ != STRING)
+    return false;
+
+  *out_response_string = fake_response_string_;
+  return true;
+}
+
+bool TestURLFetcher::GetResponseAsFilePath(
+    bool take_ownership, FilePath* out_response_path) const {
+  if (fake_response_destination_ != TEMP_FILE)
+    return false;
+
+  *out_response_path = fake_response_file_path_;
+  return true;
+}
+
+void TestURLFetcher::set_status(const URLRequestStatus& status) {
+  fake_status_ = status;
+}
+
+void TestURLFetcher::set_was_fetched_via_proxy(bool flag) {
+  fake_was_fetched_via_proxy_ = flag;
+}
+
+void TestURLFetcher::set_response_headers(
+    scoped_refptr<HttpResponseHeaders> headers) {
+  fake_response_headers_ = headers;
+}
+
+void TestURLFetcher::set_backoff_delay(base::TimeDelta backoff_delay) {
+  fake_backoff_delay_ = backoff_delay;
+}
+
+void TestURLFetcher::SetDelegateForTests(DelegateForTests* delegate_for_tests) {
+  delegate_for_tests_ = delegate_for_tests;
+}
+
+void TestURLFetcher::SetResponseString(const std::string& response) {
+  fake_response_destination_ = STRING;
+  fake_response_string_ = response;
+}
+
+void TestURLFetcher::SetResponseFilePath(const FilePath& path) {
+  fake_response_destination_ = TEMP_FILE;
+  fake_response_file_path_ = path;
+}
+
+TestURLFetcherFactory::TestURLFetcherFactory()
+    : ScopedURLFetcherFactory(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
+      delegate_for_tests_(NULL),
+      remove_fetcher_on_delete_(false) {
+}
+
+TestURLFetcherFactory::~TestURLFetcherFactory() {}
+
+URLFetcher* TestURLFetcherFactory::CreateURLFetcher(
+    int id,
+    const GURL& url,
+    URLFetcher::RequestType request_type,
+    URLFetcherDelegate* d) {
+  TestURLFetcher* fetcher = new TestURLFetcher(id, url, d);
+  if (remove_fetcher_on_delete_)
+    fetcher->set_owner(this);
+  fetcher->SetDelegateForTests(delegate_for_tests_);
+  fetchers_[id] = fetcher;
+  return fetcher;
+}
+
+TestURLFetcher* TestURLFetcherFactory::GetFetcherByID(int id) const {
+  Fetchers::const_iterator i = fetchers_.find(id);
+  return i == fetchers_.end() ? NULL : i->second;
+}
+
+void TestURLFetcherFactory::RemoveFetcherFromMap(int id) {
+  Fetchers::iterator i = fetchers_.find(id);
+  DCHECK(i != fetchers_.end());
+  fetchers_.erase(i);
+}
+
+void TestURLFetcherFactory::SetDelegateForTests(
+    TestURLFetcherDelegateForTests* delegate_for_tests) {
+  delegate_for_tests_ = delegate_for_tests;
+}
+
+// This class is used by the FakeURLFetcherFactory below.
+class FakeURLFetcher : public TestURLFetcher {
+ public:
+  // Normal URL fetcher constructor but also takes in a pre-baked response.
+  FakeURLFetcher(const GURL& url,
+                 URLFetcherDelegate* d,
+                 const std::string& response_data, bool success)
+      : TestURLFetcher(0, url, d),
+        ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) {
+    set_status(URLRequestStatus(
+        success ? URLRequestStatus::SUCCESS : URLRequestStatus::FAILED,
+        0));
+    set_response_code(success ? 200 : 500);
+    SetResponseString(response_data);
+  }
+
+  // Start the request.  This will call the given delegate asynchronously
+  // with the pre-baked response as a parameter.
+  virtual void Start() OVERRIDE {
+    MessageLoop::current()->PostTask(
+        FROM_HERE,
+        base::Bind(&FakeURLFetcher::RunDelegate, weak_factory_.GetWeakPtr()));
+  }
+
+  virtual const GURL& GetURL() const OVERRIDE {
+    return TestURLFetcher::GetOriginalURL();
+  }
+
+ private:
+  virtual ~FakeURLFetcher() {
+  }
+
+  // This is the method which actually calls the delegate that is passed in the
+  // constructor.
+  void RunDelegate() {
+    delegate()->OnURLFetchComplete(this);
+  }
+
+  base::WeakPtrFactory<FakeURLFetcher> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(FakeURLFetcher);
+};
+
+FakeURLFetcherFactory::FakeURLFetcherFactory()
+    : ScopedURLFetcherFactory(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
+      default_factory_(NULL) {
+}
+
+FakeURLFetcherFactory::FakeURLFetcherFactory(
+    URLFetcherFactory* default_factory)
+    : ScopedURLFetcherFactory(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
+      default_factory_(default_factory) {
+}
+
+FakeURLFetcherFactory::~FakeURLFetcherFactory() {}
+
+URLFetcher* FakeURLFetcherFactory::CreateURLFetcher(
+    int id,
+    const GURL& url,
+    URLFetcher::RequestType request_type,
+    URLFetcherDelegate* d) {
+  FakeResponseMap::const_iterator it = fake_responses_.find(url);
+  if (it == fake_responses_.end()) {
+    if (default_factory_ == NULL) {
+      // If we don't have a baked response for that URL we return NULL.
+      DLOG(ERROR) << "No baked response for URL: " << url.spec();
+      return NULL;
+    } else {
+      return default_factory_->CreateURLFetcher(id, url, request_type, d);
+    }
+  }
+  return new FakeURLFetcher(url, d, it->second.first, it->second.second);
+}
+
+void FakeURLFetcherFactory::SetFakeResponse(const std::string& url,
+                                            const std::string& response_data,
+                                            bool success) {
+  // Overwrite existing URL if it already exists.
+  fake_responses_[GURL(url)] = std::make_pair(response_data, success);
+}
+
+void FakeURLFetcherFactory::ClearFakeResponses() {
+  fake_responses_.clear();
+}
+
+URLFetcherImplFactory::URLFetcherImplFactory() {}
+
+URLFetcherImplFactory::~URLFetcherImplFactory() {}
+
+URLFetcher* URLFetcherImplFactory::CreateURLFetcher(
+    int id,
+    const GURL& url,
+    URLFetcher::RequestType request_type,
+    URLFetcherDelegate* d) {
+  return new URLFetcherImpl(url, request_type, d);
+}
+
+}  // namespace net
diff --git a/src/net/url_request/test_url_fetcher_factory.h b/src/net/url_request/test_url_fetcher_factory.h
new file mode 100644
index 0000000..62c0efb
--- /dev/null
+++ b/src/net/url_request/test_url_fetcher_factory.h
@@ -0,0 +1,342 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_TEST_URL_FETCHER_FACTORY_H_
+#define NET_URL_REQUEST_TEST_URL_FETCHER_FACTORY_H_
+
+#include <list>
+#include <map>
+#include <string>
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/threading/non_thread_safe.h"
+#include "googleurl/src/gurl.h"
+#include "net/http/http_request_headers.h"
+#include "net/url_request/url_fetcher_factory.h"
+#include "net/url_request/url_request_status.h"
+
+namespace net {
+
+// Changes URLFetcher's Factory for the lifetime of the object.
+// Note that this scoper cannot be nested (to make it even harder to misuse).
+class ScopedURLFetcherFactory : public base::NonThreadSafe {
+ public:
+  explicit ScopedURLFetcherFactory(URLFetcherFactory* factory);
+  virtual ~ScopedURLFetcherFactory();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ScopedURLFetcherFactory);
+};
+
+// TestURLFetcher and TestURLFetcherFactory are used for testing consumers of
+// URLFetcher. TestURLFetcherFactory is a URLFetcherFactory that creates
+// TestURLFetchers. TestURLFetcher::Start is overridden to do nothing. It is
+// expected that you'll grab the delegate from the TestURLFetcher and invoke
+// the callback method when appropriate. In this way it's easy to mock a
+// URLFetcher.
+// Typical usage:
+//   // TestURLFetcher requires a MessageLoop.
+//   MessageLoop message_loop;
+//   // And an IO thread to release URLRequestContextGetter in URLFetcher::Core.
+//   BrowserThreadImpl io_thread(BrowserThread::IO, &message_loop);
+//   // Create factory (it automatically sets itself as URLFetcher's factory).
+//   TestURLFetcherFactory factory;
+//   // Do something that triggers creation of a URLFetcher.
+//   ...
+//   TestURLFetcher* fetcher = factory.GetFetcherByID(expected_id);
+//   DCHECK(fetcher);
+//   // Notify delegate with whatever data you want.
+//   fetcher->delegate()->OnURLFetchComplete(...);
+//   // Make sure consumer of URLFetcher does the right thing.
+//   ...
+//
+// Note: if you don't know when your request objects will be created you
+// might want to use the FakeURLFetcher and FakeURLFetcherFactory classes
+// below.
+
+class TestURLFetcherFactory;
+class TestURLFetcher : public URLFetcher {
+ public:
+  // Interface for tests to intercept production code classes using URLFetcher.
+  // Allows event-driven mock server classes to analyze the correctness of
+  // requests / uploads events and forge responses back at the right moment.
+  class DelegateForTests {
+   public:
+    // Callback issued correspondingly to the call to the |Start()| method.
+    virtual void OnRequestStart(int fetcher_id) = 0;
+
+    // Callback issued correspondingly to the call to |AppendChunkToUpload|.
+    // Uploaded chunks can be retrieved with the |upload_chunks()| getter.
+    virtual void OnChunkUpload(int fetcher_id) = 0;
+
+    // Callback issued correspondingly to the destructor.
+    virtual void OnRequestEnd(int fetcher_id) = 0;
+  };
+
+  TestURLFetcher(int id,
+                 const GURL& url,
+                 URLFetcherDelegate* d);
+  virtual ~TestURLFetcher();
+
+  // URLFetcher implementation
+  virtual void SetUploadData(const std::string& upload_content_type,
+                             const std::string& upload_content) OVERRIDE;
+  virtual void SetChunkedUpload(
+      const std::string& upload_content_type) OVERRIDE;
+  // Overridden to cache the chunks uploaded. Caller can read back the uploaded
+  // chunks with the upload_chunks() accessor.
+  virtual void AppendChunkToUpload(const std::string& data,
+                                   bool is_last_chunk) OVERRIDE;
+  virtual void SetLoadFlags(int load_flags) OVERRIDE;
+  virtual int GetLoadFlags() const OVERRIDE;
+  virtual void SetReferrer(const std::string& referrer) OVERRIDE;
+  virtual void SetExtraRequestHeaders(
+      const std::string& extra_request_headers) OVERRIDE;
+  virtual void AddExtraRequestHeader(const std::string& header_line) OVERRIDE;
+  virtual void GetExtraRequestHeaders(
+      HttpRequestHeaders* headers) const OVERRIDE;
+  virtual void SetRequestContext(
+      URLRequestContextGetter* request_context_getter) OVERRIDE;
+  virtual void SetFirstPartyForCookies(
+      const GURL& first_party_for_cookies) OVERRIDE;
+  virtual void SetURLRequestUserData(
+      const void* key,
+      const CreateDataCallback& create_data_callback) OVERRIDE;
+  virtual void SetStopOnRedirect(bool stop_on_redirect) OVERRIDE;
+  virtual void SetAutomaticallyRetryOn5xx(bool retry) OVERRIDE;
+  virtual void SetMaxRetriesOn5xx(int max_retries) OVERRIDE;
+  virtual int GetMaxRetriesOn5xx() const OVERRIDE;
+  virtual base::TimeDelta GetBackoffDelay() const OVERRIDE;
+  virtual void SetAutomaticallyRetryOnNetworkChanges(int max_retries) OVERRIDE;
+  virtual void SaveResponseToFileAtPath(
+      const FilePath& file_path,
+      scoped_refptr<base::TaskRunner> file_task_runner) OVERRIDE;
+  virtual void SaveResponseToTemporaryFile(
+      scoped_refptr<base::TaskRunner> file_task_runner) OVERRIDE;
+#if defined(COBALT)
+  virtual void DiscardResponse() OVERRIDE;
+#endif
+  virtual HttpResponseHeaders* GetResponseHeaders() const OVERRIDE;
+  virtual HostPortPair GetSocketAddress() const OVERRIDE;
+  virtual bool WasFetchedViaProxy() const OVERRIDE;
+  virtual void Start() OVERRIDE;
+
+  // URL we were created with. Because of how we're using URLFetcher GetURL()
+  // always returns an empty URL. Chances are you'll want to use
+  // GetOriginalURL() in your tests.
+  virtual const GURL& GetOriginalURL() const OVERRIDE;
+  virtual const GURL& GetURL() const OVERRIDE;
+  virtual const URLRequestStatus& GetStatus() const OVERRIDE;
+  virtual int GetResponseCode() const OVERRIDE;
+  virtual const ResponseCookies& GetCookies() const OVERRIDE;
+  virtual bool FileErrorOccurred(
+      base::PlatformFileError* out_error_code) const OVERRIDE;
+  virtual void ReceivedContentWasMalformed() OVERRIDE;
+  // Override response access functions to return fake data.
+  virtual bool GetResponseAsString(
+      std::string* out_response_string) const OVERRIDE;
+  virtual bool GetResponseAsFilePath(
+      bool take_ownership, FilePath* out_response_path) const OVERRIDE;
+
+  // Sets owner of this class.  Set it to a non-NULL value if you want
+  // to automatically unregister this fetcher from the owning factory
+  // upon destruction.
+  void set_owner(TestURLFetcherFactory* owner) { owner_ = owner; }
+
+  // Unique ID in our factory.
+  int id() const { return id_; }
+
+  // Returns the data uploaded on this URLFetcher.
+  const std::string& upload_data() const { return upload_data_; }
+
+  // Returns the chunks of data uploaded on this URLFetcher.
+  const std::list<std::string>& upload_chunks() const { return chunks_; }
+
+  // Checks whether the last call to |AppendChunkToUpload(...)| was final.
+  bool did_receive_last_chunk() const { return did_receive_last_chunk_; }
+
+  // Returns the delegate installed on the URLFetcher.
+  URLFetcherDelegate* delegate() const { return delegate_; }
+
+  void set_url(const GURL& url) { fake_url_ = url; }
+  void set_status(const URLRequestStatus& status);
+  void set_response_code(int response_code) {
+    fake_response_code_ = response_code;
+  }
+  void set_cookies(const ResponseCookies& c) { fake_cookies_ = c; }
+  void set_was_fetched_via_proxy(bool flag);
+  void set_response_headers(scoped_refptr<HttpResponseHeaders> headers);
+  void set_backoff_delay(base::TimeDelta backoff_delay);
+  void SetDelegateForTests(DelegateForTests* delegate_for_tests);
+
+  // Set string data.
+  void SetResponseString(const std::string& response);
+
+  // Set File data.
+  void SetResponseFilePath(const FilePath& path);
+
+ private:
+  enum ResponseDestinationType {
+    STRING,  // Default: In a std::string
+    TEMP_FILE  // Write to a temp file
+  };
+
+  TestURLFetcherFactory* owner_;
+  const int id_;
+  const GURL original_url_;
+  URLFetcherDelegate* delegate_;
+  DelegateForTests* delegate_for_tests_;
+  std::string upload_data_;
+  std::list<std::string> chunks_;
+  bool did_receive_last_chunk_;
+
+  // User can use set_* methods to provide values returned by getters.
+  // Setting the real values is not possible, because the real class
+  // has no setters. The data is a private member of a class defined
+  // in a .cc file, so we can't get at it with friendship.
+  int fake_load_flags_;
+  GURL fake_url_;
+  URLRequestStatus fake_status_;
+  int fake_response_code_;
+  ResponseCookies fake_cookies_;
+  ResponseDestinationType fake_response_destination_;
+  std::string fake_response_string_;
+  FilePath fake_response_file_path_;
+  bool fake_was_fetched_via_proxy_;
+  scoped_refptr<HttpResponseHeaders> fake_response_headers_;
+  HttpRequestHeaders fake_extra_request_headers_;
+  int fake_max_retries_;
+  base::TimeDelta fake_backoff_delay_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestURLFetcher);
+};
+
+typedef TestURLFetcher::DelegateForTests TestURLFetcherDelegateForTests;
+
+// Simple URLFetcherFactory method that creates TestURLFetchers. All fetchers
+// are registered in a map by the id passed to the create method.
+// Optionally, a fetcher may be automatically unregistered from the map upon
+// its destruction.
+class TestURLFetcherFactory : public URLFetcherFactory,
+                              public ScopedURLFetcherFactory {
+ public:
+  TestURLFetcherFactory();
+  virtual ~TestURLFetcherFactory();
+
+  virtual URLFetcher* CreateURLFetcher(
+      int id,
+      const GURL& url,
+      URLFetcher::RequestType request_type,
+      URLFetcherDelegate* d) OVERRIDE;
+  TestURLFetcher* GetFetcherByID(int id) const;
+  void RemoveFetcherFromMap(int id);
+  void SetDelegateForTests(TestURLFetcherDelegateForTests* delegate_for_tests);
+  void set_remove_fetcher_on_delete(bool remove_fetcher_on_delete) {
+    remove_fetcher_on_delete_ = remove_fetcher_on_delete;
+  }
+
+ private:
+  // Maps from id passed to create to the returned URLFetcher.
+  typedef std::map<int, TestURLFetcher*> Fetchers;
+  Fetchers fetchers_;
+  TestURLFetcherDelegateForTests* delegate_for_tests_;
+  // Whether to automatically unregister a fetcher from this factory upon its
+  // destruction, false by default.
+  bool remove_fetcher_on_delete_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestURLFetcherFactory);
+};
+
+// The FakeURLFetcher and FakeURLFetcherFactory classes are similar to the
+// ones above but don't require you to know when exactly the URLFetcher objects
+// will be created.
+//
+// These classes let you set pre-baked HTTP responses for particular URLs.
+// E.g., if the user requests http://a.com/ then respond with an HTTP/500.
+//
+// We assume that the thread that is calling Start() on the URLFetcher object
+// has a message loop running.
+//
+// This class is not thread-safe.  You should not call SetFakeResponse or
+// ClearFakeResponse at the same time you call CreateURLFetcher.  However, it is
+// OK to start URLFetcher objects while setting or clearing fake responses
+// since already created URLFetcher objects will not be affected by any changes
+// made to the fake responses (once a URLFetcher object is created you cannot
+// change its fake response).
+//
+// Example usage:
+//  FakeURLFetcherFactory factory;
+//
+//  // You know that class SomeService will request url http://a.com/ and you
+//  // want to test the service class by returning an error.
+//  factory.SetFakeResponse("http://a.com/", "", false);
+//  // But if the service requests http://b.com/asdf you want to respond with
+//  // a simple html page and an HTTP/200 code.
+//  factory.SetFakeResponse("http://b.com/asdf",
+//                          "<html><body>hello world</body></html>",
+//                          true);
+//
+//  SomeService service;
+//  service.Run();  // Will eventually request these two URLs.
+
+class FakeURLFetcherFactory : public URLFetcherFactory,
+                              public ScopedURLFetcherFactory {
+ public:
+  FakeURLFetcherFactory();
+  // FakeURLFetcherFactory that will delegate creating URLFetcher for unknown
+  // url to the given factory.
+  explicit FakeURLFetcherFactory(URLFetcherFactory* default_factory);
+  virtual ~FakeURLFetcherFactory();
+
+  // If no fake response is set for the given URL this method will delegate the
+  // call to |default_factory_| if it is not NULL, or return NULL if it is
+  // NULL.
+  // Otherwise, it will return a URLFetcher object which will respond with the
+  // pre-baked response that the client has set by calling SetFakeResponse().
+  virtual URLFetcher* CreateURLFetcher(
+      int id,
+      const GURL& url,
+      URLFetcher::RequestType request_type,
+      URLFetcherDelegate* d) OVERRIDE;
+
+  // Sets the fake response for a given URL.  If success is true we will serve
+  // an HTTP/200 and an HTTP/500 otherwise.  The |response_data| may be empty.
+  void SetFakeResponse(const std::string& url,
+                       const std::string& response_data,
+                       bool success);
+
+  // Clear all the fake responses that were previously set via
+  // SetFakeResponse().
+  void ClearFakeResponses();
+
+ private:
+  typedef std::map<GURL, std::pair<std::string, bool> > FakeResponseMap;
+  FakeResponseMap fake_responses_;
+  URLFetcherFactory* default_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(FakeURLFetcherFactory);
+};
+
+// This is an implementation of URLFetcherFactory that will create a
+// URLFetcherImpl. It can be use in conjunction with a FakeURLFetcherFactory in
+// integration tests to control the behavior of some requests but execute
+// all the other ones.
+class URLFetcherImplFactory : public URLFetcherFactory {
+ public:
+  URLFetcherImplFactory();
+  virtual ~URLFetcherImplFactory();
+
+  // This method will create a real URLFetcher.
+  virtual URLFetcher* CreateURLFetcher(
+      int id,
+      const GURL& url,
+      URLFetcher::RequestType request_type,
+      URLFetcherDelegate* d) OVERRIDE;
+
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_TEST_URL_FETCHER_FACTORY_H_
diff --git a/src/net/url_request/url_fetcher.cc b/src/net/url_request/url_fetcher.cc
new file mode 100644
index 0000000..3cc75ad
--- /dev/null
+++ b/src/net/url_request/url_fetcher.cc
@@ -0,0 +1,48 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_fetcher.h"
+
+#include "net/url_request/url_fetcher_factory.h"
+#include "net/url_request/url_fetcher_impl.h"
+
+namespace net {
+
+URLFetcher::~URLFetcher() {}
+
+// static
+URLFetcher* net::URLFetcher::Create(
+    const GURL& url,
+    URLFetcher::RequestType request_type,
+    URLFetcherDelegate* d) {
+  return new URLFetcherImpl(url, request_type, d);
+}
+
+// static
+URLFetcher* net::URLFetcher::Create(
+    int id,
+    const GURL& url,
+    URLFetcher::RequestType request_type,
+    URLFetcherDelegate* d) {
+  URLFetcherFactory* factory = URLFetcherImpl::factory();
+  return factory ? factory->CreateURLFetcher(id, url, request_type, d) :
+                   new URLFetcherImpl(url, request_type, d);
+}
+
+// static
+void net::URLFetcher::CancelAll() {
+  URLFetcherImpl::CancelAll();
+}
+
+// static
+void net::URLFetcher::SetEnableInterceptionForTests(bool enabled) {
+  URLFetcherImpl::SetEnableInterceptionForTests(enabled);
+}
+
+// static
+void net::URLFetcher::SetIgnoreCertificateRequests(bool ignored) {
+  URLFetcherImpl::SetIgnoreCertificateRequests(ignored);
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_fetcher.h b/src/net/url_request/url_fetcher.h
new file mode 100644
index 0000000..59ca148
--- /dev/null
+++ b/src/net/url_request/url_fetcher.h
@@ -0,0 +1,297 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_FETCHER_H_
+#define NET_URL_REQUEST_URL_FETCHER_H_
+
+#include <string>
+#include <vector>
+
+#include "base/callback_forward.h"
+#include "base/memory/ref_counted.h"
+#include "base/platform_file.h"
+#include "base/supports_user_data.h"
+#include "base/task_runner.h"
+#include "net/base/net_export.h"
+
+class FilePath;
+class GURL;
+
+namespace base {
+class MessageLoopProxy;
+class TimeDelta;
+}
+
+namespace net {
+class HostPortPair;
+class HttpRequestHeaders;
+class HttpResponseHeaders;
+class URLFetcherDelegate;
+class URLRequestContextGetter;
+class URLRequestStatus;
+typedef std::vector<std::string> ResponseCookies;
+
+// To use this class, create an instance with the desired URL and a pointer to
+// the object to be notified when the URL has been loaded:
+//   URLFetcher* fetcher = URLFetcher::Create("http://www.google.com",
+//                                            URLFetcher::GET, this);
+//
+// You must also set a request context getter:
+//
+//   fetcher->SetRequestContext(&my_request_context_getter);
+//
+// Then, optionally set properties on this object, like the request context or
+// extra headers:
+//   fetcher->set_extra_request_headers("X-Foo: bar");
+//
+// Finally, start the request:
+//   fetcher->Start();
+//
+//
+// The object you supply as a delegate must inherit from URLFetcherDelegate.
+// When the response headers are received, OnURLFetchResponseStarted()
+// will be called with a pointer to the URLFetcher and GetResponseCode()/
+// GetResponseHeaders() can be called INSIDE it to retrieve the response code
+// and headers.  Note that when retry is enabled, the response code and headers
+// retrieved at this stage may differ from the final ones.  When the fetch
+// is completed, OnURLFetchComplete() will be called with a pointer to the
+// URLFetcher.  From that point until the original URLFetcher instance is
+// destroyed, you may use accessor methods to see the result of the fetch.  You
+// should copy these objects if you need them to live longer than the
+// URLFetcher instance.  If the URLFetcher instance is destroyed before the
+// callback happens, the fetch will be canceled and no callback will occur.
+//
+// You may create the URLFetcher instance on any thread; OnURLFetchComplete()
+// will be called back on the same thread you use to create the instance.
+//
+//
+// NOTE: By default URLFetcher requests are NOT intercepted, except when
+// interception is explicitly enabled in tests.
+class NET_EXPORT URLFetcher {
+ public:
+  // Impossible http response code. Used to signal that no http response code
+  // was received.
+  enum ResponseCode {
+    RESPONSE_CODE_INVALID = -1
+  };
+
+  enum RequestType {
+    GET,
+    POST,
+    HEAD,
+    DELETE_REQUEST,   // DELETE is already taken on Windows.
+                      // <winnt.h> defines a DELETE macro.
+    PUT,
+  };
+
+  // Used by SetURLRequestUserData.  The callback should make a fresh
+  // base::SupportsUserData::Data object every time it's called.
+  typedef base::Callback<base::SupportsUserData::Data*()> CreateDataCallback;
+
+  virtual ~URLFetcher();
+
+  // |url| is the URL to send the request to.
+  // |request_type| is the type of request to make.
+  // |d| the object that will receive the callback on fetch completion.
+  static URLFetcher* Create(const GURL& url,
+                            URLFetcher::RequestType request_type,
+                            URLFetcherDelegate* d);
+
+  // Like above, but if there's a URLFetcherFactory registered with the
+  // implementation it will be used. |id| may be used during testing to identify
+  // who is creating the URLFetcher.
+  static URLFetcher* Create(int id,
+                            const GURL& url,
+                            URLFetcher::RequestType request_type,
+                            URLFetcherDelegate* d);
+
+  // Cancels all existing URLFetchers.  Will notify the URLFetcherDelegates.
+  // Note that any new URLFetchers created while this is running will not be
+  // cancelled.  Typically, one would call this in the CleanUp() method of an IO
+  // thread, so that no new URLRequests would be able to start on the IO thread
+  // anyway.  This doesn't prevent new URLFetchers from trying to post to the IO
+  // thread though, even though the task won't ever run.
+  static void CancelAll();
+
+  // Normally interception is disabled for URLFetcher, but you can use this
+  // to enable it for tests. Also see ScopedURLFetcherFactory for another way
+  // of testing code that uses an URLFetcher.
+  static void SetEnableInterceptionForTests(bool enabled);
+
+  // Normally, URLFetcher will abort loads that request SSL client certificate
+  // authentication, but this method may be used to cause URLFetchers to ignore
+  // requests for client certificates and continue anonymously. Because such
+  // behaviour affects the URLRequestContext's shared network state and socket
+  // pools, it should only be used for testing.
+  static void SetIgnoreCertificateRequests(bool ignored);
+
+  // Sets data only needed by POSTs.  All callers making POST requests should
+  // call this before the request is started.  |upload_content_type| is the MIME
+  // type of the content, while |upload_content| is the data to be sent (the
+  // Content-Length header value will be set to the length of this data).
+  virtual void SetUploadData(const std::string& upload_content_type,
+                             const std::string& upload_content) = 0;
+
+  // Indicates that the POST data is sent via chunked transfer encoding.
+  // This may only be called before calling Start().
+  // Use AppendChunkToUpload() to give the data chunks after calling Start().
+  virtual void SetChunkedUpload(const std::string& upload_content_type) = 0;
+
+  // Adds the given bytes to a request's POST data transmitted using chunked
+  // transfer encoding.
+  // This method should be called ONLY after calling Start().
+  virtual void AppendChunkToUpload(const std::string& data,
+                                   bool is_last_chunk) = 0;
+
+  // Set one or more load flags as defined in net/base/load_flags.h.  Must be
+  // called before the request is started.
+  virtual void SetLoadFlags(int load_flags) = 0;
+
+  // Returns the current load flags.
+  virtual int GetLoadFlags() const = 0;
+
+  // The referrer URL for the request. Must be called before the request is
+  // started.
+  virtual void SetReferrer(const std::string& referrer) = 0;
+
+  // Set extra headers on the request.  Must be called before the request
+  // is started.
+  // This replaces the entire extra request headers.
+  virtual void SetExtraRequestHeaders(
+      const std::string& extra_request_headers) = 0;
+
+  // Add header (with format field-name ":" [ field-value ]) to the request
+  // headers.  Must be called before the request is started.
+  // This appends the header to the current extra request headers.
+  virtual void AddExtraRequestHeader(const std::string& header_line) = 0;
+
+  virtual void GetExtraRequestHeaders(
+      HttpRequestHeaders* headers) const = 0;
+
+  // Set the URLRequestContext on the request.  Must be called before the
+  // request is started.
+  virtual void SetRequestContext(
+      URLRequestContextGetter* request_context_getter) = 0;
+
+  // Set the URL that should be consulted for the third-party cookie
+  // blocking policy.
+  virtual void SetFirstPartyForCookies(
+      const GURL& first_party_for_cookies) = 0;
+
+  // Set the key and data callback that is used when setting the user
+  // data on any URLRequest objects this object creates.
+  virtual void SetURLRequestUserData(
+      const void* key,
+      const CreateDataCallback& create_data_callback) = 0;
+
+  // If |stop_on_redirect| is true, 3xx responses will cause the fetch to halt
+  // immediately rather than continue through the redirect.  OnURLFetchComplete
+  // will be called, with the URLFetcher's URL set to the redirect destination,
+  // its status set to CANCELED, and its response code set to the relevant 3xx
+  // server response code.
+  virtual void SetStopOnRedirect(bool stop_on_redirect) = 0;
+
+  // If |retry| is false, 5xx responses will be propagated to the observer,
+  // if it is true URLFetcher will automatically re-execute the request,
+  // after backoff_delay() elapses. URLFetcher has it set to true by default.
+  virtual void SetAutomaticallyRetryOn5xx(bool retry) = 0;
+
+  virtual void SetMaxRetriesOn5xx(int max_retries) = 0;
+  virtual int GetMaxRetriesOn5xx() const = 0;
+
+  // Returns the back-off delay before the request will be retried,
+  // when a 5xx response was received.
+  virtual base::TimeDelta GetBackoffDelay() const = 0;
+
+  // Retries up to |max_retries| times when requests fail with
+  // ERR_NETWORK_CHANGED. If ERR_NETWORK_CHANGED is received after having
+  // retried |max_retries| times then it is propagated to the observer.
+  virtual void SetAutomaticallyRetryOnNetworkChanges(int max_retries) = 0;
+
+  // By default, the response is saved in a string. Call this method to save the
+  // response to a file instead. Must be called before Start().
+  // |file_task_runner| will be used for all file operations.
+  // To save to a temporary file, use SaveResponseToTemporaryFile().
+  // The created file is removed when the URLFetcher is deleted unless you
+  // take ownership by calling GetResponseAsFilePath().
+  virtual void SaveResponseToFileAtPath(
+      const FilePath& file_path,
+      scoped_refptr<base::TaskRunner> file_task_runner) = 0;
+
+  // By default, the response is saved in a string. Call this method to save the
+  // response to a temporary file instead. Must be called before Start().
+  // |file_task_runner| will be used for all file operations.
+  // The created file is removed when the URLFetcher is deleted unless you
+  // take ownership by calling GetResponseAsFilePath().
+  virtual void SaveResponseToTemporaryFile(
+      scoped_refptr<base::TaskRunner> file_task_runner) = 0;
+
+#if defined(COBALT)
+  // By default, the response is saved in a string. Call this method to
+  // discard the response instead. Must be called before Start().
+  // This may be useful if your delegate will be implementing
+  // ShouldSendDownloadData() and buffering it internally.
+  virtual void DiscardResponse() = 0;
+#endif
+
+  // Retrieve the response headers from the request.  Must only be called
+  // inside the OnURLFetchReceivedResponseHeaders callback or after the
+  // OnURLFetchComplete callback is called.
+  virtual HttpResponseHeaders* GetResponseHeaders() const = 0;
+
+  // Retrieve the remote socket address from the request.  Must only
+  // be called after the OnURLFetchComplete callback has run and if
+  // the request has not failed.
+  virtual HostPortPair GetSocketAddress() const = 0;
+
+  // Returns true if the request was delivered through a proxy.  Must only
+  // be called after the OnURLFetchComplete callback has run and the request
+  // has not failed.
+  virtual bool WasFetchedViaProxy() const = 0;
+
+  // Start the request.  After this is called, you may not change any other
+  // settings.
+  virtual void Start() = 0;
+
+  // Return the URL that we were asked to fetch.
+  virtual const GURL& GetOriginalURL() const = 0;
+
+  // Return the URL that this fetcher is processing.
+  virtual const GURL& GetURL() const = 0;
+
+  // The status of the URL fetch.
+  virtual const URLRequestStatus& GetStatus() const = 0;
+
+  // The http response code received. Will return RESPONSE_CODE_INVALID
+  // if an error prevented any response from being received.
+  virtual int GetResponseCode() const = 0;
+
+  // Cookies received.
+  virtual const ResponseCookies& GetCookies() const = 0;
+
+  // Return true if any file system operation failed.  If so, set |error_code|
+  // to the error code. File system errors are only possible if user called
+  // SaveResponseToTemporaryFile().
+  virtual bool FileErrorOccurred(
+      base::PlatformFileError* out_error_code) const = 0;
+
+  // Reports that the received content was malformed.
+  virtual void ReceivedContentWasMalformed() = 0;
+
+  // Get the response as a string. Return false if the fetcher was not
+  // set to store the response as a string.
+  virtual bool GetResponseAsString(std::string* out_response_string) const = 0;
+
+  // Get the path to the file containing the response body. Returns false
+  // if the response body was not saved to a file. If take_ownership is
+  // true, caller takes responsibility for the file, and it will not
+  // be removed once the URLFetcher is destroyed.  User should not take
+  // ownership more than once, or call this method after taking ownership.
+  virtual bool GetResponseAsFilePath(bool take_ownership,
+                                     FilePath* out_response_path) const = 0;
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_FETCHER_H_
diff --git a/src/net/url_request/url_fetcher_core.cc b/src/net/url_request/url_fetcher_core.cc
new file mode 100644
index 0000000..2097903
--- /dev/null
+++ b/src/net/url_request/url_fetcher_core.cc
@@ -0,0 +1,1123 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_fetcher_core.h"
+
+#include "base/bind.h"
+#include "base/file_util_proxy.h"
+#include "base/logging.h"
+#include "base/single_thread_task_runner.h"
+#include "base/metrics/histogram.h"
+#include "base/stl_util.h"
+#include "base/thread_task_runner_handle.h"
+#include "base/tracked_objects.h"
+#include "net/base/io_buffer.h"
+#include "net/base/load_flags.h"
+#include "net/base/net_errors.h"
+#include "net/base/upload_bytes_element_reader.h"
+#include "net/base/upload_data_stream.h"
+#include "net/http/http_response_headers.h"
+#include "net/url_request/url_fetcher_delegate.h"
+#include "net/url_request/url_request_context.h"
+#include "net/url_request/url_request_context_getter.h"
+#include "net/url_request/url_request_throttler_manager.h"
+
+namespace {
+
+// Size in bytes of the buffer each network read fills.
+const int kBufferSize = 16384;
+// Presumably the capacity of the download data cache; not referenced in the
+// visible portion of this file -- TODO confirm against the header.
+const int kDownloadCacheSize = 65536;
+// Poll interval in milliseconds for the upload-progress timer (POST/PUT).
+const int kUploadProgressTimerInterval = 100;
+// When true, requests do not set LOAD_DISABLE_INTERCEPT, so tests can
+// intercept them (see SetEnableInterceptionForTests()).
+bool g_interception_enabled = false;
+// When true, client certificate challenges proceed with no certificate
+// instead of cancelling the request (see SetIgnoreCertificateRequests()).
+bool g_ignore_certificate_requests = false;
+
+}  // namespace
+
+namespace net {
+
+// URLFetcherCore::Registry ---------------------------------------------------
+
+URLFetcherCore::Registry::Registry() {}
+URLFetcherCore::Registry::~Registry() {}
+
+// Track a live core so that CancelAll() can reach it. Each core must be
+// added at most once.
+void URLFetcherCore::Registry::AddURLFetcherCore(URLFetcherCore* core) {
+  DCHECK(!ContainsKey(fetchers_, core));
+  fetchers_.insert(core);
+}
+
+// Remove a core that was previously registered with AddURLFetcherCore().
+void URLFetcherCore::Registry::RemoveURLFetcherCore(URLFetcherCore* core) {
+  DCHECK(ContainsKey(fetchers_, core));
+  fetchers_.erase(core);
+}
+
+// Cancel every registered core. CancelURLRequest() ends up removing the
+// core from |fetchers_| (via ReleaseRequest), so the loop re-reads begin()
+// each iteration rather than using an iterator that would be invalidated.
+void URLFetcherCore::Registry::CancelAll() {
+  while (!fetchers_.empty())
+    (*fetchers_.begin())->CancelURLRequest();
+}
+
+
+// URLFetcherCore::FileWriter -------------------------------------------------
+
+// Helper that streams the response body to a file. File operations run on
+// |file_task_runner|; callbacks come back to |core|'s network thread via
+// |weak_factory_|.
+URLFetcherCore::FileWriter::FileWriter(
+    URLFetcherCore* core,
+    scoped_refptr<base::TaskRunner> file_task_runner)
+    : core_(core),
+      error_code_(base::PLATFORM_FILE_OK),
+      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)),
+      file_task_runner_(file_task_runner),
+      file_handle_(base::kInvalidPlatformFileValue) {
+}
+
+// Deletes any file still owned by this writer; DisownFile() must have been
+// called if the caller wants to keep the file.
+URLFetcherCore::FileWriter::~FileWriter() {
+  CloseAndDeleteFile();
+}
+
+// Asynchronously create (or truncate) the destination file at |file_path|.
+// DidCreateFile() is invoked on completion.
+void URLFetcherCore::FileWriter::CreateFileAtPath(
+    const FilePath& file_path) {
+  DCHECK(core_->network_task_runner_->BelongsToCurrentThread());
+  DCHECK(file_task_runner_.get());
+  base::FileUtilProxy::CreateOrOpen(
+      file_task_runner_,
+      file_path,
+      base::PLATFORM_FILE_CREATE_ALWAYS | base::PLATFORM_FILE_WRITE,
+      base::Bind(&URLFetcherCore::FileWriter::DidCreateFile,
+                 weak_factory_.GetWeakPtr(),
+                 file_path));
+}
+
+// Asynchronously create a uniquely-named temporary file. DidCreateTempFile()
+// is invoked on completion.
+void URLFetcherCore::FileWriter::CreateTempFile() {
+  DCHECK(core_->network_task_runner_->BelongsToCurrentThread());
+  DCHECK(file_task_runner_.get());
+  base::FileUtilProxy::CreateTemporary(
+      file_task_runner_,
+      0,  // No additional file flags.
+      base::Bind(&URLFetcherCore::FileWriter::DidCreateTempFile,
+                 weak_factory_.GetWeakPtr()));
+}
+
+// Begin writing the first |num_bytes| of |core_->buffer_| to the file.
+// The actual write is kicked off by the ContinueWrite() call below with a
+// zero-byte "previous write".
+void URLFetcherCore::FileWriter::WriteBuffer(int num_bytes) {
+  DCHECK(core_->network_task_runner_->BelongsToCurrentThread());
+
+  // Start writing to the file by setting the initial state
+  // of |pending_bytes_| and |buffer_offset_| to indicate that the
+  // entire buffer has not yet been written.
+  pending_bytes_ = num_bytes;
+  buffer_offset_ = 0;
+  ContinueWrite(base::PLATFORM_FILE_OK, 0);
+}
+
+// Completion callback for each asynchronous file write. Accounts for the
+// bytes written, then either issues another write for the remainder of the
+// buffer or resumes reading from the network.
+void URLFetcherCore::FileWriter::ContinueWrite(
+    base::PlatformFileError error_code,
+    int bytes_written) {
+  DCHECK(core_->network_task_runner_->BelongsToCurrentThread());
+
+  if (file_handle_ == base::kInvalidPlatformFileValue) {
+    // While a write was being done on the file thread, a request
+    // to close or disown the file occurred on the IO thread.  At
+    // this point a request to close the file is pending on the
+    // file thread.
+    return;
+  }
+
+  // Every code path that resets |core_->request_| should reset
+  // |core->file_writer_| or cause the file writer to disown the file.  In the
+  // former case, this callback can not be called, because the weak pointer to
+  // |this| will be NULL. In the latter case, the check of |file_handle_| at the
+  // start of this method ensures that we can not reach this point.
+  CHECK(core_->request_.get());
+
+  if (base::PLATFORM_FILE_OK != error_code) {
+    // A write failed: record the error, discard the partial file, and tell
+    // the delegate (on its own thread) that the fetch is over.
+    error_code_ = error_code;
+    CloseAndDeleteFile();
+    core_->delegate_task_runner_->PostTask(
+        FROM_HERE,
+        base::Bind(&URLFetcherCore::InformDelegateFetchIsComplete, core_));
+    return;
+  }
+
+  total_bytes_written_ += bytes_written;
+  buffer_offset_ += bytes_written;
+  pending_bytes_ -= bytes_written;
+
+  if (pending_bytes_ > 0) {
+    // Partial write: issue another asynchronous write for the rest of the
+    // buffer, starting where the previous write left off.
+    base::FileUtilProxy::Write(
+        file_task_runner_, file_handle_,
+        total_bytes_written_,  // Append to the end
+        (core_->buffer_->data() + buffer_offset_), pending_bytes_,
+        base::Bind(&URLFetcherCore::FileWriter::ContinueWrite,
+                   weak_factory_.GetWeakPtr()));
+  } else {
+    // Finished writing core_->buffer_ to the file. Read some more.
+    core_->ReadResponse();
+  }
+}
+
+// Relinquish ownership of the on-disk file so it survives this writer's
+// destruction. The caller becomes responsible for deleting it.
+void URLFetcherCore::FileWriter::DisownFile() {
+  DCHECK(core_->network_task_runner_->BelongsToCurrentThread());
+
+  // Disowning is done by the delegate's OnURLFetchComplete method.
+  // The file should be closed by the time that method is called.
+  DCHECK(file_handle_ == base::kInvalidPlatformFileValue);
+
+  // Forget about any file by resetting the path.
+  file_path_.clear();
+}
+
+// Close the file (keeping it on disk); DidCloseFile() then finishes or
+// retries the URL fetch.
+void URLFetcherCore::FileWriter::CloseFileAndCompleteRequest() {
+  DCHECK(core_->network_task_runner_->BelongsToCurrentThread());
+
+  if (file_handle_ != base::kInvalidPlatformFileValue) {
+    base::FileUtilProxy::Close(
+        file_task_runner_, file_handle_,
+        base::Bind(&URLFetcherCore::FileWriter::DidCloseFile,
+                   weak_factory_.GetWeakPtr()));
+    // Mark the handle invalid immediately so in-flight ContinueWrite()
+    // callbacks become no-ops while the close is pending.
+    file_handle_ = base::kInvalidPlatformFileValue;
+  }
+}
+
+// Close the file if open, then delete it from disk (error/cleanup path).
+void URLFetcherCore::FileWriter::CloseAndDeleteFile() {
+  DCHECK(core_->network_task_runner_->BelongsToCurrentThread());
+
+  if (file_handle_ == base::kInvalidPlatformFileValue) {
+    DeleteFile(base::PLATFORM_FILE_OK);
+    return;
+  }
+  // Close the file if it is open.
+  base::FileUtilProxy::Close(
+      file_task_runner_, file_handle_,
+      base::Bind(&URLFetcherCore::FileWriter::DeleteFile,
+                 weak_factory_.GetWeakPtr()));
+  file_handle_ = base::kInvalidPlatformFileValue;
+}
+
+// Delete the file at |file_path_| (fire-and-forget) and forget the path.
+// |error_code| is the result of the preceding close and is ignored here.
+void URLFetcherCore::FileWriter::DeleteFile(
+    base::PlatformFileError error_code) {
+  DCHECK(core_->network_task_runner_->BelongsToCurrentThread());
+  if (file_path_.empty())
+    return;
+
+  base::FileUtilProxy::Delete(
+      file_task_runner_, file_path_,
+      false,  // No need to recurse, as the path is to a file.
+      base::FileUtilProxy::StatusCallback());
+  DisownFile();
+}
+
+// Completion callback for CreateFileAtPath(). |created| is unused; the
+// shared logic lives in DidCreateFileInternal().
+void URLFetcherCore::FileWriter::DidCreateFile(
+    const FilePath& file_path,
+    base::PlatformFileError error_code,
+    base::PassPlatformFile file_handle,
+    bool created) {
+  DidCreateFileInternal(file_path, error_code, file_handle);
+}
+
+// Completion callback for CreateTempFile(). Note the argument order differs
+// from DidCreateFile() because FileUtilProxy reports the generated path last.
+void URLFetcherCore::FileWriter::DidCreateTempFile(
+    base::PlatformFileError error_code,
+    base::PassPlatformFile file_handle,
+    const FilePath& file_path) {
+  DidCreateFileInternal(file_path, error_code, file_handle);
+}
+
+// Shared tail of file creation: on failure, report to the delegate; on
+// success, take ownership of the handle and start the network request.
+void URLFetcherCore::FileWriter::DidCreateFileInternal(
+    const FilePath& file_path,
+    base::PlatformFileError error_code,
+    base::PassPlatformFile file_handle) {
+  DCHECK(core_->network_task_runner_->BelongsToCurrentThread());
+
+  if (base::PLATFORM_FILE_OK != error_code) {
+    error_code_ = error_code;
+    CloseAndDeleteFile();
+    core_->delegate_task_runner_->PostTask(
+        FROM_HERE,
+        base::Bind(&URLFetcherCore::InformDelegateFetchIsComplete, core_));
+    return;
+  }
+
+  file_path_ = file_path;
+  file_handle_ = file_handle.ReleaseValue();
+  total_bytes_written_ = 0;
+
+  // The file is ready; now the actual URL request may begin.
+  core_->network_task_runner_->PostTask(
+      FROM_HERE,
+      base::Bind(&URLFetcherCore::StartURLRequestWhenAppropriate, core_));
+}
+
+// Completion callback for CloseFileAndCompleteRequest(). A close failure is
+// treated like any other file error; success finishes (or retries) the fetch.
+void URLFetcherCore::FileWriter::DidCloseFile(
+    base::PlatformFileError error_code) {
+  DCHECK(core_->network_task_runner_->BelongsToCurrentThread());
+
+  if (base::PLATFORM_FILE_OK != error_code) {
+    error_code_ = error_code;
+    CloseAndDeleteFile();
+    core_->delegate_task_runner_->PostTask(
+        FROM_HERE,
+        base::Bind(&URLFetcherCore::InformDelegateFetchIsComplete, core_));
+    return;
+  }
+
+  // If the file was successfully closed, then the URL request is complete.
+  core_->RetryOrCompleteUrlFetch();
+}
+
+
+// URLFetcherCore -------------------------------------------------------------
+
+// Process-wide registry of live cores; lets CancelAll() cancel every
+// outstanding fetch (e.g. at shutdown). Lazily constructed.
+// static
+base::LazyInstance<URLFetcherCore::Registry>
+    URLFetcherCore::g_registry = LAZY_INSTANCE_INITIALIZER;
+
+// Constructed on the thread that owns |d| (the delegate); that thread's task
+// runner is captured here so completion callbacks can be posted back to it.
+// |original_url| must be valid.
+URLFetcherCore::URLFetcherCore(URLFetcher* fetcher,
+                               const GURL& original_url,
+                               URLFetcher::RequestType request_type,
+                               URLFetcherDelegate* d)
+    : fetcher_(fetcher),
+      original_url_(original_url),
+      request_type_(request_type),
+      delegate_(d),
+      delegate_task_runner_(
+          base::ThreadTaskRunnerHandle::Get()),
+      request_(NULL),
+      load_flags_(LOAD_NORMAL),
+      response_code_(URLFetcher::RESPONSE_CODE_INVALID),
+      buffer_(new IOBuffer(kBufferSize)),
+      url_request_data_key_(NULL),
+      was_fetched_via_proxy_(false),
+      is_chunked_upload_(false),
+      was_cancelled_(false),
+      response_destination_(STRING),  // Response accumulates in |data_| by default.
+      stop_on_redirect_(false),
+      stopped_on_redirect_(false),
+      automatically_retry_on_5xx_(true),
+      num_retries_on_5xx_(0),
+      max_retries_on_5xx_(0),
+      num_retries_on_network_changes_(0),
+      max_retries_on_network_changes_(0),
+      current_upload_bytes_(-1),
+      current_response_bytes_(0),
+      total_response_bytes_(-1) {
+  CHECK(original_url_.is_valid());
+}
+
+// Kick off the fetch. Resolves the network task runner from the request
+// context getter (which must have been set) and hops to the IO thread.
+void URLFetcherCore::Start() {
+  DCHECK(delegate_task_runner_);
+  DCHECK(request_context_getter_) << "We need an URLRequestContext!";
+  if (network_task_runner_) {
+    // A task runner was injected (e.g. by tests); it must match the one the
+    // request context actually uses.
+    DCHECK_EQ(network_task_runner_,
+              request_context_getter_->GetNetworkTaskRunner());
+  } else {
+    network_task_runner_ = request_context_getter_->GetNetworkTaskRunner();
+  }
+  DCHECK(network_task_runner_.get()) << "We need an IO task runner";
+
+  network_task_runner_->PostTask(
+      FROM_HERE, base::Bind(&URLFetcherCore::StartOnIOThread, this));
+}
+
+// Detach from the delegate/fetcher and cancel any in-flight request.
+// Called on the delegate thread; cancellation itself must happen on the
+// network thread, so it is posted there when necessary.
+void URLFetcherCore::Stop() {
+  if (delegate_task_runner_)  // May be NULL in tests.
+    DCHECK(delegate_task_runner_->BelongsToCurrentThread());
+
+  delegate_ = NULL;
+  fetcher_ = NULL;
+  if (!network_task_runner_.get())
+    return;
+  if (network_task_runner_->RunsTasksOnCurrentThread()) {
+    CancelURLRequest();
+  } else {
+    network_task_runner_->PostTask(
+        FROM_HERE, base::Bind(&URLFetcherCore::CancelURLRequest, this));
+  }
+}
+
+// Set a one-shot upload body. Mutually exclusive with chunked uploads.
+void URLFetcherCore::SetUploadData(const std::string& upload_content_type,
+                                   const std::string& upload_content) {
+  DCHECK(!is_chunked_upload_);
+  upload_content_type_ = upload_content_type;
+  upload_content_ = upload_content;
+}
+
+// Switch to chunked (streaming) upload mode. Any previously set one-shot
+// body must be empty; chunks are then fed via AppendChunkToUpload().
+void URLFetcherCore::SetChunkedUpload(const std::string& content_type) {
+  DCHECK(is_chunked_upload_ ||
+         (upload_content_type_.empty() &&
+          upload_content_.empty()));
+  upload_content_type_ = content_type;
+  upload_content_.clear();
+  is_chunked_upload_ = true;
+}
+
+// Queue one chunk of a chunked upload; the actual append runs on the
+// network thread via CompleteAddingUploadDataChunk().
+void URLFetcherCore::AppendChunkToUpload(const std::string& content,
+                                         bool is_last_chunk) {
+  DCHECK(delegate_task_runner_);
+  DCHECK(network_task_runner_.get());
+  network_task_runner_->PostTask(
+      FROM_HERE,
+      base::Bind(&URLFetcherCore::CompleteAddingUploadDataChunk, this, content,
+                 is_last_chunk));
+}
+
+// Simple pre-Start() configuration accessors. These are expected to be
+// called before the request is started; none of them is synchronized.
+
+void URLFetcherCore::SetLoadFlags(int load_flags) {
+  load_flags_ = load_flags;
+}
+
+int URLFetcherCore::GetLoadFlags() const {
+  return load_flags_;
+}
+
+void URLFetcherCore::SetReferrer(const std::string& referrer) {
+  referrer_ = referrer;
+}
+
+// Replace all extra headers with those parsed from |extra_request_headers|.
+void URLFetcherCore::SetExtraRequestHeaders(
+    const std::string& extra_request_headers) {
+  extra_request_headers_.Clear();
+  extra_request_headers_.AddHeadersFromString(extra_request_headers);
+}
+
+// Add a single "Name: value" header line to the extra headers.
+void URLFetcherCore::AddExtraRequestHeader(const std::string& header_line) {
+  extra_request_headers_.AddHeaderFromString(header_line);
+}
+
+void URLFetcherCore::GetExtraRequestHeaders(
+    HttpRequestHeaders* headers) const {
+  headers->CopyFrom(extra_request_headers_);
+}
+
+// Must be set exactly once, before Start().
+void URLFetcherCore::SetRequestContext(
+    URLRequestContextGetter* request_context_getter) {
+  DCHECK(!request_context_getter_);
+  DCHECK(request_context_getter);
+  request_context_getter_ = request_context_getter;
+}
+
+// Must be set at most once; defaults to |original_url_| (see StartURLRequest).
+void URLFetcherCore::SetFirstPartyForCookies(
+    const GURL& first_party_for_cookies) {
+  DCHECK(first_party_for_cookies_.is_empty());
+  first_party_for_cookies_ = first_party_for_cookies;
+}
+
+// Attach caller-supplied user data to the eventual URLRequest; the callback
+// creates the data object when the request is built.
+void URLFetcherCore::SetURLRequestUserData(
+    const void* key,
+    const URLFetcher::CreateDataCallback& create_data_callback) {
+  DCHECK(key);
+  DCHECK(!create_data_callback.is_null());
+  url_request_data_key_ = key;
+  url_request_create_data_callback_ = create_data_callback;
+}
+
+void URLFetcherCore::SetStopOnRedirect(bool stop_on_redirect) {
+  stop_on_redirect_ = stop_on_redirect;
+}
+
+void URLFetcherCore::SetAutomaticallyRetryOn5xx(bool retry) {
+  automatically_retry_on_5xx_ = retry;
+}
+
+void URLFetcherCore::SetMaxRetriesOn5xx(int max_retries) {
+  max_retries_on_5xx_ = max_retries;
+}
+
+int URLFetcherCore::GetMaxRetriesOn5xx() const {
+  return max_retries_on_5xx_;
+}
+
+// Backoff computed for the last completed attempt (see OnCompletedURLRequest).
+base::TimeDelta URLFetcherCore::GetBackoffDelay() const {
+  return backoff_delay_;
+}
+
+void URLFetcherCore::SetAutomaticallyRetryOnNetworkChanges(int max_retries) {
+  max_retries_on_network_changes_ = max_retries;
+}
+
+// Direct the response body to a file at a caller-chosen path. File I/O will
+// run on |file_task_runner|. Must be called on the delegate thread.
+void URLFetcherCore::SaveResponseToFileAtPath(
+    const FilePath& file_path,
+    scoped_refptr<base::TaskRunner> file_task_runner) {
+  DCHECK(delegate_task_runner_->BelongsToCurrentThread());
+  file_task_runner_ = file_task_runner;
+  response_destination_ = URLFetcherCore::PERMANENT_FILE;
+  response_destination_file_path_ = file_path;
+}
+
+// Direct the response body to an automatically created temporary file.
+void URLFetcherCore::SaveResponseToTemporaryFile(
+    scoped_refptr<base::TaskRunner> file_task_runner) {
+  DCHECK(delegate_task_runner_->BelongsToCurrentThread());
+  file_task_runner_ = file_task_runner;
+  response_destination_ = URLFetcherCore::TEMP_FILE;
+}
+
+#if defined(COBALT)
+// Cobalt extension: read and discard the response body without storing it.
+void URLFetcherCore::DiscardResponse() {
+  response_destination_ = URLFetcherCore::DISCARD;
+}
+#endif
+
+// Response accessors. Values are populated on the network thread as the
+// request progresses (see OnResponseStarted / OnReadCompleted).
+
+HttpResponseHeaders* URLFetcherCore::GetResponseHeaders() const {
+  return response_headers_;
+}
+
+// TODO(panayiotis): socket_address_ is written in the IO thread,
+// if this is accessed in the UI thread, this could result in a race.
+// Same for response_headers_ above and was_fetched_via_proxy_ below.
+HostPortPair URLFetcherCore::GetSocketAddress() const {
+  return socket_address_;
+}
+
+bool URLFetcherCore::WasFetchedViaProxy() const {
+  return was_fetched_via_proxy_;
+}
+
+// The URL originally passed to the constructor, before any redirects.
+const GURL& URLFetcherCore::GetOriginalURL() const {
+  return original_url_;
+}
+
+// The most recent URL, updated as redirects are followed.
+const GURL& URLFetcherCore::GetURL() const {
+  return url_;
+}
+
+const URLRequestStatus& URLFetcherCore::GetStatus() const {
+  return status_;
+}
+
+// RESPONSE_CODE_INVALID until headers arrive (or on failure).
+int URLFetcherCore::GetResponseCode() const {
+  return response_code_;
+}
+
+const ResponseCookies& URLFetcherCore::GetCookies() const {
+  return cookies_;
+}
+
+// Report whether writing the response to a file failed. Returns true and
+// stores the failure in |*out_error_code| only when a file writer exists and
+// recorded an error; |*out_error_code| is untouched otherwise. File errors
+// can only occur when the response is being saved to a file.
+bool URLFetcherCore::FileErrorOccurred(
+    base::PlatformFileError* out_error_code) const {
+
+  // No file writer means no file operations were ever attempted.
+  if (!file_writer_.get())
+    return false;
+
+  const base::PlatformFileError last_error = file_writer_->error_code();
+  if (last_error != base::PLATFORM_FILE_OK) {
+    *out_error_code = last_error;
+    return true;
+  }
+  return false;
+}
+
+// Called by the delegate (on its own thread) when the fetched content turned
+// out to be garbage; forwards to the throttler on the network thread so the
+// URL's backoff state can be bumped.
+void URLFetcherCore::ReceivedContentWasMalformed() {
+  DCHECK(delegate_task_runner_->BelongsToCurrentThread());
+  if (network_task_runner_.get()) {
+    network_task_runner_->PostTask(
+        FROM_HERE, base::Bind(&URLFetcherCore::NotifyMalformedContent, this));
+  }
+}
+
+// Copy the accumulated response body into |*out_response_string|. Only valid
+// when the destination is STRING (the default); returns false otherwise.
+bool URLFetcherCore::GetResponseAsString(
+    std::string* out_response_string) const {
+  if (response_destination_ != URLFetcherCore::STRING)
+    return false;
+
+  *out_response_string = data_;
+  UMA_HISTOGRAM_MEMORY_KB("UrlFetcher.StringResponseSize",
+                          (data_.length() / 1024));
+
+  return true;
+}
+
+// Return the path of the file holding the response body. Only valid when the
+// destination is a (temp or permanent) file. If |take_ownership| is true the
+// writer disowns the file (asynchronously, on the network thread) so it is
+// not deleted with the fetcher.
+bool URLFetcherCore::GetResponseAsFilePath(bool take_ownership,
+                                           FilePath* out_response_path) {
+  DCHECK(delegate_task_runner_->BelongsToCurrentThread());
+  const bool destination_is_file =
+      response_destination_ == URLFetcherCore::TEMP_FILE ||
+      response_destination_ == URLFetcherCore::PERMANENT_FILE;
+  if (!destination_is_file || !file_writer_.get())
+    return false;
+
+  *out_response_path = file_writer_->file_path();
+
+  if (take_ownership) {
+    network_task_runner_->PostTask(
+        FROM_HERE,
+        base::Bind(&URLFetcherCore::DisownFile, this));
+  }
+  return true;
+}
+
+// URLRequest::Delegate override. When configured to stop on redirects,
+// captures the redirect target and response details, cancels the request,
+// and drives completion through OnReadCompleted(). Otherwise the redirect is
+// followed automatically by the network stack.
+void URLFetcherCore::OnReceivedRedirect(URLRequest* request,
+                                        const GURL& new_url,
+                                        bool* defer_redirect) {
+  DCHECK_EQ(request, request_.get());
+  DCHECK(network_task_runner_->BelongsToCurrentThread());
+  if (stop_on_redirect_) {
+    stopped_on_redirect_ = true;
+    url_ = new_url;
+    response_code_ = request_->GetResponseCode();
+    was_fetched_via_proxy_ = request_->was_fetched_via_proxy();
+    request->Cancel();
+    OnReadCompleted(request, 0);
+  }
+}
+
+// URLRequest::Delegate override. Headers are in; snapshot the response
+// metadata and begin reading the body.
+void URLFetcherCore::OnResponseStarted(URLRequest* request) {
+  DCHECK_EQ(request, request_.get());
+  DCHECK(network_task_runner_->BelongsToCurrentThread());
+  if (request_->status().is_success()) {
+    response_code_ = request_->GetResponseCode();
+    response_headers_ = request_->response_headers();
+    socket_address_ = request_->GetSocketAddress();
+    was_fetched_via_proxy_ = request_->was_fetched_via_proxy();
+    total_response_bytes_ = request_->GetExpectedContentSize();
+
+#if defined(COBALT)
+    // We update this earlier than OnReadCompleted(), so that the delegate
+    // can know about it if they call GetURL() in any callback.
+    if (!stopped_on_redirect_) {
+      url_ = request_->url();
+    }
+    InformDelegateResponseStarted();
+#endif  // defined(COBALT)
+  }
+
+  // Proceed to read the body (or finish immediately on failure/HEAD).
+  ReadResponse();
+}
+
+// URLRequest::Delegate override, invoked when the server asks for a client
+// certificate. URLFetcher has no way to supply one, so by default the fetch
+// is aborted; tests may instead opt to continue with no certificate (see
+// SetIgnoreCertificateRequests()).
+void URLFetcherCore::OnCertificateRequested(
+    URLRequest* request,
+    SSLCertRequestInfo* cert_request_info) {
+  DCHECK_EQ(request, request_.get());
+  DCHECK(network_task_runner_->BelongsToCurrentThread());
+
+  if (!g_ignore_certificate_requests) {
+    request->Cancel();
+    return;
+  }
+  // Test mode: proceed with the handshake, presenting no certificate.
+  request->ContinueWithCertificate(NULL);
+}
+
+// URLRequest::Delegate override. Drains as much body data as is available
+// synchronously, writing each chunk to the configured destination, then
+// finishes the fetch when the request is neither pending I/O nor waiting on
+// an asynchronous file write.
+void URLFetcherCore::OnReadCompleted(URLRequest* request,
+                                     int bytes_read) {
+  DCHECK(request == request_);
+  DCHECK(network_task_runner_->BelongsToCurrentThread());
+#if !defined(COBALT)
+  // (In Cobalt builds this is done earlier, in OnResponseStarted().)
+  if (!stopped_on_redirect_) {
+    url_ = request->url();
+  }
+#endif
+  URLRequestThrottlerManager* throttler_manager =
+      request->context()->throttler_manager();
+  if (throttler_manager) {
+    url_throttler_entry_ = throttler_manager->RegisterRequestUrl(url_);
+  }
+
+  download_data_cache_.reset();
+  bool waiting_on_write = false;
+  do {
+    if (!request_->status().is_success() || bytes_read <= 0)
+      break;
+
+    current_response_bytes_ += bytes_read;
+    InformDelegateDownloadDataIfNecessary(bytes_read);
+
+    if (!WriteBuffer(bytes_read)) {
+      // If WriteBuffer() returns false, we have a pending write to
+      // wait on before reading further.
+      waiting_on_write = true;
+      break;
+    }
+  } while (request_->Read(buffer_, kBufferSize, &bytes_read));
+  // Flush any buffered download-progress notification to the delegate.
+  InformDelegateDownloadData();
+
+  const URLRequestStatus status = request_->status();
+
+  if (status.is_success())
+    request_->GetResponseCookies(&cookies_);
+
+  // See comments re: HEAD requests in ReadResponse().
+  if ((!status.is_io_pending() && !waiting_on_write) ||
+      (request_type_ == URLFetcher::HEAD)) {
+    status_ = status;
+    ReleaseRequest();
+
+    // If a file is open, close it.
+    if (file_writer_.get()) {
+      // If the file is open, close it.  After closing the file,
+      // RetryOrCompleteUrlFetch() will be called.
+      file_writer_->CloseFileAndCompleteRequest();
+    } else {
+      // Otherwise, complete or retry the URL request directly.
+      RetryOrCompleteUrlFetch();
+    }
+  }
+}
+
+// Cancel every outstanding fetch in the process (via the global registry).
+void URLFetcherCore::CancelAll() {
+  g_registry.Get().CancelAll();
+}
+
+// Number of cores currently registered (i.e. with an active request).
+int URLFetcherCore::GetNumFetcherCores() {
+  return g_registry.Get().size();
+}
+
+// Test hook: allow request interception (clears LOAD_DISABLE_INTERCEPT).
+void URLFetcherCore::SetEnableInterceptionForTests(bool enabled) {
+  g_interception_enabled = enabled;
+}
+
+// Test hook: continue TLS client-cert challenges with no certificate.
+void URLFetcherCore::SetIgnoreCertificateRequests(bool ignored) {
+  g_ignore_certificate_requests = ignored;
+}
+
+URLFetcherCore::~URLFetcherCore() {
+  // |request_| should be NULL.  If not, it's unsafe to delete it here since we
+  // may not be on the IO thread.
+  DCHECK(!request_.get());
+}
+
+// First step on the network thread. For string (and Cobalt DISCARD)
+// destinations the request starts immediately; for file destinations the
+// destination file is created first and the request starts from the file
+// writer's completion callback.
+void URLFetcherCore::StartOnIOThread() {
+  DCHECK(network_task_runner_->BelongsToCurrentThread());
+
+  switch (response_destination_) {
+    case STRING:
+      StartURLRequestWhenAppropriate();
+      break;
+
+    case PERMANENT_FILE:
+    case TEMP_FILE:
+      DCHECK(file_task_runner_.get())
+          << "Need to set the file task runner.";
+
+      file_writer_.reset(new FileWriter(this, file_task_runner_));
+
+      // If the file is successfully created,
+      // URLFetcherCore::StartURLRequestWhenAppropriate() will be called.
+      switch (response_destination_) {
+        case PERMANENT_FILE:
+          file_writer_->CreateFileAtPath(response_destination_file_path_);
+          break;
+        case TEMP_FILE:
+          file_writer_->CreateTempFile();
+          break;
+        default:
+          NOTREACHED();
+      }
+      break;
+
+#if defined(COBALT)
+    case DISCARD:
+      StartURLRequestWhenAppropriate();
+      break;
+#endif
+
+    default:
+      NOTREACHED();
+  }
+}
+
+// Build and start the underlying URLRequest: registers this core, applies
+// load flags / referrer / cookies policy / user data, configures the method
+// and upload body per |request_type_|, attaches extra headers, and starts.
+void URLFetcherCore::StartURLRequest() {
+  DCHECK(network_task_runner_->BelongsToCurrentThread());
+
+  if (was_cancelled_) {
+    // Since StartURLRequest() is posted as a *delayed* task, it may
+    // run after the URLFetcher was already stopped.
+    return;
+  }
+
+  DCHECK(request_context_getter_);
+  DCHECK(!request_.get());
+
+  g_registry.Get().AddURLFetcherCore(this);
+  current_response_bytes_ = 0;
+  request_.reset(request_context_getter_->GetURLRequestContext()->CreateRequest(
+      original_url_, this));
+  request_->set_stack_trace(stack_trace_);
+  int flags = request_->load_flags() | load_flags_;
+  // Interception is only allowed when a test explicitly enables it.
+  if (!g_interception_enabled)
+    flags = flags | LOAD_DISABLE_INTERCEPT;
+
+  if (is_chunked_upload_)
+    request_->EnableChunkedUpload();
+  request_->set_load_flags(flags);
+  request_->set_referrer(referrer_);
+  request_->set_first_party_for_cookies(first_party_for_cookies_.is_empty() ?
+      original_url_ : first_party_for_cookies_);
+  if (url_request_data_key_ && !url_request_create_data_callback_.is_null()) {
+    request_->SetUserData(url_request_data_key_,
+                          url_request_create_data_callback_.Run());
+  }
+
+  switch (request_type_) {
+    case URLFetcher::GET:
+      break;
+
+    case URLFetcher::POST:
+    case URLFetcher::PUT:
+#if !defined(COBALT)
+      // Allow not specifying a content type when uploading ArrayBuffer data.
+      DCHECK(!upload_content_type_.empty());
+#endif
+      request_->set_method(
+          request_type_ == URLFetcher::POST ? "POST" : "PUT");
+      if (!upload_content_type_.empty()) {
+        extra_request_headers_.SetHeader(HttpRequestHeaders::kContentType,
+                                         upload_content_type_);
+      }
+      if (!upload_content_.empty()) {
+        scoped_ptr<UploadElementReader> reader(new UploadBytesElementReader(
+            upload_content_.data(), upload_content_.size()));
+        request_->set_upload(make_scoped_ptr(
+            UploadDataStream::CreateWithReader(reader.Pass(), 0)));
+      }
+
+      current_upload_bytes_ = -1;
+      // TODO(kinaba): http://crbug.com/118103. Implement upload callback in the
+      // network layer and avoid using a timer here.
+      upload_progress_checker_timer_.reset(
+          new base::RepeatingTimer<URLFetcherCore>());
+      upload_progress_checker_timer_->Start(
+          FROM_HERE,
+          base::TimeDelta::FromMilliseconds(kUploadProgressTimerInterval),
+          this,
+          &URLFetcherCore::InformDelegateUploadProgress);
+      break;
+
+    case URLFetcher::HEAD:
+      request_->set_method("HEAD");
+      break;
+
+    case URLFetcher::DELETE_REQUEST:
+      request_->set_method("DELETE");
+      break;
+
+    default:
+      NOTREACHED();
+  }
+
+  if (!extra_request_headers_.IsEmpty())
+    request_->SetExtraRequestHeaders(extra_request_headers_);
+
+  // There might be data left over from a previous request attempt.
+  data_.clear();
+
+  // If we are writing the response to a file, the only caller
+  // of this function should have created it and not written yet.
+  DCHECK(!file_writer_.get() || file_writer_->total_bytes_written() == 0);
+
+  request_->Start();
+}
+
+// Consult the URL throttler before starting: start immediately if the URL is
+// not under backoff, otherwise post StartURLRequest() after the reserved
+// delay (in milliseconds).
+void URLFetcherCore::StartURLRequestWhenAppropriate() {
+  DCHECK(network_task_runner_->BelongsToCurrentThread());
+
+  if (was_cancelled_)
+    return;
+
+  DCHECK(request_context_getter_);
+
+  int64 delay = 0LL;
+  if (original_url_throttler_entry_ == NULL) {
+    URLRequestThrottlerManager* manager =
+        request_context_getter_->GetURLRequestContext()->throttler_manager();
+    if (manager) {
+      original_url_throttler_entry_ =
+          manager->RegisterRequestUrl(original_url_);
+    }
+  }
+  if (original_url_throttler_entry_ != NULL) {
+    delay = original_url_throttler_entry_->ReserveSendingTimeForNextRequest(
+        GetBackoffReleaseTime());
+  }
+
+  if (delay == 0) {
+    StartURLRequest();
+  } else {
+    base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+        FROM_HERE, base::Bind(&URLFetcherCore::StartURLRequest, this),
+        base::TimeDelta::FromMilliseconds(delay));
+  }
+}
+
+// Cancel the in-flight request (if any) and drop every reference that could
+// keep the request context alive. Runs on the network thread; also marks the
+// core cancelled so delayed start tasks become no-ops.
+void URLFetcherCore::CancelURLRequest() {
+  DCHECK(network_task_runner_->BelongsToCurrentThread());
+
+  if (request_.get()) {
+    request_->Cancel();
+    ReleaseRequest();
+  }
+  // Release the reference to the request context. There could be multiple
+  // references to URLFetcher::Core at this point so it may take a while to
+  // delete the object, but we cannot delay the destruction of the request
+  // context.
+  request_context_getter_ = NULL;
+  first_party_for_cookies_ = GURL();
+  url_request_data_key_ = NULL;
+  url_request_create_data_callback_.Reset();
+  was_cancelled_ = true;
+  // Destroying the file writer deletes any partially-written file.
+  file_writer_.reset();
+}
+
+// Runs on the delegate thread after the network side finishes. Records the
+// backoff delay (readable via GetBackoffDelay()) and notifies the delegate,
+// unless Stop() already detached it.
+void URLFetcherCore::OnCompletedURLRequest(
+    base::TimeDelta backoff_delay) {
+  DCHECK(delegate_task_runner_->BelongsToCurrentThread());
+
+  // Save the status and backoff_delay so that delegates can read it.
+  if (delegate_) {
+    backoff_delay_ = backoff_delay;
+    InformDelegateFetchIsComplete();
+  }
+}
+
+// Final delegate notification; |delegate_| may be NULL if Stop() ran first.
+void URLFetcherCore::InformDelegateFetchIsComplete() {
+  DCHECK(delegate_task_runner_->BelongsToCurrentThread());
+  if (delegate_)
+    delegate_->OnURLFetchComplete(fetcher_);
+}
+
+// Network-thread half of ReceivedContentWasMalformed(): report the bad
+// payload to the throttler so the URL's exponential backoff increases.
+void URLFetcherCore::NotifyMalformedContent() {
+  DCHECK(network_task_runner_->BelongsToCurrentThread());
+  if (url_throttler_entry_ != NULL) {
+    int status_code = response_code_;
+    if (status_code == URLFetcher::RESPONSE_CODE_INVALID) {
+      // The status code will generally be known by the time clients
+      // call the |ReceivedContentWasMalformed()| function (which ends up
+      // calling the current function) but if it's not, we need to assume
+      // the response was successful so that the total failure count
+      // used to calculate exponential back-off goes up.
+      status_code = 200;
+    }
+    url_throttler_entry_->ReceivedContentWasMalformed(status_code);
+  }
+}
+
+// Decide what to do with a finished attempt: retry after backoff on 5xx /
+// throttling (up to |max_retries_on_5xx_|), retry immediately on network
+// change (up to |max_retries_on_network_changes_|), or hand completion to
+// the delegate thread.
+void URLFetcherCore::RetryOrCompleteUrlFetch() {
+  DCHECK(network_task_runner_->BelongsToCurrentThread());
+  base::TimeDelta backoff_delay;
+
+  // Checks the response from server.
+  if (response_code_ >= 500 ||
+      status_.error() == ERR_TEMPORARILY_THROTTLED) {
+    // When encountering a server error, we will send the request again
+    // after backoff time.
+    ++num_retries_on_5xx_;
+
+    // Note that backoff_delay may be 0 because (a) the
+    // URLRequestThrottlerManager and related code does not
+    // necessarily back off on the first error, (b) it only backs off
+    // on some of the 5xx status codes, (c) not all URLRequestContexts
+    // have a throttler manager.
+    base::TimeTicks backoff_release_time = GetBackoffReleaseTime();
+    backoff_delay = backoff_release_time - base::TimeTicks::Now();
+    if (backoff_delay < base::TimeDelta())
+      backoff_delay = base::TimeDelta();
+
+    if (automatically_retry_on_5xx_ &&
+        num_retries_on_5xx_ <= max_retries_on_5xx_) {
+      // Restart from the top; the throttler applies the delay when the
+      // request is actually (re)started.
+      StartOnIOThread();
+      return;
+    }
+  } else {
+    backoff_delay = base::TimeDelta();
+  }
+
+  // Retry if the request failed due to network changes.
+  if (status_.error() == ERR_NETWORK_CHANGED &&
+      num_retries_on_network_changes_ < max_retries_on_network_changes_) {
+    ++num_retries_on_network_changes_;
+
+    // Retry soon, after flushing all the current tasks which may include
+    // further network change observers.
+    network_task_runner_->PostTask(
+        FROM_HERE, base::Bind(&URLFetcherCore::StartOnIOThread, this));
+    return;
+  }
+
+  // No retry: release context references (see CancelURLRequest for why this
+  // must not be delayed) and notify the delegate on its own thread.
+  request_context_getter_ = NULL;
+  first_party_for_cookies_ = GURL();
+  url_request_data_key_ = NULL;
+  url_request_create_data_callback_.Reset();
+  bool posted = delegate_task_runner_->PostTask(
+      FROM_HERE,
+      base::Bind(&URLFetcherCore::OnCompletedURLRequest, this, backoff_delay));
+
+  // If the delegate message loop does not exist any more, then the delegate
+  // should be gone too.
+  DCHECK(posted || !delegate_);
+}
+
+// Tear down the URLRequest and unregister from the global registry. In
+// Cobalt builds, a final upload-progress notification is forced first.
+void URLFetcherCore::ReleaseRequest() {
+#if defined(COBALT)
+  if (upload_progress_checker_timer_) {
+    // The request may have completed too quickly, before the upload
+    // progress checker had a chance to run. Force it to run here.
+    InformDelegateUploadProgress();
+  }
+#endif
+
+  upload_progress_checker_timer_.reset();
+  request_.reset();
+  g_registry.Get().RemoveURLFetcherCore(this);
+}
+
+// Earliest time the next request may be sent, honoring the throttler state
+// of both the original URL and (when different) the redirect destination.
+// Returns the null TimeTicks when no throttler entry exists for the
+// original URL.
+base::TimeTicks URLFetcherCore::GetBackoffReleaseTime() {
+  DCHECK(network_task_runner_->BelongsToCurrentThread());
+
+  if (!original_url_throttler_entry_)
+    return base::TimeTicks();
+
+  base::TimeTicks original_url_backoff =
+      original_url_throttler_entry_->GetExponentialBackoffReleaseTime();
+  base::TimeTicks destination_url_backoff;
+  if (url_throttler_entry_ != NULL &&
+      original_url_throttler_entry_ != url_throttler_entry_) {
+    destination_url_backoff =
+        url_throttler_entry_->GetExponentialBackoffReleaseTime();
+  }
+
+  // Whichever backoff expires later wins.
+  return destination_url_backoff > original_url_backoff ?
+      destination_url_backoff : original_url_backoff;
+}
+
+// Network-thread half of AppendChunkToUpload(): feed one non-empty chunk to
+// the chunked-upload request, flagging the final chunk.
+void URLFetcherCore::CompleteAddingUploadDataChunk(
+    const std::string& content, bool is_last_chunk) {
+  if (was_cancelled_) {
+    // Since CompleteAddingUploadDataChunk() is posted as a *delayed* task, it
+    // may run after the URLFetcher was already stopped.
+    return;
+  }
+  DCHECK(is_chunked_upload_);
+  DCHECK(request_.get());
+  DCHECK(!content.empty());
+  request_->AppendChunkToUpload(content.data(),
+                                static_cast<int>(content.length()),
+                                is_last_chunk);
+}
+
+// Return true if the write was done and reading may continue.
+// Return false if the write is pending, and the next read will
+// be done later.
+bool URLFetcherCore::WriteBuffer(int num_bytes) {
+  bool write_complete = false;
+  switch (response_destination_) {
+    case STRING:
+      data_.append(buffer_->data(), num_bytes);
+      write_complete = true;
+      break;
+
+    case PERMANENT_FILE:
+    case TEMP_FILE:
+      file_writer_->WriteBuffer(num_bytes);
+      // WriteBuffer() sends a request to the file thread.
+      // The write is not done yet.
+      write_complete = false;
+      break;
+
+#if defined(COBALT)
+    case DISCARD:
+      write_complete = true;
+      break;
+#endif
+
+    default:
+      NOTREACHED();
+  }
+  return write_complete;
+}
+
+void URLFetcherCore::ReadResponse() {
+  // Some servers may treat HEAD requests as GET requests.  To free up the
+  // network connection as soon as possible, signal that the request has
+  // completed immediately, without trying to read any data back (all we care
+  // about is the response code and headers, which we already have).
+  int bytes_read = 0;
+  if (request_->status().is_success() &&
+      (request_type_ != URLFetcher::HEAD))
+    request_->Read(buffer_, kBufferSize, &bytes_read);
+  OnReadCompleted(request_.get(), bytes_read);
+}
+
+void URLFetcherCore::DisownFile() {
+  file_writer_->DisownFile();
+}
+
+#if defined(COBALT)
+
+void URLFetcherCore::InformDelegateResponseStarted() {
+  DCHECK(network_task_runner_->BelongsToCurrentThread());
+  DCHECK(request_);
+
+  delegate_task_runner_->PostTask(
+      FROM_HERE,
+      base::Bind(
+          &URLFetcherCore::InformDelegateResponseStartedInDelegateThread,
+          this));
+}
+
+void URLFetcherCore::InformDelegateResponseStartedInDelegateThread() {
+  DCHECK(delegate_task_runner_->BelongsToCurrentThread());
+  if (delegate_) {
+    delegate_->OnURLFetchResponseStarted(fetcher_);
+  }
+}
+
+#endif  // defined(COBALT)
+
+void URLFetcherCore::InformDelegateUploadProgress() {
+  DCHECK(network_task_runner_->BelongsToCurrentThread());
+  if (request_.get()) {
+    int64 current = request_->GetUploadProgress().position();
+    if (current_upload_bytes_ != current) {
+      current_upload_bytes_ = current;
+      int64 total = -1;
+      if (!is_chunked_upload_)
+        total = static_cast<int64>(upload_content_.size());
+      delegate_task_runner_->PostTask(
+          FROM_HERE,
+          base::Bind(
+              &URLFetcherCore::InformDelegateUploadProgressInDelegateThread,
+              this, current, total));
+    }
+  }
+}
+
+void URLFetcherCore::InformDelegateUploadProgressInDelegateThread(
+    int64 current, int64 total) {
+  DCHECK(delegate_task_runner_->BelongsToCurrentThread());
+  if (delegate_)
+    delegate_->OnURLFetchUploadProgress(fetcher_, current, total);
+}
+
+void URLFetcherCore::InformDelegateDownloadDataIfNecessary(int bytes_read) {
+  DCHECK(network_task_runner_->BelongsToCurrentThread());
+  if (delegate_ && delegate_->ShouldSendDownloadData() && bytes_read != 0) {
+    if (!download_data_cache_) {
+      download_data_cache_.reset(new std::string);
+      download_data_cache_->reserve(kDownloadCacheSize);
+    }
+    download_data_cache_->resize(download_data_cache_->size() + bytes_read);
+    memcpy(&(*download_data_cache_)[download_data_cache_->size() - bytes_read],
+           buffer_->data(), bytes_read);
+    if (download_data_cache_->size() >= kDownloadCacheSize) {
+      delegate_task_runner_->PostTask(
+          FROM_HERE,
+          base::Bind(
+              &URLFetcherCore::InformDelegateDownloadDataInDelegateThread, this,
+              base::Passed(&download_data_cache_)));
+    }
+  }
+}
+
+void URLFetcherCore::InformDelegateDownloadData() {
+  DCHECK(network_task_runner_->BelongsToCurrentThread());
+  if (delegate_ && delegate_->ShouldSendDownloadData() &&
+      download_data_cache_) {
+    delegate_task_runner_->PostTask(
+        FROM_HERE,
+        base::Bind(&URLFetcherCore::InformDelegateDownloadDataInDelegateThread,
+                   this, base::Passed(&download_data_cache_)));
+  }
+}
+
+void URLFetcherCore::InformDelegateDownloadDataInDelegateThread(
+    scoped_ptr<std::string> download_data) {
+  DCHECK(delegate_task_runner_->BelongsToCurrentThread());
+  if (delegate_) {
+    delegate_->OnURLFetchDownloadData(fetcher_, download_data.Pass());
+  }
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_fetcher_core.h b/src/net/url_request/url_fetcher_core.h
new file mode 100644
index 0000000..ff2c174
--- /dev/null
+++ b/src/net/url_request/url_fetcher_core.h
@@ -0,0 +1,434 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_FETCHER_CORE_H_
+#define NET_URL_REQUEST_URL_FETCHER_CORE_H_
+
+#include <set>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/debug/stack_trace.h"
+#include "base/file_path.h"
+#include "base/lazy_instance.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/platform_file.h"
+#include "base/timer.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/host_port_pair.h"
+#include "net/http/http_request_headers.h"
+#include "net/url_request/url_fetcher.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_status.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+}  // namespace base
+
+namespace net {
+class HttpResponseHeaders;
+class IOBuffer;
+class URLFetcherDelegate;
+class URLRequestContextGetter;
+class URLRequestThrottlerEntryInterface;
+
+class URLFetcherCore
+    : public base::RefCountedThreadSafe<URLFetcherCore>,
+      public URLRequest::Delegate {
+ public:
+  URLFetcherCore(URLFetcher* fetcher,
+                 const GURL& original_url,
+                 URLFetcher::RequestType request_type,
+                 URLFetcherDelegate* d);
+
+  // Starts the load.  It's important that this not happen in the constructor
+  // because it causes the IO thread to begin AddRef()ing and Release()ing
+  // us.  If our caller hasn't had time to fully construct us and take a
+  // reference, the IO thread could interrupt things, run a task, Release()
+  // us, and destroy us, leaving the caller with an already-destroyed object
+  // when construction finishes.
+  void Start();
+
+  // Stops any in-progress load and ensures no callback will happen.  It is
+  // safe to call this multiple times.
+  void Stop();
+
+  // URLFetcher-like functions.
+
+  // For POST requests, set |content_type| to the MIME type of the
+  // content and set |content| to the data to upload.
+  void SetUploadData(const std::string& upload_content_type,
+                     const std::string& upload_content);
+  void SetChunkedUpload(const std::string& upload_content_type);
+  // Adds a block of data to be uploaded in a POST body. This can only be
+  // called after Start().
+  void AppendChunkToUpload(const std::string& data, bool is_last_chunk);
+  // |flags| are flags to apply to the load operation--these should be
+  // one or more of the LOAD_* flags defined in net/base/load_flags.h.
+  void SetLoadFlags(int load_flags);
+  int GetLoadFlags() const;
+  void SetReferrer(const std::string& referrer);
+  void SetExtraRequestHeaders(const std::string& extra_request_headers);
+  void AddExtraRequestHeader(const std::string& header_line);
+  void GetExtraRequestHeaders(HttpRequestHeaders* headers) const;
+  void SetRequestContext(URLRequestContextGetter* request_context_getter);
+  // Set the URL that should be consulted for the third-party cookie
+  // blocking policy.
+  void SetFirstPartyForCookies(const GURL& first_party_for_cookies);
+  // Set the key and data callback that is used when setting the user
+  // data on any URLRequest objects this object creates.
+  void SetURLRequestUserData(
+      const void* key,
+      const URLFetcher::CreateDataCallback& create_data_callback);
+  void SetStopOnRedirect(bool stop_on_redirect);
+  void SetAutomaticallyRetryOn5xx(bool retry);
+  void SetMaxRetriesOn5xx(int max_retries);
+  int GetMaxRetriesOn5xx() const;
+  base::TimeDelta GetBackoffDelay() const;
+  void SetAutomaticallyRetryOnNetworkChanges(int max_retries);
+  void SaveResponseToFileAtPath(
+      const FilePath& file_path,
+      scoped_refptr<base::TaskRunner> file_task_runner);
+  void SaveResponseToTemporaryFile(
+      scoped_refptr<base::TaskRunner> file_task_runner);
+#if defined(COBALT)
+  // Don't save the response.
+  void DiscardResponse();
+#endif
+  HttpResponseHeaders* GetResponseHeaders() const;
+  HostPortPair GetSocketAddress() const;
+  bool WasFetchedViaProxy() const;
+  const GURL& GetOriginalURL() const;
+  const GURL& GetURL() const;
+  const URLRequestStatus& GetStatus() const;
+  int GetResponseCode() const;
+  const ResponseCookies& GetCookies() const;
+  bool FileErrorOccurred(base::PlatformFileError* out_error_code) const;
+  // Reports that the received content was malformed (i.e. failed parsing
+  // or validation).  This makes the throttling logic that does exponential
+  // back-off when servers are having problems treat the current request as
+  // a failure.  Your call to this method will be ignored if your request is
+  // already considered a failure based on the HTTP response code or response
+  // headers.
+  void ReceivedContentWasMalformed();
+  bool GetResponseAsString(std::string* out_response_string) const;
+  bool GetResponseAsFilePath(bool take_ownership,
+                             FilePath* out_response_path);
+
+  // Overridden from URLRequest::Delegate:
+  virtual void OnReceivedRedirect(URLRequest* request,
+                                  const GURL& new_url,
+                                  bool* defer_redirect) OVERRIDE;
+  virtual void OnResponseStarted(URLRequest* request) OVERRIDE;
+  virtual void OnReadCompleted(URLRequest* request,
+                               int bytes_read) OVERRIDE;
+  virtual void OnCertificateRequested(
+      URLRequest* request,
+      SSLCertRequestInfo* cert_request_info) OVERRIDE;
+
+  URLFetcherDelegate* delegate() const { return delegate_; }
+  static void CancelAll();
+  static int GetNumFetcherCores();
+  static void SetEnableInterceptionForTests(bool enabled);
+  static void SetIgnoreCertificateRequests(bool ignored);
+
+ private:
+  friend class base::RefCountedThreadSafe<URLFetcherCore>;
+
+  // How should the response be stored?
+  enum ResponseDestinationType {
+    STRING,  // Default: In a std::string
+    PERMANENT_FILE,  // Write to a permanent file.
+    TEMP_FILE,  // Write to a temporary file.
+#if defined(COBALT)
+    DISCARD,  // Don't store the response.
+#endif
+  };
+
+  class Registry {
+   public:
+    Registry();
+    ~Registry();
+
+    void AddURLFetcherCore(URLFetcherCore* core);
+    void RemoveURLFetcherCore(URLFetcherCore* core);
+
+    void CancelAll();
+
+    int size() const {
+      return fetchers_.size();
+    }
+
+   private:
+    std::set<URLFetcherCore*> fetchers_;
+
+    DISALLOW_COPY_AND_ASSIGN(Registry);
+  };
+
+  // Class FileWriter encapsulates all state involved in writing
+  // response bytes to a file. It is only used if
+  // |URLFetcherCore::response_destination_| == TEMP_FILE ||
+  // |URLFetcherCore::response_destination_| == PERMANENT_FILE.  Each
+  // instance of FileWriter is owned by a URLFetcherCore, which
+  // manages its lifetime and never transfers ownership. All file operations
+  // happen on |file_task_runner_|.
+  class FileWriter {
+   public:
+    FileWriter(URLFetcherCore* core,
+               scoped_refptr<base::TaskRunner> file_task_runner);
+    ~FileWriter();
+
+    void CreateFileAtPath(const FilePath& file_path);
+    void CreateTempFile();
+
+    // Record |num_bytes| response bytes in |core_->buffer_| to the file.
+    void WriteBuffer(int num_bytes);
+
+    // Called when a write has been done.  Continues writing if there are
+    // any more bytes to write.  Otherwise, initiates a read in core_.
+    void ContinueWrite(base::PlatformFileError error_code, int bytes_written);
+
+    // Drop ownership of the file at |file_path_|.
+    // This class will not delete it or write to it again.
+    void DisownFile();
+
+    // Close the file if it is open.
+    void CloseFileAndCompleteRequest();
+
+    // Close the file if it is open and then delete it.
+    void CloseAndDeleteFile();
+
+    const FilePath& file_path() const { return file_path_; }
+    int64 total_bytes_written() { return total_bytes_written_; }
+    base::PlatformFileError error_code() const { return error_code_; }
+
+   private:
+    // Callback which gets the result of a permanent file creation.
+    void DidCreateFile(const FilePath& file_path,
+                       base::PlatformFileError error_code,
+                       base::PassPlatformFile file_handle,
+                       bool created);
+    // Callback which gets the result of a temporary file creation.
+    void DidCreateTempFile(base::PlatformFileError error_code,
+                           base::PassPlatformFile file_handle,
+                           const FilePath& file_path);
+    // This method is used to implement DidCreateFile and DidCreateTempFile.
+    void DidCreateFileInternal(const FilePath& file_path,
+                               base::PlatformFileError error_code,
+                               base::PassPlatformFile file_handle);
+
+    // Callback which gets the result of closing the file.
+    void DidCloseFile(base::PlatformFileError error);
+
+    // Callback which gets the result of closing the file. Deletes the file if
+    // it has been created.
+    void DeleteFile(base::PlatformFileError error_code);
+
+    // The URLFetcherCore which instantiated this class.
+    URLFetcherCore* core_;
+
+    // The last error encountered on a file operation.  base::PLATFORM_FILE_OK
+    // if no error occurred.
+    base::PlatformFileError error_code_;
+
+    // Callbacks are created for use with base::FileUtilProxy.
+    base::WeakPtrFactory<URLFetcherCore::FileWriter> weak_factory_;
+
+    // Task runner on which file operations should happen.
+    scoped_refptr<base::TaskRunner> file_task_runner_;
+
+    // Path to the file.  This path is empty when there is no file.
+    FilePath file_path_;
+
+    // Handle to the file.
+    base::PlatformFile file_handle_;
+
+    // We always append to the file.  Track the total number of bytes
+    // written, so that writes know the offset to give.
+    int64 total_bytes_written_;
+
+    // How many bytes did the last Write() try to write?  Needed so
+    // that if not all the bytes get written on a Write(), we can
+    // call Write() again with the rest.
+    int pending_bytes_;
+
+    // When writing, how many bytes from the buffer have been successfully
+    // written so far?
+    int buffer_offset_;
+  };
+
+  virtual ~URLFetcherCore();
+
+  // Wrapper functions that allow us to ensure actions happen on the right
+  // thread.
+  void StartOnIOThread();
+  void StartURLRequest();
+  void StartURLRequestWhenAppropriate();
+  void CancelURLRequest();
+  void OnCompletedURLRequest(base::TimeDelta backoff_delay);
+  void InformDelegateFetchIsComplete();
+  void NotifyMalformedContent();
+  void RetryOrCompleteUrlFetch();
+
+  // Deletes the request, removes it from the registry, and removes the
+  // destruction observer.
+  void ReleaseRequest();
+
+  // Returns the max value of exponential back-off release time for
+  // |original_url_| and |url_|.
+  base::TimeTicks GetBackoffReleaseTime();
+
+  void CompleteAddingUploadDataChunk(const std::string& data,
+                                     bool is_last_chunk);
+
+  // Store the response bytes in |buffer_| in the container indicated by
+  // |response_destination_|. Return true if the write has been
+  // done, and another read can overwrite |buffer_|.  If this function
+  // returns false, it will post a task that will read more bytes once the
+  // write is complete.
+  bool WriteBuffer(int num_bytes);
+
+  // Read response bytes from the request.
+  void ReadResponse();
+
+  // Drop ownership of any file managed by |file_path_|.
+  void DisownFile();
+
+  // Notify Delegate about the progress of upload/download.
+#if defined(COBALT)
+  void InformDelegateResponseStarted();
+  void InformDelegateResponseStartedInDelegateThread();
+#endif  // defined(COBALT)
+  void InformDelegateUploadProgress();
+  void InformDelegateUploadProgressInDelegateThread(int64 current, int64 total);
+  void InformDelegateDownloadDataIfNecessary(int bytes_read);
+  void InformDelegateDownloadData();
+  void InformDelegateDownloadDataInDelegateThread(
+      scoped_ptr<std::string> download_data);
+
+  URLFetcher* fetcher_;              // Corresponding fetcher object
+  GURL original_url_;                // The URL we were asked to fetch
+  GURL url_;                         // The URL we eventually wound up at
+  URLFetcher::RequestType request_type_;  // What type of request is this?
+  URLRequestStatus status_;          // Status of the request
+  URLFetcherDelegate* delegate_;     // Object to notify on completion
+  scoped_refptr<base::SingleThreadTaskRunner> delegate_task_runner_;
+                                     // Task runner for the creating thread.
+  scoped_refptr<base::SingleThreadTaskRunner> network_task_runner_;
+                                     // Task runner for network operations.
+  scoped_refptr<base::TaskRunner> file_task_runner_;
+                                     // Task runner for the thread
+                                     // on which file access happens.
+  scoped_ptr<URLRequest> request_;   // The actual request this wraps
+  int load_flags_;                   // Flags for the load operation
+  int response_code_;                // HTTP status code for the request
+  std::string data_;                 // Results of the request, when we are
+                                     // storing the response as a string.
+  scoped_refptr<IOBuffer> buffer_;   // Read buffer
+  // Caches the data to be sent to the request thread to minimize task posting.
+  scoped_ptr<std::string> download_data_cache_;
+  scoped_refptr<URLRequestContextGetter> request_context_getter_;
+                                     // Cookie/cache info for the request
+  GURL first_party_for_cookies_;     // The first party URL for the request
+  // The user data to add to each newly-created URLRequest.
+  const void* url_request_data_key_;
+  URLFetcher::CreateDataCallback url_request_create_data_callback_;
+  ResponseCookies cookies_;          // Response cookies
+  HttpRequestHeaders extra_request_headers_;
+  scoped_refptr<HttpResponseHeaders> response_headers_;
+  bool was_fetched_via_proxy_;
+  HostPortPair socket_address_;
+
+  std::string upload_content_;       // HTTP POST payload
+  std::string upload_content_type_;  // MIME type of POST payload
+  std::string referrer_;             // HTTP Referer header value
+  bool is_chunked_upload_;           // True if using chunked transfer encoding
+
+  // Used to determine how long to wait before making a request or doing a
+  // retry.
+  //
+  // Both of them can only be accessed on the IO thread.
+  //
+  // We need not only the throttler entry for |original_URL|, but also
+  // the one for |url|. For example, consider the case that URL A
+  // redirects to URL B, for which the server returns a 500
+  // response. In this case, the exponential back-off release time of
+  // URL A won't increase. If we retry without considering the
+  // back-off constraint of URL B, we may send out too many requests
+  // for URL A in a short period of time.
+  //
+  // Both of these will be NULL if
+  // URLRequestContext::throttler_manager() is NULL.
+  scoped_refptr<URLRequestThrottlerEntryInterface>
+      original_url_throttler_entry_;
+  scoped_refptr<URLRequestThrottlerEntryInterface> url_throttler_entry_;
+
+  // True if the URLFetcher has been cancelled.
+  bool was_cancelled_;
+
+  // If writing results to a file, |file_writer_| will manage creation,
+  // writing, and destruction of that file.
+  scoped_ptr<FileWriter> file_writer_;
+
+  // Where should responses be saved?
+  ResponseDestinationType response_destination_;
+
+  // Path to the file where the response is written.
+  FilePath response_destination_file_path_;
+
+  // By default any server-initiated redirects are automatically followed.  If
+  // this flag is set to true, however, a redirect will halt the fetch and call
+  // back to the delegate immediately.
+  bool stop_on_redirect_;
+  // True when we're actually stopped due to a redirect halted by the above.  We
+  // use this to ensure that |url_| is set to the redirect destination rather
+  // than the originally-fetched URL.
+  bool stopped_on_redirect_;
+
+  // If |automatically_retry_on_5xx_| is false, 5xx responses will be
+  // propagated to the observer, if it is true URLFetcher will automatically
+  // re-execute the request, after the back-off delay has expired.
+  // true by default.
+  bool automatically_retry_on_5xx_;
+  // |num_retries_on_5xx_| indicates how many times we've failed to successfully
+  // fetch this URL due to 5xx responses.  Once this value exceeds the maximum
+  // number of retries specified by the owner URLFetcher instance,
+  // we'll give up.
+  int num_retries_on_5xx_;
+  // Maximum retries allowed when 5xx responses are received.
+  int max_retries_on_5xx_;
+  // Back-off time delay. 0 by default.
+  base::TimeDelta backoff_delay_;
+
+  // The number of retries that have been attempted due to ERR_NETWORK_CHANGED.
+  int num_retries_on_network_changes_;
+  // Maximum retries allowed when the request fails with ERR_NETWORK_CHANGED.
+  // 0 by default.
+  int max_retries_on_network_changes_;
+
+  // Timer to poll the progress of uploading for POST and PUT requests.
+  // When crbug.com/119629 is fixed, scoped_ptr is not necessary here.
+  scoped_ptr<base::RepeatingTimer<URLFetcherCore> >
+      upload_progress_checker_timer_;
+  // Number of bytes sent so far.
+  int64 current_upload_bytes_;
+  // Number of bytes received so far.
+  int64 current_response_bytes_;
+  // Total expected bytes to receive (-1 if it cannot be determined).
+  int64 total_response_bytes_;
+
+  // TODO(willchan): Get rid of this after debugging crbug.com/90971.
+  base::debug::StackTrace stack_trace_;
+
+  static base::LazyInstance<Registry> g_registry;
+
+  DISALLOW_COPY_AND_ASSIGN(URLFetcherCore);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_FETCHER_CORE_H_
diff --git a/src/net/url_request/url_fetcher_delegate.cc b/src/net/url_request/url_fetcher_delegate.cc
new file mode 100644
index 0000000..7759157
--- /dev/null
+++ b/src/net/url_request/url_fetcher_delegate.cc
@@ -0,0 +1,26 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_fetcher_delegate.h"
+
+namespace net {
+
+#if defined(COBALT)
+void URLFetcherDelegate::OnURLFetchResponseStarted(
+    const URLFetcher* source) {}
+#endif  // defined(COBALT)
+
+void URLFetcherDelegate::OnURLFetchDownloadData(
+    const URLFetcher* source, scoped_ptr<std::string> download_data) {}
+
+void URLFetcherDelegate::OnURLFetchUploadProgress(
+    const URLFetcher* source, int64 current, int64 total) {}
+
+bool URLFetcherDelegate::ShouldSendDownloadData() {
+  return false;
+}
+
+URLFetcherDelegate::~URLFetcherDelegate() {}
+
+}  // namespace net
diff --git a/src/net/url_request/url_fetcher_delegate.h b/src/net/url_request/url_fetcher_delegate.h
new file mode 100644
index 0000000..0e643f8
--- /dev/null
+++ b/src/net/url_request/url_fetcher_delegate.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_FETCHER_DELEGATE_H_
+#define NET_URL_REQUEST_URL_FETCHER_DELEGATE_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "net/base/net_export.h"
+
+namespace net {
+
+class URLFetcher;
+
+// A delegate interface for users of URLFetcher.
+class NET_EXPORT URLFetcherDelegate {
+ public:
+#if defined(COBALT)
+  // This will be called when the response code and headers have been received.
+  virtual void OnURLFetchResponseStarted(const URLFetcher* source);
+#endif  // defined(COBALT)
+
+  // This will be called when the URL has been fetched, successfully or not.
+  // Use accessor methods on |source| to get the results.
+  virtual void OnURLFetchComplete(const URLFetcher* source) = 0;
+
+  // This will be called when some part of the response is read.
+  // |download_data| contains the current bytes received since the last call.
+  // This will be called after ShouldSendDownloadData() and only if the latter
+  // returns true.
+  virtual void OnURLFetchDownloadData(const URLFetcher* source,
+                                      scoped_ptr<std::string> download_data);
+
+  // This indicates if OnURLFetchDownloadData should be called.
+  // This will be called before OnURLFetchDownloadData is called, and only if
+  // this returns true.
+  // The default implementation returns false.
+  virtual bool ShouldSendDownloadData();
+
+  // This will be called as uploading of a POST or PUT request proceeds.
+  // |current| denotes the number of bytes sent so far, and |total| is the
+  // total size of uploading data (or -1 if chunked upload is enabled).
+  virtual void OnURLFetchUploadProgress(const URLFetcher* source,
+                                        int64 current, int64 total);
+
+ protected:
+  virtual ~URLFetcherDelegate();
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_FETCHER_DELEGATE_H_
diff --git a/src/net/url_request/url_fetcher_factory.h b/src/net/url_request/url_fetcher_factory.h
new file mode 100644
index 0000000..0c5a5eb
--- /dev/null
+++ b/src/net/url_request/url_fetcher_factory.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_FETCHER_FACTORY_H_
+#define NET_URL_REQUEST_URL_FETCHER_FACTORY_H_
+
+#include "net/url_request/url_fetcher.h"
+
+namespace net {
+class URLFetcherDelegate;
+
+// URLFetcher::Create uses the currently registered Factory to create the
+// URLFetcher. Factory is intended for testing.
+class URLFetcherFactory {
+ public:
+  virtual URLFetcher* CreateURLFetcher(
+      int id,
+      const GURL& url,
+      URLFetcher::RequestType request_type,
+      URLFetcherDelegate* delegate) = 0;
+
+ protected:
+  virtual ~URLFetcherFactory() {}
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_FETCHER_FACTORY_H_
diff --git a/src/net/url_request/url_fetcher_impl.cc b/src/net/url_request/url_fetcher_impl.cc
new file mode 100644
index 0000000..527a195
--- /dev/null
+++ b/src/net/url_request/url_fetcher_impl.cc
@@ -0,0 +1,216 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_fetcher_impl.h"
+
+#include "base/bind.h"
+#include "base/message_loop_proxy.h"
+#include "net/url_request/url_fetcher_core.h"
+#include "net/url_request/url_fetcher_factory.h"
+
+namespace net {
+
+static URLFetcherFactory* g_factory = NULL;
+
+URLFetcherImpl::URLFetcherImpl(const GURL& url,
+                               RequestType request_type,
+                               URLFetcherDelegate* d)
+    : ALLOW_THIS_IN_INITIALIZER_LIST(
+        core_(new URLFetcherCore(this, url, request_type, d))) {
+}
+
+URLFetcherImpl::~URLFetcherImpl() {
+  core_->Stop();
+}
+
+void URLFetcherImpl::SetUploadData(const std::string& upload_content_type,
+                                   const std::string& upload_content) {
+  core_->SetUploadData(upload_content_type, upload_content);
+}
+
+void URLFetcherImpl::SetChunkedUpload(const std::string& content_type) {
+  core_->SetChunkedUpload(content_type);
+}
+
+void URLFetcherImpl::AppendChunkToUpload(const std::string& data,
+                                         bool is_last_chunk) {
+  DCHECK(data.length());
+  core_->AppendChunkToUpload(data, is_last_chunk);
+}
+
+void URLFetcherImpl::SetReferrer(const std::string& referrer) {
+  core_->SetReferrer(referrer);
+}
+
+void URLFetcherImpl::SetLoadFlags(int load_flags) {
+  core_->SetLoadFlags(load_flags);
+}
+
+int URLFetcherImpl::GetLoadFlags() const {
+  return core_->GetLoadFlags();
+}
+
+void URLFetcherImpl::SetExtraRequestHeaders(
+    const std::string& extra_request_headers) {
+  core_->SetExtraRequestHeaders(extra_request_headers);
+}
+
+void URLFetcherImpl::AddExtraRequestHeader(const std::string& header_line) {
+  core_->AddExtraRequestHeader(header_line);
+}
+
+void URLFetcherImpl::GetExtraRequestHeaders(
+    HttpRequestHeaders* headers) const {
+  core_->GetExtraRequestHeaders(headers);
+}
+
+void URLFetcherImpl::SetRequestContext(
+    URLRequestContextGetter* request_context_getter) {
+  core_->SetRequestContext(request_context_getter);
+}
+
+void URLFetcherImpl::SetFirstPartyForCookies(
+    const GURL& first_party_for_cookies) {
+  core_->SetFirstPartyForCookies(first_party_for_cookies);
+}
+
+void URLFetcherImpl::SetURLRequestUserData(
+    const void* key,
+    const CreateDataCallback& create_data_callback) {
+  core_->SetURLRequestUserData(key, create_data_callback);
+}
+
+void URLFetcherImpl::SetStopOnRedirect(bool stop_on_redirect) {
+  core_->SetStopOnRedirect(stop_on_redirect);
+}
+
+void URLFetcherImpl::SetAutomaticallyRetryOn5xx(bool retry) {
+  core_->SetAutomaticallyRetryOn5xx(retry);
+}
+
+void URLFetcherImpl::SetMaxRetriesOn5xx(int max_retries) {
+  core_->SetMaxRetriesOn5xx(max_retries);
+}
+
+int URLFetcherImpl::GetMaxRetriesOn5xx() const {
+  return core_->GetMaxRetriesOn5xx();
+}
+
+
+base::TimeDelta URLFetcherImpl::GetBackoffDelay() const {
+  return core_->GetBackoffDelay();
+}
+
+void URLFetcherImpl::SetAutomaticallyRetryOnNetworkChanges(int max_retries) {
+  core_->SetAutomaticallyRetryOnNetworkChanges(max_retries);
+}
+
+void URLFetcherImpl::SaveResponseToFileAtPath(
+    const FilePath& file_path,
+    scoped_refptr<base::TaskRunner> file_task_runner) {
+  core_->SaveResponseToFileAtPath(file_path, file_task_runner);
+}
+
+void URLFetcherImpl::SaveResponseToTemporaryFile(
+    scoped_refptr<base::TaskRunner> file_task_runner) {
+  core_->SaveResponseToTemporaryFile(file_task_runner);
+}
+
+#if defined(COBALT)
+void URLFetcherImpl::DiscardResponse() {
+  core_->DiscardResponse();
+}
+#endif
+
+HttpResponseHeaders* URLFetcherImpl::GetResponseHeaders() const {
+  return core_->GetResponseHeaders();
+}
+
+HostPortPair URLFetcherImpl::GetSocketAddress() const {
+  return core_->GetSocketAddress();
+}
+
+bool URLFetcherImpl::WasFetchedViaProxy() const {
+  return core_->WasFetchedViaProxy();
+}
+
+void URLFetcherImpl::Start() {
+  core_->Start();
+}
+
+const GURL& URLFetcherImpl::GetOriginalURL() const {
+  return core_->GetOriginalURL();
+}
+
+const GURL& URLFetcherImpl::GetURL() const {
+  return core_->GetURL();
+}
+
+const URLRequestStatus& URLFetcherImpl::GetStatus() const {
+  return core_->GetStatus();
+}
+
+int URLFetcherImpl::GetResponseCode() const {
+  return core_->GetResponseCode();
+}
+
+const ResponseCookies& URLFetcherImpl::GetCookies() const {
+  return core_->GetCookies();
+}
+
+bool URLFetcherImpl::FileErrorOccurred(
+    base::PlatformFileError* out_error_code) const {
+  return core_->FileErrorOccurred(out_error_code);
+}
+
+void URLFetcherImpl::ReceivedContentWasMalformed() {
+  core_->ReceivedContentWasMalformed();
+}
+
+bool URLFetcherImpl::GetResponseAsString(
+    std::string* out_response_string) const {
+  return core_->GetResponseAsString(out_response_string);
+}
+
+bool URLFetcherImpl::GetResponseAsFilePath(
+    bool take_ownership,
+    FilePath* out_response_path) const {
+  return core_->GetResponseAsFilePath(take_ownership, out_response_path);
+}
+
+// static
+void URLFetcherImpl::CancelAll() {
+  URLFetcherCore::CancelAll();
+}
+
+// static
+void URLFetcherImpl::SetEnableInterceptionForTests(bool enabled) {
+  URLFetcherCore::SetEnableInterceptionForTests(enabled);
+}
+
+// static
+void URLFetcherImpl::SetIgnoreCertificateRequests(bool ignored) {
+  URLFetcherCore::SetIgnoreCertificateRequests(ignored);
+}
+
+// static
+int URLFetcherImpl::GetNumFetcherCores() {
+  return URLFetcherCore::GetNumFetcherCores();
+}
+
+URLFetcherDelegate* URLFetcherImpl::delegate() const {
+  return core_->delegate();
+}
+
+// static
+URLFetcherFactory* URLFetcherImpl::factory() {
+  return g_factory;
+}
+
+// static
+void URLFetcherImpl::set_factory(URLFetcherFactory* factory) {
+  g_factory = factory;
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_fetcher_impl.h b/src/net/url_request/url_fetcher_impl.h
new file mode 100644
index 0000000..5961b80
--- /dev/null
+++ b/src/net/url_request/url_fetcher_impl.h
@@ -0,0 +1,126 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains URLFetcher, a wrapper around URLRequest that handles
+// low-level details like thread safety, ref counting, and incremental buffer
+// reading.  This is useful for callers who simply want to get the data from a
+// URL and don't care about all the nitty-gritty details.
+//
+// NOTE(willchan): Only one "IO" thread is supported for URLFetcher.  This is a
+// temporary situation.  We will work on allowing support for multiple "io"
+// threads per process.
+
+#ifndef NET_URL_REQUEST_URL_FETCHER_IMPL_H_
+#define NET_URL_REQUEST_URL_FETCHER_IMPL_H_
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "net/base/net_export.h"
+#include "net/url_request/url_fetcher.h"
+
+namespace net {
+class URLFetcherCore;
+class URLFetcherDelegate;
+class URLFetcherFactory;
+
+class NET_EXPORT_PRIVATE URLFetcherImpl : public URLFetcher {
+ public:
+  // |url| is the URL to send the request to.
+  // |request_type| is the type of request to make.
+  // |d| the object that will receive the callback on fetch completion.
+  URLFetcherImpl(const GURL& url,
+                 RequestType request_type,
+                 URLFetcherDelegate* d);
+  virtual ~URLFetcherImpl();
+
+  // URLFetcher implementation:
+  virtual void SetUploadData(const std::string& upload_content_type,
+                             const std::string& upload_content) OVERRIDE;
+  virtual void SetChunkedUpload(
+      const std::string& upload_content_type) OVERRIDE;
+  virtual void AppendChunkToUpload(const std::string& data,
+                                   bool is_last_chunk) OVERRIDE;
+  virtual void SetLoadFlags(int load_flags) OVERRIDE;
+  virtual int GetLoadFlags() const OVERRIDE;
+  virtual void SetReferrer(const std::string& referrer) OVERRIDE;
+  virtual void SetExtraRequestHeaders(
+      const std::string& extra_request_headers) OVERRIDE;
+  virtual void AddExtraRequestHeader(const std::string& header_line) OVERRIDE;
+  virtual void GetExtraRequestHeaders(
+      HttpRequestHeaders* headers) const OVERRIDE;
+  virtual void SetRequestContext(
+      URLRequestContextGetter* request_context_getter) OVERRIDE;
+  virtual void SetFirstPartyForCookies(
+      const GURL& first_party_for_cookies) OVERRIDE;
+  virtual void SetURLRequestUserData(
+      const void* key,
+      const CreateDataCallback& create_data_callback) OVERRIDE;
+  virtual void SetStopOnRedirect(bool stop_on_redirect) OVERRIDE;
+  virtual void SetAutomaticallyRetryOn5xx(bool retry) OVERRIDE;
+  virtual void SetMaxRetriesOn5xx(int max_retries) OVERRIDE;
+  virtual int GetMaxRetriesOn5xx() const OVERRIDE;
+  virtual base::TimeDelta GetBackoffDelay() const OVERRIDE;
+  virtual void SetAutomaticallyRetryOnNetworkChanges(int max_retries) OVERRIDE;
+  virtual void SaveResponseToFileAtPath(
+      const FilePath& file_path,
+      scoped_refptr<base::TaskRunner> file_task_runner) OVERRIDE;
+  virtual void SaveResponseToTemporaryFile(
+      scoped_refptr<base::TaskRunner> file_task_runner) OVERRIDE;
+#if defined(COBALT)
+  virtual void DiscardResponse() OVERRIDE;
+#endif
+  virtual HttpResponseHeaders* GetResponseHeaders() const OVERRIDE;
+  virtual HostPortPair GetSocketAddress() const OVERRIDE;
+  virtual bool WasFetchedViaProxy() const OVERRIDE;
+  virtual void Start() OVERRIDE;
+  virtual const GURL& GetOriginalURL() const OVERRIDE;
+  virtual const GURL& GetURL() const OVERRIDE;
+  virtual const URLRequestStatus& GetStatus() const OVERRIDE;
+  virtual int GetResponseCode() const OVERRIDE;
+  virtual const ResponseCookies& GetCookies() const OVERRIDE;
+  virtual bool FileErrorOccurred(
+      base::PlatformFileError* out_error_code) const OVERRIDE;
+  virtual void ReceivedContentWasMalformed() OVERRIDE;
+  virtual bool GetResponseAsString(
+      std::string* out_response_string) const OVERRIDE;
+  virtual bool GetResponseAsFilePath(
+      bool take_ownership,
+      FilePath* out_response_path) const OVERRIDE;
+
+  static void CancelAll();
+
+  static void SetEnableInterceptionForTests(bool enabled);
+  static void SetIgnoreCertificateRequests(bool ignored);
+
+  // TODO(akalin): Make these private again once URLFetcher::Create()
+  // is in net/.
+
+  static URLFetcherFactory* factory();
+
+  // Sets the factory used by the static method Create to create a URLFetcher.
+  // URLFetcher does not take ownership of |factory|. A value of NULL results
+  // in a URLFetcher being created directly.
+  //
+  // NOTE: for safety, this should only be used through ScopedURLFetcherFactory!
+  static void set_factory(URLFetcherFactory* factory);
+
+ protected:
+  // Returns the delegate.
+  URLFetcherDelegate* delegate() const;
+
+ private:
+  friend class URLFetcherTest;
+
+  // Only used by URLFetcherTest, returns the number of URLFetcher::Core objects
+  // actively running.
+  static int GetNumFetcherCores();
+
+  const scoped_refptr<URLFetcherCore> core_;
+
+  DISALLOW_COPY_AND_ASSIGN(URLFetcherImpl);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_FETCHER_IMPL_H_
diff --git a/src/net/url_request/url_fetcher_impl_unittest.cc b/src/net/url_request/url_fetcher_impl_unittest.cc
new file mode 100644
index 0000000..53723e6
--- /dev/null
+++ b/src/net/url_request/url_fetcher_impl_unittest.cc
@@ -0,0 +1,1449 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_fetcher_impl.h"
+
+#include <string>
+
+#include "base/bind.h"
+#include "base/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/message_loop_proxy.h"
+#include "base/stringprintf.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "build/build_config.h"
+#include "crypto/nss_util.h"
+#include "net/base/mock_host_resolver.h"
+#include "net/base/network_change_notifier.h"
+#include "net/http/http_response_headers.h"
+#include "net/test/test_server.h"
+#include "net/url_request/url_fetcher_delegate.h"
+#include "net/url_request/url_request_context_getter.h"
+#include "net/url_request/url_request_test_util.h"
+#include "net/url_request/url_request_throttler_manager.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(USE_NSS) || defined(OS_IOS)
+#include "net/ocsp/nss_ocsp.h"
+#endif
+
+namespace net {
+
+using base::Time;
+using base::TimeDelta;
+
+// TODO(eroman): Add a regression test for http://crbug.com/40505.
+
+namespace {
+
+// TODO(akalin): Move all the test data to somewhere under net/.
+const FilePath::CharType kDocRoot[] = FILE_PATH_LITERAL("chrome/test/data");
+const char kTestServerFilePrefix[] = "files/";
+
+class ThrottlingTestURLRequestContext : public TestURLRequestContext {
+ public:
+  ThrottlingTestURLRequestContext() : TestURLRequestContext(true) {
+    set_throttler_manager(&throttler_manager_);
+    Init();
+    DCHECK(throttler_manager() != NULL);
+  }
+
+ private:
+  URLRequestThrottlerManager throttler_manager_;
+};
+
+class ThrottlingTestURLRequestContextGetter
+    : public TestURLRequestContextGetter {
+ public:
+  ThrottlingTestURLRequestContextGetter(
+      base::MessageLoopProxy* io_message_loop_proxy,
+      TestURLRequestContext* request_context)
+      : TestURLRequestContextGetter(io_message_loop_proxy),
+        context_(request_context) {
+  }
+
+  // TestURLRequestContextGetter:
+  virtual TestURLRequestContext* GetURLRequestContext() OVERRIDE {
+    return context_;
+  }
+
+ protected:
+  virtual ~ThrottlingTestURLRequestContextGetter() {}
+
+  TestURLRequestContext* const context_;
+};
+
+}  // namespace
+
+class URLFetcherTest : public testing::Test,
+                       public URLFetcherDelegate {
+ public:
+  URLFetcherTest()
+      : fetcher_(NULL),
+        context_(NULL) {
+  }
+
+  static int GetNumFetcherCores() {
+    return URLFetcherImpl::GetNumFetcherCores();
+  }
+
+  // Creates a URLFetcher, using the program's main thread to do IO.
+  virtual void CreateFetcher(const GURL& url);
+
+  // URLFetcherDelegate:
+  // Subclasses that override this should either call this function or
+  // CleanupAfterFetchComplete() at the end of their processing, depending on
+  // whether they want to check for a non-empty HTTP 200 response or not.
+  virtual void OnURLFetchComplete(const URLFetcher* source) OVERRIDE;
+
+  // Deletes |fetcher| and terminates the message loop.
+  void CleanupAfterFetchComplete();
+
+  scoped_refptr<base::MessageLoopProxy> io_message_loop_proxy() {
+    return io_message_loop_proxy_;
+  }
+
+  TestURLRequestContext* request_context() {
+    return context_.get();
+  }
+
+ protected:
+  // testing::Test:
+  virtual void SetUp() OVERRIDE {
+    testing::Test::SetUp();
+
+    context_.reset(new ThrottlingTestURLRequestContext());
+    io_message_loop_proxy_ = base::MessageLoopProxy::current();
+
+#if defined(USE_NSS) || defined(OS_IOS)
+    crypto::EnsureNSSInit();
+    EnsureNSSHttpIOInit();
+#endif
+  }
+
+  virtual void TearDown() OVERRIDE {
+#if defined(USE_NSS) || defined(OS_IOS)
+    ShutdownNSSHttpIO();
+#endif
+  }
+
+  // URLFetcher is designed to run on the main UI thread, but in our tests
+  // we assume that the current thread is the IO thread where the URLFetcher
+  // dispatches its requests to.  When we wish to simulate being used from
+  // a UI thread, we dispatch a worker thread to do so.
+  scoped_refptr<base::MessageLoopProxy> io_message_loop_proxy_;
+
+  URLFetcherImpl* fetcher_;
+  scoped_ptr<TestURLRequestContext> context_;
+};
+
+// A test fixture that uses a MockHostResolver, so that name resolutions can
+// be manipulated by the tests to keep connections in the resolving state.
+class URLFetcherMockDnsTest : public URLFetcherTest {
+ public:
+  // testing::Test:
+  virtual void SetUp() OVERRIDE;
+
+  // URLFetcherTest:
+  virtual void CreateFetcher(const GURL& url) OVERRIDE;
+
+  // URLFetcherDelegate:
+  virtual void OnURLFetchComplete(const URLFetcher* source) OVERRIDE;
+
+ protected:
+  GURL test_url_;
+  scoped_ptr<TestServer> test_server_;
+  MockHostResolver resolver_;
+  scoped_ptr<URLFetcher> completed_fetcher_;
+};
+
+void URLFetcherTest::CreateFetcher(const GURL& url) {
+  fetcher_ = new URLFetcherImpl(url, URLFetcher::GET, this);
+  fetcher_->SetRequestContext(new ThrottlingTestURLRequestContextGetter(
+      io_message_loop_proxy(), request_context()));
+  fetcher_->Start();
+}
+
+void URLFetcherTest::OnURLFetchComplete(const URLFetcher* source) {
+  EXPECT_TRUE(source->GetStatus().is_success());
+  EXPECT_EQ(200, source->GetResponseCode());  // HTTP OK
+
+  std::string data;
+  EXPECT_TRUE(source->GetResponseAsString(&data));
+  EXPECT_FALSE(data.empty());
+
+  CleanupAfterFetchComplete();
+}
+
+void URLFetcherTest::CleanupAfterFetchComplete() {
+  delete fetcher_;  // Have to delete this here and not in the destructor,
+                    // because the destructor won't necessarily run on the
+                    // same thread that CreateFetcher() did.
+
+  io_message_loop_proxy()->PostTask(FROM_HERE, MessageLoop::QuitClosure());
+  // If the current message loop is not the IO loop, it will be shut down when
+  // the main loop returns and this thread subsequently goes out of scope.
+}
+
+void URLFetcherMockDnsTest::SetUp() {
+  URLFetcherTest::SetUp();
+
+  resolver_.set_ondemand_mode(true);
+  resolver_.rules()->AddRule("example.com", "127.0.0.1");
+
+  context_.reset(new TestURLRequestContext(true));
+  context_->set_host_resolver(&resolver_);
+  context_->Init();
+
+  test_server_.reset(new TestServer(TestServer::TYPE_HTTP,
+                                    TestServer::kLocalhost,
+                                    FilePath(kDocRoot)));
+  ASSERT_TRUE(test_server_->Start());
+
+  // test_server_->GetURL() returns a URL with 127.0.0.1 (kLocalhost), that is
+  // immediately resolved by the MockHostResolver. Use a hostname instead to
+  // trigger an async resolve.
+  test_url_ = GURL(
+      base::StringPrintf("http://example.com:%d/defaultresponse",
+      test_server_->host_port_pair().port()));
+  ASSERT_TRUE(test_url_.is_valid());
+}
+
+void URLFetcherMockDnsTest::CreateFetcher(const GURL& url) {
+  fetcher_ = new URLFetcherImpl(url, URLFetcher::GET, this);
+  fetcher_->SetRequestContext(new ThrottlingTestURLRequestContextGetter(
+      io_message_loop_proxy(), request_context()));
+}
+
+void URLFetcherMockDnsTest::OnURLFetchComplete(const URLFetcher* source) {
+  io_message_loop_proxy()->PostTask(FROM_HERE, MessageLoop::QuitClosure());
+  ASSERT_EQ(fetcher_, source);
+  EXPECT_EQ(test_url_, source->GetOriginalURL());
+  completed_fetcher_.reset(fetcher_);
+}
+
+namespace {
+
+// Version of URLFetcherTest that does a POST instead
+class URLFetcherPostTest : public URLFetcherTest {
+ public:
+  // URLFetcherTest:
+  virtual void CreateFetcher(const GURL& url) OVERRIDE;
+
+  // URLFetcherDelegate:
+  virtual void OnURLFetchComplete(const URLFetcher* source) OVERRIDE;
+};
+
+// Version of URLFetcherTest that does a POST instead with empty upload body
+class URLFetcherEmptyPostTest : public URLFetcherTest {
+ public:
+  // URLFetcherTest:
+  virtual void CreateFetcher(const GURL& url) OVERRIDE;
+
+  // URLFetcherDelegate:
+  virtual void OnURLFetchComplete(const URLFetcher* source) OVERRIDE;
+};
+
+// Version of URLFetcherTest that tests download progress reports.
+class URLFetcherDownloadProgressTest : public URLFetcherTest {
+ public:
+  URLFetcherDownloadProgressTest()
+      : previous_progress_(0),
+        expected_total_(0) {
+  }
+
+  // URLFetcherTest:
+  virtual void CreateFetcher(const GURL& url) OVERRIDE;
+
+  // URLFetcherDelegate:
+  virtual void OnURLFetchDownloadProgress(const URLFetcher* source,
+                                          int64 current,
+                                          int64 total) OVERRIDE;
+
+ protected:
+  // Download progress returned by the previous callback.
+  int64 previous_progress_;
+  // Size of the file being downloaded, known in advance (provided by each test
+  // case).
+  int64 expected_total_;
+};
+
+// Version of URLFetcherTest that tests progress reports at cancellation.
+class URLFetcherDownloadProgressCancelTest : public URLFetcherTest {
+ public:
+  // URLFetcherTest:
+  virtual void CreateFetcher(const GURL& url) OVERRIDE;
+
+  // URLFetcherDelegate:
+  virtual void OnURLFetchComplete(const URLFetcher* source) OVERRIDE;
+  virtual void OnURLFetchDownloadProgress(const URLFetcher* source,
+                                          int64 current,
+                                          int64 total) OVERRIDE;
+ protected:
+  bool cancelled_;
+};
+
+// Version of URLFetcherTest that tests upload progress reports.
+class URLFetcherUploadProgressTest : public URLFetcherTest {
+ public:
+  // URLFetcherTest:
+  virtual void CreateFetcher(const GURL& url) OVERRIDE;
+
+  // URLFetcherDelegate:
+  virtual void OnURLFetchUploadProgress(const URLFetcher* source,
+                                        int64 current,
+                                        int64 total) OVERRIDE;
+ protected:
+  int64 previous_progress_;
+  std::string chunk_;
+  int64 number_of_chunks_added_;
+};
+
+// Version of URLFetcherTest that tests headers.
+class URLFetcherHeadersTest : public URLFetcherTest {
+ public:
+  // URLFetcherDelegate:
+  virtual void OnURLFetchComplete(const URLFetcher* source) OVERRIDE;
+};
+
+// Version of URLFetcherTest that tests SocketAddress.
+class URLFetcherSocketAddressTest : public URLFetcherTest {
+ public:
+  // URLFetcherDelegate:
+  virtual void OnURLFetchComplete(const URLFetcher* source) OVERRIDE;
+ protected:
+  std::string expected_host_;
+  uint16 expected_port_;
+};
+
+// Version of URLFetcherTest that tests stopping on a redirect.
+class URLFetcherStopOnRedirectTest : public URLFetcherTest {
+ public:
+  URLFetcherStopOnRedirectTest();
+  virtual ~URLFetcherStopOnRedirectTest();
+
+  // URLFetcherTest:
+  virtual void CreateFetcher(const GURL& url) OVERRIDE;
+
+  // URLFetcherDelegate:
+  virtual void OnURLFetchComplete(const URLFetcher* source) OVERRIDE;
+
+ protected:
+  // The URL we should be redirected to.
+  static const char* kRedirectTarget;
+
+  bool callback_called_;  // Set to true in OnURLFetchComplete().
+};
+
+// Version of URLFetcherTest that tests overload protection.
+class URLFetcherProtectTest : public URLFetcherTest {
+ public:
+  // URLFetcherTest:
+  virtual void CreateFetcher(const GURL& url) OVERRIDE;
+
+  // URLFetcherDelegate:
+  virtual void OnURLFetchComplete(const URLFetcher* source) OVERRIDE;
+ private:
+  Time start_time_;
+};
+
+// Version of URLFetcherTest that tests overload protection, when responses
+// passed through.
+class URLFetcherProtectTestPassedThrough : public URLFetcherTest {
+ public:
+  // URLFetcherTest:
+  virtual void CreateFetcher(const GURL& url) OVERRIDE;
+
+  // URLFetcherDelegate:
+  virtual void OnURLFetchComplete(const URLFetcher* source) OVERRIDE;
+ private:
+  Time start_time_;
+};
+
+// Version of URLFetcherTest that tests bad HTTPS requests.
+class URLFetcherBadHTTPSTest : public URLFetcherTest {
+ public:
+  URLFetcherBadHTTPSTest();
+
+  // URLFetcherDelegate:
+  virtual void OnURLFetchComplete(const URLFetcher* source) OVERRIDE;
+
+ private:
+  FilePath cert_dir_;
+};
+
+// Version of URLFetcherTest that tests request cancellation on shutdown.
+class URLFetcherCancelTest : public URLFetcherTest {
+ public:
+  // URLFetcherTest:
+  virtual void CreateFetcher(const GURL& url) OVERRIDE;
+
+  // URLFetcherDelegate:
+  virtual void OnURLFetchComplete(const URLFetcher* source) OVERRIDE;
+
+  void CancelRequest();
+};
+
+// Version of ThrottlingTestURLRequestContext that posts a Quit task to the IO
+// thread once it is deleted.
+class CancelTestURLRequestContext : public ThrottlingTestURLRequestContext {
+ public:
+  explicit CancelTestURLRequestContext() {
+  }
+
+ private:
+  virtual ~CancelTestURLRequestContext() {
+    // The d'tor should execute in the IO thread. Post the quit task to the
+    // current thread.
+    MessageLoop::current()->PostTask(FROM_HERE, MessageLoop::QuitClosure());
+  }
+};
+
+class CancelTestURLRequestContextGetter
+    : public TestURLRequestContextGetter {
+ public:
+  CancelTestURLRequestContextGetter(
+      base::MessageLoopProxy* io_message_loop_proxy,
+      const GURL& throttle_for_url)
+      : TestURLRequestContextGetter(io_message_loop_proxy),
+        io_message_loop_proxy_(io_message_loop_proxy),
+        context_created_(false, false),
+        throttle_for_url_(throttle_for_url) {
+  }
+
+  // TestURLRequestContextGetter:
+  virtual TestURLRequestContext* GetURLRequestContext() OVERRIDE {
+    if (!context_.get()) {
+      context_.reset(new CancelTestURLRequestContext());
+      DCHECK(context_->throttler_manager());
+
+      // Registers an entry for test url. The backoff time is calculated by:
+      //     new_backoff = 2.0 * old_backoff + 0
+      // The initial backoff is 2 seconds and maximum backoff is 4 seconds.
+      // Maximum retries allowed is set to 2.
+      scoped_refptr<URLRequestThrottlerEntry> entry(
+          new URLRequestThrottlerEntry(
+              context_->throttler_manager(),
+              "", 200, 3, 2000, 2.0, 0.0, 4000));
+      context_->throttler_manager()->OverrideEntryForTests(
+          throttle_for_url_, entry);
+
+      context_created_.Signal();
+    }
+    return context_.get();
+  }
+
+  virtual scoped_refptr<base::MessageLoopProxy> GetIOMessageLoopProxy() const {
+    return io_message_loop_proxy_;
+  }
+
+  void WaitForContextCreation() {
+    context_created_.Wait();
+  }
+
+ protected:
+  virtual ~CancelTestURLRequestContextGetter() {}
+
+ private:
+  scoped_ptr<TestURLRequestContext> context_;
+  scoped_refptr<base::MessageLoopProxy> io_message_loop_proxy_;
+  base::WaitableEvent context_created_;
+  GURL throttle_for_url_;
+};
+
+// Version of URLFetcherTest that tests retrying the same request twice.
+class URLFetcherMultipleAttemptTest : public URLFetcherTest {
+ public:
+  // URLFetcherDelegate:
+  virtual void OnURLFetchComplete(const URLFetcher* source) OVERRIDE;
+ private:
+  std::string data_;
+};
+
+class URLFetcherFileTest : public URLFetcherTest {
+ public:
+  URLFetcherFileTest() : take_ownership_of_file_(false),
+                         expected_file_error_(base::PLATFORM_FILE_OK) {}
+
+  void CreateFetcherForFile(const GURL& url, const FilePath& file_path);
+  void CreateFetcherForTempFile(const GURL& url);
+
+  // URLFetcherDelegate:
+  virtual void OnURLFetchComplete(const URLFetcher* source) OVERRIDE;
+
+ protected:
+  FilePath expected_file_;
+  FilePath file_path_;
+
+  // Set by the test. Used in OnURLFetchComplete() to decide if
+  // the URLFetcher should own the temp file, so that we can test
+  // disowning prevents the file from being deleted.
+  bool take_ownership_of_file_;
+
+  // Expected file error code for the test.
+  // PLATFORM_FILE_OK when expecting success.
+  base::PlatformFileError expected_file_error_;
+};
+
+void URLFetcherPostTest::CreateFetcher(const GURL& url) {
+  fetcher_ = new URLFetcherImpl(url, URLFetcher::POST, this);
+  fetcher_->SetRequestContext(new ThrottlingTestURLRequestContextGetter(
+      io_message_loop_proxy(), request_context()));
+  fetcher_->SetUploadData("application/x-www-form-urlencoded",
+                          "bobsyeruncle");
+  fetcher_->Start();
+}
+
+void URLFetcherPostTest::OnURLFetchComplete(const URLFetcher* source) {
+  std::string data;
+  EXPECT_TRUE(source->GetResponseAsString(&data));
+  EXPECT_EQ(std::string("bobsyeruncle"), data);
+  URLFetcherTest::OnURLFetchComplete(source);
+}
+
+void URLFetcherEmptyPostTest::CreateFetcher(const GURL& url) {
+  fetcher_ = new URLFetcherImpl(url, URLFetcher::POST, this);
+  fetcher_->SetRequestContext(new TestURLRequestContextGetter(
+      io_message_loop_proxy()));
+  fetcher_->SetUploadData("text/plain", "");
+  fetcher_->Start();
+}
+
+void URLFetcherEmptyPostTest::OnURLFetchComplete(const URLFetcher* source) {
+  EXPECT_TRUE(source->GetStatus().is_success());
+  EXPECT_EQ(200, source->GetResponseCode());  // HTTP OK
+
+  std::string data;
+  EXPECT_TRUE(source->GetResponseAsString(&data));
+  EXPECT_TRUE(data.empty());
+
+  CleanupAfterFetchComplete();
+  // Do not call the super class method URLFetcherTest::OnURLFetchComplete,
+  // since it expects a non-empty response.
+}
+
+void URLFetcherDownloadProgressTest::CreateFetcher(const GURL& url) {
+  fetcher_ = new URLFetcherImpl(url, URLFetcher::GET, this);
+  fetcher_->SetRequestContext(new ThrottlingTestURLRequestContextGetter(
+      io_message_loop_proxy(), request_context()));
+  fetcher_->Start();
+}
+
+void URLFetcherDownloadProgressTest::OnURLFetchDownloadProgress(
+    const URLFetcher* source, int64 progress, int64 total) {
+  // Increasing between 0 and total.
+  EXPECT_LE(0, progress);
+  EXPECT_GE(total, progress);
+  EXPECT_LE(previous_progress_, progress);
+  EXPECT_EQ(expected_total_, total);
+  previous_progress_ = progress;
+}
+
+void URLFetcherDownloadProgressCancelTest::CreateFetcher(const GURL& url) {
+  fetcher_ = new URLFetcherImpl(url, URLFetcher::GET, this);
+  fetcher_->SetRequestContext(new ThrottlingTestURLRequestContextGetter(
+      io_message_loop_proxy(), request_context()));
+  cancelled_ = false;
+  fetcher_->Start();
+}
+
+void URLFetcherDownloadProgressCancelTest::OnURLFetchDownloadProgress(
+    const URLFetcher* source, int64 current, int64 total) {
+  EXPECT_FALSE(cancelled_);
+  if (!cancelled_) {
+    cancelled_ = true;
+    CleanupAfterFetchComplete();
+  }
+}
+
+void URLFetcherDownloadProgressCancelTest::OnURLFetchComplete(
+    const URLFetcher* source) {
+  // Should have been cancelled.
+  ADD_FAILURE();
+  CleanupAfterFetchComplete();
+}
+
+void URLFetcherUploadProgressTest::CreateFetcher(const GURL& url) {
+  fetcher_ = new URLFetcherImpl(url, URLFetcher::POST, this);
+  fetcher_->SetRequestContext(new ThrottlingTestURLRequestContextGetter(
+      io_message_loop_proxy(), request_context()));
+  previous_progress_ = 0;
+  // Large enough data to require more than one read from UploadDataStream.
+  chunk_.assign(1<<16, 'a');
+  // Use chunked upload to wait for a timer event of progress notification.
+  fetcher_->SetChunkedUpload("application/x-www-form-urlencoded");
+  fetcher_->Start();
+  number_of_chunks_added_ = 1;
+  fetcher_->AppendChunkToUpload(chunk_, false);
+}
+
+void URLFetcherUploadProgressTest::OnURLFetchUploadProgress(
+    const URLFetcher* source, int64 current, int64 total) {
+  // Increasing between 0 and total.
+  EXPECT_LE(0, current);
+  EXPECT_GE(static_cast<int64>(chunk_.size()) * number_of_chunks_added_,
+            current);
+  EXPECT_LE(previous_progress_, current);
+  previous_progress_ = current;
+  EXPECT_EQ(-1, total);
+
+  if (number_of_chunks_added_ < 2) {
+    number_of_chunks_added_ += 1;
+    fetcher_->AppendChunkToUpload(chunk_, true);
+  }
+}
+
+void URLFetcherHeadersTest::OnURLFetchComplete(
+    const URLFetcher* source) {
+  std::string header;
+  EXPECT_TRUE(source->GetResponseHeaders()->GetNormalizedHeader("cache-control",
+                                                                &header));
+  EXPECT_EQ("private", header);
+  URLFetcherTest::OnURLFetchComplete(source);
+}
+
+void URLFetcherSocketAddressTest::OnURLFetchComplete(
+    const URLFetcher* source) {
+  EXPECT_EQ("127.0.0.1", source->GetSocketAddress().host());
+  EXPECT_EQ(expected_port_, source->GetSocketAddress().port());
+  URLFetcherTest::OnURLFetchComplete(source);
+}
+
+// static
+const char* URLFetcherStopOnRedirectTest::kRedirectTarget =
+    "http://redirect.target.com";
+
+URLFetcherStopOnRedirectTest::URLFetcherStopOnRedirectTest()
+    : callback_called_(false) {
+}
+
+URLFetcherStopOnRedirectTest::~URLFetcherStopOnRedirectTest() {
+}
+
+void URLFetcherStopOnRedirectTest::CreateFetcher(const GURL& url) {
+  fetcher_ = new URLFetcherImpl(url, URLFetcher::GET, this);
+  fetcher_->SetRequestContext(new ThrottlingTestURLRequestContextGetter(
+      io_message_loop_proxy(), request_context()));
+  fetcher_->SetStopOnRedirect(true);
+  fetcher_->Start();
+}
+
+void URLFetcherStopOnRedirectTest::OnURLFetchComplete(
+    const URLFetcher* source) {
+  callback_called_ = true;
+  EXPECT_EQ(GURL(kRedirectTarget), source->GetURL());
+  EXPECT_EQ(URLRequestStatus::CANCELED, source->GetStatus().status());
+  EXPECT_EQ(ERR_ABORTED, source->GetStatus().error());
+  EXPECT_EQ(301, source->GetResponseCode());
+  CleanupAfterFetchComplete();
+}
+
+void URLFetcherProtectTest::CreateFetcher(const GURL& url) {
+  fetcher_ = new URLFetcherImpl(url, URLFetcher::GET, this);
+  fetcher_->SetRequestContext(new ThrottlingTestURLRequestContextGetter(
+      io_message_loop_proxy(), request_context()));
+  start_time_ = Time::Now();
+  fetcher_->SetMaxRetriesOn5xx(11);
+  fetcher_->Start();
+}
+
+void URLFetcherProtectTest::OnURLFetchComplete(const URLFetcher* source) {
+  const TimeDelta one_second = TimeDelta::FromMilliseconds(1000);
+  if (source->GetResponseCode() >= 500) {
+    // Now running ServerUnavailable test.
+    // It takes more than 1 second to finish all 11 requests.
+    EXPECT_TRUE(Time::Now() - start_time_ >= one_second);
+    EXPECT_TRUE(source->GetStatus().is_success());
+    std::string data;
+    EXPECT_TRUE(source->GetResponseAsString(&data));
+    EXPECT_FALSE(data.empty());
+    CleanupAfterFetchComplete();
+  } else {
+    // Now running Overload test.
+    static int count = 0;
+    count++;
+    if (count < 20) {
+      fetcher_->SetRequestContext(
+          new ThrottlingTestURLRequestContextGetter(
+              io_message_loop_proxy(), request_context()));
+      fetcher_->Start();
+    } else {
+      // We have already sent 20 requests continuously. And we expect that
+      // it takes more than 1 second due to the overload protection settings.
+      EXPECT_TRUE(Time::Now() - start_time_ >= one_second);
+      URLFetcherTest::OnURLFetchComplete(source);
+    }
+  }
+}
+
+void URLFetcherProtectTestPassedThrough::CreateFetcher(const GURL& url) {
+  fetcher_ = new URLFetcherImpl(url, URLFetcher::GET, this);
+  fetcher_->SetRequestContext(new ThrottlingTestURLRequestContextGetter(
+      io_message_loop_proxy(), request_context()));
+  fetcher_->SetAutomaticallyRetryOn5xx(false);
+  start_time_ = Time::Now();
+  fetcher_->SetMaxRetriesOn5xx(11);
+  fetcher_->Start();
+}
+
+void URLFetcherProtectTestPassedThrough::OnURLFetchComplete(
+    const URLFetcher* source) {
+  const TimeDelta one_minute = TimeDelta::FromMilliseconds(60000);
+  if (source->GetResponseCode() >= 500) {
+    // Now running ServerUnavailable test.
+    // It should get here on the first attempt, so almost immediately and
+    // *not* to attempt to execute all 11 requests (2.5 minutes).
+    EXPECT_TRUE(Time::Now() - start_time_ < one_minute);
+    EXPECT_TRUE(source->GetStatus().is_success());
+    // Check that suggested back off time is bigger than 0.
+    EXPECT_GT(fetcher_->GetBackoffDelay().InMicroseconds(), 0);
+    std::string data;
+    EXPECT_TRUE(source->GetResponseAsString(&data));
+    EXPECT_FALSE(data.empty());
+  } else {
+    // We should not get here!
+    ADD_FAILURE();
+  }
+
+  CleanupAfterFetchComplete();
+}
+
+
+URLFetcherBadHTTPSTest::URLFetcherBadHTTPSTest() {
+  PathService::Get(base::DIR_SOURCE_ROOT, &cert_dir_);
+  cert_dir_ = cert_dir_.AppendASCII("chrome");
+  cert_dir_ = cert_dir_.AppendASCII("test");
+  cert_dir_ = cert_dir_.AppendASCII("data");
+  cert_dir_ = cert_dir_.AppendASCII("ssl");
+  cert_dir_ = cert_dir_.AppendASCII("certificates");
+}
+
+// The "server certificate expired" error should result in automatic
+// cancellation of the request by
+// URLRequest::Delegate::OnSSLCertificateError.
+void URLFetcherBadHTTPSTest::OnURLFetchComplete(
+    const URLFetcher* source) {
+  // This part is different from URLFetcherTest::OnURLFetchComplete
+  // because this test expects the request to be cancelled.
+  EXPECT_EQ(URLRequestStatus::CANCELED, source->GetStatus().status());
+  EXPECT_EQ(ERR_ABORTED, source->GetStatus().error());
+  EXPECT_EQ(-1, source->GetResponseCode());
+  EXPECT_TRUE(source->GetCookies().empty());
+  std::string data;
+  EXPECT_TRUE(source->GetResponseAsString(&data));
+  EXPECT_TRUE(data.empty());
+  CleanupAfterFetchComplete();
+}
+
+void URLFetcherCancelTest::CreateFetcher(const GURL& url) {
+  fetcher_ = new URLFetcherImpl(url, URLFetcher::GET, this);
+  CancelTestURLRequestContextGetter* context_getter =
+      new CancelTestURLRequestContextGetter(io_message_loop_proxy(),
+                                            url);
+  fetcher_->SetRequestContext(context_getter);
+  fetcher_->SetMaxRetriesOn5xx(2);
+  fetcher_->Start();
+  // We need to wait for the creation of the URLRequestContext, since we
+  // rely on it being destroyed as a signal to end the test.
+  context_getter->WaitForContextCreation();
+  CancelRequest();
+}
+
+void URLFetcherCancelTest::OnURLFetchComplete(
+    const URLFetcher* source) {
+  // We should have cancelled the request before completion.
+  ADD_FAILURE();
+  CleanupAfterFetchComplete();
+}
+
+void URLFetcherCancelTest::CancelRequest() {
+  delete fetcher_;
+  // The URLFetcher's test context will post a Quit task once it is
+  // deleted. So if this test simply hangs, it means cancellation
+  // did not work.
+}
+
+void URLFetcherMultipleAttemptTest::OnURLFetchComplete(
+    const URLFetcher* source) {
+  EXPECT_TRUE(source->GetStatus().is_success());
+  EXPECT_EQ(200, source->GetResponseCode());  // HTTP OK
+  std::string data;
+  EXPECT_TRUE(source->GetResponseAsString(&data));
+  EXPECT_FALSE(data.empty());
+  if (!data.empty() && data_.empty()) {
+    data_ = data;
+    fetcher_->SetRequestContext(new ThrottlingTestURLRequestContextGetter(
+        io_message_loop_proxy(), request_context()));
+    fetcher_->Start();
+  } else {
+    EXPECT_EQ(data, data_);
+    CleanupAfterFetchComplete();
+  }
+}
+
+void URLFetcherFileTest::CreateFetcherForFile(const GURL& url,
+                                              const FilePath& file_path) {
+  fetcher_ = new URLFetcherImpl(url, URLFetcher::GET, this);
+  fetcher_->SetRequestContext(new ThrottlingTestURLRequestContextGetter(
+      io_message_loop_proxy(), request_context()));
+
+  // Use the IO message loop to do the file operations in this test.
+  fetcher_->SaveResponseToFileAtPath(file_path, io_message_loop_proxy());
+  fetcher_->Start();
+}
+
+void URLFetcherFileTest::CreateFetcherForTempFile(const GURL& url) {
+  fetcher_ = new URLFetcherImpl(url, URLFetcher::GET, this);
+  fetcher_->SetRequestContext(new ThrottlingTestURLRequestContextGetter(
+      io_message_loop_proxy(), request_context()));
+
+  // Use the IO message loop to do the file operations in this test.
+  fetcher_->SaveResponseToTemporaryFile(io_message_loop_proxy());
+  fetcher_->Start();
+}
+
+void URLFetcherFileTest::OnURLFetchComplete(const URLFetcher* source) {
+  if (expected_file_error_ == base::PLATFORM_FILE_OK) {
+    EXPECT_TRUE(source->GetStatus().is_success());
+    EXPECT_EQ(source->GetResponseCode(), 200);
+
+    base::PlatformFileError error_code = base::PLATFORM_FILE_OK;
+    EXPECT_FALSE(fetcher_->FileErrorOccurred(&error_code));
+
+    EXPECT_TRUE(source->GetResponseAsFilePath(
+        take_ownership_of_file_, &file_path_));
+
+    EXPECT_TRUE(file_util::ContentsEqual(expected_file_, file_path_));
+  } else {
+    base::PlatformFileError error_code = base::PLATFORM_FILE_OK;
+    EXPECT_TRUE(fetcher_->FileErrorOccurred(&error_code));
+    EXPECT_EQ(expected_file_error_, error_code);
+  }
+  CleanupAfterFetchComplete();
+}
+
+TEST_F(URLFetcherTest, SameThreadsTest) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  // Create the fetcher on the main thread.  Since IO will happen on the main
+  // thread, this will test URLFetcher's ability to do everything on one
+  // thread.
+  CreateFetcher(test_server.GetURL("defaultresponse"));
+
+  MessageLoop::current()->Run();
+}
+
+#if defined(OS_MACOSX)
+// SIGSEGV on Mac: http://crbug.com/60426
+TEST_F(URLFetcherTest, DISABLED_DifferentThreadsTest) {
+#else
+TEST_F(URLFetcherTest, DifferentThreadsTest) {
+#endif
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  // Create a separate thread that will create the URLFetcher.  The current
+  // (main) thread will do the IO, and when the fetch is complete it will
+  // terminate the main thread's message loop; then the other thread's
+  // message loop will be shut down automatically as the thread goes out of
+  // scope.
+  base::Thread t("URLFetcher test thread");
+  ASSERT_TRUE(t.Start());
+  t.message_loop()->PostTask(
+      FROM_HERE,
+      base::Bind(&URLFetcherTest::CreateFetcher,
+                 base::Unretained(this),
+                 test_server.GetURL("defaultresponse")));
+
+  MessageLoop::current()->Run();
+}
+
+void CancelAllOnIO() {
+  EXPECT_EQ(1, URLFetcherTest::GetNumFetcherCores());
+  URLFetcherImpl::CancelAll();
+  EXPECT_EQ(0, URLFetcherTest::GetNumFetcherCores());
+}
+
+// Tests to make sure CancelAll() will successfully cancel existing URLFetchers.
+TEST_F(URLFetcherTest, CancelAll) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+  EXPECT_EQ(0, GetNumFetcherCores());
+
+  CreateFetcher(test_server.GetURL("defaultresponse"));
+  io_message_loop_proxy()->PostTaskAndReply(
+      FROM_HERE,
+      base::Bind(&CancelAllOnIO),
+      MessageLoop::QuitClosure());
+  MessageLoop::current()->Run();
+  EXPECT_EQ(0, GetNumFetcherCores());
+  delete fetcher_;
+}
+
+TEST_F(URLFetcherMockDnsTest, DontRetryOnNetworkChangedByDefault) {
+  EXPECT_EQ(0, GetNumFetcherCores());
+  EXPECT_FALSE(resolver_.has_pending_requests());
+
+  // This posts a task to start the fetcher.
+  CreateFetcher(test_url_);
+  fetcher_->Start();
+  EXPECT_EQ(0, GetNumFetcherCores());
+  MessageLoop::current()->RunUntilIdle();
+
+  // The fetcher is now running, but is pending the host resolve.
+  EXPECT_EQ(1, GetNumFetcherCores());
+  EXPECT_TRUE(resolver_.has_pending_requests());
+  ASSERT_FALSE(completed_fetcher_);
+
+  // A network change notification aborts the connect job.
+  NetworkChangeNotifier::NotifyObserversOfIPAddressChangeForTests();
+  MessageLoop::current()->RunUntilIdle();
+  EXPECT_EQ(0, GetNumFetcherCores());
+  EXPECT_FALSE(resolver_.has_pending_requests());
+  ASSERT_TRUE(completed_fetcher_);
+
+  // And the owner of the fetcher gets the ERR_NETWORK_CHANGED error.
+  EXPECT_EQ(ERR_NETWORK_CHANGED, completed_fetcher_->GetStatus().error());
+}
+
+TEST_F(URLFetcherMockDnsTest, RetryOnNetworkChangedAndFail) {
+  EXPECT_EQ(0, GetNumFetcherCores());
+  EXPECT_FALSE(resolver_.has_pending_requests());
+
+  // This posts a task to start the fetcher.
+  CreateFetcher(test_url_);
+  fetcher_->SetAutomaticallyRetryOnNetworkChanges(3);
+  fetcher_->Start();
+  EXPECT_EQ(0, GetNumFetcherCores());
+  MessageLoop::current()->RunUntilIdle();
+
+  // The fetcher is now running, but is pending the host resolve.
+  EXPECT_EQ(1, GetNumFetcherCores());
+  EXPECT_TRUE(resolver_.has_pending_requests());
+  ASSERT_FALSE(completed_fetcher_);
+
+  // Make it fail 3 times.
+  for (int i = 0; i < 3; ++i) {
+    // A network change notification aborts the connect job.
+    NetworkChangeNotifier::NotifyObserversOfIPAddressChangeForTests();
+    MessageLoop::current()->RunUntilIdle();
+
+    // But the fetcher retries automatically.
+    EXPECT_EQ(1, GetNumFetcherCores());
+    EXPECT_TRUE(resolver_.has_pending_requests());
+    ASSERT_FALSE(completed_fetcher_);
+  }
+
+  // A 4th failure doesn't trigger another retry, and propagates the error
+  // to the owner of the fetcher.
+  NetworkChangeNotifier::NotifyObserversOfIPAddressChangeForTests();
+  MessageLoop::current()->RunUntilIdle();
+  EXPECT_EQ(0, GetNumFetcherCores());
+  EXPECT_FALSE(resolver_.has_pending_requests());
+  ASSERT_TRUE(completed_fetcher_);
+
+  // And the owner of the fetcher gets the ERR_NETWORK_CHANGED error.
+  EXPECT_EQ(ERR_NETWORK_CHANGED, completed_fetcher_->GetStatus().error());
+}
+
+TEST_F(URLFetcherMockDnsTest, RetryOnNetworkChangedAndSucceed) {
+  EXPECT_EQ(0, GetNumFetcherCores());
+  EXPECT_FALSE(resolver_.has_pending_requests());
+
+  // This posts a task to start the fetcher.
+  CreateFetcher(test_url_);
+  fetcher_->SetAutomaticallyRetryOnNetworkChanges(3);
+  fetcher_->Start();
+  EXPECT_EQ(0, GetNumFetcherCores());
+  MessageLoop::current()->RunUntilIdle();
+
+  // The fetcher is now running, but is pending the host resolve.
+  EXPECT_EQ(1, GetNumFetcherCores());
+  EXPECT_TRUE(resolver_.has_pending_requests());
+  ASSERT_FALSE(completed_fetcher_);
+
+  // Make it fail 3 times.
+  for (int i = 0; i < 3; ++i) {
+    // A network change notification aborts the connect job.
+    NetworkChangeNotifier::NotifyObserversOfIPAddressChangeForTests();
+    MessageLoop::current()->RunUntilIdle();
+
+    // But the fetcher retries automatically.
+    EXPECT_EQ(1, GetNumFetcherCores());
+    EXPECT_TRUE(resolver_.has_pending_requests());
+    ASSERT_FALSE(completed_fetcher_);
+  }
+
+  // Now let it succeed by resolving the pending request.
+  resolver_.ResolveAllPending();
+  MessageLoop::current()->Run();
+
+  // URLFetcherMockDnsTest::OnURLFetchComplete() will quit the loop.
+  EXPECT_EQ(0, GetNumFetcherCores());
+  EXPECT_FALSE(resolver_.has_pending_requests());
+  ASSERT_TRUE(completed_fetcher_);
+
+  // This time the request succeeded.
+  EXPECT_EQ(OK, completed_fetcher_->GetStatus().error());
+  EXPECT_EQ(200, completed_fetcher_->GetResponseCode());
+}
+
+#if defined(OS_MACOSX)
+// SIGSEGV on Mac: http://crbug.com/60426
+TEST_F(URLFetcherPostTest, DISABLED_Basic) {
+#else
+TEST_F(URLFetcherPostTest, Basic) {
+#endif
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  CreateFetcher(test_server.GetURL("echo"));
+  MessageLoop::current()->Run();
+}
+
+TEST_F(URLFetcherEmptyPostTest, Basic) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  CreateFetcher(test_server.GetURL("echo"));
+  MessageLoop::current()->Run();
+}
+
+#if defined(OS_MACOSX)
+// SIGSEGV on Mac: http://crbug.com/60426
+TEST_F(URLFetcherUploadProgressTest, DISABLED_Basic) {
+#else
+TEST_F(URLFetcherUploadProgressTest, Basic) {
+#endif
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  CreateFetcher(test_server.GetURL("echo"));
+  MessageLoop::current()->Run();
+}
+
+TEST_F(URLFetcherDownloadProgressTest, Basic) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  // Get a file large enough to require more than one read into
+  // URLFetcher::Core's IOBuffer.
+  static const char kFileToFetch[] = "animate1.gif";
+  // Hardcoded file size - it cannot be easily fetched when a remote http server
+  // is used for testing.
+  static const int64 kFileSize = 19021;
+
+  expected_total_ = kFileSize;
+
+  CreateFetcher(test_server.GetURL(
+      std::string(kTestServerFilePrefix) + kFileToFetch));
+
+  MessageLoop::current()->Run();
+}
+
+TEST_F(URLFetcherDownloadProgressCancelTest, CancelWhileProgressReport) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  // Get a file large enough to require more than one read into
+  // URLFetcher::Core's IOBuffer.
+  static const char kFileToFetch[] = "animate1.gif";
+  CreateFetcher(test_server.GetURL(
+      std::string(kTestServerFilePrefix) + kFileToFetch));
+
+  MessageLoop::current()->Run();
+}
+
+TEST_F(URLFetcherHeadersTest, Headers) {
+  TestServer test_server(
+      TestServer::TYPE_HTTP,
+      TestServer::kLocalhost,
+      FilePath(FILE_PATH_LITERAL("net/data/url_request_unittest")));
+  ASSERT_TRUE(test_server.Start());
+
+  CreateFetcher(test_server.GetURL("files/with-headers.html"));
+  MessageLoop::current()->Run();
+  // The actual tests are in the URLFetcherHeadersTest fixture.
+}
+
+TEST_F(URLFetcherSocketAddressTest, SocketAddress) {
+  TestServer test_server(
+      TestServer::TYPE_HTTP,
+      TestServer::kLocalhost,
+      FilePath(FILE_PATH_LITERAL("net/data/url_request_unittest")));
+  ASSERT_TRUE(test_server.Start());
+  expected_port_ = test_server.host_port_pair().port();
+
+  // Reusing "with-headers.html" but doesn't really matter.
+  CreateFetcher(test_server.GetURL("files/with-headers.html"));
+  MessageLoop::current()->Run();
+  // The actual tests are in the URLFetcherSocketAddressTest fixture.
+}
+
+TEST_F(URLFetcherStopOnRedirectTest, StopOnRedirect) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  CreateFetcher(
+      test_server.GetURL(std::string("server-redirect?") + kRedirectTarget));
+  MessageLoop::current()->Run();
+  EXPECT_TRUE(callback_called_);
+}
+
+TEST_F(URLFetcherProtectTest, Overload) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  GURL url(test_server.GetURL("defaultresponse"));
+
+  // Registers an entry for test url. It only allows 3 requests to be sent
+  // in 200 milliseconds.
+  scoped_refptr<URLRequestThrottlerEntry> entry(
+      new URLRequestThrottlerEntry(
+          request_context()->throttler_manager(),
+          "", 200, 3, 1, 2.0, 0.0, 256));
+  request_context()->throttler_manager()->OverrideEntryForTests(url, entry);
+
+  CreateFetcher(url);
+
+  MessageLoop::current()->Run();
+}
+
+TEST_F(URLFetcherProtectTest, ServerUnavailable) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  GURL url(test_server.GetURL("files/server-unavailable.html"));
+
+  // Registers an entry for test url. The backoff time is calculated by:
+  //     new_backoff = 2.0 * old_backoff + 0
+  // and maximum backoff time is 256 milliseconds.
+  // Maximum retries allowed is set to 11.
+  scoped_refptr<URLRequestThrottlerEntry> entry(
+      new URLRequestThrottlerEntry(
+          request_context()->throttler_manager(),
+          "", 200, 3, 1, 2.0, 0.0, 256));
+  request_context()->throttler_manager()->OverrideEntryForTests(url, entry);
+
+  CreateFetcher(url);
+
+  MessageLoop::current()->Run();
+}
+
+TEST_F(URLFetcherProtectTestPassedThrough, ServerUnavailablePropagateResponse) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  GURL url(test_server.GetURL("files/server-unavailable.html"));
+
+  // Registers an entry for test url. The backoff time is calculated by:
+  //     new_backoff = 2.0 * old_backoff + 0
+  // and maximum backoff time is 150000 milliseconds.
+  // Maximum retries allowed is set to 11.
+  scoped_refptr<URLRequestThrottlerEntry> entry(
+      new URLRequestThrottlerEntry(
+          request_context()->throttler_manager(),
+          "", 200, 3, 100, 2.0, 0.0, 150000));
+  // Total time if *not* for not doing automatic backoff would be 150s.
+  // In reality it should be "as soon as server responds".
+  request_context()->throttler_manager()->OverrideEntryForTests(url, entry);
+
+  CreateFetcher(url);
+
+  MessageLoop::current()->Run();
+}
+
+#if defined(OS_MACOSX)
+// SIGSEGV on Mac: http://crbug.com/60426
+TEST_F(URLFetcherBadHTTPSTest, DISABLED_BadHTTPSTest) {
+#else
+TEST_F(URLFetcherBadHTTPSTest, BadHTTPSTest) {
+#endif
+  TestServer::SSLOptions ssl_options(
+      TestServer::SSLOptions::CERT_EXPIRED);
+  TestServer test_server(TestServer::TYPE_HTTPS,
+                         ssl_options,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  CreateFetcher(test_server.GetURL("defaultresponse"));
+  MessageLoop::current()->Run();
+}
+
+#if defined(OS_MACOSX)
+// SIGSEGV on Mac: http://crbug.com/60426
+TEST_F(URLFetcherCancelTest, DISABLED_ReleasesContext) {
+#else
+TEST_F(URLFetcherCancelTest, ReleasesContext) {
+#endif
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  GURL url(test_server.GetURL("files/server-unavailable.html"));
+
+  // Create a separate thread that will create the URLFetcher.  The current
+  // (main) thread will do the IO, and when the fetch is complete it will
+  // terminate the main thread's message loop; then the other thread's
+  // message loop will be shut down automatically as the thread goes out of
+  // scope.
+  base::Thread t("URLFetcher test thread");
+  ASSERT_TRUE(t.Start());
+  t.message_loop()->PostTask(
+      FROM_HERE,
+      base::Bind(&URLFetcherCancelTest::CreateFetcher,
+                 base::Unretained(this), url));
+
+  MessageLoop::current()->Run();
+}
+
+TEST_F(URLFetcherCancelTest, CancelWhileDelayedStartTaskPending) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  GURL url(test_server.GetURL("files/server-unavailable.html"));
+
+  // Register an entry for test url.
+  // Using a sliding window of 4 seconds, and max of 1 request, under a fast
+  // run we expect to have a 4 second delay when posting the Start task.
+  scoped_refptr<URLRequestThrottlerEntry> entry(
+      new URLRequestThrottlerEntry(
+          request_context()->throttler_manager(),
+          "", 4000, 1, 2000, 2.0, 0.0, 4000));
+  request_context()->throttler_manager()->OverrideEntryForTests(url, entry);
+  // Fake that a request has just started.
+  entry->ReserveSendingTimeForNextRequest(base::TimeTicks());
+
+  // The next request we try to send will be delayed by ~4 seconds.
+  // The slower the test runs, the less the delay will be (since it takes the
+  // time difference from now).
+
+  base::Thread t("URLFetcher test thread");
+  ASSERT_TRUE(t.Start());
+  t.message_loop()->PostTask(
+      FROM_HERE,
+      base::Bind(&URLFetcherTest::CreateFetcher, base::Unretained(this), url));
+
+  MessageLoop::current()->Run();
+}
+
+TEST_F(URLFetcherMultipleAttemptTest, SameData) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  // Create the fetcher on the main thread.  Since IO will happen on the main
+  // thread, this will test URLFetcher's ability to do everything on one
+  // thread.
+  CreateFetcher(test_server.GetURL("defaultresponse"));
+
+  MessageLoop::current()->Run();
+}
+
+TEST_F(URLFetcherFileTest, SmallGet) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  // Get a small file.
+  static const char kFileToFetch[] = "simple.html";
+  expected_file_ = test_server.GetDocumentRoot().AppendASCII(kFileToFetch);
+  CreateFetcherForFile(
+      test_server.GetURL(std::string(kTestServerFilePrefix) + kFileToFetch),
+      temp_dir.path().AppendASCII(kFileToFetch));
+
+  MessageLoop::current()->Run();  // OnURLFetchComplete() will Quit().
+
+  ASSERT_FALSE(file_util::PathExists(file_path_))
+      << file_path_.value() << " not removed.";
+}
+
+TEST_F(URLFetcherFileTest, LargeGet) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  // Get a file large enough to require more than one read into
+  // URLFetcher::Core's IOBuffer.
+  static const char kFileToFetch[] = "animate1.gif";
+  expected_file_ = test_server.GetDocumentRoot().AppendASCII(kFileToFetch);
+  CreateFetcherForFile(
+      test_server.GetURL(std::string(kTestServerFilePrefix) + kFileToFetch),
+      temp_dir.path().AppendASCII(kFileToFetch));
+
+  MessageLoop::current()->Run();  // OnURLFetchComplete() will Quit().
+}
+
+TEST_F(URLFetcherFileTest, CanTakeOwnershipOfFile) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  // Get a small file.
+  static const char kFileToFetch[] = "simple.html";
+  expected_file_ = test_server.GetDocumentRoot().AppendASCII(kFileToFetch);
+  CreateFetcherForFile(
+      test_server.GetURL(std::string(kTestServerFilePrefix) + kFileToFetch),
+      temp_dir.path().AppendASCII(kFileToFetch));
+
+  MessageLoop::current()->Run();  // OnURLFetchComplete() will Quit().
+
+  MessageLoop::current()->RunUntilIdle();
+  ASSERT_FALSE(file_util::PathExists(file_path_))
+      << file_path_.value() << " not removed.";
+}
+
+
+TEST_F(URLFetcherFileTest, OverwriteExistingFile) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  // Create a file before trying to fetch.
+  static const char kFileToFetch[] = "simple.html";
+  static const char kData[] = "abcdefghijklmnopqrstuvwxyz";
+  file_path_ = temp_dir.path().AppendASCII(kFileToFetch);
+  const int data_size = arraysize(kData);
+  ASSERT_EQ(file_util::WriteFile(file_path_, kData, data_size), data_size);
+  ASSERT_TRUE(file_util::PathExists(file_path_));
+  expected_file_ = test_server.GetDocumentRoot().AppendASCII(kFileToFetch);
+  ASSERT_FALSE(file_util::ContentsEqual(file_path_, expected_file_));
+
+  // Get a small file.
+  CreateFetcherForFile(
+      test_server.GetURL(std::string(kTestServerFilePrefix) + kFileToFetch),
+      file_path_);
+
+  MessageLoop::current()->Run();  // OnURLFetchComplete() will Quit().
+}
+
+TEST_F(URLFetcherFileTest, TryToOverwriteDirectory) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  base::ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+  // Create a directory before trying to fetch.
+  static const char kFileToFetch[] = "simple.html";
+  file_path_ = temp_dir.path().AppendASCII(kFileToFetch);
+  ASSERT_TRUE(file_util::CreateDirectory(file_path_));
+  ASSERT_TRUE(file_util::PathExists(file_path_));
+
+  // Get a small file.
+  expected_file_error_ = base::PLATFORM_FILE_ERROR_ACCESS_DENIED;
+  expected_file_ = test_server.GetDocumentRoot().AppendASCII(kFileToFetch);
+  CreateFetcherForFile(
+      test_server.GetURL(std::string(kTestServerFilePrefix) + kFileToFetch),
+      file_path_);
+
+  MessageLoop::current()->Run();  // OnURLFetchComplete() will Quit().
+
+  MessageLoop::current()->RunUntilIdle();
+}
+
+TEST_F(URLFetcherFileTest, SmallGetToTempFile) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  // Get a small file.
+  static const char kFileToFetch[] = "simple.html";
+  expected_file_ = test_server.GetDocumentRoot().AppendASCII(kFileToFetch);
+  CreateFetcherForTempFile(
+      test_server.GetURL(std::string(kTestServerFilePrefix) + kFileToFetch));
+
+  MessageLoop::current()->Run();  // OnURLFetchComplete() will Quit().
+
+  ASSERT_FALSE(file_util::PathExists(file_path_))
+      << file_path_.value() << " not removed.";
+}
+
+TEST_F(URLFetcherFileTest, LargeGetToTempFile) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  // Get a file large enough to require more than one read into
+  // URLFetcher::Core's IOBuffer.
+  static const char kFileToFetch[] = "animate1.gif";
+  expected_file_ = test_server.GetDocumentRoot().AppendASCII(kFileToFetch);
+  CreateFetcherForTempFile(test_server.GetURL(
+      std::string(kTestServerFilePrefix) + kFileToFetch));
+
+  MessageLoop::current()->Run();  // OnURLFetchComplete() will Quit().
+}
+
+TEST_F(URLFetcherFileTest, CanTakeOwnershipOfTempFile) {
+  TestServer test_server(TestServer::TYPE_HTTP,
+                         TestServer::kLocalhost,
+                         FilePath(kDocRoot));
+  ASSERT_TRUE(test_server.Start());
+
+  // Get a small file.
+  static const char kFileToFetch[] = "simple.html";
+  expected_file_ = test_server.GetDocumentRoot().AppendASCII(kFileToFetch);
+  CreateFetcherForTempFile(test_server.GetURL(
+      std::string(kTestServerFilePrefix) + kFileToFetch));
+
+  MessageLoop::current()->Run();  // OnURLFetchComplete() will Quit().
+
+  MessageLoop::current()->RunUntilIdle();
+  ASSERT_FALSE(file_util::PathExists(file_path_))
+      << file_path_.value() << " not removed.";
+}
+
+}  // namespace
+
+}  // namespace net
diff --git a/src/net/url_request/url_request.cc b/src/net/url_request/url_request.cc
new file mode 100644
index 0000000..735a34d
--- /dev/null
+++ b/src/net/url_request/url_request.cc
@@ -0,0 +1,984 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/compiler_specific.h"
+#include "base/debug/stack_trace.h"
+#include "base/lazy_instance.h"
+#include "base/memory/singleton.h"
+#include "base/message_loop.h"
+#include "base/metrics/stats_counters.h"
+#include "base/stl_util.h"
+#include "base/synchronization/lock.h"
+#include "net/base/auth.h"
+#include "net/base/host_port_pair.h"
+#include "net/base/load_flags.h"
+#include "net/base/net_errors.h"
+#include "net/base/net_log.h"
+#include "net/base/network_change_notifier.h"
+#include "net/base/network_delegate.h"
+#include "net/base/ssl_cert_request_info.h"
+#include "net/base/upload_data_stream.h"
+#include "net/http/http_response_headers.h"
+#include "net/http/http_util.h"
+#include "net/url_request/url_request_context.h"
+#include "net/url_request/url_request_error_job.h"
+#include "net/url_request/url_request_job.h"
+#include "net/url_request/url_request_job_manager.h"
+#include "net/url_request/url_request_netlog_params.h"
+#include "net/url_request/url_request_redirect_job.h"
+
+using base::Time;
+using std::string;
+
+namespace net {
+
+namespace {
+
+// Max number of http redirects to follow.  Same number as gecko.
+const int kMaxRedirects = 20;
+
+// Discard headers which have meaning in POST (Content-Length, Content-Type,
+// Origin).
+void StripPostSpecificHeaders(HttpRequestHeaders* headers) {
+  // These are headers that may be attached to a POST.
+  headers->RemoveHeader(HttpRequestHeaders::kContentLength);
+  headers->RemoveHeader(HttpRequestHeaders::kContentType);
+  headers->RemoveHeader(HttpRequestHeaders::kOrigin);
+}
+
+// TODO(battre): Delete this, see http://crbug.com/89321:
+// This counter keeps track of the identifiers used for URL requests so far.
+// 0 is reserved to represent an invalid ID.
+uint64 g_next_url_request_identifier = 1;
+
+// This lock protects g_next_url_request_identifier.
+base::LazyInstance<base::Lock>::Leaky
+    g_next_url_request_identifier_lock = LAZY_INSTANCE_INITIALIZER;
+
+// Returns an prior unused identifier for URL requests.
+uint64 GenerateURLRequestIdentifier() {
+  base::AutoLock lock(g_next_url_request_identifier_lock.Get());
+  return g_next_url_request_identifier++;
+}
+
+// True once the first URLRequest was started.
+bool g_url_requests_started = false;
+
+// True if cookies are accepted by default.
+bool g_default_can_use_cookies = true;
+
+}  // namespace
+
+URLRequest::ProtocolFactory*
+URLRequest::Deprecated::RegisterProtocolFactory(const std::string& scheme,
+                                                ProtocolFactory* factory) {
+  return URLRequest::RegisterProtocolFactory(scheme, factory);
+}
+
+void URLRequest::Deprecated::RegisterRequestInterceptor(
+    Interceptor* interceptor) {
+  URLRequest::RegisterRequestInterceptor(interceptor);
+}
+
+void URLRequest::Deprecated::UnregisterRequestInterceptor(
+    Interceptor* interceptor) {
+  URLRequest::UnregisterRequestInterceptor(interceptor);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// URLRequest::Interceptor
+
+URLRequestJob* URLRequest::Interceptor::MaybeInterceptRedirect(
+    URLRequest* request,
+    NetworkDelegate* network_delegate,
+    const GURL& location) {
+  return NULL;
+}
+
+URLRequestJob* URLRequest::Interceptor::MaybeInterceptResponse(
+    URLRequest* request, NetworkDelegate* network_delegate) {
+  return NULL;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// URLRequest::Delegate
+
+void URLRequest::Delegate::OnReceivedRedirect(URLRequest* request,
+                                              const GURL& new_url,
+                                              bool* defer_redirect) {
+}
+
+void URLRequest::Delegate::OnAuthRequired(URLRequest* request,
+                                          AuthChallengeInfo* auth_info) {
+  request->CancelAuth();
+}
+
+void URLRequest::Delegate::OnCertificateRequested(
+    URLRequest* request,
+    SSLCertRequestInfo* cert_request_info) {
+  request->Cancel();
+}
+
+void URLRequest::Delegate::OnSSLCertificateError(URLRequest* request,
+                                                 const SSLInfo& ssl_info,
+                                                 bool is_hsts_ok) {
+  request->Cancel();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// URLRequest
+
+// TODO(shalev): Get rid of this constructor in favour of the one below it.
+URLRequest::URLRequest(const GURL& url,
+                       Delegate* delegate,
+                       const URLRequestContext* context)
+    : context_(context),
+      network_delegate_(context->network_delegate()),
+      net_log_(BoundNetLog::Make(context->net_log(),
+                                 NetLog::SOURCE_URL_REQUEST)),
+      url_chain_(1, url),
+      method_("GET"),
+      referrer_policy_(CLEAR_REFERRER_ON_TRANSITION_FROM_SECURE_TO_INSECURE),
+      load_flags_(LOAD_NORMAL),
+      delegate_(delegate),
+      is_pending_(false),
+      is_redirecting_(false),
+      redirect_limit_(kMaxRedirects),
+      priority_(LOWEST),
+      identifier_(GenerateURLRequestIdentifier()),
+      blocked_on_delegate_(false),
+      ALLOW_THIS_IN_INITIALIZER_LIST(before_request_callback_(
+          base::Bind(&URLRequest::BeforeRequestComplete,
+                     base::Unretained(this)))),
+      has_notified_completion_(false),
+      received_response_content_length_(0),
+      creation_time_(base::TimeTicks::Now()) {
+  SIMPLE_STATS_COUNTER("URLRequestCount");
+
+  // Sanity check out environment.
+  DCHECK(MessageLoop::current()) << "The current MessageLoop must exist";
+
+  DCHECK(MessageLoop::current()->IsType(MessageLoop::TYPE_IO)) << ""
+      "The current MessageLoop must be TYPE_IO";
+
+  CHECK(context);
+  context->url_requests()->insert(this);
+
+  net_log_.BeginEvent(NetLog::TYPE_REQUEST_ALIVE);
+}
+
+// Constructs a request for |url| on behalf of |delegate|.  The request
+// registers itself with |context| (and unregisters in the destructor), so
+// |context| must outlive it.  Nothing is loaded until Start() is called.
+URLRequest::URLRequest(const GURL& url,
+                       Delegate* delegate,
+                       const URLRequestContext* context,
+                       NetworkDelegate* network_delegate)
+    : context_(context),
+      network_delegate_(network_delegate),
+      net_log_(BoundNetLog::Make(context->net_log(),
+                                 NetLog::SOURCE_URL_REQUEST)),
+      url_chain_(1, url),
+      method_("GET"),
+      referrer_policy_(CLEAR_REFERRER_ON_TRANSITION_FROM_SECURE_TO_INSECURE),
+      load_flags_(LOAD_NORMAL),
+      delegate_(delegate),
+      is_pending_(false),
+      is_redirecting_(false),
+      redirect_limit_(kMaxRedirects),
+      priority_(LOWEST),
+      identifier_(GenerateURLRequestIdentifier()),
+      blocked_on_delegate_(false),
+      ALLOW_THIS_IN_INITIALIZER_LIST(before_request_callback_(
+          base::Bind(&URLRequest::BeforeRequestComplete,
+                     base::Unretained(this)))),
+      has_notified_completion_(false),
+      received_response_content_length_(0),
+      creation_time_(base::TimeTicks::Now()) {
+  SIMPLE_STATS_COUNTER("URLRequestCount");
+
+  // Sanity check our environment.
+  DCHECK(MessageLoop::current()) << "The current MessageLoop must exist";
+
+  DCHECK(MessageLoop::current()->IsType(MessageLoop::TYPE_IO)) << ""
+      "The current MessageLoop must be TYPE_IO";
+
+  CHECK(context);
+  context->url_requests()->insert(this);
+
+  net_log_.BeginEvent(NetLog::TYPE_REQUEST_ALIVE);
+}
+
+URLRequest::~URLRequest() {
+  // Abort any in-flight work first so the job stops touching |this|.
+  Cancel();
+
+  if (network_delegate_) {
+    network_delegate_->NotifyURLRequestDestroyed(this);
+    if (job_)
+      job_->NotifyURLRequestDestroyed();
+  }
+
+  // Detach and kill the job so it cannot call back into a dead request.
+  if (job_)
+    OrphanJob();
+
+  // Unregister from the context's live-request set (inserted in the ctor).
+  int deleted = context_->url_requests()->erase(this);
+  CHECK_EQ(1, deleted);
+
+  int net_error = OK;
+  // Log error only on failure, not cancellation, as even successful requests
+  // are "cancelled" on destruction.
+  if (status_.status() == URLRequestStatus::FAILED)
+    net_error = status_.error();
+  net_log_.EndEventWithNetErrorCode(NetLog::TYPE_REQUEST_ALIVE, net_error);
+}
+
+// static
+// Registers |factory| for |scheme|; delegated to the process-wide job
+// manager singleton.  Returns the previously registered factory, if any.
+URLRequest::ProtocolFactory* URLRequest::RegisterProtocolFactory(
+    const string& scheme, ProtocolFactory* factory) {
+  URLRequestJobManager* manager = URLRequestJobManager::GetInstance();
+  return manager->RegisterProtocolFactory(scheme, factory);
+}
+
+// static
+// Adds |interceptor| to the process-wide job manager's interceptor list.
+void URLRequest::RegisterRequestInterceptor(Interceptor* interceptor) {
+  URLRequestJobManager* manager = URLRequestJobManager::GetInstance();
+  manager->RegisterRequestInterceptor(interceptor);
+}
+
+// static
+// Removes a previously registered |interceptor| from the job manager.
+void URLRequest::UnregisterRequestInterceptor(Interceptor* interceptor) {
+  URLRequestJobManager* manager = URLRequestJobManager::GetInstance();
+  manager->UnregisterRequestInterceptor(interceptor);
+}
+
+// Switches the request to a chunked upload body.  Safe to call repeatedly;
+// it must not be combined with a previously set non-chunked upload.
+void URLRequest::EnableChunkedUpload() {
+  DCHECK(!upload_data_stream_ || upload_data_stream_->is_chunked());
+  if (upload_data_stream_)
+    return;  // Already configured as chunked; nothing to do.
+  upload_data_stream_.reset(
+      new UploadDataStream(UploadDataStream::CHUNKED, 0));
+}
+
+// Appends |bytes_len| (> 0) bytes to the chunked upload body.
+// EnableChunkedUpload() must have been called first; |is_last_chunk| marks
+// the final chunk of the body.
+void URLRequest::AppendChunkToUpload(const char* bytes,
+                                     int bytes_len,
+                                     bool is_last_chunk) {
+  DCHECK(upload_data_stream_);
+  DCHECK(upload_data_stream_->is_chunked());
+  DCHECK_GT(bytes_len, 0);
+  upload_data_stream_->AppendChunk(bytes, bytes_len, is_last_chunk);
+}
+
+// Takes ownership of a non-chunked upload body.  Chunked uploads must use
+// EnableChunkedUpload()/AppendChunkToUpload() instead.
+void URLRequest::set_upload(scoped_ptr<UploadDataStream> upload) {
+  DCHECK(!upload->is_chunked());
+  upload_data_stream_ = upload.Pass();
+}
+
+// Returns the upload body, or NULL if none was set.  Ownership is retained.
+const UploadDataStream* URLRequest::get_upload() const {
+  return upload_data_stream_.get();
+}
+
+// True if an upload body (chunked or not) has been configured.
+bool URLRequest::has_upload() const {
+  return upload_data_stream_.get() != NULL;
+}
+
+// Unimplemented stub for the id-based header API; always NOTREACHED.
+void URLRequest::SetExtraRequestHeaderById(int id, const string& value,
+                                           bool overwrite) {
+  DCHECK(!is_pending_ || is_redirecting_);
+  NOTREACHED() << "implement me!";
+}
+
+// Sets the request header |name| to |value|.  When |overwrite| is false an
+// existing header with the same name is left untouched.  Headers may only
+// change before the request starts or while a redirect is being processed.
+void URLRequest::SetExtraRequestHeaderByName(const string& name,
+                                             const string& value,
+                                             bool overwrite) {
+  DCHECK(!is_pending_ || is_redirecting_);
+  if (!overwrite) {
+    extra_request_headers_.SetHeaderIfMissing(name, value);
+  } else {
+    extra_request_headers_.SetHeader(name, value);
+  }
+}
+
+// Removes the request header |name|, if present.  Only legal before the
+// request starts or during redirect processing.
+void URLRequest::RemoveRequestHeaderByName(const string& name) {
+  DCHECK(!is_pending_ || is_redirecting_);
+  extra_request_headers_.RemoveHeader(name);
+}
+
+// Replaces all extra request headers wholesale.  Only legal before Start().
+void URLRequest::SetExtraRequestHeaders(
+    const HttpRequestHeaders& headers) {
+  DCHECK(!is_pending_);
+  extra_request_headers_ = headers;
+
+  // NOTE: This method will likely become non-trivial once the other setters
+  // for request headers are implemented.
+}
+
+// Reports the current load state.  The delegate-blocked state is surfaced
+// only when the delegate supplied a load state param; otherwise we report
+// whatever the job says (or IDLE when no job exists).
+LoadStateWithParam URLRequest::GetLoadState() const {
+  bool waiting_on_delegate = blocked_on_delegate_ && !load_state_param_.empty();
+  if (waiting_on_delegate) {
+    return LoadStateWithParam(LOAD_STATE_WAITING_FOR_DELEGATE,
+                              load_state_param_);
+  }
+  LoadState state = job_ ? job_->GetLoadState() : LOAD_STATE_IDLE;
+  return LoadStateWithParam(state, string16());
+}
+
+// Returns how much of the upload body has been sent so far.
+UploadProgress URLRequest::GetUploadProgress() const {
+  if (job_) {
+    if (final_upload_progress_.position()) {
+      // The initial job (e.g. the POST) finished uploading; the redirect
+      // follow-up GETs upload nothing, so report the cached final value.
+      return final_upload_progress_;
+    }
+    return job_->GetUploadProgress();
+  }
+  // Not started yet, or the request was cancelled.
+  return UploadProgress();
+}
+
+// Unimplemented stub for the id-based header API; always NOTREACHED.
+void URLRequest::GetResponseHeaderById(int id, string* value) {
+  DCHECK(job_);
+  NOTREACHED() << "implement me!";
+}
+
+// Copies the normalized value of response header |name| into |*value|, or
+// clears |*value| when no response headers are available yet.
+void URLRequest::GetResponseHeaderByName(const string& name, string* value) {
+  DCHECK(value);
+  if (!response_info_.headers) {
+    value->clear();
+    return;
+  }
+  response_info_.headers->GetNormalizedHeader(name, value);
+}
+
+// Copies all normalized response headers into |*headers|, or clears it when
+// no response headers are available yet.
+void URLRequest::GetAllResponseHeaders(string* headers) {
+  DCHECK(headers);
+  if (!response_info_.headers) {
+    headers->clear();
+    return;
+  }
+  response_info_.headers->GetNormalizedHeaders(headers);
+}
+
+// Returns the socket address reported by the job.  Only valid once the
+// request has started (a job must exist).
+HostPortPair URLRequest::GetSocketAddress() const {
+  DCHECK(job_);
+  return job_->GetSocketAddress();
+}
+
+// Raw response headers, or NULL before a response arrives.  Not owned by
+// the caller.
+HttpResponseHeaders* URLRequest::response_headers() const {
+  return response_info_.headers.get();
+}
+
+// Fills |cookies| with the response's Set-Cookie values via the job.
+// Requires a started request.
+bool URLRequest::GetResponseCookies(ResponseCookies* cookies) {
+  DCHECK(job_);
+  return job_->GetResponseCookies(cookies);
+}
+
+// Fetches the response MIME type from the job.  Requires a started request.
+void URLRequest::GetMimeType(string* mime_type) {
+  DCHECK(job_);
+  job_->GetMimeType(mime_type);
+}
+
+// Fetches the response charset from the job.  Requires a started request.
+void URLRequest::GetCharset(string* charset) {
+  DCHECK(job_);
+  job_->GetCharset(charset);
+}
+
+// Returns the HTTP response code reported by the job.  Requires a started
+// request.
+int URLRequest::GetResponseCode() {
+  DCHECK(job_);
+  return job_->GetResponseCode();
+}
+
+// static
+// Flips the process-wide default cookie policy to "block".  Must be called
+// before any URLRequest has been started (enforced by the CHECK).
+void URLRequest::SetDefaultCookiePolicyToBlock() {
+  CHECK(!g_url_requests_started);
+  g_default_can_use_cookies = false;
+}
+
+// static
+// A scheme is "handled" iff the job manager can build a job for it.
+bool URLRequest::IsHandledProtocol(const std::string& scheme) {
+  URLRequestJobManager* manager = URLRequestJobManager::GetInstance();
+  return manager->SupportsScheme(scheme);
+}
+
+// static
+// Returns whether |url| can be serviced by this request machinery.
+bool URLRequest::IsHandledURL(const GURL& url) {
+  if (url.is_valid())
+    return IsHandledProtocol(url.scheme());
+  // Invalid URLs are "handled" too: they resolve to error cases.
+  return true;
+}
+
+// Sets the first-party URL used for cookie decisions on this request.
+void URLRequest::set_first_party_for_cookies(
+    const GURL& first_party_for_cookies) {
+  first_party_for_cookies_ = first_party_for_cookies;
+}
+
+// Sets the HTTP method (default is "GET").  Only legal before Start().
+void URLRequest::set_method(const std::string& method) {
+  DCHECK(!is_pending_);
+  method_ = method;
+}
+
+// Sets the referrer to send with the request.  Only legal before Start().
+void URLRequest::set_referrer(const std::string& referrer) {
+  DCHECK(!is_pending_);
+  referrer_ = referrer;
+}
+
+// Returns the referrer with any username/password components stripped, so
+// credentials embedded in the referring URL are never sent over the wire.
+GURL URLRequest::GetSanitizedReferrer() const {
+  GURL sanitized(referrer());
+  if (!sanitized.has_username() && !sanitized.has_password())
+    return sanitized;
+
+  GURL::Replacements strip_identity;
+  strip_identity.ClearUsername();
+  strip_identity.ClearPassword();
+  return sanitized.ReplaceComponents(strip_identity);
+}
+
+// Chooses how the referrer is treated across redirects.  Only legal before
+// Start().
+void URLRequest::set_referrer_policy(ReferrerPolicy referrer_policy) {
+  DCHECK(!is_pending_);
+  referrer_policy_ = referrer_policy;
+}
+
+// Replaces the delegate receiving this request's callbacks.  Not owned.
+void URLRequest::set_delegate(Delegate* delegate) {
+  delegate_ = delegate;
+}
+
+// Begins the load.  If a network delegate is present it gets the first say
+// (and may pause the request); otherwise a job is created and started
+// immediately.
+void URLRequest::Start() {
+  // The network delegate must not have changed since construction.
+  DCHECK_EQ(network_delegate_, context_->network_delegate());
+
+  g_url_requests_started = true;
+  response_info_.request_time = Time::Now();
+
+  // Only notify the delegate for the initial request.
+  if (network_delegate_) {
+    int error = network_delegate_->NotifyBeforeURLRequest(
+        this, before_request_callback_, &delegate_redirect_url_);
+    if (error == net::ERR_IO_PENDING) {
+      // Paused on the delegate, will invoke |before_request_callback_| later.
+      SetBlockedOnDelegate();
+    } else {
+      BeforeRequestComplete(error);
+    }
+    return;
+  }
+
+  // No delegate to consult: create and start the job right away.
+  StartJob(URLRequestJobManager::GetInstance()->CreateJob(
+      this, network_delegate_));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// Completion handler for NetworkDelegate::NotifyBeforeURLRequest.  |error|
+// is OK to proceed or a net error to fail the request; the delegate may also
+// have written a redirect target into |delegate_redirect_url_|.
+void URLRequest::BeforeRequestComplete(int error) {
+  DCHECK(!job_);
+  DCHECK_NE(ERR_IO_PENDING, error);
+  DCHECK_EQ(network_delegate_, context_->network_delegate());
+
+  // Check that there are no callbacks to already canceled requests.
+  DCHECK_NE(URLRequestStatus::CANCELED, status_.status());
+
+  if (blocked_on_delegate_)
+    SetUnblockedOnDelegate();
+
+  if (error != OK) {
+    // The delegate rejected the request: surface the error through a job.
+    std::string source("delegate");
+    net_log_.AddEvent(NetLog::TYPE_CANCELLED,
+                      NetLog::StringCallback("source", &source));
+    StartJob(new URLRequestErrorJob(this, network_delegate_, error));
+  } else if (!delegate_redirect_url_.is_empty()) {
+    // The delegate asked us to go somewhere else instead.
+    GURL new_url;
+    new_url.Swap(&delegate_redirect_url_);
+
+    URLRequestRedirectJob* job = new URLRequestRedirectJob(
+        this, network_delegate_, new_url,
+        // Use status code 307 to preserve the method, so POST requests work.
+        URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT);
+    StartJob(job);
+  } else {
+    StartJob(URLRequestJobManager::GetInstance()->CreateJob(
+        this, network_delegate_));
+  }
+}
+
+// Adopts |job| as the active job, hands it the request headers and upload
+// body, marks the request pending, and starts it.
+void URLRequest::StartJob(URLRequestJob* job) {
+  DCHECK(!is_pending_);
+  DCHECK(!job_);
+
+  net_log_.BeginEvent(
+      NetLog::TYPE_URL_REQUEST_START_JOB,
+      base::Bind(&NetLogURLRequestStartCallback,
+                 &url(), &method_, load_flags_, priority_,
+                 upload_data_stream_ ? upload_data_stream_->identifier() : -1));
+
+  job_ = job;
+  job_->SetExtraRequestHeaders(extra_request_headers_);
+
+  if (upload_data_stream_.get())
+    job_->SetUpload(upload_data_stream_.get());
+
+  is_pending_ = true;
+  is_redirecting_ = false;
+
+  response_info_.was_cached = false;
+
+  // Don't allow errors to be sent from within Start().
+  // TODO(brettw) this may cause NotifyDone to be sent synchronously,
+  // we probably don't want this: they should be sent asynchronously so
+  // the caller does not get reentered.
+  job_->Start();
+}
+
+// Restarts the request with a freshly created job.  Should only be called if
+// the original job didn't make any progress.
+void URLRequest::Restart() {
+  DCHECK(job_ && !job_->has_response_started());
+  RestartWithJob(URLRequestJobManager::GetInstance()->CreateJob(
+      this, network_delegate_));
+}
+
+// Drops the current job and starts over with |job|, which must already
+// reference |this|.
+void URLRequest::RestartWithJob(URLRequestJob *job) {
+  DCHECK(job->request() == this);
+  PrepareToRestart();
+  StartJob(job);
+}
+
+// Cancels the request with the generic ERR_ABORTED code and no SSL info.
+void URLRequest::Cancel() {
+  DoCancel(ERR_ABORTED, SSLInfo());
+}
+
+// Cancels the request with a caller-supplied net error and no SSL info.
+void URLRequest::CancelWithError(int error) {
+  DoCancel(error, SSLInfo());
+}
+
+void URLRequest::CancelWithSSLError(int error, const SSLInfo& ssl_info) {
+  // This should only be called on a started request.
+  if (!is_pending_ || !job_ || job_->has_response_started()) {
+    NOTREACHED();
+    return;
+  }
+  DoCancel(error, ssl_info);
+}
+
+void URLRequest::DoCancel(int error, const SSLInfo& ssl_info) {
+  DCHECK(error < 0);
+
+  // If the URL request already has an error status, then canceling is a no-op.
+  // Plus, we don't want to change the error status once it has been set.
+  if (status_.is_success()) {
+    status_.set_status(URLRequestStatus::CANCELED);
+    status_.set_error(error);
+    response_info_.ssl_info = ssl_info;
+
+    // If the request hasn't already been completed, log a cancellation event.
+    if (!has_notified_completion_) {
+      // Don't log an error code on ERR_ABORTED, since that's redundant.
+      net_log_.AddEventWithNetErrorCode(NetLog::TYPE_CANCELLED,
+                                        error == ERR_ABORTED ? OK : error);
+    }
+  }
+
+  if (is_pending_ && job_)
+    job_->Kill();
+
+  // We need to notify about the end of this job here synchronously. The
+  // Job sends an asynchronous notification but by the time this is processed,
+  // our |context_| is NULL.
+  NotifyRequestCompleted();
+
+  // The Job will call our NotifyDone method asynchronously.  This is done so
+  // that the Delegate implementation can call Cancel without having to worry
+  // about being called recursively.
+}
+
+// Reads up to |dest_size| bytes into |dest|.  Returns true with
+// *bytes_read > 0 when data was read synchronously, true with
+// *bytes_read == 0 at end of stream (or after failure/cancellation), and
+// false when no data is immediately available — consult status() to
+// distinguish a pending read from an error.
+bool URLRequest::Read(IOBuffer* dest, int dest_size, int* bytes_read) {
+  DCHECK(job_);
+  DCHECK(bytes_read);
+  *bytes_read = 0;
+
+  // This handles a cancel that happens while paused.
+  // TODO(ahendrickson): DCHECK() that it is not done after
+  // http://crbug.com/115705 is fixed.
+  if (job_->is_done())
+    return false;
+
+  if (dest_size == 0) {
+    // Caller is not too bright.  I guess we've done what they asked.
+    return true;
+  }
+
+  // Once the request fails or is cancelled, read will just return 0 bytes
+  // to indicate end of stream.
+  if (!status_.is_success()) {
+    return true;
+  }
+
+  bool rv = job_->Read(dest, dest_size, bytes_read);
+  // If rv is false, the status cannot be success.
+  DCHECK(rv || status_.status() != URLRequestStatus::SUCCESS);
+  if (rv && *bytes_read <= 0 && status_.is_success())
+    NotifyRequestCompleted();
+  return rv;
+}
+
+// Tells the job to stop caching the response.  Requires a started request.
+void URLRequest::StopCaching() {
+  DCHECK(job_);
+  job_->StopCaching();
+}
+
+// Called by the job when the server redirects to |location|.  Interceptors
+// get a chance to replace the job; otherwise the delegate is told and may
+// set |*defer_redirect| to postpone following it.
+void URLRequest::NotifyReceivedRedirect(const GURL& location,
+                                        bool* defer_redirect) {
+  is_redirecting_ = true;
+
+  URLRequestJob* job =
+      URLRequestJobManager::GetInstance()->MaybeInterceptRedirect(
+          this, network_delegate_, location);
+  if (job) {
+    RestartWithJob(job);
+  } else if (delegate_) {
+    delegate_->OnReceivedRedirect(this, location, defer_redirect);
+    // |this| may have been destroyed here.
+  }
+}
+
+// Called by the job when a final response (or terminal error) is available.
+// Interceptors get a chance to replace the job; otherwise the network
+// delegate and then the request delegate are informed.
+void URLRequest::NotifyResponseStarted() {
+  int net_error = OK;
+  if (!status_.is_success())
+    net_error = status_.error();
+  net_log_.EndEventWithNetErrorCode(NetLog::TYPE_URL_REQUEST_START_JOB,
+                                    net_error);
+
+  URLRequestJob* job =
+      URLRequestJobManager::GetInstance()->MaybeInterceptResponse(
+          this, network_delegate_);
+  if (job) {
+    RestartWithJob(job);
+  } else {
+    if (delegate_) {
+      // In some cases (e.g. an event was canceled), we might have sent the
+      // completion event and receive a NotifyResponseStarted() later.
+      if (!has_notified_completion_ && status_.is_success()) {
+        if (network_delegate_)
+          network_delegate_->NotifyResponseStarted(this);
+      }
+
+      // Notify in case the entire URL Request has been finished.
+      if (!has_notified_completion_ && !status_.is_success())
+        NotifyRequestCompleted();
+
+      delegate_->OnResponseStarted(this);
+      // Nothing may appear below this line as OnResponseStarted may delete
+      // |this|.
+    }
+  }
+}
+
+// Resumes a redirect that the delegate deferred in OnReceivedRedirect.
+// Only valid while a job exists and the request is still successful.
+void URLRequest::FollowDeferredRedirect() {
+  CHECK(job_);
+  CHECK(status_.is_success());
+
+  job_->FollowDeferredRedirect();
+}
+
+// Supplies credentials in response to an auth challenge.  The job must be
+// waiting for auth.
+void URLRequest::SetAuth(const AuthCredentials& credentials) {
+  DCHECK(job_);
+  DCHECK(job_->NeedsAuth());
+
+  job_->SetAuth(credentials);
+}
+
+// Declines an auth challenge.  The job must be waiting for auth.
+void URLRequest::CancelAuth() {
+  DCHECK(job_);
+  DCHECK(job_->NeedsAuth());
+
+  job_->CancelAuth();
+}
+
+// Answers a client-certificate request; |client_cert| may be NULL to
+// continue without one.  Requires a started request.
+void URLRequest::ContinueWithCertificate(X509Certificate* client_cert) {
+  DCHECK(job_);
+
+  job_->ContinueWithCertificate(client_cert);
+}
+
+// Proceeds past the last reported (e.g. SSL) error.  Requires a started
+// request.
+void URLRequest::ContinueDespiteLastError() {
+  DCHECK(job_);
+
+  job_->ContinueDespiteLastError();
+}
+
+// Resets per-attempt state (job, response info, status, pending flag) so a
+// new job can be started, e.g. when following a redirect.
+void URLRequest::PrepareToRestart() {
+  DCHECK(job_);
+
+  // Close the current URL_REQUEST_START_JOB, since we will be starting a new
+  // one.
+  net_log_.EndEvent(NetLog::TYPE_URL_REQUEST_START_JOB);
+
+  OrphanJob();
+
+  response_info_ = HttpResponseInfo();
+  response_info_.request_time = Time::Now();
+  status_ = URLRequestStatus();
+  is_pending_ = false;
+}
+
+// Severs the link to the current job: kills it and detaches it so it can
+// never call back into this request.
+void URLRequest::OrphanJob() {
+  // When calling this function, please check that URLRequestHttpJob is
+  // not in between calling NetworkDelegate::NotifyHeadersReceived and
+  // receiving the callback. This is currently guaranteed by the following
+  // strategies:
+  // - OrphanJob is called on JobRestart, in this case the URLRequestJob cannot
+  //   be receiving any headers at that time.
+  // - OrphanJob is called in ~URLRequest, in this case
+  //   NetworkDelegate::NotifyURLRequestDestroyed notifies the NetworkDelegate
+  //   that the callback becomes invalid.
+  job_->Kill();
+  job_->DetachRequest();  // ensures that the job will not call us again
+  job_ = NULL;
+}
+
+// Follows a server redirect to |location| carrying |http_status_code|.
+// Returns OK after restarting the request at the new URL, or a net error if
+// the redirect is disallowed (hop limit exceeded, invalid or unsafe target).
+int URLRequest::Redirect(const GURL& location, int http_status_code) {
+  if (net_log_.IsLoggingAllEvents()) {
+    net_log_.AddEvent(
+        NetLog::TYPE_URL_REQUEST_REDIRECTED,
+        NetLog::StringCallback("location", &location.possibly_invalid_spec()));
+  }
+
+  if (network_delegate_)
+    network_delegate_->NotifyBeforeRedirect(this, location);
+
+  if (redirect_limit_ <= 0) {
+    DVLOG(1) << "disallowing redirect: exceeds limit";
+    return ERR_TOO_MANY_REDIRECTS;
+  }
+
+  if (!location.is_valid())
+    return ERR_INVALID_URL;
+
+  if (!job_->IsSafeRedirect(location)) {
+    DVLOG(1) << "disallowing redirect: unsafe protocol";
+    return ERR_UNSAFE_REDIRECT;
+  }
+
+  // Cache the upload progress of the original job before discarding it; see
+  // GetUploadProgress().
+  if (!final_upload_progress_.position())
+    final_upload_progress_ = job_->GetUploadProgress();
+  PrepareToRestart();
+
+  // For 303 redirects, all request methods except HEAD are converted to GET,
+  // as per the latest httpbis draft.  The draft also allows POST requests to
+  // be converted to GETs when following 301/302 redirects, for historical
+  // reasons. Most major browsers do this and so shall we.  Both RFC 2616 and
+  // the httpbis draft say to prompt the user to confirm the generation of new
+  // requests, other than GET and HEAD requests, but IE omits these prompts and
+  // so shall we.
+  // See:  https://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-17#section-7.3
+  bool was_post = method_ == "POST";
+  if ((http_status_code == 303 && method_ != "HEAD") ||
+      ((http_status_code == 301 || http_status_code == 302) && was_post)) {
+    method_ = "GET";
+    upload_data_stream_.reset();
+    if (was_post) {
+      // If being switched from POST to GET, must remove headers that were
+      // specific to the POST and don't have meaning in GET. For example
+      // the inclusion of a multipart Content-Type header in GET can cause
+      // problems with some servers:
+      // http://code.google.com/p/chromium/issues/detail?id=843
+      StripPostSpecificHeaders(&extra_request_headers_);
+    }
+  }
+
+  // Suppress the referrer if we're redirecting out of https.
+  if (referrer_policy_ ==
+          CLEAR_REFERRER_ON_TRANSITION_FROM_SECURE_TO_INSECURE &&
+      GURL(referrer_).SchemeIsSecure() && !location.SchemeIsSecure()) {
+    referrer_.clear();
+  }
+
+  url_chain_.push_back(location);
+  --redirect_limit_;
+
+  Start();
+  return OK;
+}
+
+// The context this request was created with.  Never NULL (CHECKed in ctor).
+const URLRequestContext* URLRequest::context() const {
+  return context_;
+}
+
+// Expected response body size in bytes, or -1 when unknown (including when
+// no job exists yet).
+int64 URLRequest::GetExpectedContentSize() const {
+  if (!job_)
+    return -1;
+  return job_->expected_content_size();
+}
+
+// If url() is an http:// URL whose host has opted into HTTPS enforcement
+// (per the context's TransportSecurityState), writes the equivalent
+// https:// URL into |*redirect_url| and returns true; otherwise false.
+bool URLRequest::GetHSTSRedirect(GURL* redirect_url) const {
+  const GURL& url = this->url();
+  if (!url.SchemeIs("http"))
+    return false;
+  TransportSecurityState::DomainState domain_state;
+  if (context()->transport_security_state() &&
+      context()->transport_security_state()->GetDomainState(
+          url.host(),
+          SSLConfigService::IsSNIAvailable(context()->ssl_config_service()),
+          &domain_state) &&
+      domain_state.ShouldRedirectHTTPToHTTPS()) {
+    // Rewrite only the scheme; all other URL components are preserved.
+    url_canon::Replacements<char> replacements;
+    const char kNewScheme[] = "https";
+    replacements.SetScheme(kNewScheme,
+                           url_parse::Component(0, strlen(kNewScheme)));
+    *redirect_url = url.ReplaceComponents(replacements);
+    return true;
+  }
+  return false;
+}
+
+// Called by the job when the server or proxy demands credentials.  The
+// network delegate gets first chance to respond; it may answer
+// synchronously, asynchronously (IO_PENDING), or defer to the request's
+// Delegate via NotifyAuthRequiredComplete.
+void URLRequest::NotifyAuthRequired(AuthChallengeInfo* auth_info) {
+  NetworkDelegate::AuthRequiredResponse rv =
+      NetworkDelegate::AUTH_REQUIRED_RESPONSE_NO_ACTION;
+  auth_info_ = auth_info;
+  if (network_delegate_) {
+    rv = network_delegate_->NotifyAuthRequired(
+        this,
+        *auth_info,
+        base::Bind(&URLRequest::NotifyAuthRequiredComplete,
+                   base::Unretained(this)),
+        &auth_credentials_);
+  }
+
+  if (rv == NetworkDelegate::AUTH_REQUIRED_RESPONSE_IO_PENDING) {
+    SetBlockedOnDelegate();
+  } else {
+    NotifyAuthRequiredComplete(rv);
+  }
+}
+
+// Acts on the network delegate's answer to an auth challenge: forward to
+// the Delegate, supply the stashed credentials, or cancel the auth attempt.
+void URLRequest::NotifyAuthRequiredComplete(
+    NetworkDelegate::AuthRequiredResponse result) {
+  SetUnblockedOnDelegate();
+
+  // Check that there are no callbacks to already canceled requests.
+  DCHECK_NE(URLRequestStatus::CANCELED, status_.status());
+
+  // NotifyAuthRequired may be called multiple times, such as
+  // when an authentication attempt fails. Clear out the data
+  // so it can be reset on another round.
+  AuthCredentials credentials = auth_credentials_;
+  auth_credentials_ = AuthCredentials();
+  scoped_refptr<AuthChallengeInfo> auth_info;
+  auth_info.swap(auth_info_);
+
+  switch (result) {
+    case NetworkDelegate::AUTH_REQUIRED_RESPONSE_NO_ACTION:
+      // Defer to the URLRequest::Delegate, since the NetworkDelegate
+      // didn't take an action.
+      if (delegate_)
+        delegate_->OnAuthRequired(this, auth_info.get());
+      break;
+
+    case NetworkDelegate::AUTH_REQUIRED_RESPONSE_SET_AUTH:
+      SetAuth(credentials);
+      break;
+
+    case NetworkDelegate::AUTH_REQUIRED_RESPONSE_CANCEL_AUTH:
+      CancelAuth();
+      break;
+
+    case NetworkDelegate::AUTH_REQUIRED_RESPONSE_IO_PENDING:
+      // The asynchronous case is resolved before this method is invoked.
+      NOTREACHED();
+      break;
+  }
+}
+
+// Forwards a client-certificate request from the job to the delegate.
+void URLRequest::NotifyCertificateRequested(
+    SSLCertRequestInfo* cert_request_info) {
+  if (delegate_)
+    delegate_->OnCertificateRequested(this, cert_request_info);
+}
+
+// Forwards an SSL certificate error from the job to the delegate.  |fatal|
+// is passed through unchanged.
+void URLRequest::NotifySSLCertificateError(const SSLInfo& ssl_info,
+                                           bool fatal) {
+  if (delegate_)
+    delegate_->OnSSLCertificateError(this, ssl_info, fatal);
+}
+
+// Asks the network delegate whether |cookie_list| may be sent with this
+// request; without a delegate the process-wide default applies.  Must not be
+// called when cookie sending is disabled by load flags.
+bool URLRequest::CanGetCookies(const CookieList& cookie_list) const {
+  DCHECK(!(load_flags_ & LOAD_DO_NOT_SEND_COOKIES));
+  if (!network_delegate_)
+    return g_default_can_use_cookies;
+  return network_delegate_->CanGetCookies(*this, cookie_list);
+}
+
+// Asks the network delegate whether |cookie_line| may be stored; without a
+// delegate the process-wide default applies.  Must not be called when cookie
+// saving is disabled by load flags.
+bool URLRequest::CanSetCookie(const std::string& cookie_line,
+                              CookieOptions* options) const {
+  DCHECK(!(load_flags_ & LOAD_DO_NOT_SAVE_COOKIES));
+  if (!network_delegate_)
+    return g_default_can_use_cookies;
+  return network_delegate_->CanSetCookie(*this, cookie_line, options);
+}
+
+
+// Called by the job when an asynchronous read finishes; |bytes_read| <= 0
+// means end of stream or error.
+void URLRequest::NotifyReadCompleted(int bytes_read) {
+  // Notify in case the entire URL Request has been finished.
+  if (bytes_read <= 0)
+    NotifyRequestCompleted();
+
+  // Notify NetworkChangeNotifier that we just received network data.
+  // This is to identify cases where the NetworkChangeNotifier thinks we
+  // are off-line but we are still receiving network data (crbug.com/124069).
+  if (bytes_read > 0 && !was_cached())
+    NetworkChangeNotifier::NotifyDataReceived(url());
+
+  if (delegate_)
+    delegate_->OnReadCompleted(this, bytes_read);
+
+  // Nothing below this line as OnReadCompleted may delete |this|.
+}
+
+// Marks the request finished and tells the network delegate, exactly once.
+void URLRequest::NotifyRequestCompleted() {
+  // TODO(battre): Get rid of this check, according to willchan it should
+  // not be needed.
+  if (has_notified_completion_)
+    return;
+
+  is_pending_ = false;
+  is_redirecting_ = false;
+  has_notified_completion_ = true;
+  if (network_delegate_)
+    network_delegate_->NotifyCompleted(this, job_ != NULL);
+}
+
+// Records that a delegate has paused the request and opens the matching
+// NetLog event, tagging it with the delegate's load state param if set.
+void URLRequest::SetBlockedOnDelegate() {
+  blocked_on_delegate_ = true;
+  if (load_state_param_.empty()) {
+    net_log_.BeginEvent(NetLog::TYPE_URL_REQUEST_BLOCKED_ON_DELEGATE);
+  } else {
+    net_log_.BeginEvent(NetLog::TYPE_URL_REQUEST_BLOCKED_ON_DELEGATE,
+                        NetLog::StringCallback("delegate", &load_state_param_));
+  }
+}
+
+// Clears the delegate-blocked state and closes the NetLog event opened by
+// SetBlockedOnDelegate().  No-op when the request was not blocked.
+void URLRequest::SetUnblockedOnDelegate() {
+  if (blocked_on_delegate_) {
+    blocked_on_delegate_ = false;
+    load_state_param_.clear();
+    net_log_.EndEvent(NetLog::TYPE_URL_REQUEST_BLOCKED_ON_DELEGATE);
+  }
+}
+
+// Stores a copy of |stack_trace| for later debugging via stack_trace().
+// NOTE(review): the copy target is built with StackTrace(NULL, 0) and then
+// assigned — presumably to avoid the default constructor capturing a fresh
+// stack here; confirm against base::debug::StackTrace.
+void URLRequest::set_stack_trace(const base::debug::StackTrace& stack_trace) {
+  base::debug::StackTrace* stack_trace_copy =
+      new base::debug::StackTrace(NULL, 0);
+  *stack_trace_copy = stack_trace;
+  stack_trace_.reset(stack_trace_copy);
+}
+
+// The trace stored by set_stack_trace(), or NULL if none.  Not owned by the
+// caller.
+const base::debug::StackTrace* URLRequest::stack_trace() const {
+  return stack_trace_.get();
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request.h b/src/net/url_request/url_request.h
new file mode 100644
index 0000000..4081e75
--- /dev/null
+++ b/src/net/url_request/url_request.h
@@ -0,0 +1,849 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_H_
+#define NET_URL_REQUEST_URL_REQUEST_H_
+
+#include <string>
+#include <vector>
+
+#include "base/debug/leak_tracker.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "base/string16.h"
+#include "base/supports_user_data.h"
+#include "base/time.h"
+#include "base/threading/non_thread_safe.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/auth.h"
+#include "net/base/completion_callback.h"
+#include "net/base/load_states.h"
+#include "net/base/net_export.h"
+#include "net/base/net_log.h"
+#include "net/base/network_delegate.h"
+#include "net/base/request_priority.h"
+#include "net/base/upload_progress.h"
+#include "net/cookies/canonical_cookie.h"
+#include "net/http/http_request_headers.h"
+#include "net/http/http_response_info.h"
+#include "net/url_request/url_request_status.h"
+
+class FilePath;
+// Temporary layering violation to allow existing users of a deprecated
+// interface.
+class ChildProcessSecurityPolicyTest;
+class ComponentUpdateInterceptor;
+class TestAutomationProvider;
+class URLRequestAutomationJob;
+
+namespace base {
+namespace debug {
+class StackTrace;
+}
+}
+
+// Temporary layering violation to allow existing users of a deprecated
+// interface.
+namespace appcache {
+class AppCacheInterceptor;
+class AppCacheRequestHandlerTest;
+class AppCacheURLRequestJobTest;
+}
+
+// Temporary layering violation to allow existing users of a deprecated
+// interface.
+namespace content {
+class ResourceDispatcherHostTest;
+}
+
+// Temporary layering violation to allow existing users of a deprecated
+// interface.
+namespace extensions {
+class AutoUpdateInterceptor;
+class UserScriptListenerTest;
+}
+
+// Temporary layering violation to allow existing users of a deprecated
+// interface.
+namespace fileapi {
+class FileSystemDirURLRequestJobTest;
+class FileSystemURLRequestJobTest;
+class FileWriterDelegateTest;
+}
+
+// Temporary layering violation to allow existing users of a deprecated
+// interface.
+namespace policy {
+class CannedResponseInterceptor;
+}
+
+// Temporary layering violation to allow existing users of a deprecated
+// interface.
+namespace webkit_blob {
+class BlobURLRequestJobTest;
+}
+
+namespace net {
+
+class CookieOptions;
+class HostPortPair;
+class IOBuffer;
+class SSLCertRequestInfo;
+class SSLInfo;
+class UploadDataStream;
+class URLRequestContext;
+class URLRequestJob;
+class X509Certificate;
+
+// This stores the values of the Set-Cookie headers received during the request.
+// Each item in the vector corresponds to a Set-Cookie: line received,
+// excluding the "Set-Cookie:" part.
+typedef std::vector<std::string> ResponseCookies;
+
+//-----------------------------------------------------------------------------
+// A class representing the asynchronous load of a data stream from a URL.
+//
+// The lifetime of an instance of this class is completely controlled by the
+// consumer, and the instance is not required to live on the heap or be
+// allocated in any special way.  It is also valid to delete an URLRequest
+// object during the handling of a callback to its delegate.  Of course, once
+// the URLRequest is deleted, no further callbacks to its delegate will occur.
+//
+// NOTE: All usage of all instances of this class should be on the same thread.
+//
+class NET_EXPORT URLRequest : NON_EXPORTED_BASE(public base::NonThreadSafe),
+                              public base::SupportsUserData {
+ public:
+  // Callback function implemented by protocol handlers to create new jobs.
+  // The factory may return NULL to indicate an error, which will cause other
+  // factories to be queried.  If no factory handles the request, then the
+  // default job will be used.
+  typedef URLRequestJob* (ProtocolFactory)(URLRequest* request,
+                                           NetworkDelegate* network_delegate,
+                                           const std::string& scheme);
+
+  // HTTP request/response header IDs (via some preprocessor fun) for use with
+  // SetRequestHeaderById and GetResponseHeaderById.
+  enum {
+#define HTTP_ATOM(x) HTTP_ ## x,
+#include "net/http/http_atom_list.h"
+#undef HTTP_ATOM
+  };
+
+  // Referrer policies (see set_referrer_policy): During server redirects, the
+  // referrer header might be cleared, if the protocol changes from HTTPS to
+  // HTTP. This is the default behavior of URLRequest, corresponding to
+  // CLEAR_REFERRER_ON_TRANSITION_FROM_SECURE_TO_INSECURE. Alternatively, the
+  // referrer policy can be set to never change the referrer header. This
+  // behavior corresponds to NEVER_CLEAR_REFERRER. Embedders will want to use
+  // NEVER_CLEAR_REFERRER when implementing the meta-referrer support
+  // (http://wiki.whatwg.org/wiki/Meta_referrer) and sending requests with a
+  // non-default referrer policy. Only the default referrer policy requires
+  // the referrer to be cleared on transitions from HTTPS to HTTP.
+  enum ReferrerPolicy {
+    CLEAR_REFERRER_ON_TRANSITION_FROM_SECURE_TO_INSECURE,
+    NEVER_CLEAR_REFERRER,
+  };
+
+  // This class handles network interception.  Use with
+  // (Un)RegisterRequestInterceptor.
+  class NET_EXPORT Interceptor {
+  public:
+    virtual ~Interceptor() {}
+
+    // Called for every request made.  Should return a new job to handle the
+    // request if it should be intercepted, or NULL to allow the request to
+    // be handled in the normal manner.
+    virtual URLRequestJob* MaybeIntercept(
+        URLRequest* request, NetworkDelegate* network_delegate) = 0;
+
+    // Called after having received a redirect response, but prior to
+    // the request delegate being informed of the redirect. Can return a new
+    // job to replace the existing job if it should be intercepted, or NULL
+    // to allow the normal handling to continue. If a new job is provided,
+    // the delegate never sees the original redirect response, instead the
+    // response produced by the intercept job will be returned.
+    virtual URLRequestJob* MaybeInterceptRedirect(
+        URLRequest* request,
+        NetworkDelegate* network_delegate,
+        const GURL& location);
+
+    // Called after having received a final response, but prior to
+    // the request delegate being informed of the response. This is also
+    // called when there is no server response at all to allow interception
+    // on dns or network errors. Can return a new job to replace the existing
+    // job if it should be intercepted, or NULL to allow the normal handling to
+    // continue. If a new job is provided, the delegate never sees the original
+    // response, instead the response produced by the intercept job will be
+    // returned.
+    virtual URLRequestJob* MaybeInterceptResponse(
+        URLRequest* request, NetworkDelegate* network_delegate);
+  };
+
+  // Deprecated interfaces in net::URLRequest. They have been moved to
+  // URLRequest's private section to prevent new uses. Existing uses are
+  // explicitly friended here and should be removed over time.
+  class NET_EXPORT Deprecated {
+   private:
+    // TODO(willchan): Kill off these friend declarations.
+    friend class extensions::AutoUpdateInterceptor;
+    friend class ::ChildProcessSecurityPolicyTest;
+    friend class ::ComponentUpdateInterceptor;
+    friend class ::TestAutomationProvider;
+    friend class ::URLRequestAutomationJob;
+    friend class TestInterceptor;
+    friend class URLRequestFilter;
+    friend class appcache::AppCacheInterceptor;
+    friend class appcache::AppCacheRequestHandlerTest;
+    friend class appcache::AppCacheURLRequestJobTest;
+    friend class content::ResourceDispatcherHostTest;
+    friend class fileapi::FileSystemDirURLRequestJobTest;
+    friend class extensions::UserScriptListenerTest;
+    friend class fileapi::FileSystemURLRequestJobTest;
+    friend class fileapi::FileWriterDelegateTest;
+    friend class policy::CannedResponseInterceptor;
+    friend class webkit_blob::BlobURLRequestJobTest;
+
+    // Use URLRequestJobFactory::ProtocolHandler instead.
+    static ProtocolFactory* RegisterProtocolFactory(const std::string& scheme,
+                                                    ProtocolFactory* factory);
+
+    // Use URLRequestJobFactory::Interceptor instead.
+    static void RegisterRequestInterceptor(Interceptor* interceptor);
+    static void UnregisterRequestInterceptor(Interceptor* interceptor);
+
+    // Contains only static members; never instantiated or copied.
+    DISALLOW_IMPLICIT_CONSTRUCTORS(Deprecated);
+  };
+
+  // The delegate's methods are called from the message loop of the thread
+  // on which the request's Start() method is called. See above for the
+  // ordering of callbacks.
+  //
+  // The callbacks will be called in the following order:
+  //   Start()
+  //    - OnCertificateRequested* (zero or more calls, if the SSL server and/or
+  //      SSL proxy requests a client certificate for authentication)
+  //    - OnSSLCertificateError* (zero or one call, if the SSL server's
+  //      certificate has an error)
+  //    - OnReceivedRedirect* (zero or more calls, for the number of redirects)
+  //    - OnAuthRequired* (zero or more calls, for the number of
+  //      authentication failures)
+  //    - OnResponseStarted
+  //   Read() initiated by delegate
+  //    - OnReadCompleted* (zero or more calls until all data is read)
+  //
+  // Read() must be called at least once. Read() returns true when it completed
+  // immediately, and false if an IO is pending or if there is an error.  When
+  // Read() returns false, the caller can check the Request's status() to see
+  // if an error occurred, or if the IO is just pending.  When Read() returns
+  // true with zero bytes read, it indicates the end of the response.
+  //
+  class NET_EXPORT Delegate {
+   public:
+    // Called upon a server-initiated redirect.  The delegate may call the
+    // request's Cancel method to prevent the redirect from being followed.
+    // Since there may be multiple chained redirects, there may also be more
+    // than one redirect call.
+    //
+    // When this function is called, the request will still contain the
+    // original URL, the destination of the redirect is provided in 'new_url'.
+    // If the delegate does not cancel the request and |*defer_redirect| is
+    // false, then the redirect will be followed, and the request's URL will be
+    // changed to the new URL.  Otherwise if the delegate does not cancel the
+    // request and |*defer_redirect| is true, then the redirect will be
+    // followed once FollowDeferredRedirect is called on the URLRequest.
+    //
+    // The caller must set |*defer_redirect| to false, so that delegates do not
+    // need to set it if they are happy with the default behavior of not
+    // deferring redirect.
+    virtual void OnReceivedRedirect(URLRequest* request,
+                                    const GURL& new_url,
+                                    bool* defer_redirect);
+
+    // Called when we receive an authentication failure.  The delegate should
+    // call request->SetAuth() with the user's credentials once it obtains them,
+    // or request->CancelAuth() to cancel the login and display the error page.
+    // When it does so, the request will be reissued, restarting the sequence
+    // of On* callbacks.
+    virtual void OnAuthRequired(URLRequest* request,
+                                AuthChallengeInfo* auth_info);
+
+    // Called when we receive an SSL CertificateRequest message for client
+    // authentication.  The delegate should call
+    // request->ContinueWithCertificate() with the client certificate the user
+    // selected, or request->ContinueWithCertificate(NULL) to continue the SSL
+    // handshake without a client certificate.
+    virtual void OnCertificateRequested(
+        URLRequest* request,
+        SSLCertRequestInfo* cert_request_info);
+
+    // Called when using SSL and the server responds with a certificate with
+    // an error, for example, whose common name does not match the common name
+    // we were expecting for that host.  The delegate should either do the
+    // safe thing and Cancel() the request or decide to proceed by calling
+    // ContinueDespiteLastError().  cert_error is a ERR_* error code
+    // indicating what's wrong with the certificate.
+    // If |fatal| is true then the host in question demands a higher level
+    // of security (due e.g. to HTTP Strict Transport Security, user
+    // preference, or built-in policy). In this case, errors must not be
+    // bypassable by the user.
+    virtual void OnSSLCertificateError(URLRequest* request,
+                                       const SSLInfo& ssl_info,
+                                       bool fatal);
+
+    // After calling Start(), the delegate will receive an OnResponseStarted
+    // callback when the request has completed.  If an error occurred, the
+    // request->status() will be set.  On success, all redirects have been
+    // followed and the final response is beginning to arrive.  At this point,
+    // meta data about the response is available, including for example HTTP
+    // response headers if this is a request for a HTTP resource.
+    virtual void OnResponseStarted(URLRequest* request) = 0;
+
+    // Called when a Read of the response body is completed after an
+    // IO_PENDING status from a Read() call.
+    // The data read is filled into the buffer which the caller passed
+    // to Read() previously.
+    //
+    // If an error occurred, request->status() will contain the error,
+    // and bytes read will be -1.
+    virtual void OnReadCompleted(URLRequest* request, int bytes_read) = 0;
+
+   protected:
+    virtual ~Delegate() {}
+  };
+
+  // TODO(shalev): Get rid of this constructor in favour of the one below it.
+  // Initialize an URL request.
+  URLRequest(const GURL& url,
+             Delegate* delegate,
+             const URLRequestContext* context);
+
+  URLRequest(const GURL& url,
+             Delegate* delegate,
+             const URLRequestContext* context,
+             NetworkDelegate* network_delegate);
+
+  // If destroyed after Start() has been called but while IO is pending,
+  // then the request will be effectively canceled and the delegate
+  // will not have any more of its methods called.
+  virtual ~URLRequest();
+
+  // Changes the default cookie policy from allowing all cookies to blocking all
+  // cookies. Embedders that want to implement a more flexible policy should
+  // change the default to blocking all cookies, and provide a NetworkDelegate
+  // with the URLRequestContext that maintains the CookieStore.
+  // The cookie policy default has to be set before the first URLRequest is
+  // started. Once it was set to block all cookies, it cannot be changed back.
+  static void SetDefaultCookiePolicyToBlock();
+
+  // Returns true if the scheme can be handled by URLRequest. False otherwise.
+  static bool IsHandledProtocol(const std::string& scheme);
+
+  // Returns true if the url can be handled by URLRequest. False otherwise.
+  // The function returns true for invalid urls because URLRequest knows how
+  // to handle those.
+  // NOTE: This will also return true for URLs that are handled by
+  // ProtocolFactories that only work for requests that are scoped to a
+  // Profile.
+  static bool IsHandledURL(const GURL& url);
+
+  // The original url is the url used to initialize the request, and it may
+  // differ from the url if the request was redirected.
+  const GURL& original_url() const { return url_chain_.front(); }
+  // The chain of urls traversed by this request.  If the request had no
+  // redirects, this vector will contain one element.
+  const std::vector<GURL>& url_chain() const { return url_chain_; }
+  const GURL& url() const { return url_chain_.back(); }
+
+  // The URL that should be consulted for the third-party cookie blocking
+  // policy.
+  //
+  // WARNING: This URL must only be used for the third-party cookie blocking
+  //          policy. It MUST NEVER be used for any kind of SECURITY check.
+  //
+  //          For example, if a top-level navigation is redirected, the
+  //          first-party for cookies will be the URL of the first URL in the
+  //          redirect chain throughout the whole redirect. If it was used for
+  //          a security check, an attacker might try to get around this check
+  //          by starting from some page that redirects to the
+  //          host-to-be-attacked.
+  const GURL& first_party_for_cookies() const {
+    return first_party_for_cookies_;
+  }
+  // This method may be called before Start() or FollowDeferredRedirect() is
+  // called.
+  void set_first_party_for_cookies(const GURL& first_party_for_cookies);
+
+  // The request method, as an uppercase string.  "GET" is the default value.
+  // The request method may only be changed before Start() is called and
+  // should only be assigned an uppercase value.
+  const std::string& method() const { return method_; }
+  void set_method(const std::string& method);
+
+  // The referrer URL for the request.  This header may actually be suppressed
+  // from the underlying network request for security reasons (e.g., a HTTPS
+  // URL will not be sent as the referrer for a HTTP request).  The referrer
+  // may only be changed before Start() is called.
+  const std::string& referrer() const { return referrer_; }
+  void set_referrer(const std::string& referrer);
+  // Returns the referrer header with potential username and password removed.
+  GURL GetSanitizedReferrer() const;
+
+  // The referrer policy to apply when updating the referrer during redirects.
+  // The referrer policy may only be changed before Start() is called.
+  void set_referrer_policy(ReferrerPolicy referrer_policy);
+
+  // Sets the delegate of the request.  This value may be changed at any time,
+  // and it is permissible for it to be null.
+  void set_delegate(Delegate* delegate);
+
+  // Indicates that the request body should be sent using chunked transfer
+  // encoding. This method may only be called before Start() is called.
+  void EnableChunkedUpload();
+
+  // Appends the given bytes to the request's upload data to be sent
+  // immediately via chunked transfer encoding. When all data has been sent,
+  // call MarkEndOfChunks() to indicate the end of upload data.
+  //
+  // This method may be called only after calling EnableChunkedUpload().
+  void AppendChunkToUpload(const char* bytes,
+                           int bytes_len,
+                           bool is_last_chunk);
+
+  // Sets the upload data.
+  void set_upload(scoped_ptr<UploadDataStream> upload);
+
+  // Gets the upload data.
+  const UploadDataStream* get_upload() const;
+
+  // Returns true if the request has a non-empty message body to upload.
+  bool has_upload() const;
+
+  // Set an extra request header by ID or name, or remove one by name.  These
+  // methods may only be called before Start() is called, or before a new
+  // redirect in the request chain.
+  void SetExtraRequestHeaderById(int header_id, const std::string& value,
+                                 bool overwrite);
+  void SetExtraRequestHeaderByName(const std::string& name,
+                                   const std::string& value, bool overwrite);
+  void RemoveRequestHeaderByName(const std::string& name);
+
+  // Sets all extra request headers.  Any extra request headers set by other
+  // methods are overwritten by this method.  This method may only be called
+  // before Start() is called.  It is an error to call it later.
+  void SetExtraRequestHeaders(const HttpRequestHeaders& headers);
+
+  const HttpRequestHeaders& extra_request_headers() const {
+    return extra_request_headers_;
+  }
+
+  // Returns the current load state for the request. |param| is an optional
+  // parameter describing details related to the load state. Not all load states
+  // have a parameter.
+  LoadStateWithParam GetLoadState() const;
+  void SetLoadStateParam(const string16& param) {
+    load_state_param_ = param;
+  }
+
+  // Returns the current upload progress in bytes. When the upload data is
+  // chunked, size is set to zero, but position will not be.
+  UploadProgress GetUploadProgress() const;
+
+  // Get response header(s) by ID or name.  These methods may only be called
+  // once the delegate's OnResponseStarted method has been called.  Headers
+  // that appear more than once in the response are coalesced, with values
+  // separated by commas (per RFC 2616). This will not work with cookies since
+  // comma can be used in cookie values.
+  // TODO(darin): add API to enumerate response headers.
+  void GetResponseHeaderById(int header_id, std::string* value);
+  void GetResponseHeaderByName(const std::string& name, std::string* value);
+
+  // Get all response headers, \n-delimited and \n\0-terminated.  This includes
+  // the response status line.  Restrictions on GetResponseHeaders apply.
+  void GetAllResponseHeaders(std::string* headers);
+
+  // The time when |this| was constructed.
+  base::TimeTicks creation_time() const { return creation_time_; }
+
+  // The time at which the returned response was requested.  For cached
+  // responses, this is the last time the cache entry was validated.
+  const base::Time& request_time() const {
+    return response_info_.request_time;
+  }
+
+  // The time at which the returned response was generated.  For cached
+  // responses, this is the last time the cache entry was validated.
+  const base::Time& response_time() const {
+    return response_info_.response_time;
+  }
+
+  // Indicate if this response was fetched from disk cache.
+  bool was_cached() const { return response_info_.was_cached; }
+
+  // Returns true if the URLRequest was delivered through a proxy.
+  bool was_fetched_via_proxy() const {
+    return response_info_.was_fetched_via_proxy;
+  }
+
+  // Returns the host and port that the content was fetched from.  See
+  // http_response_info.h for caveats relating to cached content.
+  HostPortPair GetSocketAddress() const;
+
+  // Get all response headers, as a HttpResponseHeaders object.  See comments
+  // in HttpResponseHeaders class as to the format of the data.
+  HttpResponseHeaders* response_headers() const;
+
+  // Get the SSL connection info.
+  const SSLInfo& ssl_info() const {
+    return response_info_.ssl_info;
+  }
+
+  // Returns the cookie values included in the response, if the request is one
+  // that can have cookies.  Returns true if the request is a cookie-bearing
+  // type, false otherwise.  This method may only be called once the
+  // delegate's OnResponseStarted method has been called.
+  bool GetResponseCookies(ResponseCookies* cookies);
+
+  // Get the mime type.  This method may only be called once the delegate's
+  // OnResponseStarted method has been called.
+  void GetMimeType(std::string* mime_type);
+
+  // Get the charset (character encoding).  This method may only be called once
+  // the delegate's OnResponseStarted method has been called.
+  void GetCharset(std::string* charset);
+
+  // Returns the HTTP response code (e.g., 200, 404, and so on).  This method
+  // may only be called once the delegate's OnResponseStarted method has been
+  // called.  For non-HTTP requests, this method returns -1.
+  int GetResponseCode();
+
+  // Get the HTTP response info in its entirety.
+  const HttpResponseInfo& response_info() const { return response_info_; }
+
+  // Access the LOAD_* flags modifying this request (see load_flags.h).
+  int load_flags() const { return load_flags_; }
+  void set_load_flags(int flags) { load_flags_ = flags; }
+
+  // Returns true if the request is "pending" (i.e., if Start() has been called,
+  // and the response has not yet completed).
+  bool is_pending() const { return is_pending_; }
+
+  // Returns true if the request is in the process of redirecting to a new
+  // URL but has not yet initiated the new request.
+  bool is_redirecting() const { return is_redirecting_; }
+
+  // Returns the error status of the request.
+  const URLRequestStatus& status() const { return status_; }
+
+  // Returns a globally unique identifier for this request.
+  uint64 identifier() const { return identifier_; }
+
+  // This method is called to start the request.  The delegate will receive
+  // a OnResponseStarted callback when the request is started.
+  void Start();
+
+  // This method may be called at any time after Start() has been called to
+  // cancel the request.  This method may be called many times, and it has
+  // no effect once the response has completed.  It is guaranteed that no
+  // methods of the delegate will be called after the request has been
+  // cancelled, except that this may call the delegate's OnReadCompleted()
+  // during the call to Cancel itself.
+  void Cancel();
+
+  // Cancels the request and sets the error to |error| (see net_error_list.h
+  // for values).
+  void CancelWithError(int error);
+
+  // Cancels the request and sets the error to |error| (see net_error_list.h
+  // for values) and attaches |ssl_info| as the SSLInfo for that request.  This
+  // is useful to attach a certificate and certificate error to a canceled
+  // request.
+  void CancelWithSSLError(int error, const SSLInfo& ssl_info);
+
+  // Read initiates an asynchronous read from the response, and must only
+  // be called after the OnResponseStarted callback is received with a
+  // successful status.
+  // If data is available, Read will return true, and the data and length will
+  // be returned immediately.  If data is not available, Read returns false,
+  // and an asynchronous Read is initiated.  The Read is finished when
+  // the caller receives the OnReadComplete callback.  Unless the request was
+  // cancelled, OnReadComplete will always be called, even if the read failed.
+  //
+  // The buf parameter is a buffer to receive the data.  If the operation
+  // completes asynchronously, the implementation will reference the buffer
+  // until OnReadComplete is called.  The buffer must be at least max_bytes in
+  // length.
+  //
+  // The max_bytes parameter is the maximum number of bytes to read.
+  //
+  // The bytes_read parameter is an output parameter containing the
+  // number of bytes read.  A value of 0 indicates that there is no
+  // more data available to read from the stream.
+  //
+  // If a read error occurs, Read returns false and the request->status
+  // will be set to an error.
+  bool Read(IOBuffer* buf, int max_bytes, int* bytes_read);
+
+  // If this request is being cached by the HTTP cache, stop subsequent caching.
+  // Note that this method has no effect on other (simultaneous or not) requests
+  // for the same resource. The typical example is a request that results in
+  // the data being stored to disk (downloaded instead of rendered) so we don't
+  // want to store it twice.
+  void StopCaching();
+
+  // This method may be called to follow a redirect that was deferred in
+  // response to an OnReceivedRedirect call.
+  void FollowDeferredRedirect();
+
+  // One of the following two methods should be called in response to an
+  // OnAuthRequired() callback (and only then).
+  // SetAuth will reissue the request with the given credentials.
+  // CancelAuth will give up and display the error page.
+  void SetAuth(const AuthCredentials& credentials);
+  void CancelAuth();
+
+  // This method can be called after the user selects a client certificate to
+  // instruct this URLRequest to continue with the request with the
+  // certificate.  Pass NULL if the user doesn't have a client certificate.
+  void ContinueWithCertificate(X509Certificate* client_cert);
+
+  // This method can be called after some error notifications to instruct this
+  // URLRequest to ignore the current error and continue with the request.  To
+  // cancel the request instead, call Cancel().
+  void ContinueDespiteLastError();
+
+  // Used to specify the context (cookie store, cache) for this request.
+  const URLRequestContext* context() const;
+
+  const BoundNetLog& net_log() const { return net_log_; }
+
+  // Returns the expected content size if available
+  int64 GetExpectedContentSize() const;
+
+  // Returns the priority level for this request.
+  RequestPriority priority() const { return priority_; }
+  void set_priority(RequestPriority priority) {
+    DCHECK_GE(priority, MINIMUM_PRIORITY);
+    DCHECK_LT(priority, NUM_PRIORITIES);
+    priority_ = priority;
+  }
+
+  // Returns true iff this request would be internally redirected to HTTPS
+  // due to HSTS. If so, |redirect_url| is rewritten to the new HTTPS URL.
+  bool GetHSTSRedirect(GURL* redirect_url) const;
+
+  // TODO(willchan): Undo this. Only temporarily public.
+  bool has_delegate() const { return delegate_ != NULL; }
+
+  // NOTE(willchan): This is just temporary for debugging
+  // http://crbug.com/90971.
+  // Allows setting debug info on the URLRequest.
+  void set_stack_trace(const base::debug::StackTrace& stack_trace);
+  const base::debug::StackTrace* stack_trace() const;
+
+  void set_received_response_content_length(int64 received_content_length) {
+    received_response_content_length_ = received_content_length;
+  }
+  int64 received_response_content_length() {
+    return received_response_content_length_;
+  }
+
+ protected:
+  // Allow the URLRequestJob class to control the is_pending() flag.
+  void set_is_pending(bool value) { is_pending_ = value; }
+
+  // Allow the URLRequestJob class to set our status too
+  void set_status(const URLRequestStatus& value) { status_ = value; }
+
+  // Allow the URLRequestJob to redirect this request.  Returns OK if
+  // successful, otherwise an error code is returned.
+  int Redirect(const GURL& location, int http_status_code);
+
+  // Called by URLRequestJob to allow interception when a redirect occurs.
+  void NotifyReceivedRedirect(const GURL& location, bool* defer_redirect);
+
+  // Allow an interceptor's URLRequestJob to restart this request.
+  // Should only be called if the original job has not started a response.
+  void Restart();
+
+ private:
+  friend class URLRequestJob;
+
+  // Registers a new protocol handler for the given scheme. If the scheme is
+  // already handled, this will overwrite the given factory. To delete the
+  // protocol factory, use NULL for the factory BUT this WILL NOT put back
+  // any previously registered protocol factory. It will have returned
+  // the previously registered factory (or NULL if none is registered) when
+  // the scheme was first registered so that the caller can manually put it
+  // back if desired.
+  //
+  // The scheme must be all-lowercase ASCII. See the ProtocolFactory
+  // declaration for its requirements.
+  //
+  // The registered protocol factory may return NULL, which will cause the
+  // regular "built-in" protocol factory to be used.
+  //
+  static ProtocolFactory* RegisterProtocolFactory(const std::string& scheme,
+                                                  ProtocolFactory* factory);
+
+  // Registers or unregisters a network interception class.
+  static void RegisterRequestInterceptor(Interceptor* interceptor);
+  static void UnregisterRequestInterceptor(Interceptor* interceptor);
+
+  // Resumes or cancels a request paused by the
+  // NetworkDelegate::OnBeforeRequest handler. If |error| is OK, the request
+  // is resumed; otherwise it is canceled with that net error code. This
+  // should only be called after Start is called and OnBeforeRequest signals
+  // that the request should be paused.
+  void BeforeRequestComplete(int error);
+
+  void StartJob(URLRequestJob* job);
+
+  // Restarting involves replacing the current job with a new one such as what
+  // happens when following a HTTP redirect.
+  void RestartWithJob(URLRequestJob* job);
+  void PrepareToRestart();
+
+  // Detaches the job from this request in preparation for this object going
+  // away or the job being replaced. The job will not call us back when it has
+  // been orphaned.
+  void OrphanJob();
+
+  // Cancels the request and set the error and ssl info for this request to the
+  // passed values.
+  void DoCancel(int error, const SSLInfo& ssl_info);
+
+  // Notifies the network delegate that the request has been completed.
+  // This does not imply a successful completion. Also a canceled request is
+  // considered completed.
+  void NotifyRequestCompleted();
+
+  // Called by URLRequestJob to allow interception when the final response
+  // occurs.
+  void NotifyResponseStarted();
+
+  // These functions delegate to |delegate_| and may only be used if
+  // |delegate_| is not NULL. See URLRequest::Delegate for the meaning
+  // of these functions.
+  void NotifyAuthRequired(AuthChallengeInfo* auth_info);
+  void NotifyAuthRequiredComplete(NetworkDelegate::AuthRequiredResponse result);
+  void NotifyCertificateRequested(SSLCertRequestInfo* cert_request_info);
+  void NotifySSLCertificateError(const SSLInfo& ssl_info, bool fatal);
+  void NotifyReadCompleted(int bytes_read);
+
+  // These functions delegate to |network_delegate_| if it is not NULL.
+  // If |network_delegate_| is NULL, cookies can be used unless
+  // SetDefaultCookiePolicyToBlock() has been called.
+  bool CanGetCookies(const CookieList& cookie_list) const;
+  bool CanSetCookie(const std::string& cookie_line,
+                    CookieOptions* options) const;
+
+  // Called when the delegate blocks or unblocks this request when intercepting
+  // certain requests.
+  void SetBlockedOnDelegate();
+  void SetUnblockedOnDelegate();
+
+  // Contextual information used for this request. Cannot be NULL. This contains
+  // most of the dependencies which are shared between requests (disk cache,
+  // cookie store, socket pool, etc.)
+  const URLRequestContext* context_;
+
+  NetworkDelegate* network_delegate_;
+
+  // Tracks the time spent in various load states throughout this request.
+  BoundNetLog net_log_;
+
+  scoped_refptr<URLRequestJob> job_;
+  scoped_ptr<UploadDataStream> upload_data_stream_;
+  std::vector<GURL> url_chain_;
+  GURL first_party_for_cookies_;
+  GURL delegate_redirect_url_;
+  std::string method_;  // "GET", "POST", etc. Should be all uppercase.
+  std::string referrer_;
+  ReferrerPolicy referrer_policy_;
+  HttpRequestHeaders extra_request_headers_;
+  int load_flags_;  // Flags indicating the request type for the load;
+                    // expected values are LOAD_* enums above.
+
+  // Never access methods of the |delegate_| directly. Always use the
+  // Notify... methods for this.
+  Delegate* delegate_;
+
+  // Current error status of the job. When no error has been encountered, this
+  // will be SUCCESS. If multiple errors have been encountered, this will be
+  // the first non-SUCCESS status seen.
+  URLRequestStatus status_;
+
+  // The HTTP response info, lazily initialized.
+  HttpResponseInfo response_info_;
+
+  // Tells us whether the job is outstanding. This is true from the time
+  // Start() is called to the time we dispatch RequestComplete and indicates
+  // whether the job is active.
+  bool is_pending_;
+
+  // Indicates if the request is in the process of redirecting to a new
+  // location.  It is true from the time the headers complete until a
+  // new request begins.
+  bool is_redirecting_;
+
+  // Number of times we're willing to redirect.  Used to guard against
+  // infinite redirects.
+  int redirect_limit_;
+
+  // Cached value for use after we've orphaned the job handling the
+  // first transaction in a request involving redirects.
+  UploadProgress final_upload_progress_;
+
+  // The priority level for this request.  Objects like ClientSocketPool use
+  // this to determine which URLRequest to allocate sockets to first.
+  RequestPriority priority_;
+
+  // TODO(battre): The only consumer of the identifier_ is currently the
+  // web request API. We need to match identifiers of requests between the
+  // web request API and the web navigation API. As the URLRequest does not
+  // exist when the web navigation API is triggered, the tracking probably
+  // needs to be done outside of the URLRequest anyway. Therefore, this
+  // identifier should be deleted here. http://crbug.com/89321
+  // A globally unique identifier for this request.
+  const uint64 identifier_;
+
+  // True if this request is blocked waiting for the network delegate to resume
+  // it.
+  bool blocked_on_delegate_;
+
+  // An optional parameter that provides additional information about the load
+  // state. Only used with the LOAD_STATE_WAITING_FOR_DELEGATE state.
+  string16 load_state_param_;
+
+  base::debug::LeakTracker<URLRequest> leak_tracker_;
+
+  // Callback passed to the network delegate to notify us when a blocked request
+  // is ready to be resumed or canceled.
+  CompletionCallback before_request_callback_;
+
+  // Safe-guard to ensure that we do not send multiple "I am completed"
+  // messages to network delegate.
+  // TODO(battre): Remove this. http://crbug.com/89049
+  bool has_notified_completion_;
+
+  // Authentication data used by the NetworkDelegate for this request,
+  // if one is present. |auth_credentials_| may be filled in when calling
+  // |NotifyAuthRequired| on the NetworkDelegate. |auth_info_| holds
+  // the authentication challenge being handled by |NotifyAuthRequired|.
+  AuthCredentials auth_credentials_;
+  scoped_refptr<AuthChallengeInfo> auth_info_;
+
+  int64 received_response_content_length_;
+
+  base::TimeTicks creation_time_;
+
+  scoped_ptr<const base::debug::StackTrace> stack_trace_;
+
+  DISALLOW_COPY_AND_ASSIGN(URLRequest);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_H_
diff --git a/src/net/url_request/url_request_about_job.cc b/src/net/url_request/url_request_about_job.cc
new file mode 100644
index 0000000..242a735
--- /dev/null
+++ b/src/net/url_request/url_request_about_job.cc
@@ -0,0 +1,50 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Simple implementation of about: protocol handler that treats everything as
+// about:blank.  No other about: features should be available to web content,
+// so they're not implemented here.
+
+#include "net/url_request/url_request_about_job.h"
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/message_loop.h"
+
+namespace net {
+
+// Constructs a job that will answer |request| with an empty text/html
+// response.  |weak_factory_| guards the asynchronously posted StartAsync()
+// task against this job being destroyed before the task runs.
+URLRequestAboutJob::URLRequestAboutJob(URLRequest* request,
+                                       NetworkDelegate* network_delegate)
+    : URLRequestJob(request, network_delegate),
+      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) {
+}
+
+// static
+// ProtocolFactory entry point registered for the "about" scheme; |scheme| is
+// ignored because every about: URL is treated as about:blank (see file-level
+// comment).
+URLRequestJob* URLRequestAboutJob::Factory(URLRequest* request,
+                                           NetworkDelegate* network_delegate,
+                                           const std::string& scheme) {
+  return new URLRequestAboutJob(request, network_delegate);
+}
+
+void URLRequestAboutJob::Start() {
+  // Start reading asynchronously so that all error reporting and data
+  // callbacks happen as they would for network requests.
+  // GetWeakPtr() ensures the posted task is dropped rather than run against a
+  // destroyed job.
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(&URLRequestAboutJob::StartAsync, weak_factory_.GetWeakPtr()));
+}
+
+// about: responses are always reported as HTML; always succeeds.
+bool URLRequestAboutJob::GetMimeType(std::string* mime_type) const {
+  *mime_type = "text/html";
+  return true;
+}
+
+// Private destructor (see header); defined out-of-line with the rest of the
+// implementation.
+URLRequestAboutJob::~URLRequestAboutJob() {
+}
+
+// Completes Start(): signals that response headers are available.  No body is
+// ever delivered, so this alone produces the empty about:blank response.
+void URLRequestAboutJob::StartAsync() {
+  NotifyHeadersComplete();
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_about_job.h b/src/net/url_request/url_request_about_job.h
new file mode 100644
index 0000000..ea2b83f
--- /dev/null
+++ b/src/net/url_request/url_request_about_job.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_ABOUT_JOB_H_
+#define NET_URL_REQUEST_URL_REQUEST_ABOUT_JOB_H_
+
+#include <string>
+
+#include "base/memory/weak_ptr.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_job.h"
+
+namespace net {
+
+// A URLRequestJob that serves an empty text/html response for about: URLs.
+class NET_EXPORT URLRequestAboutJob : public URLRequestJob {
+ public:
+  URLRequestAboutJob(URLRequest* request, NetworkDelegate* network_delegate);
+
+  // Factory function matching URLRequest::ProtocolFactory; registered to
+  // create this job type for the "about" scheme.
+  static URLRequest::ProtocolFactory Factory;
+
+  // URLRequestJob:
+  virtual void Start() OVERRIDE;
+  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
+
+ private:
+  // Private: callers never destroy jobs directly (see URLRequestJob).
+  virtual ~URLRequestAboutJob();
+
+  // Posted from Start(); reports (empty) response headers asynchronously.
+  void StartAsync();
+
+  // Invalidates the pending StartAsync() task if the job goes away first.
+  base::WeakPtrFactory<URLRequestAboutJob> weak_factory_;
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_ABOUT_JOB_H_
diff --git a/src/net/url_request/url_request_context.cc b/src/net/url_request/url_request_context.cc
new file mode 100644
index 0000000..7dd2bb3
--- /dev/null
+++ b/src/net/url_request/url_request_context.cc
@@ -0,0 +1,128 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_context.h"
+
+#include "base/compiler_specific.h"
+#include "base/debug/alias.h"
+#include "base/debug/stack_trace.h"
+#include "base/string_util.h"
+#include "net/base/host_resolver.h"
+#include "net/cookies/cookie_store.h"
+#include "net/ftp/ftp_transaction_factory.h"
+#include "net/http/http_transaction_factory.h"
+#include "net/url_request/http_user_agent_settings.h"
+#include "net/url_request/url_request.h"
+
+namespace net {
+
+// All dependency pointers start NULL and are not owned by the context (see
+// header).  Only the FTP auth cache and the set of live requests are
+// allocated and owned here.
+URLRequestContext::URLRequestContext()
+    : net_log_(NULL),
+      host_resolver_(NULL),
+      cert_verifier_(NULL),
+      server_bound_cert_service_(NULL),
+      fraudulent_certificate_reporter_(NULL),
+      http_auth_handler_factory_(NULL),
+      proxy_service_(NULL),
+      network_delegate_(NULL),
+      http_server_properties_(NULL),
+      http_user_agent_settings_(NULL),
+      transport_security_state_(NULL),
+#if !defined(DISABLE_FTP_SUPPORT)
+      ftp_auth_cache_(new FtpAuthCache),
+#endif
+      http_transaction_factory_(NULL),
+      ftp_transaction_factory_(NULL),
+      job_factory_(NULL),
+      throttler_manager_(NULL),
+      url_requests_(new std::set<const URLRequest*>) {
+}
+
+URLRequestContext::~URLRequestContext() {
+#if !defined(__LB_SHELL__FOR_RELEASE__)
+  // In non-release builds, crash loudly if any URLRequest still references
+  // this context: requests must not outlive their context.
+  AssertNoURLRequests();
+#endif
+}
+
+// Copies every shared (non-owned) parameter pointer from |other|.  Members
+// that are per-context by design are deliberately NOT copied: the FTP auth
+// cache (noted below) and the |url_requests_| tracking set.
+void URLRequestContext::CopyFrom(const URLRequestContext* other) {
+  // Copy URLRequestContext parameters.
+  set_net_log(other->net_log_);
+  set_host_resolver(other->host_resolver_);
+  set_cert_verifier(other->cert_verifier_);
+  set_server_bound_cert_service(other->server_bound_cert_service_);
+  set_fraudulent_certificate_reporter(other->fraudulent_certificate_reporter_);
+  set_http_auth_handler_factory(other->http_auth_handler_factory_);
+  set_proxy_service(other->proxy_service_);
+  set_ssl_config_service(other->ssl_config_service_);
+  set_network_delegate(other->network_delegate_);
+  set_http_server_properties(other->http_server_properties_);
+  set_cookie_store(other->cookie_store_);
+  set_transport_security_state(other->transport_security_state_);
+  // FTPAuthCache is unique per context.
+  set_http_transaction_factory(other->http_transaction_factory_);
+  set_ftp_transaction_factory(other->ftp_transaction_factory_);
+  set_job_factory(other->job_factory_);
+  set_throttler_manager(other->throttler_manager_);
+  set_http_user_agent_settings(other->http_user_agent_settings_);
+}
+
+// Returns the params of the HttpNetworkSession backing this context's
+// transaction factory, or NULL when there is no factory or no session.
+const HttpNetworkSession::Params* URLRequestContext::GetNetworkSessionParams(
+    ) const {
+  HttpTransactionFactory* transaction_factory = http_transaction_factory();
+  if (!transaction_factory)
+    return NULL;
+  HttpNetworkSession* network_session = transaction_factory->GetSession();
+  if (!network_session)
+    return NULL;
+  return &network_session->params();
+}
+
+// Returns a newly allocated URLRequest bound to this context and its
+// network delegate; ownership passes to the caller.
+URLRequest* URLRequestContext::CreateRequest(
+    const GURL& url, URLRequest::Delegate* delegate) const {
+  return new URLRequest(url, delegate, this, network_delegate_);
+}
+
+// Out-of-line because assigning to the scoped_refptr member takes a
+// reference on |cookie_store| (may be NULL to disable cookie storage).
+void URLRequestContext::set_cookie_store(CookieStore* cookie_store) {
+  cookie_store_ = cookie_store;
+}
+
+// Legacy accessor (see header): delegates to |http_user_agent_settings_|,
+// returning the empty string when no settings object is configured.
+std::string URLRequestContext::GetAcceptCharset() const {
+  return http_user_agent_settings_ ?
+      http_user_agent_settings_->GetAcceptCharset() : EmptyString();
+}
+
+// Legacy accessor: Accept-Language from |http_user_agent_settings_|, or "".
+std::string URLRequestContext::GetAcceptLanguage() const {
+  return http_user_agent_settings_ ?
+      http_user_agent_settings_->GetAcceptLanguage() : EmptyString();
+}
+
+// Legacy accessor: User-Agent from |http_user_agent_settings_|, or "".
+std::string URLRequestContext::GetUserAgent() const {
+  return http_user_agent_settings_ ? http_user_agent_settings_->GetUserAgent()
+                                   : EmptyString();
+}
+
+// Crashes (CHECK) if any URLRequest still references this context.  Before
+// crashing, details of the first leaked request are copied into locals and
+// passed to base::debug::Alias so they survive into the crash dump for
+// post-mortem analysis.
+void URLRequestContext::AssertNoURLRequests() const {
+  int num_requests = url_requests_->size();
+  if (num_requests != 0) {
+    // We're leaking URLRequests :( Dump the URL of the first one and record how
+    // many we leaked so we have an idea of how bad it is.
+    char url_buf[128];
+    const URLRequest* request = *url_requests_->begin();
+    base::strlcpy(url_buf, request->url().spec().c_str(), arraysize(url_buf));
+    bool has_delegate = request->has_delegate();
+    int load_flags = request->load_flags();
+    base::debug::StackTrace stack_trace(NULL, 0);
+    if (request->stack_trace())
+      stack_trace = *request->stack_trace();
+    base::debug::Alias(url_buf);
+    base::debug::Alias(&num_requests);
+    base::debug::Alias(&has_delegate);
+    base::debug::Alias(&load_flags);
+    base::debug::Alias(&stack_trace);
+    CHECK(false) << "Leaked " << num_requests << " URLRequest(s). First URL: "
+                 << request->url().spec().c_str() << ".";
+  }
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_context.h b/src/net/url_request/url_request_context.h
new file mode 100644
index 0000000..f843789
--- /dev/null
+++ b/src/net/url_request/url_request_context.h
@@ -0,0 +1,264 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This class represents contextual information (cookies, cache, etc.)
+// that's useful when processing resource requests.
+// The class is reference-counted so that it can be cleaned up after any
+// requests that are using it have been completed.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_CONTEXT_H_
+#define NET_URL_REQUEST_URL_REQUEST_CONTEXT_H_
+
+#include <set>
+#include <string>
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/non_thread_safe.h"
+#include "net/base/net_export.h"
+#include "net/base/net_log.h"
+#include "net/base/ssl_config_service.h"
+#include "net/base/transport_security_state.h"
+#include "net/http/http_network_session.h"
+#include "net/http/http_server_properties.h"
+#include "net/ftp/ftp_auth_cache.h"
+#include "net/url_request/url_request.h"
+
+namespace net {
+class CertVerifier;
+class CookieStore;
+class FraudulentCertificateReporter;
+class FtpTransactionFactory;
+class HostResolver;
+class HttpAuthHandlerFactory;
+class HttpTransactionFactory;
+class HttpUserAgentSettings;
+class NetworkDelegate;
+class ServerBoundCertService;
+class ProxyService;
+class URLRequest;
+class URLRequestJobFactory;
+class URLRequestThrottlerManager;
+
+// Subclass to provide application-specific context for URLRequest
+// instances. Note that URLRequestContext typically does not provide storage for
+// these member variables, since they may be shared. For the ones that aren't
+// shared, URLRequestContextStorage can be helpful in defining their storage.
+class NET_EXPORT URLRequestContext
+    : NON_EXPORTED_BASE(public base::NonThreadSafe) {
+ public:
+  URLRequestContext();
+  virtual ~URLRequestContext();
+
+  // Copies the state from |other| into this context.
+  void CopyFrom(const URLRequestContext* other);
+
+  // May return NULL if this context doesn't have an associated network session.
+  const HttpNetworkSession::Params* GetNetworkSessionParams() const;
+
+  // Returns a newly allocated URLRequest bound to this context; ownership
+  // passes to the caller.
+  URLRequest* CreateRequest(
+      const GURL& url, URLRequest::Delegate* delegate) const;
+
+  NetLog* net_log() const {
+    return net_log_;
+  }
+
+  void set_net_log(NetLog* net_log) {
+    net_log_ = net_log;
+  }
+
+  HostResolver* host_resolver() const {
+    return host_resolver_;
+  }
+
+  void set_host_resolver(HostResolver* host_resolver) {
+    host_resolver_ = host_resolver;
+  }
+
+  CertVerifier* cert_verifier() const {
+    return cert_verifier_;
+  }
+
+  void set_cert_verifier(CertVerifier* cert_verifier) {
+    cert_verifier_ = cert_verifier;
+  }
+
+  ServerBoundCertService* server_bound_cert_service() const {
+    return server_bound_cert_service_;
+  }
+
+  void set_server_bound_cert_service(
+      ServerBoundCertService* server_bound_cert_service) {
+    server_bound_cert_service_ = server_bound_cert_service;
+  }
+
+  FraudulentCertificateReporter* fraudulent_certificate_reporter() const {
+    return fraudulent_certificate_reporter_;
+  }
+  void set_fraudulent_certificate_reporter(
+      FraudulentCertificateReporter* fraudulent_certificate_reporter) {
+    fraudulent_certificate_reporter_ = fraudulent_certificate_reporter;
+  }
+
+  // Get the proxy service for this context.
+  ProxyService* proxy_service() const { return proxy_service_; }
+  void set_proxy_service(ProxyService* proxy_service) {
+    proxy_service_ = proxy_service;
+  }
+
+  // Get the ssl config service for this context.
+  SSLConfigService* ssl_config_service() const { return ssl_config_service_; }
+  void set_ssl_config_service(SSLConfigService* service) {
+    ssl_config_service_ = service;
+  }
+
+  // Gets the HTTP Authentication Handler Factory for this context.
+  // The factory is only valid for the lifetime of this URLRequestContext
+  HttpAuthHandlerFactory* http_auth_handler_factory() const {
+    return http_auth_handler_factory_;
+  }
+  void set_http_auth_handler_factory(HttpAuthHandlerFactory* factory) {
+    http_auth_handler_factory_ = factory;
+  }
+
+  // Gets the http transaction factory for this context.
+  HttpTransactionFactory* http_transaction_factory() const {
+    return http_transaction_factory_;
+  }
+  void set_http_transaction_factory(HttpTransactionFactory* factory) {
+    http_transaction_factory_ = factory;
+  }
+
+  // Gets the ftp transaction factory for this context.
+  FtpTransactionFactory* ftp_transaction_factory() const {
+    return ftp_transaction_factory_;
+  }
+  void set_ftp_transaction_factory(FtpTransactionFactory* factory) {
+    ftp_transaction_factory_ = factory;
+  }
+
+  void set_network_delegate(NetworkDelegate* network_delegate) {
+    network_delegate_ = network_delegate;
+  }
+  NetworkDelegate* network_delegate() const { return network_delegate_; }
+
+  void set_http_server_properties(
+      HttpServerProperties* http_server_properties) {
+    http_server_properties_ = http_server_properties;
+  }
+  HttpServerProperties* http_server_properties() const {
+    return http_server_properties_;
+  }
+
+  // Gets the cookie store for this context (may be null, in which case
+  // cookies are not stored).
+  CookieStore* cookie_store() const { return cookie_store_.get(); }
+  void set_cookie_store(CookieStore* cookie_store);
+
+  TransportSecurityState* transport_security_state() const {
+      return transport_security_state_;
+  }
+  void set_transport_security_state(
+      TransportSecurityState* state) {
+    transport_security_state_ = state;
+  }
+
+  // Gets the FTP authentication cache for this context.  Always NULL when FTP
+  // support is compiled out.
+  FtpAuthCache* ftp_auth_cache() const {
+#if !defined(DISABLE_FTP_SUPPORT)
+    return ftp_auth_cache_.get();
+#else
+    return NULL;
+#endif
+  }
+
+  // ---------------------------------------------------------------------------
+  // Legacy accessors that delegate to http_user_agent_settings_.
+  // TODO(pauljensen): Remove after all clients are updated to directly access
+  // http_user_agent_settings_.
+  // Gets the value of 'Accept-Charset' header field.
+  std::string GetAcceptCharset() const;
+  // Gets the value of 'Accept-Language' header field.
+  std::string GetAcceptLanguage() const;
+  // Gets the UA string.
+  std::string GetUserAgent() const;
+  // ---------------------------------------------------------------------------
+
+  const URLRequestJobFactory* job_factory() const { return job_factory_; }
+  void set_job_factory(const URLRequestJobFactory* job_factory) {
+    job_factory_ = job_factory;
+  }
+
+  // May be NULL.
+  URLRequestThrottlerManager* throttler_manager() const {
+    return throttler_manager_;
+  }
+  void set_throttler_manager(URLRequestThrottlerManager* throttler_manager) {
+    throttler_manager_ = throttler_manager;
+  }
+
+  // Gets the URLRequest objects that hold a reference to this
+  // URLRequestContext.
+  std::set<const URLRequest*>* url_requests() const {
+    return url_requests_.get();
+  }
+
+  // CHECK-fails if any URLRequest still references this context; see .cc for
+  // the crash-dump details it records.
+  void AssertNoURLRequests() const;
+
+  // Get the underlying |HttpUserAgentSettings| implementation that provides
+  // the HTTP Accept-Language, Accept-Charset and User-Agent header values.
+  const HttpUserAgentSettings* http_user_agent_settings() const {
+    return http_user_agent_settings_;
+  }
+  void set_http_user_agent_settings(
+      HttpUserAgentSettings* http_user_agent_settings) {
+    http_user_agent_settings_ = http_user_agent_settings;
+  }
+
+ private:
+  // ---------------------------------------------------------------------------
+  // Important: When adding any new members below, consider whether they need to
+  // be added to CopyFrom.
+  // ---------------------------------------------------------------------------
+
+  // Ownership for these members are not defined here. Clients should either
+  // provide storage elsewhere or have a subclass take ownership.
+  NetLog* net_log_;
+  HostResolver* host_resolver_;
+  CertVerifier* cert_verifier_;
+  ServerBoundCertService* server_bound_cert_service_;
+  FraudulentCertificateReporter* fraudulent_certificate_reporter_;
+  HttpAuthHandlerFactory* http_auth_handler_factory_;
+  ProxyService* proxy_service_;
+  scoped_refptr<SSLConfigService> ssl_config_service_;
+  NetworkDelegate* network_delegate_;
+  HttpServerProperties* http_server_properties_;
+  HttpUserAgentSettings* http_user_agent_settings_;
+  scoped_refptr<CookieStore> cookie_store_;
+  TransportSecurityState* transport_security_state_;
+#if !defined(DISABLE_FTP_SUPPORT)
+  scoped_ptr<FtpAuthCache> ftp_auth_cache_;
+#endif
+  // NOTE(review): a stale comment about a "referrer charset" member was
+  // removed here; it described a field that does not exist in this class.
+  HttpTransactionFactory* http_transaction_factory_;
+  FtpTransactionFactory* ftp_transaction_factory_;
+  const URLRequestJobFactory* job_factory_;
+  URLRequestThrottlerManager* throttler_manager_;
+
+  // ---------------------------------------------------------------------------
+  // Important: When adding any new members below, consider whether they need to
+  // be added to CopyFrom.
+  // ---------------------------------------------------------------------------
+
+  scoped_ptr<std::set<const URLRequest*> > url_requests_;
+
+  DISALLOW_COPY_AND_ASSIGN(URLRequestContext);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_CONTEXT_H_
diff --git a/src/net/url_request/url_request_context_builder.cc b/src/net/url_request/url_request_context_builder.cc
new file mode 100644
index 0000000..232ed22
--- /dev/null
+++ b/src/net/url_request/url_request_context_builder.cc
@@ -0,0 +1,304 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_context_builder.h"
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/string_util.h"
+#include "base/threading/thread.h"
+#include "base/thread_task_runner_handle.h"
+#include "net/base/cert_verifier.h"
+#include "net/base/host_resolver.h"
+#include "net/base/net_errors.h"
+#include "net/base/network_delegate.h"
+#include "net/base/ssl_config_service_defaults.h"
+#include "net/base/transport_security_state.h"
+#include "net/cookies/cookie_monster.h"
+#include "net/ftp/ftp_network_layer.h"
+#include "net/http/http_auth_handler_factory.h"
+#include "net/http/http_cache.h"
+#include "net/http/http_network_layer.h"
+#include "net/http/http_network_session.h"
+#include "net/http/http_server_properties_impl.h"
+#include "net/proxy/proxy_service.h"
+#include "net/url_request/static_http_user_agent_settings.h"
+#include "net/url_request/url_request_context.h"
+#include "net/url_request/url_request_context_storage.h"
+
+namespace net {
+
+namespace {
+
+// Default NetworkDelegate installed by the builder when the caller supplies
+// none: permits everything and observes nothing (every hook is a no-op that
+// allows the operation to proceed).
+class BasicNetworkDelegate : public NetworkDelegate {
+ public:
+  BasicNetworkDelegate() {}
+  virtual ~BasicNetworkDelegate() {}
+
+ private:
+  virtual int OnBeforeURLRequest(URLRequest* request,
+                                 const CompletionCallback& callback,
+                                 GURL* new_url) OVERRIDE {
+    return OK;
+  }
+
+  virtual int OnBeforeSendHeaders(URLRequest* request,
+                                  const CompletionCallback& callback,
+                                  HttpRequestHeaders* headers) OVERRIDE {
+    return OK;
+  }
+
+  virtual void OnSendHeaders(URLRequest* request,
+                             const HttpRequestHeaders& headers) OVERRIDE {}
+
+  virtual int OnHeadersReceived(
+      URLRequest* request,
+      const CompletionCallback& callback,
+      const HttpResponseHeaders* original_response_headers,
+      scoped_refptr<HttpResponseHeaders>* override_response_headers)
+      OVERRIDE {
+    return OK;
+  }
+
+  virtual void OnBeforeRedirect(URLRequest* request,
+                                const GURL& new_location) OVERRIDE {}
+
+  virtual void OnResponseStarted(URLRequest* request) OVERRIDE {}
+
+  virtual void OnRawBytesRead(const URLRequest& request,
+                              int bytes_read) OVERRIDE {}
+
+  virtual void OnCompleted(URLRequest* request, bool started) OVERRIDE {}
+
+  virtual void OnURLRequestDestroyed(URLRequest* request) OVERRIDE {}
+
+  virtual void OnPACScriptError(int line_number,
+                                const string16& error) OVERRIDE {}
+
+  // Takes no action, which leaves authentication handling to the caller.
+  virtual NetworkDelegate::AuthRequiredResponse OnAuthRequired(
+      URLRequest* request,
+      const AuthChallengeInfo& auth_info,
+      const AuthCallback& callback,
+      AuthCredentials* credentials) OVERRIDE {
+    return NetworkDelegate::AUTH_REQUIRED_RESPONSE_NO_ACTION;
+  }
+
+  virtual bool OnCanGetCookies(const URLRequest& request,
+                               const CookieList& cookie_list) OVERRIDE {
+    return true;
+  }
+
+  virtual bool OnCanSetCookie(const URLRequest& request,
+                              const std::string& cookie_line,
+                              CookieOptions* options) OVERRIDE {
+    return true;
+  }
+
+  virtual bool OnCanAccessFile(const net::URLRequest& request,
+                               const FilePath& path) const OVERRIDE {
+    return true;
+  }
+
+  // Opting out of throttling is the only hook here that returns false.
+  virtual bool OnCanThrottleRequest(const URLRequest& request) const OVERRIDE {
+    return false;
+  }
+
+  virtual int OnBeforeSocketStreamConnect(
+      SocketStream* stream,
+      const CompletionCallback& callback) OVERRIDE {
+    return OK;
+  }
+
+  virtual void OnRequestWaitStateChange(const URLRequest& request,
+                                        RequestWaitState state) OVERRIDE {
+  }
+
+  DISALLOW_COPY_AND_ASSIGN(BasicNetworkDelegate);
+};
+
+// The concrete context type produced by URLRequestContextBuilder::Build():
+// a URLRequestContext that owns its own storage plus the cache and file
+// threads some of its components run on.
+class BasicURLRequestContext : public URLRequestContext {
+ public:
+  BasicURLRequestContext()
+      : cache_thread_("Cache Thread"),
+        file_thread_("File Thread"),
+        ALLOW_THIS_IN_INITIALIZER_LIST(storage_(this)) {}
+
+  URLRequestContextStorage* storage() {
+    return &storage_;
+  }
+
+  // Lazily started (only when a disk cache is configured).
+  void StartCacheThread() {
+    cache_thread_.StartWithOptions(
+        base::Thread::Options(MessageLoop::TYPE_IO, 0));
+  }
+
+  // Only valid after StartCacheThread().
+  scoped_refptr<base::MessageLoopProxy> cache_message_loop_proxy() {
+    DCHECK(cache_thread_.IsRunning());
+    return cache_thread_.message_loop_proxy();
+  }
+
+  void StartFileThread() {
+    file_thread_.StartWithOptions(
+        base::Thread::Options(MessageLoop::TYPE_DEFAULT, 0));
+  }
+
+  // Only valid after StartFileThread().
+  MessageLoop* file_message_loop() {
+    DCHECK(file_thread_.IsRunning());
+    return file_thread_.message_loop();
+  }
+
+ protected:
+  virtual ~BasicURLRequestContext() {}
+
+ private:
+  base::Thread cache_thread_;
+  base::Thread file_thread_;
+  URLRequestContextStorage storage_;
+  DISALLOW_COPY_AND_ASSIGN(BasicURLRequestContext);
+};
+
+}  // namespace
+
+// Cache defaults: in-memory, with max_size of 0 meaning "size chosen
+// automatically" (see header comment).
+URLRequestContextBuilder::HttpCacheParams::HttpCacheParams()
+    : type(IN_MEMORY),
+      max_size(0) {}
+URLRequestContextBuilder::HttpCacheParams::~HttpCacheParams() {}
+
+// Session defaults: strict certificate checking, no host mapping, pipelining
+// off, no fixed test ports, no trusted SPDY proxy.
+URLRequestContextBuilder::HttpNetworkSessionParams::HttpNetworkSessionParams()
+    : ignore_certificate_errors(false),
+      host_mapping_rules(NULL),
+      http_pipelining_enabled(false),
+      testing_fixed_http_port(0),
+      testing_fixed_https_port(0),
+      trusted_spdy_proxy() {}
+
+URLRequestContextBuilder::HttpNetworkSessionParams::~HttpNetworkSessionParams()
+{}
+
+// Builder defaults: FTP disabled, HTTP cache enabled.
+URLRequestContextBuilder::URLRequestContextBuilder()
+    : ftp_enabled_(false),
+      http_cache_enabled_(true) {}
+URLRequestContextBuilder::~URLRequestContextBuilder() {}
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+// Takes ownership of |proxy_config_service|; Build() will release it into the
+// created ProxyService.
+void URLRequestContextBuilder::set_proxy_config_service(
+    ProxyConfigService* proxy_config_service) {
+  proxy_config_service_.reset(proxy_config_service);
+}
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+// Assembles a BasicURLRequestContext from the accumulated settings.  The
+// returned context owns all of its components (via URLRequestContextStorage).
+// Returns NULL when the platform uses a native HTTP stack instead of this one.
+// Note: consumes |network_delegate_| and (on Linux/Android)
+// |proxy_config_service_|, so those must be re-set before calling Build again.
+URLRequestContext* URLRequestContextBuilder::Build() {
+#if __LB_ENABLE_NATIVE_HTTP_STACK__
+  return NULL;
+#else
+  BasicURLRequestContext* context = new BasicURLRequestContext;
+  URLRequestContextStorage* storage = context->storage();
+
+  storage->set_http_user_agent_settings(new StaticHttpUserAgentSettings(
+      accept_language_, accept_charset_, user_agent_));
+
+  if (!network_delegate_)
+    network_delegate_.reset(new BasicNetworkDelegate);
+  storage->set_network_delegate(network_delegate_.release());
+
+  storage->set_host_resolver(net::HostResolver::CreateDefaultResolver(NULL));
+
+// Fixed: test with defined() to match every other use of this macro in the
+// codebase (plain "#if !DISABLE_FTP_SUPPORT" breaks if the macro is defined
+// with no value).
+#if !defined(DISABLE_FTP_SUPPORT)
+  if (ftp_enabled_) {
+    storage->set_ftp_transaction_factory(
+        new FtpNetworkLayer(context->host_resolver()));
+  }
+#endif
+
+  context->StartFileThread();
+
+  // TODO(willchan): Switch to using this code when
+  // ProxyService::CreateSystemProxyConfigService()'s signature doesn't suck.
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  ProxyConfigService* proxy_config_service = proxy_config_service_.release();
+#else
+  ProxyConfigService* proxy_config_service =
+      ProxyService::CreateSystemProxyConfigService(
+          base::ThreadTaskRunnerHandle::Get(),
+          context->file_message_loop());
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+  storage->set_proxy_service(
+      ProxyService::CreateUsingSystemProxyResolver(
+          proxy_config_service,
+          4,  // TODO(willchan): Find a better constant somewhere.
+          context->net_log()));
+  storage->set_ssl_config_service(new net::SSLConfigServiceDefaults);
+  storage->set_http_auth_handler_factory(
+      net::HttpAuthHandlerRegistryFactory::CreateDefault(
+          context->host_resolver()));
+  storage->set_cookie_store(new CookieMonster(NULL, NULL));
+  storage->set_transport_security_state(new net::TransportSecurityState());
+  storage->set_http_server_properties(new net::HttpServerPropertiesImpl);
+  storage->set_cert_verifier(CertVerifier::CreateDefault());
+
+  // Mirror the builder's session settings into the real session params.
+  net::HttpNetworkSession::Params network_session_params;
+  network_session_params.host_resolver = context->host_resolver();
+  network_session_params.cert_verifier = context->cert_verifier();
+  network_session_params.transport_security_state =
+      context->transport_security_state();
+  network_session_params.proxy_service = context->proxy_service();
+  network_session_params.ssl_config_service =
+      context->ssl_config_service();
+  network_session_params.http_auth_handler_factory =
+      context->http_auth_handler_factory();
+  network_session_params.network_delegate =
+      context->network_delegate();
+  network_session_params.http_server_properties =
+      context->http_server_properties();
+  network_session_params.net_log = context->net_log();
+  network_session_params.ignore_certificate_errors =
+      http_network_session_params_.ignore_certificate_errors;
+  network_session_params.host_mapping_rules =
+      http_network_session_params_.host_mapping_rules;
+  network_session_params.http_pipelining_enabled =
+      http_network_session_params_.http_pipelining_enabled;
+  network_session_params.testing_fixed_http_port =
+      http_network_session_params_.testing_fixed_http_port;
+  network_session_params.testing_fixed_https_port =
+      http_network_session_params_.testing_fixed_https_port;
+  network_session_params.trusted_spdy_proxy =
+      http_network_session_params_.trusted_spdy_proxy;
+
+  HttpTransactionFactory* http_transaction_factory = NULL;
+  if (http_cache_enabled_) {
+    // NOTE(review): server_bound_cert_service is only wired up on the cached
+    // path; confirm whether the cache-less path should set it too.
+    network_session_params.server_bound_cert_service =
+        context->server_bound_cert_service();
+    HttpCache::BackendFactory* http_cache_backend = NULL;
+    if (http_cache_params_.type == HttpCacheParams::DISK) {
+      context->StartCacheThread();
+      http_cache_backend =
+          new HttpCache::DefaultBackend(DISK_CACHE,
+                                        http_cache_params_.path,
+                                        http_cache_params_.max_size,
+                                        context->cache_message_loop_proxy());
+    } else {
+      http_cache_backend =
+          HttpCache::DefaultBackend::InMemory(http_cache_params_.max_size);
+    }
+
+    // The HttpCache constructs its own HttpNetworkSession from the params.
+    http_transaction_factory = new HttpCache(
+        network_session_params, http_cache_backend);
+  } else {
+    scoped_refptr<net::HttpNetworkSession> network_session(
+        new net::HttpNetworkSession(network_session_params));
+
+    http_transaction_factory = new HttpNetworkLayer(network_session);
+  }
+  storage->set_http_transaction_factory(http_transaction_factory);
+
+  // TODO(willchan): Support sdch.
+
+  return context;
+#endif
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_context_builder.h b/src/net/url_request/url_request_context_builder.h
new file mode 100644
index 0000000..fd21f8a
--- /dev/null
+++ b/src/net/url_request/url_request_context_builder.h
@@ -0,0 +1,135 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This class is useful for building a simple URLRequestContext. Most creators
+// of new URLRequestContexts should use this helper class to construct it. Call
+// any configuration params, and when done, invoke Build() to construct the
+// URLRequestContext. This URLRequestContext will own all its own storage.
+//
+// URLRequestContextBuilder and its associated params classes are initially
+// populated with "sane" default values. Read through the comments to figure out
+// what these are.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_CONTEXT_BUILDER_H_
+#define NET_URL_REQUEST_URL_REQUEST_CONTEXT_BUILDER_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/file_path.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "build/build_config.h"
+#include "net/base/net_export.h"
+
+namespace net {
+
+class HostMappingRules;
+class ProxyConfigService;
+class URLRequestContext;
+class NetworkDelegate;
+
+class NET_EXPORT URLRequestContextBuilder {
+ public:
+  struct NET_EXPORT HttpCacheParams {
+    enum Type {
+      IN_MEMORY,
+      DISK,
+    };
+
+    HttpCacheParams();
+    ~HttpCacheParams();
+
+    // The type of HTTP cache. Default is IN_MEMORY.
+    Type type;
+
+    // The max size of the cache in bytes. Default is algorithmically determined
+    // based off available disk space.
+    int max_size;
+
+    // The cache path (when type is DISK).
+    FilePath path;
+  };
+
+  struct NET_EXPORT HttpNetworkSessionParams {
+    HttpNetworkSessionParams();
+    ~HttpNetworkSessionParams();
+
+    // These fields mirror those in net::HttpNetworkSession::Params;
+    bool ignore_certificate_errors;
+    HostMappingRules* host_mapping_rules;
+    bool http_pipelining_enabled;
+    uint16 testing_fixed_http_port;
+    uint16 testing_fixed_https_port;
+    std::string trusted_spdy_proxy;
+  };
+
+  URLRequestContextBuilder();
+  ~URLRequestContextBuilder();
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  // Takes ownership of |proxy_config_service|.
+  void set_proxy_config_service(ProxyConfigService* proxy_config_service);
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+  // Call these functions to specify hard-coded Accept-Language,
+  // Accept-Charset, or User-Agent header values for all requests that don't
+  // have the headers already set.
+  void set_accept_language(const std::string& accept_language) {
+    accept_language_ = accept_language;
+  }
+  void set_accept_charset(const std::string& accept_charset) {
+    accept_charset_ = accept_charset;
+  }
+  void set_user_agent(const std::string& user_agent) {
+    user_agent_ = user_agent;
+  }
+
+  // By default it's disabled.
+  void set_ftp_enabled(bool enable) {
+    ftp_enabled_ = enable;
+  }
+
+  // Uses BasicNetworkDelegate by default. Note that calling Build will unset
+  // any custom delegate in builder, so this must be called each time before
+  // Build is called.
+  void set_network_delegate(NetworkDelegate* delegate) {
+    network_delegate_.reset(delegate);
+  }
+
+  // By default HttpCache is enabled with a default constructed HttpCacheParams.
+  // Fixed: these now update |http_cache_enabled_| as well; previously
+  // DisableHttpCache() only reset the params, so Build() still created an
+  // HttpCache because the flag (initialized true) was never cleared.
+  void EnableHttpCache(const HttpCacheParams& params) {
+    http_cache_enabled_ = true;
+    http_cache_params_ = params;
+  }
+
+  void DisableHttpCache() {
+    http_cache_enabled_ = false;
+    http_cache_params_ = HttpCacheParams();
+  }
+
+  // Override default net::HttpNetworkSession::Params settings.
+  void set_http_network_session_params(
+      const HttpNetworkSessionParams& http_network_session_params) {
+    http_network_session_params_ = http_network_session_params;
+  }
+
+  // Constructs the context; caller takes ownership.  May return NULL on
+  // platforms that use a native HTTP stack (see .cc).
+  URLRequestContext* Build();
+
+ private:
+  std::string accept_language_;
+  std::string accept_charset_;
+  std::string user_agent_;
+  bool ftp_enabled_;
+  // Whether Build() wraps the transaction factory in an HttpCache.
+  bool http_cache_enabled_;
+  HttpCacheParams http_cache_params_;
+  HttpNetworkSessionParams http_network_session_params_;
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+  scoped_ptr<ProxyConfigService> proxy_config_service_;
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+  scoped_ptr<NetworkDelegate> network_delegate_;
+
+  DISALLOW_COPY_AND_ASSIGN(URLRequestContextBuilder);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_CONTEXT_BUILDER_H_
diff --git a/src/net/url_request/url_request_context_builder_unittest.cc b/src/net/url_request/url_request_context_builder_unittest.cc
new file mode 100644
index 0000000..bc93416
--- /dev/null
+++ b/src/net/url_request/url_request_context_builder_unittest.cc
@@ -0,0 +1,83 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_context_builder.h"
+
+#include "build/build_config.h"
+#include "net/test/test_server.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#include "net/proxy/proxy_config.h"
+#include "net/proxy/proxy_config_service_fixed.h"
+#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
+
+namespace net {
+
+namespace {
+
+// A subclass of TestServer that uses a statically-configured hostname. This is
+// to work around mysterious failures in chrome_frame_net_tests. See:
+// http://crbug.com/114369
class LocalHttpTestServer : public TestServer {
 public:
  // Serves files out of |document_root|.
  explicit LocalHttpTestServer(const FilePath& document_root)
      : TestServer(TestServer::TYPE_HTTP,
                   ScopedCustomUrlRequestTestHttpHost::value(),
                   document_root) {}
  // Serves with an empty document root.
  LocalHttpTestServer()
      : TestServer(TestServer::TYPE_HTTP,
                   ScopedCustomUrlRequestTestHttpHost::value(),
                   FilePath()) {}
};
+
// Fixture providing a local HTTP test server and a fresh builder per test.
class URLRequestContextBuilderTest : public PlatformTest {
 protected:
  URLRequestContextBuilderTest()
      : test_server_(
          FilePath(FILE_PATH_LITERAL("net/data/url_request_unittest"))) {
#if defined(OS_LINUX) || defined(OS_ANDROID)
    // NOTE(review): presumably these platforms cannot supply a default
    // ProxyConfigService, so hand the builder a fixed direct-connect config;
    // confirm against URLRequestContextBuilder::Build().
    builder_.set_proxy_config_service(
        new ProxyConfigServiceFixed(ProxyConfig::CreateDirect()));
#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
  }

  LocalHttpTestServer test_server_;
  URLRequestContextBuilder builder_;
};
+
+TEST_F(URLRequestContextBuilderTest, DefaultSettings) {
+  ASSERT_TRUE(test_server_.Start());
+
+  scoped_ptr<URLRequestContext> context(builder_.Build());
+  TestDelegate delegate;
+  URLRequest request(
+      test_server_.GetURL("echoheader?Foo"), &delegate, context.get());
+  request.set_method("GET");
+  request.SetExtraRequestHeaderByName("Foo", "Bar", false);
+  request.Start();
+  MessageLoop::current()->Run();
+  EXPECT_EQ("Bar", delegate.data_received());
+}
+
+TEST_F(URLRequestContextBuilderTest, UserAgent) {
+  ASSERT_TRUE(test_server_.Start());
+
+  builder_.set_user_agent("Bar");
+  scoped_ptr<URLRequestContext> context(builder_.Build());
+  TestDelegate delegate;
+  URLRequest request(
+      test_server_.GetURL("echoheader?User-Agent"), &delegate, context.get());
+  request.set_method("GET");
+  request.Start();
+  MessageLoop::current()->Run();
+  EXPECT_EQ("Bar", delegate.data_received());
+}
+
+}  // namespace
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_context_getter.cc b/src/net/url_request/url_request_context_getter.cc
new file mode 100644
index 0000000..44b7bf4
--- /dev/null
+++ b/src/net/url_request/url_request_context_getter.cc
@@ -0,0 +1,38 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_context_getter.h"
+
+#include "base/location.h"
+#include "base/single_thread_task_runner.h"
+#include "net/url_request/url_request_context.h"
+
+namespace net {
+
URLRequestContextGetter::URLRequestContextGetter() {}

// Protected (see header); instances are destroyed through
// URLRequestContextGetterTraits::Destruct -> OnDestruct(), never directly.
URLRequestContextGetter::~URLRequestContextGetter() {}
+
+void URLRequestContextGetter::OnDestruct() const {
+  scoped_refptr<base::SingleThreadTaskRunner> network_task_runner =
+      GetNetworkTaskRunner();
+  DCHECK(network_task_runner);
+  if (network_task_runner) {
+    if (network_task_runner->BelongsToCurrentThread()) {
+      delete this;
+    } else {
+      if (!network_task_runner->DeleteSoon(FROM_HERE, this)) {
+        // Can't force-delete the object here, because some derived classes
+        // can only be deleted on the owning thread, so just emit a warning to
+        // aid in debugging.
+        DLOG(WARNING) << "URLRequestContextGetter leaking due to no owning"
+                      << " thread.";
+      }
+    }
+  }
+  // If no IO message loop proxy was available, we will just leak memory.
+  // This is also true if the IO thread is gone.
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_context_getter.h b/src/net/url_request/url_request_context_getter.h
new file mode 100644
index 0000000..4c8e4af
--- /dev/null
+++ b/src/net/url_request/url_request_context_getter.h
@@ -0,0 +1,58 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_COMMON_NET_URL_REQUEST_CONTEXT_GETTER_H_
+#define CHROME_COMMON_NET_URL_REQUEST_CONTEXT_GETTER_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner_helpers.h"
+#include "net/base/net_export.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+}  // namespace base
+
+namespace net {
+class CookieStore;
+class URLRequestContext;
+
+struct URLRequestContextGetterTraits;
+
+// Interface for retrieving an net::URLRequestContext.
class NET_EXPORT URLRequestContextGetter
    : public base::RefCountedThreadSafe<URLRequestContextGetter,
                                        URLRequestContextGetterTraits> {
 public:
  // Returns the URLRequestContext. NOTE(review): presumably only valid on the
  // network thread (see GetNetworkTaskRunner()); confirm with implementations.
  virtual URLRequestContext* GetURLRequestContext() = 0;

  // Returns a SingleThreadTaskRunner corresponding to the thread on
  // which the network IO happens (the thread on which the returned
  // net::URLRequestContext may be used).
  virtual scoped_refptr<base::SingleThreadTaskRunner>
      GetNetworkTaskRunner() const = 0;

 protected:
  friend class base::RefCountedThreadSafe<URLRequestContextGetter,
                                          URLRequestContextGetterTraits>;
  friend class base::DeleteHelper<URLRequestContextGetter>;
  friend struct URLRequestContextGetterTraits;

  URLRequestContextGetter();
  virtual ~URLRequestContextGetter();

 private:
  // OnDestruct is meant to ensure deletion on the thread on which the request
  // IO happens.
  void OnDestruct() const;
};
+
// Deleter traits for the RefCountedThreadSafe base above: routes the final
// release through OnDestruct() instead of a plain delete, so destruction can
// be marshalled to the network task runner's thread.
struct URLRequestContextGetterTraits {
  static void Destruct(const URLRequestContextGetter* context_getter) {
    context_getter->OnDestruct();
  }
};
+
+}  // namespace net
+
+#endif  // CHROME_COMMON_NET_URL_REQUEST_CONTEXT_GETTER_H_
diff --git a/src/net/url_request/url_request_context_storage.cc b/src/net/url_request/url_request_context_storage.cc
new file mode 100644
index 0000000..cc57078
--- /dev/null
+++ b/src/net/url_request/url_request_context_storage.cc
@@ -0,0 +1,133 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_context_storage.h"
+
+#include "base/logging.h"
+#include "net/base/cert_verifier.h"
+#include "net/base/host_resolver.h"
+#include "net/base/net_log.h"
+#include "net/base/network_delegate.h"
+#include "net/base/server_bound_cert_service.h"
+#include "net/cookies/cookie_store.h"
+#include "net/ftp/ftp_transaction_factory.h"
+#include "net/http/http_auth_handler_factory.h"
+#include "net/http/http_server_properties.h"
+#include "net/http/http_transaction_factory.h"
+#include "net/proxy/proxy_service.h"
+#include "net/url_request/fraudulent_certificate_reporter.h"
+#include "net/url_request/http_user_agent_settings.h"
+#include "net/url_request/url_request_context.h"
+#include "net/url_request/url_request_job_factory.h"
+#include "net/url_request/url_request_throttler_manager.h"
+
+namespace net {
+
URLRequestContextStorage::URLRequestContextStorage(URLRequestContext* context)
    : context_(context) {
  DCHECK(context);  // Every setter below dereferences |context_|.
}

URLRequestContextStorage::~URLRequestContextStorage() {}
+
// Each setter below installs the object on |context_| and then takes
// ownership of it via the corresponding scoped_ptr member. The two
// scoped_refptr members (ssl_config_service_, cookie_store_) take a
// reference instead, since those types are refcounted.

void URLRequestContextStorage::set_net_log(NetLog* net_log) {
  context_->set_net_log(net_log);
  net_log_.reset(net_log);
}

void URLRequestContextStorage::set_host_resolver(
    scoped_ptr<HostResolver> host_resolver) {
  context_->set_host_resolver(host_resolver.get());
  host_resolver_ = host_resolver.Pass();
}

void URLRequestContextStorage::set_cert_verifier(CertVerifier* cert_verifier) {
  context_->set_cert_verifier(cert_verifier);
  cert_verifier_.reset(cert_verifier);
}

void URLRequestContextStorage::set_server_bound_cert_service(
    ServerBoundCertService* server_bound_cert_service) {
  context_->set_server_bound_cert_service(server_bound_cert_service);
  server_bound_cert_service_.reset(server_bound_cert_service);
}

void URLRequestContextStorage::set_fraudulent_certificate_reporter(
    FraudulentCertificateReporter* fraudulent_certificate_reporter) {
  context_->set_fraudulent_certificate_reporter(
      fraudulent_certificate_reporter);
  fraudulent_certificate_reporter_.reset(fraudulent_certificate_reporter);
}

void URLRequestContextStorage::set_http_auth_handler_factory(
    HttpAuthHandlerFactory* http_auth_handler_factory) {
  context_->set_http_auth_handler_factory(http_auth_handler_factory);
  http_auth_handler_factory_.reset(http_auth_handler_factory);
}

void URLRequestContextStorage::set_proxy_service(ProxyService* proxy_service) {
  context_->set_proxy_service(proxy_service);
  proxy_service_.reset(proxy_service);
}

void URLRequestContextStorage::set_ssl_config_service(
    SSLConfigService* ssl_config_service) {
  context_->set_ssl_config_service(ssl_config_service);
  ssl_config_service_ = ssl_config_service;
}

void URLRequestContextStorage::set_network_delegate(
    NetworkDelegate* network_delegate) {
  context_->set_network_delegate(network_delegate);
  network_delegate_.reset(network_delegate);
}

void URLRequestContextStorage::set_http_server_properties(
    HttpServerProperties* http_server_properties) {
  context_->set_http_server_properties(http_server_properties);
  http_server_properties_.reset(http_server_properties);
}

void URLRequestContextStorage::set_cookie_store(CookieStore* cookie_store) {
  context_->set_cookie_store(cookie_store);
  cookie_store_ = cookie_store;
}

void URLRequestContextStorage::set_transport_security_state(
    TransportSecurityState* transport_security_state) {
  context_->set_transport_security_state(transport_security_state);
  transport_security_state_.reset(transport_security_state);
}

void URLRequestContextStorage::set_http_transaction_factory(
    HttpTransactionFactory* http_transaction_factory) {
  context_->set_http_transaction_factory(http_transaction_factory);
  http_transaction_factory_.reset(http_transaction_factory);
}

void URLRequestContextStorage::set_ftp_transaction_factory(
    FtpTransactionFactory* ftp_transaction_factory) {
  context_->set_ftp_transaction_factory(ftp_transaction_factory);
  ftp_transaction_factory_.reset(ftp_transaction_factory);
}

void URLRequestContextStorage::set_job_factory(
    URLRequestJobFactory* job_factory) {
  context_->set_job_factory(job_factory);
  job_factory_.reset(job_factory);
}

void URLRequestContextStorage::set_throttler_manager(
    URLRequestThrottlerManager* throttler_manager) {
  context_->set_throttler_manager(throttler_manager);
  throttler_manager_.reset(throttler_manager);
}

void URLRequestContextStorage::set_http_user_agent_settings(
    HttpUserAgentSettings* http_user_agent_settings) {
  context_->set_http_user_agent_settings(http_user_agent_settings);
  http_user_agent_settings_.reset(http_user_agent_settings);
}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_context_storage.h b/src/net/url_request/url_request_context_storage.h
new file mode 100644
index 0000000..62f4c2a
--- /dev/null
+++ b/src/net/url_request/url_request_context_storage.h
@@ -0,0 +1,104 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_CONTEXT_STORAGE_H_
+#define NET_URL_REQUEST_URL_REQUEST_CONTEXT_STORAGE_H_
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "net/base/net_export.h"
+
+namespace net {
+
+class CertVerifier;
+class CookieStore;
+class FraudulentCertificateReporter;
+class FtpTransactionFactory;
+class HostResolver;
+class HttpAuthHandlerFactory;
+class HttpServerProperties;
+class HttpTransactionFactory;
+class HttpUserAgentSettings;
+class NetLog;
+class NetworkDelegate;
+class ServerBoundCertService;
+class ProxyService;
+class SSLConfigService;
+class TransportSecurityState;
+class URLRequestContext;
+class URLRequestJobFactory;
+class URLRequestThrottlerManager;
+
+// URLRequestContextStorage is a helper class that provides storage for unowned
+// member variables of URLRequestContext.
class NET_EXPORT URLRequestContextStorage {
 public:
  // Note that URLRequestContextStorage does not acquire a reference to
  // URLRequestContext, since it is often designed to be embedded in a
  // URLRequestContext subclass.
  explicit URLRequestContextStorage(URLRequestContext* context);
  ~URLRequestContextStorage();

  // These setters will set both the member variables and call the setter on the
  // URLRequestContext object. In all cases, ownership is passed to |this|
  // (for the refcounted SSLConfigService and CookieStore members a reference
  // is taken, rather than exclusive ownership).

  void set_net_log(NetLog* net_log);
  void set_host_resolver(scoped_ptr<HostResolver> host_resolver);
  void set_cert_verifier(CertVerifier* cert_verifier);
  void set_server_bound_cert_service(
      ServerBoundCertService* server_bound_cert_service);
  void set_fraudulent_certificate_reporter(
      FraudulentCertificateReporter* fraudulent_certificate_reporter);
  void set_http_auth_handler_factory(
      HttpAuthHandlerFactory* http_auth_handler_factory);
  void set_proxy_service(ProxyService* proxy_service);
  void set_ssl_config_service(SSLConfigService* ssl_config_service);
  void set_network_delegate(NetworkDelegate* network_delegate);
  void set_http_server_properties(HttpServerProperties* http_server_properties);
  void set_cookie_store(CookieStore* cookie_store);
  void set_transport_security_state(
      TransportSecurityState* transport_security_state);
  void set_http_transaction_factory(
      HttpTransactionFactory* http_transaction_factory);
  void set_ftp_transaction_factory(
      FtpTransactionFactory* ftp_transaction_factory);
  void set_job_factory(URLRequestJobFactory* job_factory);
  void set_throttler_manager(URLRequestThrottlerManager* throttler_manager);
  void set_http_user_agent_settings(
      HttpUserAgentSettings* http_user_agent_settings);

 private:
  // We use a raw pointer to prevent reference cycles, since
  // URLRequestContextStorage can often be contained within a URLRequestContext
  // subclass.
  URLRequestContext* const context_;

  // Owned members.
  scoped_ptr<NetLog> net_log_;
  scoped_ptr<HostResolver> host_resolver_;
  scoped_ptr<CertVerifier> cert_verifier_;
  scoped_ptr<ServerBoundCertService> server_bound_cert_service_;
  scoped_ptr<FraudulentCertificateReporter> fraudulent_certificate_reporter_;
  scoped_ptr<HttpAuthHandlerFactory> http_auth_handler_factory_;
  scoped_ptr<ProxyService> proxy_service_;
  // TODO(willchan): Remove refcounting on these members.
  scoped_refptr<SSLConfigService> ssl_config_service_;
  scoped_ptr<NetworkDelegate> network_delegate_;
  scoped_ptr<HttpServerProperties> http_server_properties_;
  scoped_ptr<HttpUserAgentSettings> http_user_agent_settings_;
  scoped_refptr<CookieStore> cookie_store_;
  scoped_ptr<TransportSecurityState> transport_security_state_;

  scoped_ptr<HttpTransactionFactory> http_transaction_factory_;
  scoped_ptr<FtpTransactionFactory> ftp_transaction_factory_;
  scoped_ptr<URLRequestJobFactory> job_factory_;
  scoped_ptr<URLRequestThrottlerManager> throttler_manager_;

  DISALLOW_COPY_AND_ASSIGN(URLRequestContextStorage);
};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_CONTEXT_STORAGE_H_
diff --git a/src/net/url_request/url_request_data_job.cc b/src/net/url_request/url_request_data_job.cc
new file mode 100644
index 0000000..5dffe32
--- /dev/null
+++ b/src/net/url_request/url_request_data_job.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Simple implementation of a data: protocol handler.
+
+#include "net/url_request/url_request_data_job.h"
+
+#include "net/base/data_url.h"
+#include "net/base/net_errors.h"
+
+namespace net {
+
// The data: payload is not parsed here; extraction happens in GetData().
URLRequestDataJob::URLRequestDataJob(
    URLRequest* request, NetworkDelegate* network_delegate)
    : URLRequestSimpleJob(request, network_delegate) {
}
+
// static
// Entry point matching URLRequest::ProtocolFactory. |scheme| is unused;
// presumably this factory is only ever registered for the "data" scheme.
URLRequestJob* URLRequestDataJob::Factory(URLRequest* request,
                                          NetworkDelegate* network_delegate,
                                          const std::string& scheme) {
  return new URLRequestDataJob(request, network_delegate);
}
+
+int URLRequestDataJob::GetData(std::string* mime_type,
+                               std::string* charset,
+                               std::string* data,
+                               const CompletionCallback& callback) const {
+  // Check if data URL is valid. If not, don't bother to try to extract data.
+  // Otherwise, parse the data from the data URL.
+  const GURL& url = request_->url();
+  if (!url.is_valid())
+    return ERR_INVALID_URL;
+  return DataURL::Parse(url, mime_type, charset, data)? OK: ERR_INVALID_URL;
+}
+
// Destructor is private (see header); nothing to clean up.
URLRequestDataJob::~URLRequestDataJob() {
}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_data_job.h b/src/net/url_request/url_request_data_job.h
new file mode 100644
index 0000000..7ab6561
--- /dev/null
+++ b/src/net/url_request/url_request_data_job.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_DATA_JOB_H_
+#define NET_URL_REQUEST_URL_REQUEST_DATA_JOB_H_
+
+#include <string>
+
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_simple_job.h"
+
+namespace net {
+
+class URLRequest;
+
// URLRequestJob serving the payload embedded in a data: URL; the mime type,
// charset, and body are extracted by DataURL::Parse() inside GetData().
class URLRequestDataJob : public URLRequestSimpleJob {
 public:
  URLRequestDataJob(URLRequest* request, NetworkDelegate* network_delegate);

  // Factory function matching URLRequest::ProtocolFactory's signature.
  static URLRequest::ProtocolFactory Factory;

  // URLRequestSimpleJob
  virtual int GetData(std::string* mime_type,
                      std::string* charset,
                      std::string* data,
                      const CompletionCallback& callback) const OVERRIDE;

 private:
  virtual ~URLRequestDataJob();

  DISALLOW_COPY_AND_ASSIGN(URLRequestDataJob);
};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_DATA_JOB_H_
diff --git a/src/net/url_request/url_request_error_job.cc b/src/net/url_request/url_request_error_job.cc
new file mode 100644
index 0000000..71069f2
--- /dev/null
+++ b/src/net/url_request/url_request_error_job.cc
@@ -0,0 +1,33 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_error_job.h"
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/message_loop.h"
+#include "net/base/net_errors.h"
+#include "net/url_request/url_request_status.h"
+
+namespace net {
+
// |error| is the net error code that will be reported (asynchronously) when
// the job is started.
URLRequestErrorJob::URLRequestErrorJob(
    URLRequest* request, NetworkDelegate* network_delegate, int error)
    : URLRequestJob(request, network_delegate),
      error_(error),
      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) {}

URLRequestErrorJob::~URLRequestErrorJob() {}
+
void URLRequestErrorJob::Start() {
  // Report the error asynchronously so callbacks fire as they would for a
  // real network request; the weak pointer keeps the posted task from running
  // if the job is destroyed first.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestErrorJob::StartAsync, weak_factory_.GetWeakPtr()));
}

// Delivers the failure status carrying |error_| to the request.
void URLRequestErrorJob::StartAsync() {
  NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error_));
}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_error_job.h b/src/net/url_request/url_request_error_job.h
new file mode 100644
index 0000000..7c162f0
--- /dev/null
+++ b/src/net/url_request/url_request_error_job.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Invalid URLs go through this URLRequestJob class rather than being
+// passed to the default job handler.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_ERROR_JOB_H_
+#define NET_URL_REQUEST_URL_REQUEST_ERROR_JOB_H_
+
#include "base/basictypes.h"
#include "base/memory/weak_ptr.h"
#include "net/base/net_export.h"
#include "net/url_request/url_request_job.h"
+
+namespace net {
+
+class NET_EXPORT URLRequestErrorJob : public URLRequestJob {
+ public:
+  URLRequestErrorJob(URLRequest* request,
+                     NetworkDelegate* network_delegate,
+                     int error);
+
+  virtual void Start() OVERRIDE;
+
+ private:
+  virtual ~URLRequestErrorJob();
+
+  void StartAsync();
+
+  int error_;
+
+  base::WeakPtrFactory<URLRequestErrorJob> weak_factory_;
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_ERROR_JOB_H_
diff --git a/src/net/url_request/url_request_file_dir_job.cc b/src/net/url_request/url_request_file_dir_job.cc
new file mode 100644
index 0000000..445c1eb
--- /dev/null
+++ b/src/net/url_request/url_request_file_dir_job.cc
@@ -0,0 +1,197 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_file_dir_job.h"
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/message_loop.h"
+#include "base/sys_string_conversions.h"
+#include "base/utf_string_conversions.h"
+#include "base/time.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/base/net_util.h"
+#include "net/url_request/url_request_status.h"
+
+#if defined(OS_POSIX)
+#include <sys/stat.h>
+#endif
+
+namespace net {
+
URLRequestFileDirJob::URLRequestFileDirJob(URLRequest* request,
                                           NetworkDelegate* network_delegate,
                                           const FilePath& dir_path)
    : URLRequestJob(request, network_delegate),
      // The lister calls back into |this| via OnListFile()/OnListDone().
      ALLOW_THIS_IN_INITIALIZER_LIST(lister_(dir_path, this)),
      dir_path_(dir_path),
      canceled_(false),
      list_complete_(false),
      wrote_header_(false),
      read_pending_(false),
      read_buffer_length_(0),
      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) {
}
+
void URLRequestFileDirJob::StartAsync() {
  // Kick off the asynchronous directory enumeration; entries arrive through
  // OnListFile() and completion through OnListDone().
  lister_.Start();

  NotifyHeadersComplete();
}
+
void URLRequestFileDirJob::Start() {
  // Start reading asynchronously so that all error reporting and data
  // callbacks happen as they would for network requests.
  // The weak pointer lets Kill() cancel this task before it runs.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestFileDirJob::StartAsync,
                 weak_factory_.GetWeakPtr()));
}
+
void URLRequestFileDirJob::Kill() {
  if (canceled_)
    return;

  canceled_ = true;

  // Stop the lister unless it already finished on its own.
  if (!list_complete_)
    lister_.Cancel();

  URLRequestJob::Kill();

  // Cancels the StartAsync() task posted by Start(), if still pending.
  weak_factory_.InvalidateWeakPtrs();
}
+
bool URLRequestFileDirJob::ReadRawData(IOBuffer* buf, int buf_size,
                                       int* bytes_read) {
  DCHECK(bytes_read);
  *bytes_read = 0;

  // A finished job reports EOF: true with zero bytes read.
  if (is_done())
    return true;

  if (FillReadBuffer(buf->data(), buf_size, bytes_read))
    return true;

  // We are waiting for more data: hold on to the caller's buffer and signal
  // IO_PENDING; CompleteRead() finishes this read once the lister delivers
  // more entries (or the listing completes).
  read_pending_ = true;
  read_buffer_ = buf;
  read_buffer_length_ = buf_size;
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
  return false;
}
+
+bool URLRequestFileDirJob::GetMimeType(std::string* mime_type) const {
+  *mime_type = "text/html";
+  return true;
+}
+
+bool URLRequestFileDirJob::GetCharset(std::string* charset) {
+  // All the filenames are converted to UTF-8 before being added.
+  *charset = "utf-8";
+  return true;
+}
+
void URLRequestFileDirJob::OnListFile(
    const DirectoryLister::DirectoryListerData& data) {
  // We wait to write out the header until we get the first file, so that we
  // can catch errors from DirectoryLister and show an error page.
  if (!wrote_header_) {
#if defined(OS_WIN)
    const string16& title = dir_path_.value();
#elif defined(OS_POSIX)
    // TODO(jungshik): Add SysNativeMBToUTF16 to sys_string_conversions.
    // On Mac, need to add NFKC->NFC conversion either here or in file_path.
    // On Linux, the file system encoding is not defined, but we assume that
    // SysNativeMBToWide takes care of it at least for now. We can try something
    // more sophisticated if necessary later.
    const string16& title = WideToUTF16(
        base::SysNativeMBToWide(dir_path_.value()));
#endif
    data_.append(GetDirectoryListingHeader(title));
    wrote_header_ = true;
  }

#if defined(OS_WIN)
  int64 size = (static_cast<unsigned __int64>(data.info.nFileSizeHigh) << 32) |
      data.info.nFileSizeLow;

  // Note that we should not convert ftLastWriteTime to the local time because
  // ICU's datetime formatting APIs expect time in UTC and take into account
  // the timezone before formatting.
  data_.append(GetDirectoryListingEntry(
      data.info.cFileName,
      std::string(),
      ((data.info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0),
      size,
      base::Time::FromFileTime(data.info.ftLastWriteTime)));
#elif defined(OS_POSIX)
  // TODO(jungshik): The same encoding issue as for the directory name above.
  data_.append(GetDirectoryListingEntry(
      WideToUTF16(base::SysNativeMBToWide(data.info.filename)),
      data.info.filename,
      S_ISDIR(data.info.stat.st_mode),
      data.info.stat.st_size,
      base::Time::FromTimeT(data.info.stat.st_mtime)));
#endif

  // TODO(darin): coalesce more?
  // Push the newly-appended entry (and header) to any pending reader.
  CompleteRead();
}
+
void URLRequestFileDirJob::OnListDone(int error) {
  DCHECK(!canceled_);
  if (error != OK) {
    // Drop any pending read; the whole request fails with |error|.
    read_pending_ = false;
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, error));
  } else {
    list_complete_ = true;
    // Flush remaining buffered data (possibly signalling EOF) to the reader.
    CompleteRead();
  }
}
+
+URLRequestFileDirJob::~URLRequestFileDirJob() {}
+
void URLRequestFileDirJob::CompleteRead() {
  if (read_pending_) {
    int bytes_read;
    if (FillReadBuffer(read_buffer_->data(), read_buffer_length_,
                       &bytes_read)) {
      // We completed the read, so reset the read buffer.
      read_pending_ = false;
      read_buffer_ = NULL;
      read_buffer_length_ = 0;

      SetStatus(URLRequestStatus());
      NotifyReadComplete(bytes_read);
    } else {
      // FillReadBuffer() only fails when there is no buffered data and the
      // listing is unfinished; this is called only after data was appended
      // (OnListFile) or the listing completed (OnListDone), so it can't
      // happen here.
      NOTREACHED();
      // TODO: Better error code.
      NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, 0));
    }
  }
}
+
+bool URLRequestFileDirJob::FillReadBuffer(char* buf, int buf_size,
+                                          int* bytes_read) {
+  DCHECK(bytes_read);
+
+  *bytes_read = 0;
+
+  int count = std::min(buf_size, static_cast<int>(data_.size()));
+  if (count) {
+    memcpy(buf, &data_[0], count);
+    data_.erase(0, count);
+    *bytes_read = count;
+    return true;
+  } else if (list_complete_) {
+    // EOF
+    return true;
+  }
+  return false;
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_file_dir_job.h b/src/net/url_request/url_request_file_dir_job.h
new file mode 100644
index 0000000..bc6cccb
--- /dev/null
+++ b/src/net/url_request/url_request_file_dir_job.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_FILE_DIR_JOB_H_
+#define NET_URL_REQUEST_URL_REQUEST_FILE_DIR_JOB_H_
+
+#include <string>
+
+#include "base/file_path.h"
+#include "base/file_util.h"
+#include "base/memory/weak_ptr.h"
+#include "net/base/directory_lister.h"
+#include "net/url_request/url_request_job.h"
+
+namespace net {
+
+class URLRequestFileDirJob
+  : public URLRequestJob,
+    public DirectoryLister::DirectoryListerDelegate {
+ public:
+  URLRequestFileDirJob(URLRequest* request,
+                       NetworkDelegate* network_delegate,
+                       const FilePath& dir_path);
+
+  bool list_complete() const { return list_complete_; }
+
+  virtual void StartAsync();
+
+  // Overridden from URLRequestJob:
+  virtual void Start() OVERRIDE;
+  virtual void Kill() OVERRIDE;
+  virtual bool ReadRawData(IOBuffer* buf,
+                           int buf_size,
+                           int *bytes_read) OVERRIDE;
+  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
+  virtual bool GetCharset(std::string* charset) OVERRIDE;
+
+  // Overridden from DirectoryLister::DirectoryListerDelegate:
+  virtual void OnListFile(
+      const DirectoryLister::DirectoryListerData& data) OVERRIDE;
+  virtual void OnListDone(int error) OVERRIDE;
+
+ private:
+  virtual ~URLRequestFileDirJob();
+
+  void CloseLister();
+
+  // When we have data and a read has been pending, this function
+  // will fill the response buffer and notify the request
+  // appropriately.
+  void CompleteRead();
+
+  // Fills a buffer with the output.
+  bool FillReadBuffer(char *buf, int buf_size, int *bytes_read);
+
+  DirectoryLister lister_;
+  FilePath dir_path_;
+  std::string data_;
+  bool canceled_;
+
+  // Indicates whether we have the complete list of the dir
+  bool list_complete_;
+
+  // Indicates whether we have written the HTML header
+  bool wrote_header_;
+
+  // To simulate Async IO, we hold onto the Reader's buffer while
+  // we wait for IO to complete.  When done, we fill the buffer
+  // manually.
+  bool read_pending_;
+  scoped_refptr<IOBuffer> read_buffer_;
+  int read_buffer_length_;
+  base::WeakPtrFactory<URLRequestFileDirJob> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(URLRequestFileDirJob);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_FILE_DIR_JOB_H_
diff --git a/src/net/url_request/url_request_file_job.cc b/src/net/url_request/url_request_file_job.cc
new file mode 100644
index 0000000..e6a17ab
--- /dev/null
+++ b/src/net/url_request/url_request_file_job.cc
@@ -0,0 +1,335 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// For loading files, we make use of overlapped i/o to ensure that reading from
+// the filesystem (e.g., a network filesystem) does not block the calling
+// thread.  An alternative approach would be to use a background thread or pool
+// of threads, but it seems better to leverage the operating system's ability
+// to do background file reads for us.
+//
+// Since overlapped reads require a 'static' buffer for the duration of the
+// asynchronous read, the URLRequestFileJob keeps a buffer as a member var.  In
+// URLRequestFileJob::Read, data is simply copied from the object's buffer into
+// the given buffer.  If there is no data to copy, the URLRequestFileJob
+// attempts to read more from the file to fill its buffer.  If reading from the
+// file does not complete synchronously, then the URLRequestFileJob waits for a
+// signal from the OS that the overlapped read has completed.  It does so by
+// leveraging the MessageLoop::WatchObject API.
+
+#include "net/url_request/url_request_file_job.h"
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/message_loop.h"
+#include "base/path_service.h"
+#include "base/platform_file.h"
+#include "base/string_util.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/worker_pool.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/file_stream.h"
+#include "net/base/io_buffer.h"
+#include "net/base/load_flags.h"
+#include "net/base/mime_util.h"
+#include "net/base/net_errors.h"
+#include "net/base/net_util.h"
+#include "net/http/http_util.h"
+#include "net/url_request/url_request_error_job.h"
+#include "net/url_request/url_request_file_dir_job.h"
+
+#if defined(OS_WIN)
+#include "base/win/shortcut.h"
+#endif
+
+namespace net {
+
+// Default-initializes the metadata to the "no file" state: zero size, no
+// mime type result, not existing, not a directory.
+URLRequestFileJob::FileMetaInfo::FileMetaInfo()
+    : file_size(0),
+      mime_type_result(false),
+      file_exists(false),
+      is_directory(false) {
+}
+
+// Constructs a job for |file_path|.  The FileStream is created eagerly (with
+// no underlying file yet); it is opened later in DidFetchMetaInfo().  The
+// weak-pointer factory lets Kill() cancel pending callbacks.
+URLRequestFileJob::URLRequestFileJob(URLRequest* request,
+                                     NetworkDelegate* network_delegate,
+                                     const FilePath& file_path)
+    : URLRequestJob(request, network_delegate),
+      file_path_(file_path),
+      stream_(new FileStream(NULL)),
+      remaining_bytes_(0),
+      weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
+}
+
+// static
+// Protocol factory for "file://" URLs.  Returns an error job when the
+// NetworkDelegate is absent or denies file access, a directory job (or, on
+// Cobalt, an error job) for absolute paths ending in a separator, and a
+// regular URLRequestFileJob for everything else.
+URLRequestJob* URLRequestFileJob::Factory(URLRequest* request,
+                                          NetworkDelegate* network_delegate,
+                                          const std::string& scheme) {
+  FilePath file_path;
+  const bool is_file = FileURLToFilePath(request->url(), &file_path);
+
+  // Check file access permissions.
+  if (!network_delegate ||
+      !network_delegate->CanAccessFile(*request, file_path)) {
+    return new URLRequestErrorJob(request, network_delegate, ERR_ACCESS_DENIED);
+  }
+
+
+  // We need to decide whether to create URLRequestFileJob for file access or
+  // URLRequestFileDirJob for directory access. To avoid accessing the
+  // filesystem, we only look at the path string here.
+  // The code in the URLRequestFileJob::Start() method discovers that a path,
+  // which doesn't end with a slash, should really be treated as a directory,
+  // and it then redirects to the URLRequestFileDirJob.
+  // Note: both #if branches below are the (unbraced) body of this if.
+  if (is_file &&
+      file_util::EndsWithSeparator(file_path) &&
+      file_path.IsAbsolute())
+#if defined(COBALT)
+    // We don't support FileDirJob.
+    return new URLRequestErrorJob(request, network_delegate, ERR_ACCESS_DENIED);
+#else
+    return new URLRequestFileDirJob(request, network_delegate, file_path);
+#endif
+
+  // Use a regular file request job for all non-directories (including invalid
+  // file names).
+  return new URLRequestFileJob(request, network_delegate, file_path);
+}
+
+// Kicks off the request.  File metadata (existence, size, directory flag,
+// mime type) is fetched on a WorkerPool thread because it requires blocking
+// disk access; DidFetchMetaInfo() then runs back on this thread.  |meta_info|
+// is deleted by the reply callback via base::Owned().
+void URLRequestFileJob::Start() {
+  FileMetaInfo* meta_info = new FileMetaInfo();
+  base::WorkerPool::PostTaskAndReply(
+      FROM_HERE,
+      base::Bind(&URLRequestFileJob::FetchMetaInfo, file_path_,
+                 base::Unretained(meta_info)),
+      base::Bind(&URLRequestFileJob::DidFetchMetaInfo,
+                 weak_ptr_factory_.GetWeakPtr(),
+                 base::Owned(meta_info)),
+      true);
+}
+
+// Cancels the job.  Resetting |stream_| releases the file, and invalidating
+// the weak pointers guarantees no pending DidOpen/DidSeek/DidRead callback
+// fires after this point; then the base class completes the kill.
+void URLRequestFileJob::Kill() {
+  stream_.reset();
+  weak_ptr_factory_.InvalidateWeakPtrs();
+
+  URLRequestJob::Kill();
+}
+
+// Reads up to |dest_size| bytes into |dest|.  Returns true with |*bytes_read|
+// set when data (possibly zero bytes, at the end of the range) is available
+// synchronously; returns false when the read is pending or has failed.
+bool URLRequestFileJob::ReadRawData(IOBuffer* dest, int dest_size,
+                                    int *bytes_read) {
+  DCHECK_NE(dest_size, 0);
+  DCHECK(bytes_read);
+  DCHECK_GE(remaining_bytes_, 0);
+
+  // Never read past the end of the requested byte range.
+  if (remaining_bytes_ < dest_size)
+    dest_size = static_cast<int>(remaining_bytes_);
+
+  // If we should copy zero bytes because |remaining_bytes_| is zero, short
+  // circuit here.
+  if (!dest_size) {
+    *bytes_read = 0;
+    return true;
+  }
+
+  int rv = stream_->Read(dest, dest_size,
+                         base::Bind(&URLRequestFileJob::DidRead,
+                                    weak_ptr_factory_.GetWeakPtr()));
+  if (rv >= 0) {
+    // Data is immediately available.
+    *bytes_read = rv;
+    remaining_bytes_ -= rv;
+    DCHECK_GE(remaining_bytes_, 0);
+    return true;
+  }
+
+  // Otherwise, a read error occurred.  ERR_IO_PENDING just means the read
+  // will complete asynchronously via DidRead(); anything else is fatal.
+  if (rv == ERR_IO_PENDING) {
+    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+  } else {
+    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
+  }
+  return false;
+}
+
+// Reports a redirect for two cases: (1) the path turned out to be a
+// directory, so we redirect to the slash-terminated URL; (2) on Windows, the
+// path is a .lnk shortcut that resolves to another file.
+bool URLRequestFileJob::IsRedirectResponse(GURL* location,
+                                           int* http_status_code) {
+  if (meta_info_.is_directory) {
+    // This happens when we discovered the file is a directory, so needs a
+    // slash at the end of the path.
+    std::string new_path = request_->url().path();
+    new_path.push_back('/');
+    GURL::Replacements replacements;
+    replacements.SetPathStr(new_path);
+
+    *location = request_->url().ReplaceComponents(replacements);
+    *http_status_code = 301;  // simulate a permanent redirect
+    return true;
+  }
+
+#if defined(OS_WIN)
+  // Follow a Windows shortcut.
+  // We just resolve .lnk file, ignore others.
+  if (!LowerCaseEqualsASCII(file_path_.Extension(), ".lnk"))
+    return false;
+
+  FilePath new_path = file_path_;
+  bool resolved;
+  resolved = base::win::ResolveShortcut(new_path, &new_path, NULL);
+
+  // If shortcut is not resolved successfully, do not redirect.
+  if (!resolved)
+    return false;
+
+  *location = FilePathToFileURL(new_path);
+  *http_status_code = 301;
+  return true;
+#else
+  return false;
+#endif
+}
+
+Filter* URLRequestFileJob::SetupFilter() const {
+  // Bug 9936 - .svgz files are gzip-compressed and must be decompressed
+  // before being handed to the renderer; everything else is served raw.
+  if (LowerCaseEqualsASCII(file_path_.Extension(), ".svgz"))
+    return Filter::GZipFactory();
+  return NULL;
+}
+
+// Reports the mime type fetched by FetchMetaInfo(), but only when that
+// GetMimeTypeFromFile() lookup succeeded; otherwise leaves |mime_type|
+// untouched and returns false.
+bool URLRequestFileJob::GetMimeType(std::string* mime_type) const {
+  DCHECK(request_);
+  if (!meta_info_.mime_type_result)
+    return false;
+  *mime_type = meta_info_.mime_type;
+  return true;
+}
+
+// Parses the "Range" request header, if present, into |byte_range_|.
+// Requests with more than one range are failed with
+// ERR_REQUEST_RANGE_NOT_SATISFIABLE; unparsable Range headers are ignored.
+void URLRequestFileJob::SetExtraRequestHeaders(
+    const HttpRequestHeaders& headers) {
+  std::string range_header;
+  if (headers.GetHeader(HttpRequestHeaders::kRange, &range_header)) {
+    // We only care about "Range" header here.
+    std::vector<HttpByteRange> ranges;
+    if (HttpUtil::ParseRangeHeader(range_header, &ranges)) {
+      if (ranges.size() == 1) {
+        byte_range_ = ranges[0];
+      } else {
+        // We don't support multiple range requests in one single URL request,
+        // because we need to do multipart encoding here.
+        // TODO(hclam): decide whether we want to support multiple range
+        // requests.
+        NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+                                    ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+      }
+    }
+  }
+}
+
+// Members (|stream_|, the weak-pointer factory) release their resources via
+// their own destructors; nothing extra to do here.
+URLRequestFileJob::~URLRequestFileJob() {
+}
+
+// static
+// Runs on a WorkerPool thread: performs the blocking disk access needed to
+// fill in |meta_info| (existence, size, directory flag, mime type).
+void URLRequestFileJob::FetchMetaInfo(const FilePath& file_path,
+                                      FileMetaInfo* meta_info) {
+  base::PlatformFileInfo platform_info;
+  meta_info->file_exists = file_util::GetFileInfo(file_path, &platform_info);
+  if (meta_info->file_exists) {
+    meta_info->file_size = platform_info.size;
+    meta_info->is_directory = platform_info.is_directory;
+  }
+  // On Windows GetMimeTypeFromFile() goes to the registry. Thus it should be
+  // done in WorkerPool.
+  meta_info->mime_type_result = GetMimeTypeFromFile(file_path,
+                                                    &meta_info->mime_type);
+}
+
+// Runs back on the origin thread with the metadata gathered by
+// FetchMetaInfo(); decides whether to fail, redirect, or open the file.
+void URLRequestFileJob::DidFetchMetaInfo(const FileMetaInfo* meta_info) {
+  meta_info_ = *meta_info;
+
+  // We use URLRequestFileJob to handle files as well as directories without
+  // trailing slash.
+  // If a directory does not exist, we return ERR_FILE_NOT_FOUND. Otherwise,
+  // we will append trailing slash and redirect to FileDirJob.
+  // A special case is "\" on Windows. We should resolve as invalid.
+  // However, Windows resolves "\" to "C:\", thus reports it as existent.
+  // So what happens is we append it with trailing slash and redirect it to
+  // FileDirJob where it is resolved as invalid.
+  if (!meta_info_.file_exists) {
+    DidOpen(ERR_FILE_NOT_FOUND);
+    return;
+  }
+  if (meta_info_.is_directory) {
+    // Directories skip the open; with is_directory set, IsRedirectResponse()
+    // will issue the trailing-slash redirect.
+    DidOpen(OK);
+    return;
+  }
+
+  int flags = base::PLATFORM_FILE_OPEN |
+              base::PLATFORM_FILE_READ |
+              base::PLATFORM_FILE_ASYNC;
+  int rv = stream_->Open(file_path_, flags,
+                         base::Bind(&URLRequestFileJob::DidOpen,
+                                    weak_ptr_factory_.GetWeakPtr()));
+  if (rv != ERR_IO_PENDING)
+    DidOpen(rv);
+}
+
+// Called when the FileStream open (or the meta-info shortcut above)
+// completes.  Validates the requested byte range against the file size and
+// seeks to the range start before headers are declared complete.
+void URLRequestFileJob::DidOpen(int result) {
+  if (result != OK) {
+    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
+    return;
+  }
+
+  if (!byte_range_.ComputeBounds(meta_info_.file_size)) {
+    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+               ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+    return;
+  }
+
+  remaining_bytes_ = byte_range_.last_byte_position() -
+                     byte_range_.first_byte_position() + 1;
+  DCHECK_GE(remaining_bytes_, 0);
+
+  if (remaining_bytes_ > 0 && byte_range_.first_byte_position() != 0) {
+    int rv = stream_->Seek(FROM_BEGIN, byte_range_.first_byte_position(),
+                           base::Bind(&URLRequestFileJob::DidSeek,
+                                      weak_ptr_factory_.GetWeakPtr()));
+    if (rv != ERR_IO_PENDING) {
+      // stream_->Seek() failed, so pass an intentionally erroneous value
+      // into DidSeek().
+      DidSeek(-1);
+    }
+  } else {
+    // We didn't need to call stream_->Seek() at all, so we pass to DidSeek()
+    // the value that would mean seek success. This way we skip the code
+    // handling seek failure.
+    DidSeek(byte_range_.first_byte_position());
+  }
+}
+
+// Called with the file offset the seek landed on (or -1 on failure, see
+// DidOpen()).  Anything other than the range's first byte position is
+// treated as an unsatisfiable range.
+void URLRequestFileJob::DidSeek(int64 result) {
+  if (result != byte_range_.first_byte_position()) {
+    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+                                ERR_REQUEST_RANGE_NOT_SATISFIABLE));
+    return;
+  }
+
+  set_expected_content_size(remaining_bytes_);
+  NotifyHeadersComplete();
+}
+
+// Called when an asynchronous stream_->Read() completes.  |result| is the
+// number of bytes read, 0 at end of file, or a negative net error code.
+void URLRequestFileJob::DidRead(int result) {
+  if (result > 0) {
+    SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
+    // Only successful reads consume bytes from the range.  Subtracting a
+    // negative error code here (as the old code did unconditionally) would
+    // incorrectly inflate |remaining_bytes_| after the job already failed.
+    remaining_bytes_ -= result;
+    DCHECK_GE(remaining_bytes_, 0);
+  } else if (result == 0) {
+    NotifyDone(URLRequestStatus());
+  } else {
+    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
+  }
+
+  NotifyReadComplete(result);
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_file_job.h b/src/net/url_request/url_request_file_job.h
new file mode 100644
index 0000000..9c2199c
--- /dev/null
+++ b/src/net/url_request/url_request_file_job.h
@@ -0,0 +1,107 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_FILE_JOB_H_
+#define NET_URL_REQUEST_URL_REQUEST_FILE_JOB_H_
+
+#include <string>
+#include <vector>
+
+#include "base/file_path.h"
+#include "base/memory/weak_ptr.h"
+#include "net/base/net_export.h"
+#include "net/http/http_byte_range.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_job.h"
+
+// Forward declarations of the file-info structs used for metadata fetching;
+// avoids pulling the full headers into this header.
+namespace base {
+struct PlatformFileInfo;
+}  // namespace base
+namespace file_util {
+struct FileInfo;
+}  // namespace file_util
+
+namespace net {
+
+class FileStream;
+
+// A URLRequestJob that serves "file://" URLs by reading from the local
+// filesystem via an asynchronous FileStream.  Supports single-range "Range"
+// requests and redirects directory paths lacking a trailing slash to their
+// slash-terminated form.
+class NET_EXPORT URLRequestFileJob : public URLRequestJob {
+ public:
+  URLRequestFileJob(URLRequest* request,
+                    NetworkDelegate* network_delegate,
+                    const FilePath& file_path);
+
+  // Protocol factory entry point for "file://"; see the .cc for how file,
+  // directory, and access-denied cases are dispatched.
+  static URLRequest::ProtocolFactory Factory;
+
+  // URLRequestJob:
+  virtual void Start() OVERRIDE;
+  virtual void Kill() OVERRIDE;
+  virtual bool ReadRawData(IOBuffer* buf,
+                           int buf_size,
+                           int* bytes_read) OVERRIDE;
+  virtual bool IsRedirectResponse(GURL* location,
+                                  int* http_status_code) OVERRIDE;
+  virtual Filter* SetupFilter() const OVERRIDE;
+  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
+  virtual void SetExtraRequestHeaders(
+      const HttpRequestHeaders& headers) OVERRIDE;
+
+ protected:
+  // Protected: jobs are destroyed through the URLRequestJob machinery rather
+  // than directly by callers.  NOTE(review): confirm lifetime management.
+  virtual ~URLRequestFileJob();
+
+  // The OS-specific full path name of the file
+  FilePath file_path_;
+
+ private:
+  // Meta information about the file. It's used as a member in the
+  // URLRequestFileJob and also passed between threads because disk access is
+  // necessary to obtain it.
+  struct FileMetaInfo {
+    FileMetaInfo();
+
+    // Size of the file.
+    int64 file_size;
+    // Mime type associated with the file.
+    std::string mime_type;
+    // Result returned from GetMimeTypeFromFile(), i.e. flag showing whether
+    // obtaining of the mime type was successful.
+    bool mime_type_result;
+    // Flag showing whether the file exists.
+    bool file_exists;
+    // Flag showing whether the file name actually refers to a directory.
+    bool is_directory;
+  };
+
+  // Fetches file info on a background thread.
+  static void FetchMetaInfo(const FilePath& file_path,
+                            FileMetaInfo* meta_info);
+
+  // Callback after fetching file info on a background thread.
+  void DidFetchMetaInfo(const FileMetaInfo* meta_info);
+
+  // Callback after opening file on a background thread.
+  void DidOpen(int result);
+
+  // Callback after seeking to the beginning of |byte_range_| in the file
+  // on a background thread.
+  void DidSeek(int64 result);
+
+  // Callback after data is asynchronously read from the file.
+  void DidRead(int result);
+
+  scoped_ptr<FileStream> stream_;
+  FileMetaInfo meta_info_;
+
+  // Requested byte range and the number of bytes of it not yet delivered.
+  HttpByteRange byte_range_;
+  int64 remaining_bytes_;
+
+  base::WeakPtrFactory<URLRequestFileJob> weak_ptr_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(URLRequestFileJob);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_FILE_JOB_H_
diff --git a/src/net/url_request/url_request_filter.cc b/src/net/url_request/url_request_filter.cc
new file mode 100644
index 0000000..01b6069
--- /dev/null
+++ b/src/net/url_request/url_request_filter.cc
@@ -0,0 +1,150 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_filter.h"
+
+#include <set>
+
+#include "base/logging.h"
+
+namespace net {
+
+URLRequestFilter* URLRequestFilter::shared_instance_ = NULL;
+
+// The handler maps hold non-owning function pointers; nothing to release.
+URLRequestFilter::~URLRequestFilter() {}
+
+// static
+// ProtocolFactory shim registered with URLRequest; delegates to the
+// singleton's handler lookup.
+URLRequestJob* URLRequestFilter::Factory(URLRequest* request,
+                                         NetworkDelegate* network_delegate,
+                                         const std::string& scheme) {
+  // Returning null here just means that the built-in handler will be used.
+  return GetInstance()->FindRequestHandler(request, network_delegate, scheme);
+}
+
+// static
+// Lazily creates the singleton; the instance is never freed.
+// NOTE(review): creation is not synchronized -- presumably only ever called
+// from a single thread; confirm.
+URLRequestFilter* URLRequestFilter::GetInstance() {
+  if (!shared_instance_)
+    shared_instance_ = new URLRequestFilter;
+  return shared_instance_;
+}
+
+// Registers |factory| for all requests matching (scheme, hostname), and
+// registers URLRequestFilter::Factory as the protocol factory for |scheme|.
+// Replaces any existing handler for the same pair.
+void URLRequestFilter::AddHostnameHandler(const std::string& scheme,
+    const std::string& hostname, URLRequest::ProtocolFactory* factory) {
+  hostname_handler_map_[make_pair(scheme, hostname)] = factory;
+
+  // Register with the ProtocolFactory.
+  URLRequest::Deprecated::RegisterProtocolFactory(
+      scheme, &URLRequestFilter::Factory);
+
+#ifndef NDEBUG
+  // Check to see if we're masking URLs in the url_handler_map_.
+  for (UrlHandlerMap::const_iterator i = url_handler_map_.begin();
+       i != url_handler_map_.end(); ++i) {
+    const GURL& url = GURL(i->first);
+    HostnameHandlerMap::iterator host_it =
+        hostname_handler_map_.find(make_pair(url.scheme(), url.host()));
+    if (host_it != hostname_handler_map_.end())
+      NOTREACHED();
+  }
+#endif  // !NDEBUG
+}
+
+// Removes a previously added hostname handler; DCHECKs that it exists.
+void URLRequestFilter::RemoveHostnameHandler(const std::string& scheme,
+                                             const std::string& hostname) {
+  HostnameHandlerMap::iterator iter =
+      hostname_handler_map_.find(make_pair(scheme, hostname));
+  DCHECK(iter != hostname_handler_map_.end());
+
+  hostname_handler_map_.erase(iter);
+  // Note that we don't unregister from the URLRequest ProtocolFactory as
+  // this would leave no protocol factory for the scheme.
+  // URLRequestFilter::Factory will keep forwarding the requests to the
+  // URLRequestInetJob.
+}
+
+// Registers |factory| for exactly |url|, replacing any existing handler for
+// that URL, and registers URLRequestFilter::Factory for the URL's scheme.
+// Fails (returns false) only for invalid URLs.
+bool URLRequestFilter::AddUrlHandler(
+    const GURL& url,
+    URLRequest::ProtocolFactory* factory) {
+  if (!url.is_valid())
+    return false;
+  url_handler_map_[url.spec()] = factory;
+
+  // Register with the ProtocolFactory.
+  URLRequest::Deprecated::RegisterProtocolFactory(url.scheme(),
+                                                  &URLRequestFilter::Factory);
+#ifndef NDEBUG
+  // Check to see if this URL is masked by a hostname handler.
+  HostnameHandlerMap::iterator host_it =
+      hostname_handler_map_.find(make_pair(url.scheme(), url.host()));
+  if (host_it != hostname_handler_map_.end())
+    NOTREACHED();
+#endif  // !NDEBUG
+
+  return true;
+}
+
+// Removes a previously added URL handler; DCHECKs that it exists.
+void URLRequestFilter::RemoveUrlHandler(const GURL& url) {
+  UrlHandlerMap::iterator iter = url_handler_map_.find(url.spec());
+  DCHECK(iter != url_handler_map_.end());
+
+  url_handler_map_.erase(iter);
+  // Note that we don't unregister from the URLRequest ProtocolFactory as
+  // this would leave no protocol factory for the scheme.
+  // URLRequestFilter::Factory will keep forwarding the requests to the
+  // URLRequestInetJob.
+}
+
+// Drops every URL and hostname handler, unregisters the protocol factory for
+// each scheme either map touched, and resets the hit counter.
+void URLRequestFilter::ClearHandlers() {
+  // Unregister with the ProtocolFactory.
+  std::set<std::string> schemes;
+  for (UrlHandlerMap::const_iterator i = url_handler_map_.begin();
+       i != url_handler_map_.end(); ++i) {
+    schemes.insert(GURL(i->first).scheme());
+  }
+  for (HostnameHandlerMap::const_iterator i = hostname_handler_map_.begin();
+       i != hostname_handler_map_.end(); ++i) {
+    schemes.insert(i->first.first);
+  }
+  for (std::set<std::string>::const_iterator scheme = schemes.begin();
+       scheme != schemes.end(); ++scheme) {
+    URLRequest::Deprecated::RegisterProtocolFactory(*scheme, NULL);
+  }
+
+  url_handler_map_.clear();
+  hostname_handler_map_.clear();
+  hit_count_ = 0;
+}
+
+// Protected: instances are created through GetInstance().
+URLRequestFilter::URLRequestFilter() : hit_count_(0) { }
+
+// Looks up a handler for |request|: hostname handlers take priority over
+// exact-URL handlers.  Returns the job the matching factory created (and
+// bumps |hit_count_|), or NULL when nothing matched.
+URLRequestJob* URLRequestFilter::FindRequestHandler(
+    URLRequest* request,
+    NetworkDelegate* network_delegate,
+    const std::string& scheme) {
+  URLRequestJob* job = NULL;
+  if (request->url().is_valid()) {
+    // Check the hostname map first.
+    const std::string& hostname = request->url().host();
+
+    HostnameHandlerMap::iterator i =
+        hostname_handler_map_.find(make_pair(scheme, hostname));
+    if (i != hostname_handler_map_.end())
+      job = i->second(request, network_delegate, scheme);
+
+    if (!job) {
+      // Not in the hostname map, check the url map.
+      const std::string& url = request->url().spec();
+      UrlHandlerMap::iterator i = url_handler_map_.find(url);
+      if (i != url_handler_map_.end())
+        job = i->second(request, network_delegate, scheme);
+    }
+  }
+  if (job) {
+    DVLOG(1) << "URLRequestFilter hit for " << request->url().spec();
+    hit_count_++;
+  }
+  return job;
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_filter.h b/src/net/url_request/url_request_filter.h
new file mode 100644
index 0000000..c039300
--- /dev/null
+++ b/src/net/url_request/url_request_filter.h
@@ -0,0 +1,94 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A class to help filter URLRequest jobs based on the URL of the request
+// rather than just the scheme.  Example usage:
+//
+// // Use as an "http" handler.
+// URLRequest::RegisterProtocolFactory("http", &URLRequestFilter::Factory);
+// // Add special handling for the URL http://foo.com/
+// URLRequestFilter::GetInstance()->AddUrlHandler(
+//     GURL("http://foo.com/"),
+//     &URLRequestCustomJob::Factory);
+//
+// If URLRequestFilter::Factory can't find a handle for the request, it passes
+// it through to URLRequestInetJob::Factory and lets the default network stack
+// handle it.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_FILTER_H_
+#define NET_URL_REQUEST_URL_REQUEST_FILTER_H_
+
+#include <map>
+#include <string>
+
+#include "base/hash_tables.h"
+#include "net/base/net_export.h"
+#include "net/url_request/url_request.h"
+
+class GURL;
+
+namespace net {
+class URLRequestJob;
+
+class NET_EXPORT URLRequestFilter {
+ public:
+  // scheme,hostname -> ProtocolFactory
+  typedef std::map<std::pair<std::string, std::string>,
+      URLRequest::ProtocolFactory*> HostnameHandlerMap;
+  typedef base::hash_map<std::string, URLRequest::ProtocolFactory*>
+      UrlHandlerMap;
+
+  ~URLRequestFilter();
+
+  // ProtocolFactory entry point; returns NULL when no handler matches so the
+  // built-in handler is used.
+  static URLRequest::ProtocolFactory Factory;
+
+  // Singleton instance for use.
+  static URLRequestFilter* GetInstance();
+
+  // Registers a factory for (scheme, hostname) pairs.  Hostname handlers
+  // take priority over URL handlers.
+  void AddHostnameHandler(const std::string& scheme,
+                          const std::string& hostname,
+                          URLRequest::ProtocolFactory* factory);
+  void RemoveHostnameHandler(const std::string& scheme,
+                             const std::string& hostname);
+
+  // Returns true if we successfully added the URL handler.  This will replace
+  // old handlers for the URL if one existed.
+  bool AddUrlHandler(const GURL& url,
+                     URLRequest::ProtocolFactory* factory);
+
+  void RemoveUrlHandler(const GURL& url);
+
+  // Clear all the existing URL handlers and unregister with the
+  // ProtocolFactory.  Resets the hit count.
+  void ClearHandlers();
+
+  // Returns the number of times a handler was used to service a request.
+  int hit_count() const { return hit_count_; }
+
+ protected:
+  URLRequestFilter();
+
+  // Helper method that looks up the request in the url_handler_map_.
+  URLRequestJob* FindRequestHandler(URLRequest* request,
+                                    NetworkDelegate* network_delegate,
+                                    const std::string& scheme);
+
+  // Maps hostnames to factories.  Hostnames take priority over URLs.
+  HostnameHandlerMap hostname_handler_map_;
+
+  // Maps URLs to factories.
+  UrlHandlerMap url_handler_map_;
+
+  // Number of requests served by a registered handler (see hit_count()).
+  int hit_count_;
+
+ private:
+  // Singleton instance.
+  static URLRequestFilter* shared_instance_;
+
+  DISALLOW_COPY_AND_ASSIGN(URLRequestFilter);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_FILTER_H_
diff --git a/src/net/url_request/url_request_filter_unittest.cc b/src/net/url_request/url_request_filter_unittest.cc
new file mode 100644
index 0000000..9f11675
--- /dev/null
+++ b/src/net/url_request/url_request_filter_unittest.cc
@@ -0,0 +1,131 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_filter.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_context.h"
+#include "net/url_request/url_request_job.h"
+#include "net/url_request/url_request_job_factory.h"
+#include "net/url_request/url_request_test_job.h"
+#include "net/url_request/url_request_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace net {
+
+namespace {
+
+// Most-recently created job from FactoryA; the test resets it to NULL after
+// each check.
+URLRequestTestJob* job_a;
+
+// ProtocolFactory that records the job it creates in |job_a|.
+URLRequestJob* FactoryA(URLRequest* request,
+                        NetworkDelegate* network_delegate,
+                        const std::string& scheme) {
+  job_a = new URLRequestTestJob(request, network_delegate);
+  return job_a;
+}
+
+// Most-recently created job from FactoryB; the test resets it to NULL after
+// each check.
+URLRequestTestJob* job_b;
+
+// ProtocolFactory that records the job it creates in |job_b|.
+URLRequestJob* FactoryB(URLRequest* request,
+                        NetworkDelegate* network_delegate,
+                        const std::string& scheme) {
+  job_b = new URLRequestTestJob(request, network_delegate);
+  return job_b;
+}
+
+// Exercises URL- and hostname-based dispatch end to end: add, overwrite, and
+// remove handlers, verifying via |job_a|/|job_b| which factory ran and via
+// hit_count() how many requests were served by a handler.
+TEST(URLRequestFilter, BasicMatching) {
+  TestDelegate delegate;
+  TestURLRequestContext request_context;
+
+  GURL url_1("http://foo.com/");
+  TestURLRequest request_1(url_1, &delegate, &request_context);
+
+  GURL url_2("http://bar.com/");
+  TestURLRequest request_2(url_2, &delegate, &request_context);
+
+  // Check AddUrlHandler checks for invalid URLs.
+  EXPECT_FALSE(URLRequestFilter::GetInstance()->AddUrlHandler(GURL(),
+                                                              &FactoryA));
+
+  // Check URL matching.
+  URLRequestFilter::GetInstance()->ClearHandlers();
+  EXPECT_TRUE(URLRequestFilter::GetInstance()->AddUrlHandler(url_1,
+                                                             &FactoryA));
+  {
+    scoped_refptr<URLRequestJob> found = URLRequestFilter::Factory(
+        &request_1, request_context.network_delegate(), url_1.scheme());
+    EXPECT_EQ(job_a, found);
+    EXPECT_TRUE(job_a != NULL);
+    job_a = NULL;
+  }
+  EXPECT_EQ(URLRequestFilter::GetInstance()->hit_count(), 1);
+
+  // Check we don't match other URLs.
+  EXPECT_TRUE(URLRequestFilter::Factory(
+      &request_2, request_context.network_delegate(), url_2.scheme()) == NULL);
+  EXPECT_EQ(1, URLRequestFilter::GetInstance()->hit_count());
+
+  // Check we can overwrite URL handler.
+  EXPECT_TRUE(URLRequestFilter::GetInstance()->AddUrlHandler(url_1,
+                                                             &FactoryB));
+  {
+    scoped_refptr<URLRequestJob> found = URLRequestFilter::Factory(
+        &request_1, request_context.network_delegate(), url_1.scheme());
+    EXPECT_EQ(job_b, found);
+    EXPECT_TRUE(job_b != NULL);
+    job_b = NULL;
+  }
+  EXPECT_EQ(2, URLRequestFilter::GetInstance()->hit_count());
+
+  // Check we can remove URL matching.
+  URLRequestFilter::GetInstance()->RemoveUrlHandler(url_1);
+  EXPECT_TRUE(URLRequestFilter::Factory(
+      &request_1, request_context.network_delegate(), url_1.scheme()) == NULL);
+  EXPECT_EQ(URLRequestFilter::GetInstance()->hit_count(), 2);
+
+  // Check hostname matching.
+  URLRequestFilter::GetInstance()->ClearHandlers();
+  EXPECT_EQ(0, URLRequestFilter::GetInstance()->hit_count());
+  URLRequestFilter::GetInstance()->AddHostnameHandler(url_1.scheme(),
+                                                      url_1.host(),
+                                                      &FactoryB);
+  {
+    scoped_refptr<URLRequestJob> found = URLRequestFilter::Factory(
+        &request_1, request_context.network_delegate(), url_1.scheme());
+    EXPECT_EQ(job_b, found);
+    EXPECT_TRUE(job_b != NULL);
+    job_b = NULL;
+  }
+  EXPECT_EQ(1, URLRequestFilter::GetInstance()->hit_count());
+
+  // Check we don't match other hostnames.
+  EXPECT_TRUE(URLRequestFilter::Factory(
+      &request_2, request_context.network_delegate(), url_2.scheme()) == NULL);
+  EXPECT_EQ(URLRequestFilter::GetInstance()->hit_count(), 1);
+
+  // Check we can overwrite hostname handler.
+  URLRequestFilter::GetInstance()->AddHostnameHandler(url_1.scheme(),
+                                                      url_1.host(),
+                                                      &FactoryA);
+  {
+    scoped_refptr<URLRequestJob> found = URLRequestFilter::Factory(
+        &request_1, request_context.network_delegate(), url_1.scheme());
+    EXPECT_EQ(job_a, found);
+    EXPECT_TRUE(job_a != NULL);
+    job_a = NULL;
+  }
+  EXPECT_EQ(2, URLRequestFilter::GetInstance()->hit_count());
+
+  // Check we can remove hostname matching.
+  URLRequestFilter::GetInstance()->RemoveHostnameHandler(url_1.scheme(),
+                                                         url_1.host());
+  EXPECT_TRUE(URLRequestFilter::Factory(
+      &request_1, request_context.network_delegate(), url_1.scheme()) == NULL);
+  EXPECT_EQ(2, URLRequestFilter::GetInstance()->hit_count());
+}
+
+}  // namespace
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_ftp_job.cc b/src/net/url_request/url_request_ftp_job.cc
new file mode 100644
index 0000000..27021a3
--- /dev/null
+++ b/src/net/url_request/url_request_ftp_job.cc
@@ -0,0 +1,264 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_ftp_job.h"
+
+#include "base/compiler_specific.h"
+#include "base/message_loop.h"
+#include "base/utf_string_conversions.h"
+#include "net/base/auth.h"
+#include "net/base/host_port_pair.h"
+#include "net/base/net_errors.h"
+#include "net/base/net_util.h"
+#include "net/ftp/ftp_response_info.h"
+#include "net/ftp/ftp_transaction_factory.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_context.h"
+#include "net/url_request/url_request_error_job.h"
+
+namespace net {
+
+// Constructs an FTP job for |request|.  |ftp_transaction_factory| and
+// |ftp_auth_cache| are borrowed, not owned; the DCHECKs below only enforce
+// that they are non-NULL -- callers must ensure they outlive the job.
+URLRequestFtpJob::URLRequestFtpJob(
+    URLRequest* request,
+    NetworkDelegate* network_delegate,
+    FtpTransactionFactory* ftp_transaction_factory,
+    FtpAuthCache* ftp_auth_cache)
+    : URLRequestJob(request, network_delegate),
+      read_in_progress_(false),
+      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)),
+      ftp_transaction_factory_(ftp_transaction_factory),
+      ftp_auth_cache_(ftp_auth_cache) {
+  DCHECK(ftp_transaction_factory);
+  DCHECK(ftp_auth_cache);
+}
+
+// static
+// Creates the job for an ftp:// request.  Requests that name an explicit
+// port which is neither FTP-safe nor explicitly allowed by an override are
+// rejected up front with ERR_UNSAFE_PORT.
+URLRequestJob* URLRequestFtpJob::Factory(URLRequest* request,
+                                         NetworkDelegate* network_delegate,
+                                         const std::string& scheme) {
+  DCHECK_EQ(scheme, "ftp");
+
+  int port = request->url().IntPort();
+  if (request->url().has_port() &&
+      !IsPortAllowedByFtp(port) && !IsPortAllowedByOverride(port)) {
+    // Surface the port-blocking decision as an error job rather than NULL so
+    // the request fails with a definite error code.
+    return new URLRequestErrorJob(request,
+                                  network_delegate,
+                                  ERR_UNSAFE_PORT);
+  }
+
+  return new URLRequestFtpJob(request,
+                              network_delegate,
+                              request->context()->ftp_transaction_factory(),
+                              request->context()->ftp_auth_cache());
+}
+
+// Reports the synthesized MIME type for the response.  FTP carries no
+// Content-Type metadata; the only type we can produce is the special
+// directory-listing type consumed by the FTP directory view.
+bool URLRequestFtpJob::GetMimeType(std::string* mime_type) const {
+  // |transaction_| may be NULL if transaction creation failed (see
+  // StartTransaction) or after Kill() reset it; guard against dereferencing
+  // it, mirroring the NULL check in GetSocketAddress().
+  if (!transaction_.get())
+    return false;
+  if (transaction_->GetResponseInfo()->is_directory_listing) {
+    *mime_type = "text/vnd.chromium.ftp-dir";
+    return true;
+  }
+  return false;
+}
+
+// Returns the remote endpoint the transaction connected to, or an empty
+// (default-constructed) HostPortPair when no transaction exists.
+HostPortPair URLRequestFtpJob::GetSocketAddress() const {
+  return transaction_.get() ?
+      transaction_->GetResponseInfo()->socket_address : HostPortPair();
+}
+
+// The transaction (if any) is destroyed along with the job via scoped_ptr.
+URLRequestFtpJob::~URLRequestFtpJob() {
+}
+
+// Creates the FtpTransaction and starts it.  Completion is always delivered
+// through OnStartCompleted() -- asynchronously via the transaction's
+// callback, or via a posted task when the start (or transaction creation)
+// finishes synchronously.
+void URLRequestFtpJob::StartTransaction() {
+  // Create a transaction.
+  DCHECK(!transaction_.get());
+
+  // The factory may return NULL (the unit test exercises this); the
+  // ERR_FAILED branch below handles that case.
+  transaction_.reset(ftp_transaction_factory_->CreateTransaction());
+
+  // No matter what, we want to report our status as IO pending since we will
+  // be notifying our consumer asynchronously via OnStartCompleted.
+  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+  int rv;
+  if (transaction_.get()) {
+    // base::Unretained is safe: |transaction_| is owned by this job, so its
+    // callback cannot fire after the job is destroyed.
+    rv = transaction_->Start(
+        &request_info_,
+        base::Bind(&URLRequestFtpJob::OnStartCompleted,
+                   base::Unretained(this)),
+        request_->net_log());
+    if (rv == ERR_IO_PENDING)
+      return;
+  } else {
+    rv = ERR_FAILED;
+  }
+  // The transaction started synchronously, but we need to notify the
+  // URLRequest delegate via the message loop.
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(&URLRequestFtpJob::OnStartCompleted,
+                 weak_factory_.GetWeakPtr(), rv));
+}
+
+// Called when the transaction finishes starting (or restarting after auth).
+// |result| is a net error code; OK means the response is ready.
+void URLRequestFtpJob::OnStartCompleted(int result) {
+  // Clear the IO_PENDING status
+  SetStatus(URLRequestStatus());
+
+  // Note that transaction_ may be NULL due to a creation failure.
+  if (transaction_.get()) {
+    // FTP obviously doesn't have HTTP Content-Length header. We have to pass
+    // the content size information manually.
+    set_expected_content_size(
+        transaction_->GetResponseInfo()->expected_content_size);
+  }
+
+  if (result == OK) {
+    NotifyHeadersComplete();
+  } else if (transaction_.get() &&
+             transaction_->GetResponseInfo()->needs_auth) {
+    GURL origin = request_->url().GetOrigin();
+    // If credentials were already supplied and we still got a challenge,
+    // they were rejected -- evict them from the auth cache.
+    if (server_auth_ && server_auth_->state == AUTH_STATE_HAVE_AUTH) {
+      ftp_auth_cache_->Remove(origin, server_auth_->credentials);
+    } else if (!server_auth_) {
+      server_auth_ = new AuthData();
+    }
+    server_auth_->state = AUTH_STATE_NEED_AUTH;
+
+    FtpAuthCache::Entry* cached_auth = ftp_auth_cache_->Lookup(origin);
+    if (cached_auth) {
+      // Retry using cached auth data.
+      SetAuth(cached_auth->credentials);
+    } else {
+      // Prompt for a username/password.
+      NotifyHeadersComplete();
+    }
+  } else {
+    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
+  }
+}
+
+// Completion callback for asynchronous transaction reads.  |result| is the
+// number of bytes read, 0 at end-of-stream, or a net error code.
+void URLRequestFtpJob::OnReadCompleted(int result) {
+  read_in_progress_ = false;
+  if (result == 0) {
+    // End of stream: finish with a successful status.
+    NotifyDone(URLRequestStatus());
+  } else if (result < 0) {
+    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
+  } else {
+    // Clear the IO_PENDING status
+    SetStatus(URLRequestStatus());
+  }
+  NotifyReadComplete(result);
+}
+
+// Restarts the existing transaction using the credentials stashed in
+// |server_auth_|.  As in StartTransaction(), completion always arrives via
+// OnStartCompleted(), posted through the loop when the restart completes
+// synchronously.
+void URLRequestFtpJob::RestartTransactionWithAuth() {
+  DCHECK(server_auth_ && server_auth_->state == AUTH_STATE_HAVE_AUTH);
+
+  // No matter what, we want to report our status as IO pending since we will
+  // be notifying our consumer asynchronously via OnStartCompleted.
+  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+
+  // base::Unretained is safe: the transaction is owned by this job.
+  int rv = transaction_->RestartWithAuth(
+      server_auth_->credentials,
+      base::Bind(&URLRequestFtpJob::OnStartCompleted,
+                 base::Unretained(this)));
+  if (rv == ERR_IO_PENDING)
+    return;
+
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(&URLRequestFtpJob::OnStartCompleted,
+                 weak_factory_.GetWeakPtr(), rv));
+}
+
+// URLRequestJob entry point: snapshot the request URL into the transaction's
+// request info and kick off the transaction.
+void URLRequestFtpJob::Start() {
+  DCHECK(!transaction_.get());
+  request_info_.url = request_->url();
+  StartTransaction();
+}
+
+// Cancels the job.  A job with no transaction has nothing to tear down.
+void URLRequestFtpJob::Kill() {
+  if (!transaction_.get())
+    return;
+  transaction_.reset();
+  URLRequestJob::Kill();
+  // Drop any OnStartCompleted tasks already posted with a weak pointer so
+  // they cannot fire after the job has been killed.
+  weak_factory_.InvalidateWeakPtrs();
+}
+
+// Reports the job's current load state.  Before Start() (or after Kill())
+// there is no transaction, so the job is idle; otherwise defer to the
+// transaction's own state.
+LoadState URLRequestFtpJob::GetLoadState() const {
+  if (!transaction_.get())
+    return LOAD_STATE_IDLE;
+  return transaction_->GetLoadState();
+}
+
+// True when the FTP server itself has issued an unanswered auth challenge.
+// Proxy auth never reaches this path: an FTP request sent through a proxy
+// is actually spoken over HTTP, where the proxy challenges via HTTP's
+// Proxy-Authenticate mechanism instead.
+bool URLRequestFtpJob::NeedsAuth() {
+  if (!server_auth_)
+    return false;
+  return server_auth_->state == AUTH_STATE_NEED_AUTH;
+}
+
+// Builds the challenge object handed to the embedder for a username/password
+// prompt.  Must only be called while NeedsAuth() is true (enforced by the
+// DCHECK below).
+void URLRequestFtpJob::GetAuthChallengeInfo(
+    scoped_refptr<AuthChallengeInfo>* result) {
+  DCHECK((server_auth_ != NULL) &&
+         (server_auth_->state == AUTH_STATE_NEED_AUTH));
+  scoped_refptr<AuthChallengeInfo> auth_info(new AuthChallengeInfo);
+  auth_info->is_proxy = false;
+  auth_info->challenger = HostPortPair::FromURL(request_->url());
+  // scheme and realm are kept empty.
+  DCHECK(auth_info->scheme.empty());
+  DCHECK(auth_info->realm.empty());
+  result->swap(auth_info);
+}
+
+// Supplies credentials for the pending challenge: records them, caches them
+// for the request's origin, and restarts the transaction with them.
+void URLRequestFtpJob::SetAuth(const AuthCredentials& credentials) {
+  DCHECK(NeedsAuth());
+  server_auth_->state = AUTH_STATE_HAVE_AUTH;
+  server_auth_->credentials = credentials;
+
+  ftp_auth_cache_->Add(request_->url().GetOrigin(), server_auth_->credentials);
+
+  RestartTransactionWithAuth();
+}
+
+// Declines the pending auth challenge and continues without credentials.
+void URLRequestFtpJob::CancelAuth() {
+  DCHECK(NeedsAuth());
+  server_auth_->state = AUTH_STATE_CANCELED;
+
+  // Once the auth is cancelled, we proceed with the request as though
+  // there were no auth.  Schedule this for later so that we don't cause
+  // any recursing into the caller as a result of this call.
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(&URLRequestFtpJob::OnStartCompleted,
+                 weak_factory_.GetWeakPtr(), OK));
+}
+
+// This job performs no uploads, so there is never any progress to report;
+// always return a default-constructed (zero) UploadProgress.
+UploadProgress URLRequestFtpJob::GetUploadProgress() const {
+  const UploadProgress no_progress;
+  return no_progress;
+}
+
+// Reads response bytes into |buf|.  Returns true with |*bytes_read| set when
+// data (or EOF, as 0) is available synchronously.  Returns false otherwise:
+// with IO_PENDING status when the read will complete later through
+// OnReadCompleted(), or with a FAILED status on error.
+bool URLRequestFtpJob::ReadRawData(IOBuffer* buf,
+                                   int buf_size,
+                                   int *bytes_read) {
+  DCHECK_NE(buf_size, 0);
+  DCHECK(bytes_read);
+  DCHECK(!read_in_progress_);
+
+  // base::Unretained is safe: the transaction is owned by this job.
+  int rv = transaction_->Read(buf, buf_size,
+                              base::Bind(&URLRequestFtpJob::OnReadCompleted,
+                                         base::Unretained(this)));
+  if (rv >= 0) {
+    *bytes_read = rv;
+    return true;
+  }
+
+  if (rv == ERR_IO_PENDING) {
+    read_in_progress_ = true;
+    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+  } else {
+    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
+  }
+  return false;
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_ftp_job.h b/src/net/url_request/url_request_ftp_job.h
new file mode 100644
index 0000000..d05e3d8
--- /dev/null
+++ b/src/net/url_request/url_request_ftp_job.h
@@ -0,0 +1,86 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_FTP_JOB_H_
+#define NET_URL_REQUEST_URL_REQUEST_FTP_JOB_H_
+
+#include <string>
+
+#include "base/memory/weak_ptr.h"
+#include "net/base/auth.h"
+#include "net/base/completion_callback.h"
+#include "net/ftp/ftp_request_info.h"
+#include "net/ftp/ftp_transaction.h"
+#include "net/url_request/url_request_job.h"
+
+namespace net {
+
+class NetworkDelegate;
+class FtpTransactionFactory;
+class FtpAuthCache;
+
+// A URLRequestJob subclass that is built on top of FtpTransaction. It
+// provides an implementation for FTP.
+class URLRequestFtpJob : public URLRequestJob {
+ public:
+  // |ftp_transaction_factory| and |ftp_auth_cache| are not owned and must
+  // outlive the job.
+  URLRequestFtpJob(URLRequest* request,
+                   NetworkDelegate* network_delegate,
+                   FtpTransactionFactory* ftp_transaction_factory,
+                   FtpAuthCache* ftp_auth_cache);
+
+  // TODO(shalev): get rid of this function in favor of FtpProtocolHandler.
+  static URLRequestJob* Factory(URLRequest* request,
+                                NetworkDelegate* network_delegate,
+                                const std::string& scheme);
+
+  // Overridden from URLRequestJob:
+  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
+  virtual HostPortPair GetSocketAddress() const OVERRIDE;
+
+ private:
+  // Jobs are reference counted (held via scoped_refptr), so the destructor
+  // is private.
+  virtual ~URLRequestFtpJob();
+
+  // Creates |transaction_| and starts it; completion is delivered through
+  // OnStartCompleted().
+  void StartTransaction();
+
+  void OnStartCompleted(int result);
+  void OnReadCompleted(int result);
+
+  // Restarts |transaction_| with the credentials held in |server_auth_|.
+  void RestartTransactionWithAuth();
+
+  // Overridden from URLRequestJob:
+  virtual void Start() OVERRIDE;
+  virtual void Kill() OVERRIDE;
+  virtual LoadState GetLoadState() const OVERRIDE;
+  virtual bool NeedsAuth() OVERRIDE;
+  virtual void GetAuthChallengeInfo(
+      scoped_refptr<AuthChallengeInfo>* auth_info) OVERRIDE;
+  virtual void SetAuth(const AuthCredentials& credentials) OVERRIDE;
+  virtual void CancelAuth() OVERRIDE;
+
+  // TODO(ibrar):  Yet to give another look at this function.
+  virtual UploadProgress GetUploadProgress() const OVERRIDE;
+  virtual bool ReadRawData(IOBuffer* buf,
+                           int buf_size,
+                           int *bytes_read) OVERRIDE;
+
+  FtpRequestInfo request_info_;
+  scoped_ptr<FtpTransaction> transaction_;
+
+  // True while an asynchronous transaction read is outstanding.
+  bool read_in_progress_;
+
+  // Server auth state; NULL until the first auth challenge is received.
+  scoped_refptr<AuthData> server_auth_;
+
+  base::WeakPtrFactory<URLRequestFtpJob> weak_factory_;
+
+  // Not owned.
+  FtpTransactionFactory* ftp_transaction_factory_;
+  FtpAuthCache* ftp_auth_cache_;
+
+  DISALLOW_COPY_AND_ASSIGN(URLRequestFtpJob);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_FTP_JOB_H_
diff --git a/src/net/url_request/url_request_ftp_job_unittest.cc b/src/net/url_request/url_request_ftp_job_unittest.cc
new file mode 100644
index 0000000..e13a80d
--- /dev/null
+++ b/src/net/url_request/url_request_ftp_job_unittest.cc
@@ -0,0 +1,75 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/ftp/ftp_auth_cache.h"
+#include "net/ftp/ftp_transaction.h"
+#include "net/ftp/ftp_transaction_factory.h"
+#include "net/url_request/ftp_protocol_handler.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_context.h"
+#include "net/url_request/url_request_ftp_job.h"
+#include "net/url_request/url_request_status.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::Return;
+using ::testing::_;
+
+namespace net {
+
+// GMock factory whose CreateTransaction() can be scripted -- e.g. made to
+// return NULL to simulate transaction-creation failure.
+class MockFtpTransactionFactory : public FtpTransactionFactory {
+ public:
+  MOCK_METHOD0(CreateTransaction, FtpTransaction*());
+  MOCK_METHOD1(Suspend, void(bool suspend));
+};
+
+// GMock delegate covering the full URLRequest::Delegate surface so tests can
+// set expectations on (or strictly forbid) each notification.
+class MockURLRequestDelegate : public URLRequest::Delegate {
+ public:
+  MOCK_METHOD3(OnReceivedRedirect, void(URLRequest* request,
+                                        const GURL& new_url,
+                                        bool* defer_redirect));
+  MOCK_METHOD2(OnAuthRequired, void(URLRequest* request,
+                                    AuthChallengeInfo* auth_info));
+  MOCK_METHOD2(OnCertificateRequested,
+               void(URLRequest* request,
+                    SSLCertRequestInfo* cert_request_info));
+  MOCK_METHOD3(OnSSLCertificateError, void(URLRequest* request,
+                                           const SSLInfo& ssl_info,
+                                           bool fatal));
+  MOCK_METHOD1(OnResponseStarted, void(URLRequest* request));
+  MOCK_METHOD2(OnReadCompleted, void(URLRequest* request, int bytes_read));
+};
+
+// GMock action: when OnResponseStarted fires, asserts the request (arg0)
+// finished with |expected_status|.
+ACTION_P(HandleOnResponseStarted, expected_status) {
+  EXPECT_EQ(expected_status, arg0->status().status());
+}
+
+// Verifies the FTP job fails the request cleanly (FAILED status delivered to
+// the delegate, request no longer pending) when the transaction factory
+// returns NULL instead of a transaction.
+TEST(FtpProtocolHandlerTest, CreateTransactionFails) {
+  testing::InSequence in_sequence_;
+
+  ::testing::StrictMock<MockFtpTransactionFactory> ftp_transaction_factory;
+  ::testing::StrictMock<MockURLRequestDelegate> delegate;
+  FtpAuthCache ftp_auth_cache;
+
+  GURL url("ftp://example.com");
+  URLRequestContext context;
+  URLRequest url_request(url, &delegate, &context);
+
+  FtpProtocolHandler ftp_protocol_handler(
+      &ftp_transaction_factory, &ftp_auth_cache);
+
+  scoped_refptr<URLRequestJob> ftp_job(
+      ftp_protocol_handler.MaybeCreateJob(&url_request, NULL));
+  ASSERT_TRUE(ftp_job.get());
+
+  EXPECT_CALL(ftp_transaction_factory, CreateTransaction())
+      .WillOnce(Return(static_cast<FtpTransaction*>(NULL)));
+  ftp_job->Start();
+  // The failure is reported via a posted task, so spin the loop.
+  // NOTE(review): this relies on MessageLoop::current() being non-NULL, i.e.
+  // the test harness supplying a message loop -- confirm.
+  EXPECT_CALL(delegate, OnResponseStarted(_))
+      .WillOnce(HandleOnResponseStarted(URLRequestStatus::FAILED));
+  MessageLoop::current()->RunUntilIdle();
+  EXPECT_FALSE(url_request.is_pending());
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_http_job.cc b/src/net/url_request/url_request_http_job.cc
new file mode 100644
index 0000000..4977f8c
--- /dev/null
+++ b/src/net/url_request/url_request_http_job.cc
@@ -0,0 +1,1631 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_http_job.h"
+
+#include "base/base_switches.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/command_line.h"
+#include "base/compiler_specific.h"
+#include "base/file_util.h"
+#include "base/file_version_info.h"
+#include "base/message_loop.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram.h"
+#include "base/rand_util.h"
+#include "base/string_util.h"
+#include "base/time.h"
+#include "net/base/cert_status_flags.h"
+#include "net/base/filter.h"
+#include "net/base/host_port_pair.h"
+#include "net/base/load_flags.h"
+#include "net/base/mime_util.h"
+#include "net/base/net_errors.h"
+#include "net/base/net_util.h"
+#include "net/base/network_delegate.h"
+#if !defined(__LB_SHELL__) && !defined(OS_STARBOARD)
+#include "net/base/sdch_manager.h"
+#endif
+#include "net/base/ssl_cert_request_info.h"
+#include "net/base/ssl_config_service.h"
+#include "net/cookies/cookie_monster.h"
+#include "net/http/http_network_session.h"
+#include "net/http/http_request_headers.h"
+#include "net/http/http_response_headers.h"
+#include "net/http/http_response_info.h"
+#include "net/http/http_status_code.h"
+#include "net/http/http_transaction.h"
+#include "net/http/http_transaction_delegate.h"
+#include "net/http/http_transaction_factory.h"
+#include "net/http/http_util.h"
+#include "net/url_request/fraudulent_certificate_reporter.h"
+#include "net/url_request/http_user_agent_settings.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_context.h"
+#include "net/url_request/url_request_error_job.h"
+#include "net/url_request/url_request_redirect_job.h"
+#include "net/url_request/url_request_throttler_header_adapter.h"
+#include "net/url_request/url_request_throttler_manager.h"
+
+namespace net {
+
+// Exposes the state of a URLRequestHttpJob to the content Filter machinery
+// without giving filters direct access to the job itself.
+class URLRequestHttpJob::HttpFilterContext : public FilterContext {
+ public:
+  explicit HttpFilterContext(URLRequestHttpJob* job);
+  virtual ~HttpFilterContext();
+
+  // FilterContext implementation.
+  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
+  virtual bool GetURL(GURL* gurl) const OVERRIDE;
+  virtual base::Time GetRequestTime() const OVERRIDE;
+  virtual bool IsCachedContent() const OVERRIDE;
+  virtual bool IsDownload() const OVERRIDE;
+  virtual bool IsSdchResponse() const OVERRIDE;
+  virtual int64 GetByteReadCount() const OVERRIDE;
+  virtual int GetResponseCode() const OVERRIDE;
+  virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE;
+
+  // Method to allow us to reset filter context for a response that should have
+  // been SDCH encoded when there is an update due to an explicit HTTP header.
+  void ResetSdchResponseToFalse();
+
+ private:
+  URLRequestHttpJob* job_;  // Not owned; set at construction, never NULL.
+
+  DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
+};
+
+// Relays cache/network wait-state transitions from the HttpTransaction to
+// the NetworkDelegate on behalf of |request_|.  OnDetachRequest() (called
+// explicitly or from the destructor) resets the state and makes all further
+// notifications no-ops.
+class URLRequestHttpJob::HttpTransactionDelegateImpl
+    : public HttpTransactionDelegate {
+ public:
+  explicit HttpTransactionDelegateImpl(URLRequest* request)
+      : request_(request),
+        network_delegate_(request->context()->network_delegate()),
+        cache_active_(false),
+        network_active_(false) {
+  }
+  virtual ~HttpTransactionDelegateImpl() {
+    OnDetachRequest();
+  }
+  // Severs the link to the request, notifying the delegate of the reset.
+  void OnDetachRequest() {
+    if (request_ == NULL || network_delegate_ == NULL)
+      return;
+    network_delegate_->NotifyRequestWaitStateChange(
+        *request_,
+        NetworkDelegate::REQUEST_WAIT_STATE_RESET);
+    cache_active_ = false;
+    network_active_ = false;
+    request_ = NULL;
+  }
+  virtual void OnCacheActionStart() OVERRIDE {
+    if (request_ == NULL || network_delegate_ == NULL)
+      return;
+    DCHECK(!cache_active_ && !network_active_);
+    cache_active_ = true;
+    network_delegate_->NotifyRequestWaitStateChange(
+        *request_,
+        NetworkDelegate::REQUEST_WAIT_STATE_CACHE_START);
+  }
+  virtual void OnCacheActionFinish() OVERRIDE {
+    if (request_ == NULL || network_delegate_ == NULL)
+      return;
+    DCHECK(cache_active_ && !network_active_);
+    cache_active_ = false;
+    network_delegate_->NotifyRequestWaitStateChange(
+        *request_,
+        NetworkDelegate::REQUEST_WAIT_STATE_CACHE_FINISH);
+  }
+  virtual void OnNetworkActionStart() OVERRIDE {
+    if (request_ == NULL || network_delegate_ == NULL)
+      return;
+    DCHECK(!cache_active_ && !network_active_);
+    network_active_ = true;
+    network_delegate_->NotifyRequestWaitStateChange(
+        *request_,
+        NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_START);
+  }
+  virtual void OnNetworkActionFinish() OVERRIDE {
+    if (request_ == NULL || network_delegate_ == NULL)
+      return;
+    DCHECK(!cache_active_ && network_active_);
+    network_active_ = false;
+    network_delegate_->NotifyRequestWaitStateChange(
+        *request_,
+        NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_FINISH);
+  }
+
+ private:
+  URLRequest* request_;  // Not owned; NULL once detached.
+  NetworkDelegate* network_delegate_;  // Not owned; may be NULL.
+  bool cache_active_;    // A cache action is currently in flight.
+  bool network_active_;  // A network action is currently in flight.
+
+  DISALLOW_COPY_AND_ASSIGN(HttpTransactionDelegateImpl);
+};
+
+URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
+    : job_(job) {
+  DCHECK(job_);
+}
+
+URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() {
+}
+
+// Forwards to the job's computed MIME type.
+bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
+    std::string* mime_type) const {
+  return job_->GetMimeType(mime_type);
+}
+
+// Fails (returns false) once the job has been detached from its request.
+bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const {
+  if (!job_->request())
+    return false;
+  *gurl = job_->request()->url();
+  return true;
+}
+
+// Returns a null Time once the job has been detached from its request.
+base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const {
+  return job_->request() ? job_->request()->request_time() : base::Time();
+}
+
+bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
+  return job_->is_cached_content_;
+}
+
+bool URLRequestHttpJob::HttpFilterContext::IsDownload() const {
+  return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0;
+}
+
+// Clears the SDCH-advertised flag when an explicit HTTP header shows the
+// response is not actually SDCH encoded.
+void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() {
+  DCHECK(job_->sdch_dictionary_advertised_);
+  job_->sdch_dictionary_advertised_ = false;
+}
+
+bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const {
+  return job_->sdch_dictionary_advertised_;
+}
+
+int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
+  return job_->filter_input_byte_count();
+}
+
+int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
+  return job_->GetResponseCode();
+}
+
+void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
+    StatisticSelector statistic) const {
+  job_->RecordPacketStats(statistic);
+}
+
+// Creates the job used to service an http/https request.  Produces an error
+// job when the context lacks an HTTP transaction factory, and a redirect
+// job when HSTS requires upgrading the request.
+// TODO(darin): make sure the port blocking code is not lost
+// static
+URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
+                                          NetworkDelegate* network_delegate,
+                                          const std::string& scheme) {
+  DCHECK(scheme == "http" || scheme == "https");
+
+  if (!request->context()->http_transaction_factory()) {
+    NOTREACHED() << "requires a valid context";
+    return new URLRequestErrorJob(
+        request, network_delegate, ERR_INVALID_ARGUMENT);
+  }
+
+  GURL redirect_url;
+  if (request->GetHSTSRedirect(&redirect_url)) {
+    return new URLRequestRedirectJob(
+        request, network_delegate, redirect_url,
+        // Use status code 307 to preserve the method, so POST requests work.
+        URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT);
+  }
+  return new URLRequestHttpJob(request,
+                               network_delegate,
+                               request->context()->http_user_agent_settings());
+}
+
+
+// Constructs an HTTP job for |request|.  |http_user_agent_settings| is
+// borrowed (may be NULL) and supplies default Accept-Language/Charset and
+// User-Agent values; see AddExtraHeaders().
+URLRequestHttpJob::URLRequestHttpJob(
+    URLRequest* request,
+    NetworkDelegate* network_delegate,
+    const HttpUserAgentSettings* http_user_agent_settings)
+    : URLRequestJob(request, network_delegate),
+      response_info_(NULL),
+      response_cookies_save_index_(0),
+      proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
+      server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
+      ALLOW_THIS_IN_INITIALIZER_LIST(start_callback_(
+          base::Bind(&URLRequestHttpJob::OnStartCompleted,
+                     base::Unretained(this)))),
+      ALLOW_THIS_IN_INITIALIZER_LIST(notify_before_headers_sent_callback_(
+          base::Bind(&URLRequestHttpJob::NotifyBeforeSendHeadersCallback,
+                     base::Unretained(this)))),
+      read_in_progress_(false),
+      transaction_(NULL),
+      throttling_entry_(NULL),
+      sdch_dictionary_advertised_(false),
+      sdch_test_activated_(false),
+      sdch_test_control_(false),
+      is_cached_content_(false),
+      request_creation_time_(),
+      packet_timing_enabled_(false),
+      done_(false),
+      bytes_observed_in_packets_(0),
+      request_time_snapshot_(),
+      final_packet_time_(),
+      ALLOW_THIS_IN_INITIALIZER_LIST(
+          filter_context_(new HttpFilterContext(this))),
+      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)),
+      ALLOW_THIS_IN_INITIALIZER_LIST(on_headers_received_callback_(
+          base::Bind(&URLRequestHttpJob::OnHeadersReceivedCallback,
+                     base::Unretained(this)))),
+      awaiting_callback_(false),
+      http_transaction_delegate_(new HttpTransactionDelegateImpl(request)),
+      http_user_agent_settings_(http_user_agent_settings) {
+  // Register the URL with the throttler manager, if one is configured; the
+  // resulting entry gates StartTransactionInternal() and is updated with
+  // each response in NotifyHeadersComplete().
+  URLRequestThrottlerManager* manager = request->context()->throttler_manager();
+  if (manager)
+    throttling_entry_ = manager->RegisterRequestUrl(request->url());
+
+  // Presumably starts latency measurement for this request -- see
+  // ResetTimer().
+  ResetTimer();
+}
+
+// Called once response headers are available.  Caches response state,
+// updates throttling, processes security headers, records any SDCH
+// dictionary advertisement, and either restarts for auth or notifies the
+// base class that headers are complete.
+void URLRequestHttpJob::NotifyHeadersComplete() {
+  DCHECK(!response_info_);
+
+  response_info_ = transaction_->GetResponseInfo();
+
+  // Save boolean, as we'll need this info at destruction time, and filters may
+  // also need this info.
+  is_cached_content_ = response_info_->was_cached;
+
+  // Only real network responses should influence the back-off state.
+  if (!is_cached_content_ && throttling_entry_) {
+    URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
+    throttling_entry_->UpdateWithResponse(request_info_.url.host(),
+                                          &response_adapter);
+  }
+
+  // The ordering of these calls is not important.
+  ProcessStrictTransportSecurityHeader();
+  ProcessPublicKeyPinsHeader();
+
+#if !defined(__LB_SHELL__) && !defined(OS_STARBOARD)
+  if (SdchManager::Global() &&
+      SdchManager::Global()->IsInSupportedDomain(request_->url())) {
+    const std::string name = "Get-Dictionary";
+    std::string url_text;
+    void* iter = NULL;
+    // TODO(jar): We need to not fetch dictionaries the first time they are
+    // seen, but rather wait until we can justify their usefulness.
+    // For now, we will only fetch the first dictionary, which will at least
+    // require multiple suggestions before we get additional ones for this site.
+    // Eventually we should wait until a dictionary is requested several times
+    // before we even download it (so that we don't waste memory or bandwidth).
+    if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
+      // request_->url() won't be valid in the destructor, so we use an
+      // alternate copy.
+      DCHECK_EQ(request_->url(), request_info_.url);
+      // Resolve suggested URL relative to request url.
+      sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
+    }
+  }
+#endif
+
+  // The HTTP transaction may be restarted several times for the purposes
+  // of sending authorization information. Each time it restarts, we get
+  // notified of the headers completion so that we can update the cookie store.
+  if (transaction_->IsReadyToRestartForAuth()) {
+    DCHECK(!response_info_->auth_challenge.get());
+    // TODO(battre): This breaks the webrequest API for
+    // URLRequestTestHTTP.BasicAuthWithCookies
+    // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders
+    // occurs.
+    RestartTransactionWithAuth(AuthCredentials());
+    return;
+  }
+
+  URLRequestJob::NotifyHeadersComplete();
+}
+
+// Records request completion (FINISHED) before delegating to the base class.
+void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
+  DoneWithRequest(FINISHED);
+  URLRequestJob::NotifyDone(status);
+}
+
+// Tears down the transaction (recorded as ABORTED).  Also clears
+// |response_info_|, which was obtained from the transaction being destroyed
+// and must not be used afterwards.
+void URLRequestHttpJob::DestroyTransaction() {
+  DCHECK(transaction_.get());
+
+  DoneWithRequest(ABORTED);
+  transaction_.reset();
+  response_info_ = NULL;
+}
+
+// Starts the transaction, first giving the network delegate (if any) a
+// chance to modify or block the request via NotifyBeforeSendHeaders.
+void URLRequestHttpJob::StartTransaction() {
+  if (request_->context()->network_delegate()) {
+    int rv = request_->context()->network_delegate()->NotifyBeforeSendHeaders(
+        request_, notify_before_headers_sent_callback_,
+        &request_info_.extra_headers);
+    // If an extension blocks the request, we rely on the callback to
+    // MaybeStartTransactionInternal().
+    if (rv == ERR_IO_PENDING) {
+      SetBlockedOnDelegate();
+      return;
+    }
+    MaybeStartTransactionInternal(rv);
+    return;
+  }
+  StartTransactionInternal();
+}
+
+// Completion callback for the network delegate's asynchronous
+// NotifyBeforeSendHeaders decision; unblocks the job and proceeds (or fails)
+// according to |result|.
+void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) {
+  SetUnblockedOnDelegate();
+
+  // Check that there are no callbacks to already canceled requests.
+  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());
+
+  MaybeStartTransactionInternal(result);
+}
+
+// Proceeds with the transaction when the delegate allowed the request
+// (result == OK); otherwise logs the delegate-initiated cancellation and
+// fails the request with |result|.
+void URLRequestHttpJob::MaybeStartTransactionInternal(int result) {
+  if (result == OK) {
+    StartTransactionInternal();
+  } else {
+    std::string source("delegate");
+    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
+                                 NetLog::StringCallback("source", &source));
+    NotifyCanceled();
+    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
+  }
+}
+
+// Creates (or restarts, when one already exists) the HTTP transaction and
+// starts it.  Completion is always reported through OnStartCompleted(),
+// posted via the message loop when the start finishes synchronously.
+void URLRequestHttpJob::StartTransactionInternal() {
+  // NOTE: This method assumes that request_info_ is already setup properly.
+
+  // If we already have a transaction, then we should restart the transaction
+  // with auth provided by auth_credentials_.
+
+  int rv;
+
+  if (request_->context()->network_delegate()) {
+    request_->context()->network_delegate()->NotifySendHeaders(
+        request_, request_info_.extra_headers);
+  }
+
+  if (transaction_.get()) {
+    rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_);
+    // Credentials are single-use; clear them once handed to the transaction.
+    auth_credentials_ = AuthCredentials();
+  } else {
+    DCHECK(request_->context()->http_transaction_factory());
+
+    rv = request_->context()->http_transaction_factory()->CreateTransaction(
+        &transaction_, http_transaction_delegate_.get());
+    if (rv == OK) {
+      // Consult the throttler (if registered) before hitting the network.
+      if (!throttling_entry_ ||
+          !throttling_entry_->ShouldRejectRequest(*request_)) {
+        rv = transaction_->Start(
+            &request_info_, start_callback_, request_->net_log());
+        start_time_ = base::TimeTicks::Now();
+      } else {
+        // Special error code for the exponential back-off module.
+        rv = ERR_TEMPORARILY_THROTTLED;
+      }
+    }
+  }
+
+  if (rv == ERR_IO_PENDING)
+    return;
+
+  // The transaction started synchronously, but we need to notify the
+  // URLRequest delegate via the message loop.
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(&URLRequestHttpJob::OnStartCompleted,
+                 weak_factory_.GetWeakPtr(), rv));
+}
+
+void URLRequestHttpJob::AddExtraHeaders() {
+  // Supply Accept-Encoding field only if it is not already provided.
+  // It should be provided IF the content is known to have restrictions on
+  // potential encoding, such as streaming multi-media.
+  // For details see bug 47381.
+  // TODO(jar, enal): jpeg files etc. should set up a request header if
+  // possible. Right now it is done only by buffered_resource_loader and
+  // simple_data_source.
+  if (!request_info_.extra_headers.HasHeader(
+      HttpRequestHeaders::kAcceptEncoding)) {
+#if !defined(__LB_SHELL__) && !defined(OS_STARBOARD)
+    bool advertise_sdch = SdchManager::Global() &&
+        SdchManager::Global()->IsInSupportedDomain(request_->url());
+    std::string avail_dictionaries;
+    if (advertise_sdch) {
+      SdchManager::Global()->GetAvailDictionaryList(request_->url(),
+                                                    &avail_dictionaries);
+
+      // The AllowLatencyExperiment() is only true if we've successfully done a
+      // full SDCH compression recently in this browser session for this host.
+      // Note that for this path, there might be no applicable dictionaries,
+      // and hence we can't participate in the experiment.
+      if (!avail_dictionaries.empty() &&
+          SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
+        // We are participating in the test (or control), and hence we'll
+        // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
+        // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
+        packet_timing_enabled_ = true;
+        if (base::RandDouble() < .01) {
+          sdch_test_control_ = true;  // 1% probability.
+          advertise_sdch = false;
+        } else {
+          sdch_test_activated_ = true;
+        }
+      }
+    }
+#else
+    bool advertise_sdch = false;
+#endif
+
+    // Supply Accept-Encoding headers first so that it is more likely that they
+    // will be in the first transmitted packet.  This can sometimes make it
+    // easier to filter and analyze the streams to assure that a proxy has not
+    // damaged these headers.  Some proxies deliberately corrupt Accept-Encoding
+    // headers.
+    if (!advertise_sdch) {
+      // Tell the server what compression formats we support (other than SDCH).
+      request_info_.extra_headers.SetHeader(
+          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
+    } else {
+#if !defined(__LB_SHELL__) && !defined(COBALT) && !defined(OS_STARBOARD)
+      // Include SDCH in acceptable list.
+      request_info_.extra_headers.SetHeader(
+          HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
+      if (!avail_dictionaries.empty()) {
+        request_info_.extra_headers.SetHeader(
+            kAvailDictionaryHeader,
+            avail_dictionaries);
+        sdch_dictionary_advertised_ = true;
+        // Since we're tagging this transaction as advertising a dictionary,
+        // we'll definitely employ an SDCH filter (or tentative sdch filter)
+        // when we get a response.  When done, we'll record histograms via
+        // SDCH_DECODE or SDCH_PASSTHROUGH.  Hence we need to record packet
+        // arrival times.
+        packet_timing_enabled_ = true;
+      }
+#endif
+    }
+  }
+
+  if (http_user_agent_settings_) {
+    // Only add default Accept-Language and Accept-Charset if the request
+    // didn't have them specified.
+    std::string accept_language =
+        http_user_agent_settings_->GetAcceptLanguage();
+    if (!accept_language.empty()) {
+      request_info_.extra_headers.SetHeaderIfMissing(
+          HttpRequestHeaders::kAcceptLanguage,
+          accept_language);
+    }
+    std::string accept_charset = http_user_agent_settings_->GetAcceptCharset();
+    if (!accept_charset.empty()) {
+      request_info_.extra_headers.SetHeaderIfMissing(
+          HttpRequestHeaders::kAcceptCharset,
+          accept_charset);
+    }
+  }
+}
+
+void URLRequestHttpJob::AddCookieHeaderAndStart() {
+  // No matter what, we want to report our status as IO pending since we will
+  // be notifying our consumer asynchronously via OnStartCompleted.
+  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+
+  // If the request was destroyed, then there is no more work to do.
+  if (!request_)
+    return;
+
+  CookieStore* cookie_store = request_->context()->cookie_store();
+  if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) {
+    net::CookieMonster* cookie_monster = cookie_store->GetCookieMonster();
+    if (cookie_monster) {
+      cookie_monster->GetAllCookiesForURLAsync(
+          request_->url(),
+          base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad,
+                     weak_factory_.GetWeakPtr()));
+    } else {
+      CheckCookiePolicyAndLoad(CookieList());
+    }
+  } else {
+    DoStartTransaction();
+  }
+}
+
+void URLRequestHttpJob::DoLoadCookies() {
+  CookieOptions options;
+  options.set_include_httponly();
+  request_->context()->cookie_store()->GetCookiesWithInfoAsync(
+      request_->url(), options,
+      base::Bind(&URLRequestHttpJob::OnCookiesLoaded,
+                 weak_factory_.GetWeakPtr()));
+}
+
+void URLRequestHttpJob::CheckCookiePolicyAndLoad(
+    const CookieList& cookie_list) {
+  if (CanGetCookies(cookie_list))
+    DoLoadCookies();
+  else
+    DoStartTransaction();
+}
+
+void URLRequestHttpJob::OnCookiesLoaded(
+    const std::string& cookie_line,
+    const std::vector<net::CookieStore::CookieInfo>& cookie_infos) {
+  if (!cookie_line.empty()) {
+    request_info_.extra_headers.SetHeader(
+        HttpRequestHeaders::kCookie, cookie_line);
+  }
+  DoStartTransaction();
+}
+
+void URLRequestHttpJob::DoStartTransaction() {
+  // We may have been canceled while retrieving cookies.
+  if (GetStatus().is_success()) {
+    StartTransaction();
+  } else {
+    NotifyCanceled();
+  }
+}
+
// Processes the response headers once the network delegate has approved them:
// on success, collects the response's Set-Cookie values and begins persisting
// them; on a delegate error, logs the cancellation and aborts the job.
void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
  if (result != net::OK) {
    // The delegate rejected the headers; record who cancelled us in the log.
    std::string source("delegate");
    request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
                                 NetLog::StringCallback("source", &source));
    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
    return;
  }

  DCHECK(transaction_.get());

  const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
  DCHECK(response_info);

  // Start a fresh cookie-saving pass over this response.
  response_cookies_.clear();
  response_cookies_save_index_ = 0;

  FetchResponseCookies(&response_cookies_);

  // Capture the server's Date header (cleared when absent or unparseable) so
  // cookie expiry can be interpreted relative to server time.
  if (!GetResponseHeaders()->GetDateValue(&response_date_))
    response_date_ = base::Time();

  // Now, loop over the response cookies, and attempt to persist each.
  SaveNextCookie();
}
+
// If the save occurs synchronously, SaveNextCookie will loop and save the next
// cookie. If the save is deferred, the callback is responsible for continuing
// to iterate through the cookies.
// TODO(erikwright): Modify the CookieStore API to indicate via return value
// whether it completed synchronously or asynchronously.
// See http://crbug.com/131066.
void URLRequestHttpJob::SaveNextCookie() {
  // No matter what, we want to report our status as IO pending since we will
  // be notifying our consumer asynchronously via OnStartCompleted.
  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));

  // Used to communicate with the callback. See the implementation of
  // OnCookieSaved.
  scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false);
  scoped_refptr<SharedBoolean> save_next_cookie_running =
      new SharedBoolean(true);

  // Cookies are saved only when not suppressed by load flags, a cookie store
  // exists, and this response actually carried Set-Cookie headers.
  if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) &&
      request_->context()->cookie_store() &&
      response_cookies_.size() > 0) {
    CookieOptions options;
    options.set_include_httponly();
    options.set_server_time(response_date_);

    net::CookieStore::SetCookiesCallback callback(
        base::Bind(&URLRequestHttpJob::OnCookieSaved,
                   weak_factory_.GetWeakPtr(),
                   save_next_cookie_running,
                   callback_pending));

    // Loop through the cookies as long as SetCookieWithOptionsAsync completes
    // synchronously.
    while (!callback_pending->data &&
           response_cookies_save_index_ < response_cookies_.size()) {
      if (CanSetCookie(
          response_cookies_[response_cookies_save_index_], &options)) {
        // OnCookieSaved resets this to false; if it is still true when the
        // loop condition is re-evaluated, the save is completing
        // asynchronously and the callback will resume the iteration.
        callback_pending->data = true;
        request_->context()->cookie_store()->SetCookieWithOptionsAsync(
            request_->url(), response_cookies_[response_cookies_save_index_],
            options, callback);
      }
      ++response_cookies_save_index_;
    }
  }

  save_next_cookie_running->data = false;

  // No save still pending means this pass finished synchronously: reset the
  // iteration state and report the headers to the consumer.
  if (!callback_pending->data) {
    response_cookies_.clear();
    response_cookies_save_index_ = 0;
    SetStatus(URLRequestStatus());  // Clear the IO_PENDING status
    NotifyHeadersComplete();
    return;
  }
}
+
// |save_next_cookie_running| is true when the callback is bound and set to
// false when SaveNextCookie exits, allowing the callback to determine if the
// save occurred synchronously or asynchronously.
// |callback_pending| is set to true by SaveNextCookie when it initiates a
// save, and reset to false here, allowing SaveNextCookie to detect whether
// the save occurred synchronously.
// See SaveNextCookie() for more information.
void URLRequestHttpJob::OnCookieSaved(
    scoped_refptr<SharedBoolean> save_next_cookie_running,
    scoped_refptr<SharedBoolean> callback_pending,
    bool cookie_status) {
  // |cookie_status| is ignored here; iteration continues either way.
  callback_pending->data = false;

  // If we were called synchronously, return.
  if (save_next_cookie_running->data) {
    return;
  }

  // We were called asynchronously, so trigger the next save.
  // We may have been canceled within OnSetCookie.
  if (GetStatus().is_success()) {
    SaveNextCookie();
  } else {
    NotifyCanceled();
  }
}
+
+void URLRequestHttpJob::FetchResponseCookies(
+    std::vector<std::string>* cookies) {
+  const std::string name = "Set-Cookie";
+  std::string value;
+
+  void* iter = NULL;
+  HttpResponseHeaders* headers = GetResponseHeaders();
+  while (headers->EnumerateHeader(&iter, name, &value)) {
+    if (!value.empty())
+      cookies->push_back(value);
+  }
+}
+
// NOTE: |ProcessStrictTransportSecurityHeader| and
// |ProcessPublicKeyPinsHeader| have very similar structures, by design.
// They manipulate different parts of |TransportSecurityState::DomainState|,
// and they must remain complementary. If, in future changes here, there is
// any conflict between their policies (such as in |domain_state.mode|), you
// should resolve the conflict in favor of the more strict policy.
void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
  DCHECK(response_info_);

  const URLRequestContext* ctx = request_->context();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept strict transport security headers on HTTPS connections that
  // have no certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !ctx->transport_security_state()) {
    return;
  }

  TransportSecurityState* security_state = ctx->transport_security_state();
  TransportSecurityState::DomainState domain_state;
  const std::string& host = request_info_.url.host();

  bool sni_available =
      SSLConfigService::IsSNIAvailable(ctx->ssl_config_service());
  if (!security_state->GetDomainState(host, sni_available, &domain_state))
    // |GetDomainState| may have altered |domain_state| while searching. If
    // not found, start with a fresh state.
    domain_state.upgrade_mode =
        TransportSecurityState::DomainState::MODE_FORCE_HTTPS;

  HttpResponseHeaders* headers = GetResponseHeaders();
  std::string value;
  void* iter = NULL;
  base::Time now = base::Time::Now();

  // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec:
  //
  //   If a UA receives more than one STS header field in a HTTP response
  //   message over secure transport, then the UA MUST process only the
  //   first such header field.
  bool seen_sts = false;
  while (headers->EnumerateHeader(&iter, "Strict-Transport-Security", &value)) {
    if (seen_sts)
      return;
    seen_sts = true;
    // NOTE(review): this declaration shadows the |domain_state| prepared
    // above, so the looked-up/initialized outer state is never used by this
    // loop — each header is parsed into a fresh state.  Confirm this is
    // intentional before touching it (the outer GetDomainState call may have
    // side effects of its own).
    TransportSecurityState::DomainState domain_state;
    if (domain_state.ParseSTSHeader(now, value))
      security_state->EnableHost(host, domain_state);
  }
}
+
// Parses any Public-Key-Pins response headers and records the resulting pins
// in the TransportSecurityState.  Structured to mirror
// ProcessStrictTransportSecurityHeader() above — keep the two in sync.
void URLRequestHttpJob::ProcessPublicKeyPinsHeader() {
  DCHECK(response_info_);

  const URLRequestContext* ctx = request_->context();
  const SSLInfo& ssl_info = response_info_->ssl_info;

  // Only accept public key pins headers on HTTPS connections that have no
  // certificate errors.
  if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
      !ctx->transport_security_state()) {
    return;
  }

  TransportSecurityState* security_state = ctx->transport_security_state();
  TransportSecurityState::DomainState domain_state;
  const std::string& host = request_info_.url.host();

  bool sni_available =
      SSLConfigService::IsSNIAvailable(ctx->ssl_config_service());
  if (!security_state->GetDomainState(host, sni_available, &domain_state))
    // |GetDomainState| may have altered |domain_state| while searching. If
    // not found, start with a fresh state.
    domain_state.upgrade_mode =
        TransportSecurityState::DomainState::MODE_DEFAULT;

  HttpResponseHeaders* headers = GetResponseHeaders();
  void* iter = NULL;
  std::string value;
  base::Time now = base::Time::Now();

  // Unlike the STS loop, every Public-Key-Pins header is processed, and each
  // successful parse updates the same |domain_state|.
  while (headers->EnumerateHeader(&iter, "Public-Key-Pins", &value)) {
    // Note that ParsePinsHeader updates |domain_state| (iff the header parses
    // correctly), but does not completely overwrite it. It just updates the
    // dynamic pinning metadata.
    if (domain_state.ParsePinsHeader(now, value, ssl_info))
      security_state->EnableHost(host, domain_state);
  }
}
+
+void URLRequestHttpJob::OnStartCompleted(int result) {
+  RecordTimer();
+
+  // If the request was destroyed, then there is no more work to do.
+  if (!request_)
+    return;
+
+  // If the transaction was destroyed, then the job was cancelled, and
+  // we can just ignore this notification.
+  if (!transaction_.get())
+    return;
+
+  // Clear the IO_PENDING status
+  SetStatus(URLRequestStatus());
+
+  const URLRequestContext* context = request_->context();
+
+  if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN &&
+      transaction_->GetResponseInfo() != NULL) {
+    FraudulentCertificateReporter* reporter =
+      context->fraudulent_certificate_reporter();
+    if (reporter != NULL) {
+      const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info;
+      bool sni_available = SSLConfigService::IsSNIAvailable(
+          context->ssl_config_service());
+      const std::string& host = request_->url().host();
+
+      reporter->SendReport(host, ssl_info, sni_available);
+    }
+  }
+
+  if (result == OK) {
+    scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders();
+    if (context->network_delegate()) {
+      // Note that |this| may not be deleted until
+      // |on_headers_received_callback_| or
+      // |NetworkDelegate::URLRequestDestroyed()| has been called.
+      int error = context->network_delegate()->
+          NotifyHeadersReceived(request_, on_headers_received_callback_,
+                                headers, &override_response_headers_);
+      if (error != net::OK) {
+        if (error == net::ERR_IO_PENDING) {
+          awaiting_callback_ = true;
+          SetBlockedOnDelegate();
+        } else {
+          std::string source("delegate");
+          request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
+                                       NetLog::StringCallback("source",
+                                                              &source));
+          NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error));
+        }
+        return;
+      }
+    }
+
+    SaveCookiesAndNotifyHeadersComplete(net::OK);
+  } else if (IsCertificateError(result)) {
+    // We encountered an SSL certificate error.  Ask our delegate to decide
+    // what we should do.
+
+    TransportSecurityState::DomainState domain_state;
+    const URLRequestContext* context = request_->context();
+    const bool fatal =
+        context->transport_security_state() &&
+        context->transport_security_state()->GetDomainState(
+            request_info_.url.host(),
+            SSLConfigService::IsSNIAvailable(context->ssl_config_service()),
+            &domain_state);
+    NotifySSLCertificateError(transaction_->GetResponseInfo()->ssl_info, fatal);
+  } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
+    NotifyCertificateRequested(
+        transaction_->GetResponseInfo()->cert_request_info);
+  } else {
+    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
+  }
+}
+
// Invoked when the network delegate finishes an asynchronous
// NotifyHeadersReceived (the ERR_IO_PENDING path in OnStartCompleted).
void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
  SetUnblockedOnDelegate();
  awaiting_callback_ = false;

  // Check that there are no callbacks to already canceled requests.
  DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());

  SaveCookiesAndNotifyHeadersComplete(result);
}
+
+void URLRequestHttpJob::OnReadCompleted(int result) {
+  read_in_progress_ = false;
+
+  if (ShouldFixMismatchedContentLength(result))
+    result = OK;
+
+  if (result == OK) {
+    NotifyDone(URLRequestStatus());
+  } else if (result < 0) {
+    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
+  } else {
+    // Clear the IO_PENDING status
+    SetStatus(URLRequestStatus());
+  }
+
+  NotifyReadComplete(result);
+}
+
// Re-issues the transaction with |credentials| after a 401/407 challenge,
// resetting per-response state and refreshing the Cookie header.
void URLRequestHttpJob::RestartTransactionWithAuth(
    const AuthCredentials& credentials) {
  auth_credentials_ = credentials;

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  ResetTimer();

  // Update the cookies, since the cookie store may have been updated from the
  // headers in the 401/407. Since cookies were already appended to
  // extra_headers, we need to strip them out before adding them again.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);

  AddCookieHeaderAndStart();
}
+
// Stores |upload| (a raw pointer; not copied here) as the request body.
// Must be called before the transaction is created.
void URLRequestHttpJob::SetUpload(UploadDataStream* upload) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.upload_data_stream = upload;
}
+
// Copies |headers| into the pending request's extra headers.  Must be called
// before the transaction is created.
void URLRequestHttpJob::SetExtraRequestHeaders(
    const HttpRequestHeaders& headers) {
  DCHECK(!transaction_.get()) << "cannot change once started";
  request_info_.extra_headers.CopyFrom(headers);
}
+
// Entry point for the job: snapshots the request's parameters into
// |request_info_|, sanitizes and sets the standard headers, then begins the
// cookie-loading phase that precedes starting the HTTP transaction.
void URLRequestHttpJob::Start() {
  DCHECK(!transaction_.get());

  // Ensure that we do not send username and password fields in the referrer.
  GURL referrer(request_->GetSanitizedReferrer());

  request_info_.url = request_->url();
  request_info_.method = request_->method();
  request_info_.load_flags = request_->load_flags();
  request_info_.priority = request_->priority();
  request_info_.request_id = request_->identifier();

  // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins
  // from overriding headers that are controlled using other means. Otherwise a
  // plugin could set a referrer although sending the referrer is inhibited.
  request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer);

  // Our consumer should have made sure that this is a safe referrer.  See for
  // instance WebCore::FrameLoader::HideReferrer.
  if (referrer.is_valid()) {
    request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer,
                                          referrer.spec());
  }

  // Fall back to the configured default User-Agent (empty when no settings
  // object is present) unless the caller supplied one.
  request_info_.extra_headers.SetHeaderIfMissing(
      HttpRequestHeaders::kUserAgent,
      http_user_agent_settings_ ? http_user_agent_settings_->GetUserAgent()
                                : EmptyString());

  AddExtraHeaders();
  AddCookieHeaderAndStart();
}
+
// Cancels the job: detaches from the transaction delegate, invalidates any
// in-flight weak-pointer callbacks, tears down the transaction, and defers
// to the base class.
void URLRequestHttpJob::Kill() {
  http_transaction_delegate_->OnDetachRequest();

  if (!transaction_.get())
    return;

  // Cancel pending callbacks bound to this job before destroying the
  // transaction, so none fire afterwards.
  weak_factory_.InvalidateWeakPtrs();
  DestroyTransaction();
  URLRequestJob::Kill();
}
+
+LoadState URLRequestHttpJob::GetLoadState() const {
+  return transaction_.get() ?
+      transaction_->GetLoadState() : LOAD_STATE_IDLE;
+}
+
+UploadProgress URLRequestHttpJob::GetUploadProgress() const {
+  return transaction_.get() ?
+      transaction_->GetUploadProgress() : UploadProgress();
+}
+
+bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
+  DCHECK(transaction_.get());
+
+  if (!response_info_)
+    return false;
+
+  return GetResponseHeaders()->GetMimeType(mime_type);
+}
+
+bool URLRequestHttpJob::GetCharset(std::string* charset) {
+  DCHECK(transaction_.get());
+
+  if (!response_info_)
+    return false;
+
+  return GetResponseHeaders()->GetCharset(charset);
+}
+
+void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
+  DCHECK(request_);
+  DCHECK(transaction_.get());
+
+  if (response_info_) {
+    *info = *response_info_;
+    if (override_response_headers_)
+      info->headers = override_response_headers_;
+  }
+}
+
// Re-extracts this response's Set-Cookie values into |cookies|.  Returns
// false until response info is available.
bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) {
  DCHECK(transaction_.get());

  if (!response_info_)
    return false;

  // TODO(darin): Why are we extracting response cookies again?  Perhaps we
  // should just leverage response_cookies_.

  cookies->clear();
  FetchResponseCookies(cookies);
  return true;
}
+
+int URLRequestHttpJob::GetResponseCode() const {
+  DCHECK(transaction_.get());
+
+  if (!response_info_)
+    return -1;
+
+  return GetResponseHeaders()->response_code();
+}
+
// Builds the content-decoding filter chain from the response's
// Content-Encoding headers (with SDCH-specific corrections).  Returns NULL
// when no decoding is needed or no response info exists yet; the caller owns
// the returned Filter.
Filter* URLRequestHttpJob::SetupFilter() const {
  DCHECK(transaction_.get());
  if (!response_info_)
    return NULL;

  // Collect one filter type per Content-Encoding header value, in order.
  std::vector<Filter::FilterType> encoding_types;
  std::string encoding_type;
  HttpResponseHeaders* headers = GetResponseHeaders();
  void* iter = NULL;
  while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) {
    encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type));
  }

  if (filter_context_->IsSdchResponse()) {
    // We are wary of proxies that discard or damage SDCH encoding.  If a server
    // explicitly states that this is not SDCH content, then we can correct our
    // assumption that this is an SDCH response, and avoid the need to recover
    // as though the content is corrupted (when we discover it is not SDCH
    // encoded).
    std::string sdch_response_status;
    iter = NULL;
    while (headers->EnumerateHeader(&iter, "X-Sdch-Encode",
                                    &sdch_response_status)) {
      if (sdch_response_status == "0") {
        filter_context_->ResetSdchResponseToFalse();
        break;
      }
    }
  }

  // Even if encoding types are empty, there is a chance that we need to add
  // some decoding, as some proxies strip encoding completely. In such cases,
  // we may need to add (for example) SDCH filtering (when the context suggests
  // it is appropriate).
  Filter::FixupEncodingTypes(*filter_context_, &encoding_types);

  return !encoding_types.empty()
      ? Filter::Factory(encoding_types, *filter_context_) : NULL;
}
+
+bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
+  // We only allow redirects to certain "safe" protocols.  This does not
+  // restrict redirects to externally handled protocols.  Our consumer would
+  // need to take care of those.
+
+  if (!URLRequest::IsHandledURL(location))
+    return true;
+
+  static const char* kSafeSchemes[] = {
+    "http",
+    "https",
+    "ftp"
+  };
+
+  for (size_t i = 0; i < arraysize(kSafeSchemes); ++i) {
+    if (location.SchemeIs(kSafeSchemes[i]))
+      return true;
+  }
+
+  return false;
+}
+
+bool URLRequestHttpJob::NeedsAuth() {
+  int code = GetResponseCode();
+  if (code == -1)
+    return false;
+
+  // Check if we need either Proxy or WWW Authentication.  This could happen
+  // because we either provided no auth info, or provided incorrect info.
+  switch (code) {
+    case 407:
+      if (proxy_auth_state_ == AUTH_STATE_CANCELED)
+        return false;
+      proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
+      return true;
+    case 401:
+      if (server_auth_state_ == AUTH_STATE_CANCELED)
+        return false;
+      server_auth_state_ = AUTH_STATE_NEED_AUTH;
+      return true;
+  }
+  return false;
+}
+
// Copies the auth challenge from the response info into |*result|.  Only
// meaningful after NeedsAuth() has reported true.
void URLRequestHttpJob::GetAuthChallengeInfo(
    scoped_refptr<AuthChallengeInfo>* result) {
  DCHECK(transaction_.get());
  DCHECK(response_info_);

  // sanity checks:
  DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
         server_auth_state_ == AUTH_STATE_NEED_AUTH);
  DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) ||
         (GetResponseHeaders()->response_code() ==
          HTTP_PROXY_AUTHENTICATION_REQUIRED));

  *result = response_info_->auth_challenge;
}
+
+void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) {
+  DCHECK(transaction_.get());
+
+  // Proxy gets set first, then WWW.
+  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
+    proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
+  } else {
+    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
+    server_auth_state_ = AUTH_STATE_HAVE_AUTH;
+  }
+
+  RestartTransactionWithAuth(credentials);
+}
+
// Abandons the outstanding auth challenge and lets the consumer read the
// 401/407 error page instead.
void URLRequestHttpJob::CancelAuth() {
  // Proxy gets set first, then WWW.
  if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
    proxy_auth_state_ = AUTH_STATE_CANCELED;
  } else {
    DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
    server_auth_state_ = AUTH_STATE_CANCELED;
  }

  // These will be reset in OnStartCompleted.
  response_info_ = NULL;
  response_cookies_.clear();

  ResetTimer();

  // OK, let the consumer read the error page...
  //
  // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
  // which will cause the consumer to receive OnResponseStarted instead of
  // OnAuthRequired.
  //
  // We have to do this via InvokeLater to avoid "recursing" the consumer.
  //
  MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&URLRequestHttpJob::OnStartCompleted,
                 weak_factory_.GetWeakPtr(), OK));
}
+
+void URLRequestHttpJob::ContinueWithCertificate(
+    X509Certificate* client_cert) {
+  DCHECK(transaction_.get());
+
+  DCHECK(!response_info_) << "should not have a response yet";
+
+  ResetTimer();
+
+  // No matter what, we want to report our status as IO pending since we will
+  // be notifying our consumer asynchronously via OnStartCompleted.
+  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+
+  int rv = transaction_->RestartWithCertificate(client_cert, start_callback_);
+  if (rv == ERR_IO_PENDING)
+    return;
+
+  // The transaction started synchronously, but we need to notify the
+  // URLRequest delegate via the message loop.
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(&URLRequestHttpJob::OnStartCompleted,
+                 weak_factory_.GetWeakPtr(), rv));
+}
+
+void URLRequestHttpJob::ContinueDespiteLastError() {
+  // If the transaction was destroyed, then the job was cancelled.
+  if (!transaction_.get())
+    return;
+
+  DCHECK(!response_info_) << "should not have a response yet";
+
+  ResetTimer();
+
+  // No matter what, we want to report our status as IO pending since we will
+  // be notifying our consumer asynchronously via OnStartCompleted.
+  SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+
+  int rv = transaction_->RestartIgnoringLastError(start_callback_);
+  if (rv == ERR_IO_PENDING)
+    return;
+
+  // The transaction started synchronously, but we need to notify the
+  // URLRequest delegate via the message loop.
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(&URLRequestHttpJob::OnStartCompleted,
+                 weak_factory_.GetWeakPtr(), rv));
+}
+
+bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const {
+  // Some servers send the body compressed, but specify the content length as
+  // the uncompressed size.  Although this violates the HTTP spec we want to
+  // support it (as IE and FireFox do), but *only* for an exact match.
+  // See http://crbug.com/79694.
+  if (rv == net::ERR_CONTENT_LENGTH_MISMATCH ||
+      rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) {
+    if (request_ && request_->response_headers()) {
+      int64 expected_length = request_->response_headers()->GetContentLength();
+      VLOG(1) << __FUNCTION__ << "() "
+              << "\"" << request_->url().spec() << "\""
+              << " content-length = " << expected_length
+              << " pre total = " << prefilter_bytes_read()
+              << " post total = " << postfilter_bytes_read();
+      if (postfilter_bytes_read() == expected_length) {
+        // Clear the error.
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
// Reads response body bytes into |buf|.  Returns true with |*bytes_read| set
// when the read completes synchronously (0 meaning end of stream); returns
// false with IO_PENDING status when data will arrive via OnReadCompleted, or
// after notifying failure for any other error.
bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
                                    int* bytes_read) {
  DCHECK_NE(buf_size, 0);
  DCHECK(bytes_read);
  DCHECK(!read_in_progress_);

  int rv = transaction_->Read(
      buf, buf_size,
      base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this)));

  // An exact content-length mismatch at end of stream is treated as EOF.
  if (ShouldFixMismatchedContentLength(rv))
    rv = 0;

  if (rv >= 0) {
    *bytes_read = rv;
    if (!rv)
      DoneWithRequest(FINISHED);
    return true;
  }

  if (rv == ERR_IO_PENDING) {
    read_in_progress_ = true;
    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
  } else {
    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
  }

  return false;
}
+
// Tells the transaction (if any) to stop writing this response to the cache.
void URLRequestHttpJob::StopCaching() {
  if (transaction_.get())
    transaction_->StopCaching();
}
+
// Signals that the consumer will read no further; informs the transaction and
// records the request as finished.
void URLRequestHttpJob::DoneReading() {
  if (transaction_.get())
    transaction_->DoneReading();
  DoneWithRequest(FINISHED);
}
+
+HostPortPair URLRequestHttpJob::GetSocketAddress() const {
+  return response_info_ ? response_info_->socket_address : HostPortPair();
+}
+
// Destructor: flushes SDCH experiment statistics, tears down filters while
// |filter_context_| is still alive, kicks off any deferred dictionary fetch,
// and records the request as aborted.
URLRequestHttpJob::~URLRequestHttpJob() {
  // NOTE(review): presumably guards against destruction while a delegate
  // callback is still outstanding — confirm against OnStartCompleted's
  // |awaiting_callback_| handling.
  CHECK(!awaiting_callback_);

#if !defined(__LB_SHELL__) && !defined(OS_STARBOARD)
  DCHECK(!sdch_test_control_ || !sdch_test_activated_);
  if (!is_cached_content_) {
    if (sdch_test_control_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
    if (sdch_test_activated_)
      RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
  }
  // Make sure SDCH filters are told to emit histogram data while
  // filter_context_ is still alive.
  DestroyFilters();

  if (sdch_dictionary_url_.is_valid()) {
    // Prior to reaching the destructor, request_ has been set to a NULL
    // pointer, so request_->url() is no longer valid in the destructor, and we
    // use an alternate copy |request_info_.url|.
    SdchManager* manager = SdchManager::Global();
    // To be extra safe, since this is a "different time" from when we decided
    // to get the dictionary, we'll validate that an SdchManager is available.
    // At shutdown time, care is taken to be sure that we don't delete this
    // globally useful instance "too soon," so this check is just defensive
    // coding to assure that IF the system is shutting down, we don't have any
    // problem if the manager was deleted ahead of time.
    if (manager)  // Defensive programming.
      manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
  }
#endif
  DoneWithRequest(ABORTED);
}
+
// Records the elapsed time since |request_creation_time_| into the
// "Net.HttpTimeToFirstByte" histogram plus per-field-trial variants, then
// clears the timestamp so the same interval is never recorded twice.
// The UMA_HISTOGRAM_* macros cache a histogram pointer in a per-call-site
// static, so each trial's recording must stay at its own call site — do not
// refactor these into a loop or helper.
void URLRequestHttpJob::RecordTimer() {
  if (request_creation_time_.is_null()) {
    NOTREACHED()
        << "The same transaction shouldn't start twice without new timing.";
    return;
  }

  base::TimeDelta to_start = base::Time::Now() - request_creation_time_;
  request_creation_time_ = base::Time();

  UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start);

  // Each |static const bool| below caches its field-trial existence check the
  // first time this function runs.
  static const bool use_overlapped_read_histogram =
      base::FieldTrialList::TrialExists("OverlappedReadImpact");
  if (use_overlapped_read_histogram) {
    UMA_HISTOGRAM_MEDIUM_TIMES(
        base::FieldTrial::MakeName("Net.HttpTimeToFirstByte",
                                   "OverlappedReadImpact"),
        to_start);
  }

  static const bool use_warm_socket_impact_histogram =
      base::FieldTrialList::TrialExists("WarmSocketImpact");
  if (use_warm_socket_impact_histogram) {
    UMA_HISTOGRAM_MEDIUM_TIMES(
        base::FieldTrial::MakeName("Net.HttpTimeToFirstByte",
                                   "WarmSocketImpact"),
        to_start);
  }

  static const bool use_prefetch_histogram =
      base::FieldTrialList::TrialExists("Prefetch");
  if (use_prefetch_histogram) {
    UMA_HISTOGRAM_MEDIUM_TIMES(
        base::FieldTrial::MakeName("Net.HttpTimeToFirstByte",
                                   "Prefetch"),
        to_start);
  }
  static const bool use_prerender_histogram =
      base::FieldTrialList::TrialExists("Prerender");
  if (use_prerender_histogram) {
    UMA_HISTOGRAM_MEDIUM_TIMES(
        base::FieldTrial::MakeName("Net.HttpTimeToFirstByte",
                                   "Prerender"),
        to_start);
  }
}
+
// Marks the start of a new timing interval for RecordTimer().  Resetting
// while an interval is still outstanding means the previous measurement was
// never recorded.
void URLRequestHttpJob::ResetTimer() {
  if (!request_creation_time_.is_null()) {
    NOTREACHED()
        << "The timer was reset before it was recorded.";
    return;
  }
  request_creation_time_ = base::Time::Now();
}
+
+void URLRequestHttpJob::UpdatePacketReadTimes() {  // Tracks byte/time stats for RecordPacketStats().
+  if (!packet_timing_enabled_)
+    return;
+
+  if (filter_input_byte_count() <= bytes_observed_in_packets_) {
+    DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_);  // Count must never go backwards.
+    return;  // No new bytes have arrived.
+  }
+
+  final_packet_time_ = base::Time::Now();
+  if (!bytes_observed_in_packets_)  // First bytes: snapshot request time before request_ can go away.
+    request_time_snapshot_ = request_ ? request_->request_time() : base::Time();
+
+  bytes_observed_in_packets_ = filter_input_byte_count();
+}
+
+void URLRequestHttpJob::RecordPacketStats(  // Emits SDCH UMA stats selected by |statistic|.
+    FilterContext::StatisticSelector statistic) const {
+  if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
+    return;  // Nothing observed by UpdatePacketReadTimes(); no stats to report.
+
+  base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
+  switch (statistic) {
+    case FilterContext::SDCH_DECODE: {
+      UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
+          static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
+      return;
+    }
+    case FilterContext::SDCH_PASSTHROUGH: {
+      // Despite advertising a dictionary, we handled non-sdch compressed
+      // content.
+      return;
+    }
+
+    case FilterContext::SDCH_EXPERIMENT_DECODE: {
+      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode",
+                                  duration,
+                                  base::TimeDelta::FromMilliseconds(20),
+                                  base::TimeDelta::FromMinutes(10), 100);
+      return;
+    }
+    case FilterContext::SDCH_EXPERIMENT_HOLDBACK: {
+      UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback",
+                                  duration,
+                                  base::TimeDelta::FromMilliseconds(20),
+                                  base::TimeDelta::FromMinutes(10), 100);
+      return;
+    }
+    default:
+      NOTREACHED();  // Caller passed a selector this job does not report.
+      return;
+  }
+}
+
+// The common type of histogram (custom counts, 500..1000000, 100 buckets) for all compression-tracking histograms.
+#define COMPRESSION_HISTOGRAM(name, sample) \
+    do { \
+      UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
+                                  500, 1000000, 100); \
+    } while (0)
+
+void URLRequestHttpJob::RecordCompressionHistograms() {  // UMA: how often resources arrive compressed.
+  DCHECK(request_);
+  if (!request_)  // Defensive: DCHECK is a no-op in release builds.
+    return;
+
+  if (is_cached_content_ ||                // Don't record cached content
+      !GetStatus().is_success() ||         // Don't record failed content
+      !IsCompressibleContent() ||          // Only record compressible content
+      !prefilter_bytes_read())       // Zero-byte responses aren't useful.
+    return;
+
+  // Miniature requests aren't really compressible.  Don't count them.
+  const int kMinSize = 16;
+  if (prefilter_bytes_read() < kMinSize)
+    return;
+
+  // Only record for http or https urls.
+  bool is_http = request_->url().SchemeIs("http");
+  bool is_https = request_->url().SchemeIs("https");
+  if (!is_http && !is_https)
+    return;
+
+  int compressed_B = prefilter_bytes_read();  // Bytes before any filter ran (on-the-wire size).
+  int decompressed_B = postfilter_bytes_read();  // Bytes after filtering (decoded size).
+  bool was_filtered = HasFilter();
+
+  // We want to record how often downloaded resources are compressed.
+  // But, we recognize that different protocols may have different
+  // properties.  So, for each request, we'll put it into one of 3
+  // groups:
+  //      a) SSL resources
+  //         Proxies cannot tamper with compression headers with SSL.
+  //      b) Non-SSL, loaded-via-proxy resources
+  //         In this case, we know a proxy might have interfered.
+  //      c) Non-SSL, loaded-without-proxy resources
+  //         In this case, we know there was no explicit proxy.  However,
+  //         it is possible that a transparent proxy was still interfering.
+  //
+  // For each group, we record the same 3 histograms.
+
+  if (is_https) {
+    if (was_filtered) {
+      COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
+      COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B);
+    } else {
+      COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B);
+    }
+    return;
+  }
+
+  if (request_->was_fetched_via_proxy()) {
+    if (was_filtered) {
+      COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B);
+      COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B);
+    } else {
+      COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B);
+    }
+    return;
+  }
+
+  if (was_filtered) {
+    COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
+    COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
+  } else {
+    COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
+  }
+}
+
+bool URLRequestHttpJob::IsCompressibleContent() const {  // True for JS or non-image supported MIME types.
+  std::string mime_type;
+  return GetMimeType(&mime_type) &&
+      (IsSupportedJavascriptMimeType(mime_type.c_str()) ||
+       IsSupportedNonImageMimeType(mime_type.c_str()));
+}
+
+void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) {  // Total-time UMA, split by outcome/cache/trials.
+  if (start_time_.is_null())  // Already recorded (cleared at the bottom), or job never started.
+    return;
+
+  base::TimeDelta total_time = base::TimeTicks::Now() - start_time_;
+  UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time);
+
+  if (reason == FINISHED) {
+    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time);
+  } else {
+    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time);
+  }
+
+  if (response_info_) {
+    if (response_info_->was_cached) {
+      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time);
+    } else  {
+      UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time);
+    }
+  }
+
+  static const bool use_overlapped_read_histogram =  // Trial lookup cached once per process.
+      base::FieldTrialList::TrialExists("OverlappedReadImpact");
+  if (use_overlapped_read_histogram) {
+    UMA_HISTOGRAM_TIMES(
+        base::FieldTrial::MakeName("Net.HttpJob.TotalTime",
+                                   "OverlappedReadImpact"),
+        total_time);
+
+    if (reason == FINISHED) {
+      UMA_HISTOGRAM_TIMES(
+          base::FieldTrial::MakeName("Net.HttpJob.TotalTimeSuccess",
+                                     "OverlappedReadImpact"),
+          total_time);
+    } else {
+      UMA_HISTOGRAM_TIMES(
+          base::FieldTrial::MakeName("Net.HttpJob.TotalTimeCancel",
+                                     "OverlappedReadImpact"),
+          total_time);
+    }
+
+    if (response_info_) {
+      if (response_info_->was_cached) {
+        UMA_HISTOGRAM_TIMES(
+            base::FieldTrial::MakeName("Net.HttpJob.TotalTimeCached",
+                                       "OverlappedReadImpact"),
+            total_time);
+      } else  {
+        UMA_HISTOGRAM_TIMES(
+            base::FieldTrial::MakeName("Net.HttpJob.TotalTimeNotCached",
+                                       "OverlappedReadImpact"),
+            total_time);
+      }
+    }
+  }
+
+  static const bool cache_sensitivity_analysis =
+      base::FieldTrialList::TrialExists("CacheSensitivityAnalysis");
+  if (cache_sensitivity_analysis) {  // Same histogram set, annotated for the cache trial.
+    UMA_HISTOGRAM_TIMES(
+        base::FieldTrial::MakeName("Net.HttpJob.TotalTime",
+                                   "CacheSensitivityAnalysis"),
+        total_time);
+
+    if (reason == FINISHED) {
+      UMA_HISTOGRAM_TIMES(
+          base::FieldTrial::MakeName("Net.HttpJob.TotalTimeSuccess",
+                                     "CacheSensitivityAnalysis"),
+          total_time);
+    } else {
+      UMA_HISTOGRAM_TIMES(
+          base::FieldTrial::MakeName("Net.HttpJob.TotalTimeCancel",
+                                     "CacheSensitivityAnalysis"),
+          total_time);
+    }
+
+    if (response_info_) {
+      if (response_info_->was_cached) {
+        UMA_HISTOGRAM_TIMES(
+            base::FieldTrial::MakeName("Net.HttpJob.TotalTimeCached",
+                                       "CacheSensitivityAnalysis"),
+            total_time);
+      } else  {
+        UMA_HISTOGRAM_TIMES(
+            base::FieldTrial::MakeName("Net.HttpJob.TotalTimeNotCached",
+                                       "CacheSensitivityAnalysis"),
+            total_time);
+      }
+    }
+  }
+
+  start_time_ = base::TimeTicks();  // Reset so re-entry records nothing.
+}
+
+void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) {  // One-shot end-of-job bookkeeping.
+  if (done_)
+    return;
+  done_ = true;
+  RecordPerfHistograms(reason);
+  if (reason == FINISHED) {  // Only successful completions contribute compression stats.
+    request_->set_received_response_content_length(prefilter_bytes_read());
+    RecordCompressionHistograms();
+  }
+}
+
+HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const {  // Effective headers: delegate override wins.
+  DCHECK(transaction_.get());
+  DCHECK(transaction_->GetResponseInfo());
+  return override_response_headers_.get() ?
+      override_response_headers_ :
+      transaction_->GetResponseInfo()->headers;
+}
+
+void URLRequestHttpJob::NotifyURLRequestDestroyed() {  // Clears the fail-fast flag (see awaiting_callback_).
+  awaiting_callback_ = false;
+}
+
+void URLRequestHttpJob::OnDetachRequest() {  // Forwards the detach to the transaction delegate.
+  http_transaction_delegate_->OnDetachRequest();
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_http_job.h b/src/net/url_request/url_request_http_job.h
new file mode 100644
index 0000000..9ef3884
--- /dev/null
+++ b/src/net/url_request/url_request_http_job.h
@@ -0,0 +1,258 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_HTTP_JOB_H_
+#define NET_URL_REQUEST_URL_REQUEST_HTTP_JOB_H_
+
+#include <string>
+#include <vector>
+
+#include "base/compiler_specific.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/time.h"
+#include "net/base/auth.h"
+#include "net/base/completion_callback.h"
+#include "net/cookies/cookie_store.h"
+#include "net/http/http_request_info.h"
+#include "net/url_request/url_request_job.h"
+#include "net/url_request/url_request_throttler_entry_interface.h"
+
+namespace net {
+
+class HttpResponseHeaders;
+class HttpResponseInfo;
+class HttpTransaction;
+class HttpUserAgentSettings;
+class UploadDataStream;
+class URLRequestContext;
+
+// A URLRequestJob subclass that is built on top of HttpTransaction.  It
+// provides an implementation for both HTTP and HTTPS.
+class URLRequestHttpJob : public URLRequestJob {
+ public:
+  static URLRequestJob* Factory(URLRequest* request,
+                                NetworkDelegate* network_delegate,
+                                const std::string& scheme);
+
+ protected:
+  URLRequestHttpJob(URLRequest* request,
+                    NetworkDelegate* network_delegate,
+                    const HttpUserAgentSettings* http_user_agent_settings);
+
+  // Shadows URLRequestJob's version of this method so we can grab cookies.
+  void NotifyHeadersComplete();
+
+  // Shadows URLRequestJob's method so we can record histograms.
+  void NotifyDone(const URLRequestStatus& status);
+
+  void DestroyTransaction();
+
+  void AddExtraHeaders();
+  void AddCookieHeaderAndStart();
+  void SaveCookiesAndNotifyHeadersComplete(int result);
+  void SaveNextCookie();
+  void FetchResponseCookies(std::vector<std::string>* cookies);
+
+  // Processes the Strict-Transport-Security header, if one exists.
+  void ProcessStrictTransportSecurityHeader();
+
+  // Processes the Public-Key-Pins header, if one exists.
+  void ProcessPublicKeyPinsHeader();
+
+  // |result| should be net::OK, or the request is canceled.
+  void OnHeadersReceivedCallback(int result);
+  void OnStartCompleted(int result);
+  void OnReadCompleted(int result);
+  void NotifyBeforeSendHeadersCallback(int result);
+
+  void RestartTransactionWithAuth(const AuthCredentials& credentials);
+
+  // Overridden from URLRequestJob:
+  virtual void SetUpload(UploadDataStream* upload) OVERRIDE;
+  virtual void SetExtraRequestHeaders(
+      const HttpRequestHeaders& headers) OVERRIDE;
+  virtual void Start() OVERRIDE;
+  virtual void Kill() OVERRIDE;
+  virtual LoadState GetLoadState() const OVERRIDE;
+  virtual UploadProgress GetUploadProgress() const OVERRIDE;
+  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
+  virtual bool GetCharset(std::string* charset) OVERRIDE;
+  virtual void GetResponseInfo(HttpResponseInfo* info) OVERRIDE;
+  virtual bool GetResponseCookies(std::vector<std::string>* cookies) OVERRIDE;
+  virtual int GetResponseCode() const OVERRIDE;
+  virtual Filter* SetupFilter() const OVERRIDE;
+  virtual bool IsSafeRedirect(const GURL& location) OVERRIDE;
+  virtual bool NeedsAuth() OVERRIDE;
+  virtual void GetAuthChallengeInfo(scoped_refptr<AuthChallengeInfo>*) OVERRIDE;
+  virtual void SetAuth(const AuthCredentials& credentials) OVERRIDE;
+  virtual void CancelAuth() OVERRIDE;
+  virtual void ContinueWithCertificate(X509Certificate* client_cert) OVERRIDE;
+  virtual void ContinueDespiteLastError() OVERRIDE;
+  virtual bool ReadRawData(IOBuffer* buf, int buf_size,
+                           int* bytes_read) OVERRIDE;
+  virtual void StopCaching() OVERRIDE;
+  virtual void DoneReading() OVERRIDE;
+  virtual HostPortPair GetSocketAddress() const OVERRIDE;
+  virtual void NotifyURLRequestDestroyed() OVERRIDE;
+
+  HttpRequestInfo request_info_;
+  const HttpResponseInfo* response_info_;  // Owned by the transaction; may be NULL before Start.
+
+  std::vector<std::string> response_cookies_;
+  size_t response_cookies_save_index_;  // Next index into |response_cookies_| (see SaveNextCookie).
+  base::Time response_date_;
+
+  // Auth states for proxy and origin server.
+  AuthState proxy_auth_state_;
+  AuthState server_auth_state_;
+  AuthCredentials auth_credentials_;
+
+  CompletionCallback start_callback_;
+  CompletionCallback notify_before_headers_sent_callback_;
+
+  bool read_in_progress_;  // NOTE(review): presumably true while a raw read is outstanding -- confirm in .cc.
+
+  // An URL for an SDCH dictionary as suggested in a Get-Dictionary HTTP header.
+  GURL sdch_dictionary_url_;
+
+  scoped_ptr<HttpTransaction> transaction_;
+
+  // This is used to supervise traffic and enforce exponential
+  // back-off.  May be NULL.
+  scoped_refptr<URLRequestThrottlerEntryInterface> throttling_entry_;
+
+  // Indicated if an SDCH dictionary was advertised, and hence an SDCH
+  // compressed response is expected.  We use this to help detect (accidental?)
+  // proxy corruption of a response, which sometimes marks SDCH content as
+  // having no content encoding <oops>.
+  bool sdch_dictionary_advertised_;
+
+  // For SDCH latency experiments, when we are able to do SDCH, we may enable
+  // either an SDCH latency test xor a pass through test.  The following bools
+  // indicate what we decided on for this instance.
+  bool sdch_test_activated_;  // Advertising a dictionary for sdch.
+  bool sdch_test_control_;    // Not even accepting-content sdch.
+
+  // For recording of stats, we need to remember if this is cached content.
+  bool is_cached_content_;
+
+ private:
+  enum CompletionCause {
+    ABORTED,
+    FINISHED
+  };
+
+  typedef base::RefCountedData<bool> SharedBoolean;  // Shared flag used by the cookie-save callbacks.
+
+  class HttpFilterContext;
+  class HttpTransactionDelegateImpl;
+
+  virtual ~URLRequestHttpJob();
+
+  void RecordTimer();
+  void ResetTimer();
+
+  virtual void UpdatePacketReadTimes() OVERRIDE;
+  void RecordPacketStats(FilterContext::StatisticSelector statistic) const;
+
+  void RecordCompressionHistograms();
+  bool IsCompressibleContent() const;
+
+  // Starts the transaction if extensions using the webrequest API do not
+  // object.
+  void StartTransaction();
+  // If |result| is net::OK, calls StartTransactionInternal. Otherwise notifies
+  // cancellation.
+  void MaybeStartTransactionInternal(int result);
+  void StartTransactionInternal();
+
+  void RecordPerfHistograms(CompletionCause reason);
+  void DoneWithRequest(CompletionCause reason);
+
+  // Callback functions for Cookie Monster
+  void DoLoadCookies();
+  void CheckCookiePolicyAndLoad(const CookieList& cookie_list);
+  void OnCookiesLoaded(
+      const std::string& cookie_line,
+      const std::vector<CookieStore::CookieInfo>& cookie_infos);
+  void DoStartTransaction();
+
+  // See the implementation for a description of save_next_cookie_running and
+  // callback_pending.
+  void OnCookieSaved(scoped_refptr<SharedBoolean> save_next_cookie_running,
+                     scoped_refptr<SharedBoolean> callback_pending,
+                     bool cookie_status);
+
+  // Some servers send the body compressed, but specify the content length as
+  // the uncompressed size. If this is the case, we return true in order
+  // to request to work around this non-adherence to the HTTP standard.
+  // |rv| is the standard return value of a read function indicating the number
+  // of bytes read or, if negative, an error code.
+  bool ShouldFixMismatchedContentLength(int rv) const;
+
+  // Returns the effective response headers, considering that they may be
+  // overridden by |override_response_headers_|.
+  HttpResponseHeaders* GetResponseHeaders() const;
+
+  // Override of the private interface of URLRequestJob.
+  virtual void OnDetachRequest() OVERRIDE;
+
+  base::Time request_creation_time_;  // Set by ResetTimer(); consumed and cleared by RecordTimer().
+
+  // Data used for statistics gathering. This data is only used for histograms
+  // and is not required. It is only gathered if packet_timing_enabled_ == true.
+  //
+  // TODO(jar): improve the quality of the gathered info by gathering most times
+  // at a lower point in the network stack, assuring we have actual packet
+  // boundaries, rather than approximations.  Also note that input byte count
+  // as gathered here is post-SSL, and post-cache-fetch, and does not reflect
+  // true packet arrival times in such cases.
+
+  // Enable recording of packet arrival times for histogramming.
+  bool packet_timing_enabled_;
+  bool done_;  // True when we are done doing work.
+
+  // The number of bytes that have been accounted for in packets (where some of
+  // those packets may possibly have had their time of arrival recorded).
+  int64 bytes_observed_in_packets_;
+
+  // The request time may not be available when we are being destroyed, so we
+  // snapshot it early on.
+  base::Time request_time_snapshot_;
+
+  // Since we don't save all packet times in packet_times_, we save the
+  // last time for use in histograms.
+  base::Time final_packet_time_;
+
+  // The start time for the job, ignoring re-starts.
+  base::TimeTicks start_time_;
+
+  scoped_ptr<HttpFilterContext> filter_context_;
+  base::WeakPtrFactory<URLRequestHttpJob> weak_factory_;
+
+  CompletionCallback on_headers_received_callback_;
+
+  // We allow the network delegate to modify a copy of the response headers.
+  // This prevents modifications of headers that are shared with the underlying
+  // layers of the network stack.
+  scoped_refptr<HttpResponseHeaders> override_response_headers_;
+
+  // Flag used to verify that |this| is not deleted while we are awaiting
+  // a callback from the NetworkDelegate. Used as a fail-fast mechanism.
+  // True if we are waiting a callback and
+  // NetworkDelegate::NotifyURLRequestDestroyed has not been called, yet,
+  // to inform the NetworkDelegate that it may not call back.
+  bool awaiting_callback_;
+
+  scoped_ptr<HttpTransactionDelegateImpl> http_transaction_delegate_;
+
+  const HttpUserAgentSettings* http_user_agent_settings_;  // Not owned.
+
+  DISALLOW_COPY_AND_ASSIGN(URLRequestHttpJob);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_HTTP_JOB_H_
diff --git a/src/net/url_request/url_request_job.cc b/src/net/url_request/url_request_job.cc
new file mode 100644
index 0000000..c929665
--- /dev/null
+++ b/src/net/url_request/url_request_job.cc
@@ -0,0 +1,711 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_job.h"
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/message_loop.h"
+#include "base/string_number_conversions.h"
+#include "base/string_util.h"
+#include "net/base/auth.h"
+#include "net/base/host_port_pair.h"
+#include "net/base/io_buffer.h"
+#include "net/base/load_states.h"
+#include "net/base/net_errors.h"
+#include "net/base/network_delegate.h"
+#include "net/http/http_response_headers.h"
+#include "net/url_request/url_request.h"
+
+namespace net {
+
+URLRequestJob::URLRequestJob(URLRequest* request,
+                             NetworkDelegate* network_delegate)
+    : request_(request),
+      done_(false),
+      prefilter_bytes_read_(0),
+      postfilter_bytes_read_(0),
+      filter_input_byte_count_(0),
+      filter_needs_more_output_space_(false),
+      filtered_read_buffer_len_(0),
+      has_handled_response_(false),
+      expected_content_size_(-1),
+      deferred_redirect_status_code_(-1),
+      network_delegate_(network_delegate),
+      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) {
+  base::SystemMonitor* system_monitor = base::SystemMonitor::Get();
+  if (system_monitor)  // Observer is removed in the destructor.
+    base::SystemMonitor::Get()->AddPowerObserver(this);  // NOTE(review): could reuse |system_monitor| instead of a second Get().
+}
+
+void URLRequestJob::SetUpload(UploadDataStream* upload) {  // Base-class no-op; jobs supporting uploads override.
+}
+
+void URLRequestJob::SetExtraRequestHeaders(const HttpRequestHeaders& headers) {  // Base-class no-op; header-aware jobs override.
+}
+
+void URLRequestJob::Kill() {  // Cancels outstanding posted tasks, then reports cancellation.
+  weak_factory_.InvalidateWeakPtrs();
+  // Make sure the request is notified that we are done.  We assume that the
+  // request took care of setting its error status before calling Kill.
+  if (request_)
+    NotifyCanceled();
+}
+
+void URLRequestJob::DetachRequest() {  // Severs the back-pointer, then lets subclasses react via OnDetachRequest().
+  request_ = NULL;
+  OnDetachRequest();
+}
+
+// This function calls ReadData to get stream data. If a filter exists, passes
+// the data to the attached filter. Then returns the output from filter back to
+// the caller.
+bool URLRequestJob::Read(IOBuffer* buf, int buf_size, int *bytes_read) {  // Returns false on error or pending IO.
+  bool rv = false;
+
+  DCHECK_LT(buf_size, 1000000);  // Sanity check.
+  DCHECK(buf);
+  DCHECK(bytes_read);
+  DCHECK(filtered_read_buffer_ == NULL);  // No overlapping filtered reads allowed.
+  DCHECK_EQ(0, filtered_read_buffer_len_);
+
+  *bytes_read = 0;
+
+  // Skip Filter if not present.
+  if (!filter_.get()) {
+    rv = ReadRawDataHelper(buf, buf_size, bytes_read);
+  } else {
+    // Save the caller's buffers while we do IO
+    // in the filter's buffers.
+    filtered_read_buffer_ = buf;
+    filtered_read_buffer_len_ = buf_size;
+
+    if (ReadFilteredData(bytes_read)) {
+      rv = true;   // We have data to return.
+
+      // It is fine to call DoneReading even if ReadFilteredData receives 0
+      // bytes from the net, but we avoid making that call if we know for
+      // sure that's the case (ReadRawDataHelper path).
+      if (*bytes_read == 0)
+        DoneReading();
+    } else {
+      rv = false;  // Error, or a new IO is pending.
+    }
+  }
+  if (rv && *bytes_read == 0)  // Success with zero bytes == end of stream.
+    NotifyDone(URLRequestStatus());
+  return rv;
+}
+
+void URLRequestJob::StopCaching() {  // Base-class default; cache-backed jobs override.
+  // Nothing to do here.
+}
+
+LoadState URLRequestJob::GetLoadState() const {  // Base-class default: always idle.
+  return LOAD_STATE_IDLE;
+}
+
+UploadProgress URLRequestJob::GetUploadProgress() const {  // Base-class default: empty (no upload).
+  return UploadProgress();
+}
+
+bool URLRequestJob::GetCharset(std::string* charset) {  // Base-class default: no charset known.
+  return false;
+}
+
+void URLRequestJob::GetResponseInfo(HttpResponseInfo* info) {  // Base-class no-op; HTTP jobs fill |info|.
+}
+
+bool URLRequestJob::GetResponseCookies(std::vector<std::string>* cookies) {  // Base-class default: no cookies.
+  return false;
+}
+
+Filter* URLRequestJob::SetupFilter() const {  // Base-class default: no content decoding filter.
+  return NULL;
+}
+
+bool URLRequestJob::IsRedirectResponse(GURL* location,  // Outputs resolved target and status on redirect.
+                                       int* http_status_code) {
+  // For non-HTTP jobs, headers will be null.
+  HttpResponseHeaders* headers = request_->response_headers();
+  if (!headers)
+    return false;
+
+  std::string value;
+  if (!headers->IsRedirect(&value))
+    return false;
+
+  *location = request_->url().Resolve(value);  // Location may be relative; resolve against the request URL.
+  *http_status_code = headers->response_code();
+  return true;
+}
+
+bool URLRequestJob::IsSafeRedirect(const GURL& location) {  // Base-class default: all redirects allowed.
+  return true;
+}
+
+bool URLRequestJob::NeedsAuth() {  // Base-class default: no authentication required.
+  return false;
+}
+
+void URLRequestJob::GetAuthChallengeInfo(
+    scoped_refptr<AuthChallengeInfo>* auth_info) {
+  // This will only be called if NeedsAuth() returns true, in which
+  // case the derived class should implement this!
+  NOTREACHED();
+}
+
+void URLRequestJob::SetAuth(const AuthCredentials& credentials) {
+  // This will only be called if NeedsAuth() returns true, in which
+  // case the derived class should implement this!
+  NOTREACHED();
+}
+
+void URLRequestJob::CancelAuth() {
+  // This will only be called if NeedsAuth() returns true, in which
+  // case the derived class should implement this!
+  NOTREACHED();
+}
+
+void URLRequestJob::ContinueWithCertificate(
+    X509Certificate* client_cert) {
+  // The derived class should implement this!
+  NOTREACHED();  // Only jobs that requested a client cert can resume with one.
+}
+
+void URLRequestJob::ContinueDespiteLastError() {
+  // Implementations should know how to recover from errors they generate.
+  // If this code was reached, we are trying to recover from an error that
+  // we don't know how to recover from.
+  NOTREACHED();
+}
+
+void URLRequestJob::FollowDeferredRedirect() {  // Resumes a redirect previously deferred by the delegate.
+  DCHECK(deferred_redirect_status_code_ != -1);
+
+  // NOTE: deferred_redirect_url_ may be invalid, and attempting to redirect to
+  // such an URL will fail inside FollowRedirect.  The DCHECK above asserts
+  // that we called OnReceivedRedirect.
+
+  // It is also possible that FollowRedirect will drop the last reference to
+  // this job, so we need to reset our members before calling it.
+
+  SetUnblockedOnDelegate();
+
+  GURL redirect_url = deferred_redirect_url_;  // Copy to locals before clearing the members.
+  int redirect_status_code = deferred_redirect_status_code_;
+
+  deferred_redirect_url_ = GURL();
+  deferred_redirect_status_code_ = -1;
+
+  FollowRedirect(redirect_url, redirect_status_code);
+}
+
+bool URLRequestJob::GetMimeType(std::string* mime_type) const {  // Base-class default: no MIME type known.
+  return false;
+}
+
+int URLRequestJob::GetResponseCode() const {  // Base-class default: -1 (no HTTP status).
+  return -1;
+}
+
+HostPortPair URLRequestJob::GetSocketAddress() const {  // Base-class default: empty host/port pair.
+  return HostPortPair();
+}
+
+void URLRequestJob::OnSuspend() {  // PowerObserver hook: abort the job when the system suspends.
+  Kill();
+}
+
+void URLRequestJob::NotifyURLRequestDestroyed() {  // Base-class no-op; subclasses needing cleanup override.
+}
+
+URLRequestJob::~URLRequestJob() {  // Unregisters the power observer added in the constructor.
+  base::SystemMonitor* system_monitor = base::SystemMonitor::Get();
+  if (system_monitor)
+    base::SystemMonitor::Get()->RemovePowerObserver(this);  // NOTE(review): second Get() could reuse |system_monitor|.
+}
+
+void URLRequestJob::NotifyCertificateRequested(  // Forwards a client-cert request to the URLRequest.
+    SSLCertRequestInfo* cert_request_info) {
+  if (!request_)
+    return;  // The request was destroyed, so there is no more work to do.
+
+  request_->NotifyCertificateRequested(cert_request_info);
+}
+
+void URLRequestJob::NotifySSLCertificateError(const SSLInfo& ssl_info,  // |fatal| forwarded unchanged.
+                                              bool fatal) {
+  if (!request_)
+    return;  // The request was destroyed, so there is no more work to do.
+
+  request_->NotifySSLCertificateError(ssl_info, fatal);
+}
+
+bool URLRequestJob::CanGetCookies(const CookieList& cookie_list) const {  // Delegates the policy check to the URLRequest.
+  if (!request_)
+    return false;  // The request was destroyed, so there is no more work to do.
+
+  return request_->CanGetCookies(cookie_list);
+}
+
+bool URLRequestJob::CanSetCookie(const std::string& cookie_line,  // Delegates the policy check to the URLRequest.
+                                 CookieOptions* options) const {
+  if (!request_)
+    return false;  // The request was destroyed, so there is no more work to do.
+
+  return request_->CanSetCookie(cookie_line, options);
+}
+
+void URLRequestJob::NotifyHeadersComplete() {  // Handles redirects/auth, then tells the request headers are in.
+  if (!request_ || !request_->has_delegate())
+    return;  // The request was destroyed, so there is no more work to do.
+
+  if (has_handled_response_)
+    return;
+
+  DCHECK(!request_->status().is_io_pending());
+
+  // Initialize to the current time, and let the subclass optionally override
+  // the time stamps if it has that information.  The default request_time is
+  // set by URLRequest before it calls our Start method.
+  request_->response_info_.response_time = base::Time::Now();
+  GetResponseInfo(&request_->response_info_);
+
+  // When notifying the delegate, the delegate can release the request
+  // (and thus release 'this').  After calling to the delgate, we must
+  // check the request pointer to see if it still exists, and return
+  // immediately if it has been destroyed.  self_preservation ensures our
+  // survival until we can get out of this method.
+  scoped_refptr<URLRequestJob> self_preservation(this);
+
+  GURL new_location;
+  int http_status_code;
+  if (IsRedirectResponse(&new_location, &http_status_code)) {
+    const GURL& url = request_->url();
+
+    // Move the reference fragment of the old location to the new one if the
+    // new one has none. This duplicates mozilla's behavior.
+    if (url.is_valid() && url.has_ref() && !new_location.has_ref()) {
+      GURL::Replacements replacements;
+      // Reference the |ref| directly out of the original URL to avoid a
+      // malloc.
+      replacements.SetRef(url.spec().data(),
+                          url.parsed_for_possibly_invalid_spec().ref);
+      new_location = new_location.ReplaceComponents(replacements);
+    }
+
+    bool defer_redirect = false;
+    request_->NotifyReceivedRedirect(new_location, &defer_redirect);
+
+    // Ensure that the request wasn't detached or destroyed in
+    // NotifyReceivedRedirect
+    if (!request_ || !request_->has_delegate())
+      return;
+
+    // If we were not cancelled, then maybe follow the redirect.
+    if (request_->status().is_success()) {
+      if (defer_redirect) {  // Stash the target; FollowDeferredRedirect() resumes later.
+        deferred_redirect_url_ = new_location;
+        deferred_redirect_status_code_ = http_status_code;
+        SetBlockedOnDelegate();
+      } else {
+        FollowRedirect(new_location, http_status_code);
+      }
+      return;
+    }
+  } else if (NeedsAuth()) {
+    scoped_refptr<AuthChallengeInfo> auth_info;
+    GetAuthChallengeInfo(&auth_info);
+    // Need to check for a NULL auth_info because the server may have failed
+    // to send a challenge with the 401 response.
+    if (auth_info) {
+      request_->NotifyAuthRequired(auth_info);
+      // Wait for SetAuth or CancelAuth to be called.
+      return;
+    }
+  }
+
+  has_handled_response_ = true;
+  if (request_->status().is_success())
+    filter_.reset(SetupFilter());
+
+  if (!filter_.get()) {  // Without a filter Content-Length is the expected body size.
+    std::string content_length;
+    request_->GetResponseHeaderByName("content-length", &content_length);
+    if (!content_length.empty())
+      base::StringToInt64(content_length, &expected_content_size_);
+  }
+
+  request_->NotifyResponseStarted();
+}
+
+void URLRequestJob::NotifyReadComplete(int bytes_read) {  // Routes raw bytes through the filter, then notifies.
+  if (!request_ || !request_->has_delegate())
+    return;  // The request was destroyed, so there is no more work to do.
+
+  // TODO(darin): Bug 1004233. Re-enable this test once all of the chrome
+  // unit_tests have been fixed to not trip this.
+  //DCHECK(!request_->status().is_io_pending());
+
+  // The headers should be complete before reads complete
+  DCHECK(has_handled_response_);
+
+  OnRawReadComplete(bytes_read);
+
+  // Don't notify if we had an error.
+  if (!request_->status().is_success())
+    return;
+
+  // When notifying the delegate, the delegate can release the request
+  // (and thus release 'this').  After calling to the delegate, we must
+  // check the request pointer to see if it still exists, and return
+  // immediately if it has been destroyed.  self_preservation ensures our
+  // survival until we can get out of this method.
+  scoped_refptr<URLRequestJob> self_preservation(this);
+
+  if (filter_.get()) {
+    // Tell the filter that it has more data
+    FilteredDataRead(bytes_read);
+
+    // Filter the data.
+    int filter_bytes_read = 0;
+    if (ReadFilteredData(&filter_bytes_read)) {
+      if (!filter_bytes_read)  // Zero filtered bytes on success == end of stream.
+        DoneReading();
+      request_->NotifyReadCompleted(filter_bytes_read);
+    }
+  } else {
+    request_->NotifyReadCompleted(bytes_read);
+  }
+  DVLOG(1) << __FUNCTION__ << "() "
+           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
+           << " pre bytes read = " << bytes_read
+           << " pre total = " << prefilter_bytes_read_
+           << " post total = " << postfilter_bytes_read_;
+}
+
+// Reports a failure that occurred before the response was handled.  Marks
+// the response as handled so OnResponseStarted is issued exactly once,
+// carrying the error |status|.
+void URLRequestJob::NotifyStartError(const URLRequestStatus &status) {
+  DCHECK(!has_handled_response_);
+  has_handled_response_ = true;
+  if (request_) {
+    request_->set_status(status);
+    request_->NotifyResponseStarted();
+  }
+}
+
+// Marks the job as done exactly once and records |status| on the request
+// (never overwriting an earlier failure with a later success).  The actual
+// delegate notification is deferred to CompleteNotifyDone via a posted
+// task so a synchronous completion cannot re-enter the delegate.
+void URLRequestJob::NotifyDone(const URLRequestStatus &status) {
+  DCHECK(!done_) << "Job sending done notification twice";
+  if (done_)
+    return;
+  done_ = true;
+
+  // Unless there was an error, we should have at least tried to handle
+  // the response before getting here.
+  DCHECK(has_handled_response_ || !status.is_success());
+
+  // As with NotifyReadComplete, we need to take care to notice if we were
+  // destroyed during a delegate callback.
+  if (request_) {
+    request_->set_is_pending(false);
+    // With async IO, it's quite possible to have a few outstanding
+    // requests.  We could receive a request to Cancel, followed shortly
+    // by a successful IO.  For tracking the status(), once there is
+    // an error, we do not change the status back to success.  To
+    // enforce this, only set the status if the job is so far
+    // successful.
+    if (request_->status().is_success()) {
+      if (status.status() == URLRequestStatus::FAILED) {
+        request_->net_log().AddEventWithNetErrorCode(NetLog::TYPE_FAILED,
+                                                     status.error());
+      }
+      request_->set_status(status);
+    }
+  }
+
+  // Complete this notification later.  This prevents us from re-entering the
+  // delegate if we're done because of a synchronous call.
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(&URLRequestJob::CompleteNotifyDone,
+                 weak_factory_.GetWeakPtr()));
+}
+
+// Second half of NotifyDone, run from a posted task to avoid re-entering
+// the delegate.  Only signals the delegate when the request ended in error;
+// a successful completion was already reported through the normal
+// response-started/read-completed callbacks.
+void URLRequestJob::CompleteNotifyDone() {
+  // Check if we should notify the delegate that we're done because of an error.
+  if (request_ &&
+      !request_->status().is_success() &&
+      request_->has_delegate()) {
+    // We report the error differently depending on whether we've called
+    // OnResponseStarted yet.
+    if (has_handled_response_) {
+      // We signal the error by calling OnReadComplete with a bytes_read of -1.
+      request_->NotifyReadCompleted(-1);
+    } else {
+      has_handled_response_ = true;
+      request_->NotifyResponseStarted();
+    }
+  }
+}
+
+// Idempotent cancellation hook: routes through NotifyDone with a
+// CANCELED/ERR_ABORTED status unless a done notification was already sent.
+void URLRequestJob::NotifyCanceled() {
+  if (!done_) {
+    NotifyDone(URLRequestStatus(URLRequestStatus::CANCELED, ERR_ABORTED));
+  }
+}
+
+// Asks the URLRequest to restart this job from scratch, unless the request
+// has already been canceled.  Must not be called once a response has been
+// handled.
+// NOTE(review): |request_| is dereferenced without a null check here --
+// presumably this is only reachable while the request is still attached;
+// confirm against callers.
+void URLRequestJob::NotifyRestartRequired() {
+  DCHECK(!has_handled_response_);
+  if (GetStatus().status() != URLRequestStatus::CANCELED)
+    request_->Restart();
+}
+
+// Forwards the "blocked on a network delegate" state change to the request.
+void URLRequestJob::SetBlockedOnDelegate() {
+  request_->SetBlockedOnDelegate();
+}
+
+// Forwards the "unblocked by the network delegate" state change to the
+// request.
+void URLRequestJob::SetUnblockedOnDelegate() {
+  request_->SetUnblockedOnDelegate();
+}
+
+// Default raw-read implementation: reports immediate success with zero
+// bytes read, i.e. end-of-stream.  Jobs that produce data override this.
+bool URLRequestJob::ReadRawData(IOBuffer* buf, int buf_size,
+                                int *bytes_read) {
+  DCHECK(bytes_read);
+  *bytes_read = 0;
+  return true;
+}
+
+// Hook invoked when a filter successfully reaches the end of the stream;
+// the base class has nothing to clean up.
+void URLRequestJob::DoneReading() {
+  // Do nothing.
+}
+
+// Informs the filter that |bytes_read| new bytes have been placed in its
+// stream buffer.
+void URLRequestJob::FilteredDataRead(int bytes_read) {
+  DCHECK(filter_.get());  // don't add data if there is no filter
+  filter_->FlushStreamBuffer(bytes_read);
+}
+
+// Pulls data through the installed filter into |filtered_read_buffer_|.
+// Returns true when the read finished (|*bytes_read| holds the post-filter
+// count, 0 meaning EOF) and false when IO is pending or an error was
+// reported via NotifyDone.  May recurse to refill the filter when it has
+// consumed input without producing output yet.
+bool URLRequestJob::ReadFilteredData(int* bytes_read) {
+  DCHECK(filter_.get());  // don't add data if there is no filter
+  DCHECK(filtered_read_buffer_ != NULL);  // we need to have a buffer to fill
+  DCHECK_GT(filtered_read_buffer_len_, 0);  // sanity check
+  DCHECK_LT(filtered_read_buffer_len_, 1000000);  // sanity check
+  DCHECK(raw_read_buffer_ == NULL);  // there should be no raw read buffer yet
+
+  bool rv = false;
+  *bytes_read = 0;
+
+  if (is_done())
+    return true;
+
+  if (!filter_needs_more_output_space_ && !filter_->stream_data_len()) {
+    // We don't have any raw data to work with, so
+    // read from the socket.
+    int filtered_data_read;
+    if (ReadRawDataForFilter(&filtered_data_read)) {
+      if (filtered_data_read > 0) {
+        filter_->FlushStreamBuffer(filtered_data_read);  // Give data to filter.
+      } else {
+        return true;  // EOF
+      }
+    } else {
+      return false;  // IO Pending (or error)
+    }
+  }
+
+  if ((filter_->stream_data_len() || filter_needs_more_output_space_)
+      && !is_done()) {
+    // Get filtered data.
+    int filtered_data_len = filtered_read_buffer_len_;
+    Filter::FilterStatus status;
+    // ReadData() rewrites |filtered_data_len| in place; remember the
+    // original capacity so we can tell below whether the output buffer was
+    // completely filled.
+    int output_buffer_size = filtered_data_len;
+    status = filter_->ReadData(filtered_read_buffer_->data(),
+                               &filtered_data_len);
+
+    if (filter_needs_more_output_space_ && 0 == filtered_data_len) {
+      // filter_needs_more_output_space_ was mistaken... there are no more bytes
+      // and we should have at least tried to fill up the filter's input buffer.
+      // Correct the state, and try again.
+      filter_needs_more_output_space_ = false;
+      return ReadFilteredData(bytes_read);
+    }
+
+    switch (status) {
+      case Filter::FILTER_DONE: {
+        filter_needs_more_output_space_ = false;
+        *bytes_read = filtered_data_len;
+        postfilter_bytes_read_ += filtered_data_len;
+        rv = true;
+        break;
+      }
+      case Filter::FILTER_NEED_MORE_DATA: {
+        filter_needs_more_output_space_ =
+            (filtered_data_len == output_buffer_size);
+        // We have finished filtering all data currently in the buffer.
+        // There might be some space left in the output buffer. One can
+        // consider reading more data from the stream to feed the filter
+        // and filling up the output buffer. This leads to more complicated
+        // buffer management and data notification mechanisms.
+        // We can revisit this issue if there is a real perf need.
+        if (filtered_data_len > 0) {
+          *bytes_read = filtered_data_len;
+          postfilter_bytes_read_ += filtered_data_len;
+          rv = true;
+        } else {
+          // Read again since we haven't received enough data yet (e.g., we may
+          // not have a complete gzip header yet)
+          rv = ReadFilteredData(bytes_read);
+        }
+        break;
+      }
+      case Filter::FILTER_OK: {
+        filter_needs_more_output_space_ =
+            (filtered_data_len == output_buffer_size);
+        *bytes_read = filtered_data_len;
+        postfilter_bytes_read_ += filtered_data_len;
+        rv = true;
+        break;
+      }
+      case Filter::FILTER_ERROR: {
+        DVLOG(1) << __FUNCTION__ << "() "
+                 << "\"" << (request_ ? request_->url().spec() : "???") << "\""
+                 << " Filter Error";
+        filter_needs_more_output_space_ = false;
+        NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+                   ERR_CONTENT_DECODING_FAILED));
+        rv = false;
+        break;
+      }
+      default: {
+        NOTREACHED();
+        filter_needs_more_output_space_ = false;
+        rv = false;
+        break;
+      }
+    }
+    DVLOG(2) << __FUNCTION__ << "() "
+             << "\"" << (request_ ? request_->url().spec() : "???") << "\""
+             << " rv = " << rv
+             << " post bytes read = " << filtered_data_len
+             << " pre total = " << prefilter_bytes_read_
+             << " post total = "
+             << postfilter_bytes_read_;
+    // If logging all bytes is enabled, log the filtered bytes read.
+    if (rv && request() && request()->net_log().IsLoggingBytes() &&
+        filtered_data_len > 0) {
+      request()->net_log().AddByteTransferEvent(
+          NetLog::TYPE_URL_REQUEST_JOB_FILTERED_BYTES_READ,
+          filtered_data_len, filtered_read_buffer_->data());
+    }
+  } else {
+    // we are done, or there is no data left.
+    rv = true;
+  }
+
+  if (rv) {
+    // When we successfully finished a read, we no longer need to
+    // save the caller's buffers. Release our reference.
+    filtered_read_buffer_ = NULL;
+    filtered_read_buffer_len_ = 0;
+  }
+  return rv;
+}
+
+// Returns the owning request's status, or a CANCELED/ERR_ABORTED status
+// when the request has been detached.
+const URLRequestStatus URLRequestJob::GetStatus() {
+  if (request_)
+    return request_->status();
+  // If the request is gone, we must be cancelled.
+  return URLRequestStatus(URLRequestStatus::CANCELED,
+                          ERR_ABORTED);
+}
+
+// Copies |status| onto the owning request, if it still exists.
+void URLRequestJob::SetStatus(const URLRequestStatus &status) {
+  if (request_)
+    request_->set_status(status);
+}
+
+// Reads raw bytes into the filter's own stream buffer, but only when the
+// filter has no buffered input left and the job is not done.  Returns true
+// if raw data was read; false on error or pending IO (and also when no
+// read was attempted because the filter still has data).
+bool URLRequestJob::ReadRawDataForFilter(int* bytes_read) {
+  bool rv = false;
+
+  DCHECK(bytes_read);
+  DCHECK(filter_.get());
+
+  *bytes_read = 0;
+
+  // Get more pre-filtered data if needed.
+  // TODO(mbelshe): is it possible that the filter needs *MORE* data
+  //    when there is some data already in the buffer?
+  if (!filter_->stream_data_len() && !is_done()) {
+    IOBuffer* stream_buffer = filter_->stream_buffer();
+    int stream_buffer_size = filter_->stream_buffer_size();
+    rv = ReadRawDataHelper(stream_buffer, stream_buffer_size, bytes_read);
+  }
+  return rv;
+}
+
+// Wraps ReadRawData: pins |buf| in |raw_read_buffer_| for the duration of
+// the read and, when the read completes synchronously (success or failure),
+// performs byte accounting (and optional net-log byte dumping) right away.
+// For async reads, OnRawReadComplete() runs later from NotifyReadComplete.
+bool URLRequestJob::ReadRawDataHelper(IOBuffer* buf, int buf_size,
+                                      int* bytes_read) {
+  DCHECK(!request_->status().is_io_pending());
+  DCHECK(raw_read_buffer_ == NULL);
+
+  // Keep a pointer to the read buffer, so we have access to it in the
+  // OnRawReadComplete() callback in the event that the read completes
+  // asynchronously.
+  raw_read_buffer_ = buf;
+  bool rv = ReadRawData(buf, buf_size, bytes_read);
+
+  if (!request_->status().is_io_pending()) {
+    // If |filter_| is NULL, and logging all bytes is enabled, log the raw
+    // bytes read.
+    if (!filter_.get() && request() && request()->net_log().IsLoggingBytes() &&
+        *bytes_read > 0) {
+      request()->net_log().AddByteTransferEvent(
+          NetLog::TYPE_URL_REQUEST_JOB_BYTES_READ,
+          *bytes_read, raw_read_buffer_->data());
+    }
+
+    // If the read completes synchronously, either success or failure,
+    // invoke the OnRawReadComplete callback so we can account for the
+    // completed read.
+    OnRawReadComplete(*bytes_read);
+  }
+  return rv;
+}
+
+// Asks the request to follow a redirect to |location|; a failing result is
+// converted into a done notification carrying the network error code.
+void URLRequestJob::FollowRedirect(const GURL& location, int http_status_code) {
+  int rv = request_->Redirect(location, http_status_code);
+  if (rv != OK)
+    NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
+}
+
+// Bookkeeping after every raw read: records bytes on a successful read and
+// releases the pinned read buffer reference.
+void URLRequestJob::OnRawReadComplete(int bytes_read) {
+  DCHECK(raw_read_buffer_);
+  if (bytes_read > 0) {
+    RecordBytesRead(bytes_read);
+  }
+  raw_read_buffer_ = NULL;
+}
+
+// Updates the pre-/post-filter byte counters for |bytes_read| new raw
+// bytes and informs the network delegate.  When no filter is installed,
+// raw bytes count as post-filter bytes too.
+void URLRequestJob::RecordBytesRead(int bytes_read) {
+  filter_input_byte_count_ += bytes_read;
+  prefilter_bytes_read_ += bytes_read;
+  if (!filter_.get())
+    postfilter_bytes_read_ += bytes_read;
+  DVLOG(2) << __FUNCTION__ << "() "
+           << "\"" << (request_ ? request_->url().spec() : "???") << "\""
+           << " pre bytes read = " << bytes_read
+           << " pre total = " << prefilter_bytes_read_
+           << " post total = " << postfilter_bytes_read_;
+  UpdatePacketReadTimes();  // Facilitate stats recording if it is active.
+  if (network_delegate_)
+    network_delegate_->NotifyRawBytesRead(*request_, bytes_read);
+}
+
+// True when a filter is installed and it still holds unconsumed input.
+bool URLRequestJob::FilterHasData() {
+    return filter_.get() && filter_->stream_data_len();
+}
+
+// Default no-op; subclasses may override to record packet arrival times.
+void URLRequestJob::UpdatePacketReadTimes() {
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_job.h b/src/net/url_request/url_request_job.h
new file mode 100644
index 0000000..3178285
--- /dev/null
+++ b/src/net/url_request/url_request_job.h
@@ -0,0 +1,388 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_JOB_H_
+#define NET_URL_REQUEST_URL_REQUEST_JOB_H_
+
+#include <string>
+#include <vector>
+
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/system_monitor/system_monitor.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/filter.h"
+#include "net/base/host_port_pair.h"
+#include "net/base/load_states.h"
+#include "net/base/net_export.h"
+#include "net/base/upload_progress.h"
+#include "net/cookies/canonical_cookie.h"
+
+namespace net {
+
+class AuthChallengeInfo;
+class AuthCredentials;
+class CookieOptions;
+class HttpRequestHeaders;
+class HttpResponseInfo;
+class IOBuffer;
+class NetworkDelegate;
+class SSLCertRequestInfo;
+class SSLInfo;
+class URLRequest;
+class UploadDataStream;
+class URLRequestStatus;
+class X509Certificate;
+
+class NET_EXPORT URLRequestJob : public base::RefCounted<URLRequestJob>,
+                                 public base::SystemMonitor::PowerObserver {
+ public:
+  explicit URLRequestJob(URLRequest* request,
+                         NetworkDelegate* network_delegate);
+
+  // Returns the request that owns this job. THIS POINTER MAY BE NULL if the
+  // request was destroyed.
+  URLRequest* request() const {
+    return request_;
+  }
+
+  // Sets the upload data, most requests have no upload data, so this is a NOP.
+  // Job types supporting upload data will override this.
+  virtual void SetUpload(UploadDataStream* upload_data_stream);
+
+  // Sets extra request headers for Job types that support request headers.
+  virtual void SetExtraRequestHeaders(const HttpRequestHeaders& headers);
+
+  // If any error occurs while starting the Job, NotifyStartError should be
+  // called.
+  // This helps ensure that all errors follow more similar notification code
+  // paths, which should simplify testing.
+  virtual void Start() = 0;
+
+  // This function MUST somehow call NotifyDone/NotifyCanceled or some requests
+  // will get leaked. Certain callers use that message to know when they can
+  // delete their URLRequest object, even when doing a cancel. The default
+  // Kill implementation calls NotifyCanceled, so it is recommended that
+  // subclasses call URLRequestJob::Kill() after doing any additional work.
+  //
+  // The job should endeavor to stop working as soon as is convenient, but must
+  // not send and complete notifications from inside this function. Instead,
+  // complete notifications (including "canceled") should be sent from a
+  // callback run from the message loop.
+  //
+  // The job is not obliged to immediately stop sending data in response to
+  // this call, nor is it obliged to fail with "canceled" unless not all data
+  // was sent as a result. A typical case would be where the job is almost
+  // complete and can succeed before the canceled notification can be
+  // dispatched (from the message loop).
+  //
+  // The job should be prepared to receive multiple calls to kill it, but only
+  // one notification must be issued.
+  virtual void Kill();
+
+  // Called to detach the request from this Job.  Results in the Job being
+  // killed off eventually. The job must not use the request pointer any more.
+  void DetachRequest();
+
+  // Called to read post-filtered data from this Job, returning the number of
+  // bytes read, 0 when there is no more data, or -1 if there was an error.
+  // This is just the backend for URLRequest::Read, see that function for
+  // more info.
+  bool Read(IOBuffer* buf, int buf_size, int* bytes_read);
+
+  // Stops further caching of this request, if any. For more info, see
+  // URLRequest::StopCaching().
+  virtual void StopCaching();
+
+  // Called to fetch the current load state for the job.
+  virtual LoadState GetLoadState() const;
+
+  // Called to get the upload progress in bytes.
+  virtual UploadProgress GetUploadProgress() const;
+
+  // Called to fetch the charset for this request.  Only makes sense for some
+  // types of requests. Returns true on success.  Calling this on a type that
+  // doesn't have a charset will return false.
+  virtual bool GetCharset(std::string* charset);
+
+  // Called to get response info.
+  virtual void GetResponseInfo(HttpResponseInfo* info);
+
+  // Returns the cookie values included in the response, if applicable.
+  // Returns true if applicable.
+  // NOTE: This removes the cookies from the job, so it will only return
+  //       useful results once per job.
+  virtual bool GetResponseCookies(std::vector<std::string>* cookies);
+
+  // Called to setup a stream filter for this request. An example of filter is
+  // content encoding/decoding.
+  // Subclasses should return the appropriate Filter, or NULL for no Filter.
+  // This class takes ownership of the returned Filter.
+  //
+  // The default implementation returns NULL.
+  virtual Filter* SetupFilter() const;
+
+  // Called to determine if this response is a redirect.  Only makes sense
+  // for some types of requests.  This method returns true if the response
+  // is a redirect, and fills in the location param with the URL of the
+  // redirect.  The HTTP status code (e.g., 302) is filled into
+  // |*http_status_code| to signify the type of redirect.
+  //
+  // The caller is responsible for following the redirect by setting up an
+  // appropriate replacement Job. Note that the redirected location may be
+  // invalid, the caller should be sure it can handle this.
+  //
+  // The default implementation inspects the response_info_.
+  virtual bool IsRedirectResponse(GURL* location, int* http_status_code);
+
+  // Called to determine if it is okay to redirect this job to the specified
+  // location.  This may be used to implement protocol-specific restrictions.
+  // If this function returns false, then the URLRequest will fail
+  // reporting ERR_UNSAFE_REDIRECT.
+  virtual bool IsSafeRedirect(const GURL& location);
+
+  // Called to determine if this response is asking for authentication.  Only
+  // makes sense for some types of requests.  The caller is responsible for
+  // obtaining the credentials passing them to SetAuth.
+  virtual bool NeedsAuth();
+
+  // Fills the authentication info with the server's response.
+  virtual void GetAuthChallengeInfo(
+      scoped_refptr<AuthChallengeInfo>* auth_info);
+
+  // Resend the request with authentication credentials.
+  virtual void SetAuth(const AuthCredentials& credentials);
+
+  // Display the error page without asking for credentials again.
+  virtual void CancelAuth();
+
+  virtual void ContinueWithCertificate(X509Certificate* client_cert);
+
+  // Continue processing the request ignoring the last error.
+  virtual void ContinueDespiteLastError();
+
+  void FollowDeferredRedirect();
+
+  // Returns true if the Job is done producing response data and has called
+  // NotifyDone on the request.
+  bool is_done() const { return done_; }
+
+  // Get/Set expected content size
+  int64 expected_content_size() const { return expected_content_size_; }
+  void set_expected_content_size(const int64& size) {
+    expected_content_size_ = size;
+  }
+
+  // Whether we have processed the response for that request yet.
+  bool has_response_started() const { return has_handled_response_; }
+
+  // These methods are not applicable to all connections.
+  virtual bool GetMimeType(std::string* mime_type) const;
+  virtual int GetResponseCode() const;
+
+  // Returns the socket address for the connection.
+  // See url_request.h for details.
+  virtual HostPortPair GetSocketAddress() const;
+
+  // base::SystemMonitor::PowerObserver methods:
+  // We invoke URLRequestJob::Kill on suspend (crbug.com/4606).
+  virtual void OnSuspend() OVERRIDE;
+
+  // Called after a NetworkDelegate has been informed that the URLRequest
+  // will be destroyed. This is used to track that no pending callbacks
+  // exist at destruction time of the URLRequestJob, unless they have been
+  // canceled by an explicit NetworkDelegate::NotifyURLRequestDestroyed() call.
+  virtual void NotifyURLRequestDestroyed();
+
+ protected:
+  friend class base::RefCounted<URLRequestJob>;
+  virtual ~URLRequestJob();
+
+  // Notifies the job that a certificate is requested.
+  void NotifyCertificateRequested(SSLCertRequestInfo* cert_request_info);
+
+  // Notifies the job about an SSL certificate error.
+  void NotifySSLCertificateError(const SSLInfo& ssl_info, bool fatal);
+
+  // Delegates to URLRequest::Delegate.
+  bool CanGetCookies(const CookieList& cookie_list) const;
+
+  // Delegates to URLRequest::Delegate.
+  bool CanSetCookie(const std::string& cookie_line,
+                    CookieOptions* options) const;
+
+  // Notifies the job that headers have been received.
+  void NotifyHeadersComplete();
+
+  // Notifies the request that the job has completed a Read operation.
+  void NotifyReadComplete(int bytes_read);
+
+  // Notifies the request that a start error has occurred.
+  void NotifyStartError(const URLRequestStatus& status);
+
+  // NotifyDone marks when we are done with a request.  It is really
+  // a glorified set_status, but also does internal state checking and
+  // job tracking.  It should be called once per request, when the job is
+  // finished doing all IO.
+  void NotifyDone(const URLRequestStatus& status);
+
+  // Some work performed by NotifyDone must be completed on a separate task
+  // so as to avoid re-entering the delegate.  This method exists to perform
+  // that work.
+  void CompleteNotifyDone();
+
+  // Used as an asynchronous callback for Kill to notify the URLRequest
+  // that we were canceled.
+  void NotifyCanceled();
+
+  // Notifies the job the request should be restarted.
+  // Should only be called if the job has not started a response.
+  void NotifyRestartRequired();
+
+  // Called when the network delegate blocks or unblocks this request when
+  // intercepting certain requests.
+  void SetBlockedOnDelegate();
+  void SetUnblockedOnDelegate();
+
+  // Called to read raw (pre-filtered) data from this Job.
+  // If returning true, data was read from the job.  buf will contain
+  // the data, and bytes_read will receive the number of bytes read.
+  // If returning true, and bytes_read is returned as 0, there is no
+  // additional data to be read.
+  // If returning false, an error occurred or an async IO is now pending.
+  // If async IO is pending, the status of the request will be
+  // URLRequestStatus::IO_PENDING, and buf must remain available until the
+  // operation is completed.  See comments on URLRequest::Read for more
+  // info.
+  virtual bool ReadRawData(IOBuffer* buf, int buf_size, int *bytes_read);
+
+  // Called to tell the job that a filter has successfully reached the end of
+  // the stream.
+  virtual void DoneReading();
+
+  // Informs the filter that data has been read into its buffer
+  void FilteredDataRead(int bytes_read);
+
+  // Reads filtered data from the request.  Returns true if successful,
+  // false otherwise.  Note, if there is not enough data received to
+  // return data, this call can issue a new async IO request under
+  // the hood.
+  bool ReadFilteredData(int *bytes_read);
+
+  // Whether the response is being filtered in this job.
+  // Only valid after NotifyHeadersComplete() has been called.
+  bool HasFilter() { return filter_ != NULL; }
+
+  // At or near destruction time, a derived class may request that the filters
+  // be destroyed so that statistics can be gathered while the derived class is
+  // still present to assist in calculations.  This is used by URLRequestHttpJob
+  // to get SDCH to emit stats.
+  void DestroyFilters() { filter_.reset(); }
+
+  // The status of the job.
+  const URLRequestStatus GetStatus();
+
+  // Set the status of the job.
+  void SetStatus(const URLRequestStatus& status);
+
+  // The number of bytes read before passing to the filter.
+  int prefilter_bytes_read() const { return prefilter_bytes_read_; }
+
+  // The number of bytes read after passing through the filter.
+  int postfilter_bytes_read() const { return postfilter_bytes_read_; }
+
+  // Total number of bytes read from network (or cache) and typically handed
+  // to filter to process.  Used to histogram compression ratios, and error
+  // recovery scenarios in filters.
+  int64 filter_input_byte_count() const { return filter_input_byte_count_; }
+
+  // The request that initiated this job. This value MAY BE NULL if the
+  // request was released by DetachRequest().
+  URLRequest* request_;
+
+ private:
+  // When data filtering is enabled, this function is used to read data
+  // for the filter.  Returns true if raw data was read.  Returns false if
+  // an error occurred (or we are waiting for IO to complete).
+  bool ReadRawDataForFilter(int *bytes_read);
+
+  // Invokes ReadRawData and records bytes read if the read completes
+  // synchronously.
+  bool ReadRawDataHelper(IOBuffer* buf, int buf_size, int* bytes_read);
+
+  // Called in response to a redirect that was not canceled to follow the
+  // redirect. The current job will be replaced with a new job loading the
+  // given redirect destination.
+  void FollowRedirect(const GURL& location, int http_status_code);
+
+  // Called after every raw read. If |bytes_read| is > 0, this indicates
+  // a successful read of |bytes_read| unfiltered bytes. If |bytes_read|
+  // is 0, this indicates that there is no additional data to read. If
+  // |bytes_read| is < 0, an error occurred and no bytes were read.
+  void OnRawReadComplete(int bytes_read);
+
+  // Updates the profiling info and notifies observers that an additional
+  // |bytes_read| unfiltered bytes have been read for this job.
+  void RecordBytesRead(int bytes_read);
+
+  // Called to query whether there is data available in the filter to be read
+  // out.
+  bool FilterHasData();
+
+  // Subclasses may implement this method to record packet arrival times.
+  // The default implementation does nothing.
+  virtual void UpdatePacketReadTimes();
+
+  // Custom handler for derived classes when the request is detached.
+  virtual void OnDetachRequest() {}
+
+  // Indicates that the job is done producing data, either it has completed
+  // all the data or an error has been encountered. Set exclusively by
+  // NotifyDone so that it is kept in sync with the request.
+  bool done_;
+
+  int prefilter_bytes_read_;
+  int postfilter_bytes_read_;
+  int64 filter_input_byte_count_;
+
+  // The data stream filter which is enabled on demand.
+  scoped_ptr<Filter> filter_;
+
+  // If the filter filled its output buffer, then there is a chance that it
+  // still has internal data to emit, and this flag is set.
+  bool filter_needs_more_output_space_;
+
+  // When we filter data, we receive data into the filter buffers.  After
+  // processing the filtered data, we return the data in the caller's buffer.
+  // While the async IO is in progress, we save the user buffer here, and
+  // when the IO completes, we fill this in.
+  scoped_refptr<IOBuffer> filtered_read_buffer_;
+  int filtered_read_buffer_len_;
+
+  // We keep a pointer to the read buffer while asynchronous reads are
+  // in progress, so we are able to pass those bytes to job observers.
+  scoped_refptr<IOBuffer> raw_read_buffer_;
+
+  // Used by HandleResponseIfNecessary to track whether we've sent the
+  // OnResponseStarted callback and potentially redirect callbacks as well.
+  bool has_handled_response_;
+
+  // Expected content size
+  int64 expected_content_size_;
+
+  // Set when a redirect is deferred.
+  GURL deferred_redirect_url_;
+  int deferred_redirect_status_code_;
+
+  NetworkDelegate* network_delegate_;
+
+  base::WeakPtrFactory<URLRequestJob> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(URLRequestJob);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_JOB_H_
diff --git a/src/net/url_request/url_request_job_factory.cc b/src/net/url_request/url_request_job_factory.cc
new file mode 100644
index 0000000..e07152a
--- /dev/null
+++ b/src/net/url_request/url_request_job_factory.cc
@@ -0,0 +1,22 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_job_factory.h"
+
+namespace net {
+
+// Out-of-line destructor definition for the abstract ProtocolHandler.
+URLRequestJobFactory::ProtocolHandler::~ProtocolHandler() {}
+
+// Out-of-line destructor definition for the abstract Interceptor.
+URLRequestJobFactory::Interceptor::~Interceptor() {}
+
+// Default: an Interceptor lays no claim to any protocol.  Per the header
+// comment, returning false does not mean it won't handle such requests.
+bool URLRequestJobFactory::Interceptor::WillHandleProtocol(
+    const std::string& protocol) const {
+  return false;
+}
+
+// Out-of-line constructor; nothing to initialize in the base class.
+URLRequestJobFactory::URLRequestJobFactory() {}
+
+// Out-of-line destructor; nothing to tear down in the base class.
+URLRequestJobFactory::~URLRequestJobFactory() {}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_job_factory.h b/src/net/url_request/url_request_job_factory.h
new file mode 100644
index 0000000..adff7f1
--- /dev/null
+++ b/src/net/url_request/url_request_job_factory.h
@@ -0,0 +1,117 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_JOB_FACTORY_H_
+#define NET_URL_REQUEST_URL_REQUEST_JOB_FACTORY_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/threading/non_thread_safe.h"
+#include "net/base/net_export.h"
+
+class GURL;
+
+namespace net {
+
+class NetworkDelegate;
+class URLRequest;
+class URLRequestJob;
+
+class NET_EXPORT URLRequestJobFactory
+    : NON_EXPORTED_BASE(public base::NonThreadSafe) {
+ public:
+  // TODO(shalev): Move this to URLRequestJobFactoryImpl.
+  class NET_EXPORT ProtocolHandler {
+   public:
+    virtual ~ProtocolHandler();
+
+    virtual URLRequestJob* MaybeCreateJob(
+        URLRequest* request, NetworkDelegate* network_delegate) const = 0;
+  };
+
+  // TODO(shalev): Move this to URLRequestJobFactoryImpl.
+  class NET_EXPORT Interceptor {
+   public:
+    virtual ~Interceptor();
+
+    // Called for every request made.  Should return a new job to handle the
+    // request if it should be intercepted, or NULL to allow the request to
+    // be handled in the normal manner.
+    virtual URLRequestJob* MaybeIntercept(
+        URLRequest* request, NetworkDelegate* network_delegate) const = 0;
+
+    // Called after having received a redirect response, but prior to the
+    // request delegate being informed of the redirect. Can return a new
+    // job to replace the existing job if it should be intercepted, or NULL
+    // to allow the normal handling to continue. If a new job is provided,
+    // the delegate never sees the original redirect response, instead the
+    // response produced by the intercept job will be returned.
+    virtual URLRequestJob* MaybeInterceptRedirect(
+        const GURL& location,
+        URLRequest* request,
+        NetworkDelegate* network_delegate) const = 0;
+
+    // Called after having received a final response, but prior to the
+    // request delegate being informed of the response. This is also
+    // called when there is no server response at all to allow interception
+    // on DNS or network errors. Can return a new job to replace the existing
+    // job if it should be intercepted, or NULL to allow the normal handling to
+    // continue. If a new job is provided, the delegate never sees the original
+    // response, instead the response produced by the intercept job will be
+    // returned.
+    virtual URLRequestJob* MaybeInterceptResponse(
+        URLRequest* request, NetworkDelegate* network_delegate) const = 0;
+
+    // Returns true if this interceptor handles requests for URLs with the
+    // given protocol. Returning false does not imply that this interceptor
+    // can't or won't handle requests with the given protocol.
+    virtual bool WillHandleProtocol(const std::string& protocol) const;
+  };
+
+  URLRequestJobFactory();
+  virtual ~URLRequestJobFactory();
+
+  // TODO(shalev): Remove this from the interface.
+  // Sets the ProtocolHandler for a scheme. Returns true on success, false on
+  // failure (a ProtocolHandler already exists for |scheme|). On success,
+  // URLRequestJobFactory takes ownership of |protocol_handler|.
+  virtual bool SetProtocolHandler(const std::string& scheme,
+                                  ProtocolHandler* protocol_handler) = 0;
+
+  // TODO(shalev): Remove this from the interface.
+  // Takes ownership of |interceptor|. Adds it to the end of the Interceptor
+  // list.
+  virtual void AddInterceptor(Interceptor* interceptor) = 0;
+
+  // TODO(shalev): Consolidate MaybeCreateJobWithInterceptor and
+  // MaybeCreateJobWithProtocolHandler into a single method.
+  virtual URLRequestJob* MaybeCreateJobWithInterceptor(
+      URLRequest* request, NetworkDelegate* network_delegate) const = 0;
+
+  virtual URLRequestJob* MaybeCreateJobWithProtocolHandler(
+      const std::string& scheme,
+      URLRequest* request,
+      NetworkDelegate* network_delegate) const = 0;
+
+  virtual URLRequestJob* MaybeInterceptRedirect(
+      const GURL& location,
+      URLRequest* request,
+      NetworkDelegate* network_delegate) const = 0;
+
+  virtual URLRequestJob* MaybeInterceptResponse(
+      URLRequest* request, NetworkDelegate* network_delegate) const = 0;
+
+  virtual bool IsHandledProtocol(const std::string& scheme) const = 0;
+
+  virtual bool IsHandledURL(const GURL& url) const = 0;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(URLRequestJobFactory);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_JOB_FACTORY_H_
diff --git a/src/net/url_request/url_request_job_factory_impl.cc b/src/net/url_request/url_request_job_factory_impl.cc
new file mode 100644
index 0000000..aaeed79
--- /dev/null
+++ b/src/net/url_request/url_request_job_factory_impl.cc
@@ -0,0 +1,130 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_job_factory_impl.h"
+
+#include "base/stl_util.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/load_flags.h"
+#include "net/url_request/url_request_job_manager.h"
+
+namespace net {
+
+URLRequestJobFactoryImpl::URLRequestJobFactoryImpl() {}
+
+// The factory owns every registered ProtocolHandler and Interceptor, so the
+// destructor deletes whatever is still registered at teardown.
+URLRequestJobFactoryImpl::~URLRequestJobFactoryImpl() {
+  STLDeleteValues(&protocol_handler_map_);
+  STLDeleteElements(&interceptors_);
+}
+
+// Registers |protocol_handler| for |scheme|, taking ownership of it.  Passing
+// a NULL handler instead removes (and deletes) whatever handler is currently
+// registered for |scheme|.  Returns false if the removal target is missing or
+// if a handler for |scheme| already exists; it never silently replaces one.
+bool URLRequestJobFactoryImpl::SetProtocolHandler(
+    const std::string& scheme,
+    ProtocolHandler* protocol_handler) {
+  DCHECK(CalledOnValidThread());
+
+  // NULL handler: treat the call as a removal request.
+  if (!protocol_handler) {
+    ProtocolHandlerMap::iterator it = protocol_handler_map_.find(scheme);
+    if (it == protocol_handler_map_.end())
+      return false;
+
+    delete it->second;
+    protocol_handler_map_.erase(it);
+    return true;
+  }
+
+  // Refuse to overwrite an existing registration.
+  if (ContainsKey(protocol_handler_map_, scheme))
+    return false;
+  protocol_handler_map_[scheme] = protocol_handler;
+  return true;
+}
+
+// Appends |interceptor| to the end of the interception chain, taking
+// ownership.  A NULL interceptor is a fatal error (CHECK), not a no-op.
+void URLRequestJobFactoryImpl::AddInterceptor(Interceptor* interceptor) {
+  DCHECK(CalledOnValidThread());
+  CHECK(interceptor);
+
+  interceptors_.push_back(interceptor);
+}
+
+// Offers |request| to each interceptor in registration order; the first one
+// to return a job wins.  Interception is skipped entirely when the request
+// sets LOAD_DISABLE_INTERCEPT.  Returns NULL if nobody intercepts.
+URLRequestJob* URLRequestJobFactoryImpl::MaybeCreateJobWithInterceptor(
+    URLRequest* request, NetworkDelegate* network_delegate) const {
+  DCHECK(CalledOnValidThread());
+  URLRequestJob* job = NULL;
+
+  if (!(request->load_flags() & LOAD_DISABLE_INTERCEPT)) {
+    InterceptorList::const_iterator i;
+    for (i = interceptors_.begin(); i != interceptors_.end(); ++i) {
+      job = (*i)->MaybeIntercept(request, network_delegate);
+      if (job)
+        return job;
+    }
+  }
+  return NULL;
+}
+
+// Delegates job creation to the ProtocolHandler registered for |scheme|, or
+// returns NULL when no handler is registered (the handler itself may also
+// decline by returning NULL).
+URLRequestJob* URLRequestJobFactoryImpl::MaybeCreateJobWithProtocolHandler(
+    const std::string& scheme,
+    URLRequest* request,
+    NetworkDelegate* network_delegate) const {
+  DCHECK(CalledOnValidThread());
+  ProtocolHandlerMap::const_iterator it = protocol_handler_map_.find(scheme);
+  if (it == protocol_handler_map_.end())
+    return NULL;
+  return it->second->MaybeCreateJob(request, network_delegate);
+}
+
+// Gives each interceptor (in order) a chance to hijack a redirect to
+// |location|; first non-NULL job wins.  Honors LOAD_DISABLE_INTERCEPT.
+URLRequestJob* URLRequestJobFactoryImpl::MaybeInterceptRedirect(
+    const GURL& location,
+    URLRequest* request,
+    NetworkDelegate* network_delegate) const {
+  DCHECK(CalledOnValidThread());
+  URLRequestJob* job = NULL;
+
+  if (!(request->load_flags() & LOAD_DISABLE_INTERCEPT)) {
+    InterceptorList::const_iterator i;
+    for (i = interceptors_.begin(); i != interceptors_.end(); ++i) {
+      job = (*i)->MaybeInterceptRedirect(location, request, network_delegate);
+      if (job)
+        return job;
+    }
+  }
+  return NULL;
+}
+
+// Gives each interceptor (in order) a chance to replace the response job;
+// first non-NULL job wins.  Honors LOAD_DISABLE_INTERCEPT.
+URLRequestJob* URLRequestJobFactoryImpl::MaybeInterceptResponse(
+    URLRequest* request, NetworkDelegate* network_delegate) const {
+  DCHECK(CalledOnValidThread());
+  URLRequestJob* job = NULL;
+
+  if (!(request->load_flags() & LOAD_DISABLE_INTERCEPT)) {
+    InterceptorList::const_iterator i;
+    for (i = interceptors_.begin(); i != interceptors_.end(); ++i) {
+      job = (*i)->MaybeInterceptResponse(request, network_delegate);
+      if (job)
+        return job;
+    }
+  }
+  return NULL;
+}
+
+// A scheme is "handled" when any interceptor claims it, when a
+// ProtocolHandler is registered for it, or when the process-wide
+// URLRequestJobManager has a (registered or built-in) factory for it.
+bool URLRequestJobFactoryImpl::IsHandledProtocol(
+    const std::string& scheme) const {
+  DCHECK(CalledOnValidThread());
+  InterceptorList::const_iterator i;
+  for (i = interceptors_.begin(); i != interceptors_.end(); ++i) {
+    if ((*i)->WillHandleProtocol(scheme))
+      return true;
+  }
+  return ContainsKey(protocol_handler_map_, scheme) ||
+      URLRequestJobManager::GetInstance()->SupportsScheme(scheme);
+}
+
+// Invalid URLs are reported as handled so that the error path (an error job)
+// can run; otherwise defers to IsHandledProtocol on the URL's scheme.
+// NOTE(review): unlike its siblings this method has no CalledOnValidThread
+// DCHECK -- presumably intentional, but worth confirming.
+bool URLRequestJobFactoryImpl::IsHandledURL(const GURL& url) const {
+  if (!url.is_valid()) {
+    // We handle error cases.
+    return true;
+  }
+  return IsHandledProtocol(url.scheme());
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_job_factory_impl.h b/src/net/url_request/url_request_job_factory_impl.h
new file mode 100644
index 0000000..ff27185
--- /dev/null
+++ b/src/net/url_request/url_request_job_factory_impl.h
@@ -0,0 +1,52 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_JOB_FACTORY_IMPL_H_
+#define NET_URL_REQUEST_URL_REQUEST_JOB_FACTORY_IMPL_H_
+
+#include <map>
+#include <vector>
+#include "base/basictypes.h"
+#include "net/base/net_export.h"
+#include "net/url_request/url_request_job_factory.h"
+
+namespace net {
+
+// Default URLRequestJobFactory implementation: dispatches job creation to
+// per-scheme ProtocolHandlers and an ordered list of Interceptors, both of
+// which are owned (and deleted) by this object.
+class NET_EXPORT URLRequestJobFactoryImpl : public URLRequestJobFactory {
+ public:
+  URLRequestJobFactoryImpl();
+  virtual ~URLRequestJobFactoryImpl();
+
+  // URLRequestJobFactory implementation
+  virtual bool SetProtocolHandler(const std::string& scheme,
+                          ProtocolHandler* protocol_handler) OVERRIDE;
+  virtual void AddInterceptor(Interceptor* interceptor) OVERRIDE;
+  virtual URLRequestJob* MaybeCreateJobWithInterceptor(
+      URLRequest* request, NetworkDelegate* network_delegate) const OVERRIDE;
+  virtual URLRequestJob* MaybeCreateJobWithProtocolHandler(
+      const std::string& scheme,
+      URLRequest* request,
+      NetworkDelegate* network_delegate) const OVERRIDE;
+  virtual URLRequestJob* MaybeInterceptRedirect(
+      const GURL& location,
+      URLRequest* request,
+      NetworkDelegate* network_delegate) const OVERRIDE;
+  virtual URLRequestJob* MaybeInterceptResponse(
+      URLRequest* request, NetworkDelegate* network_delegate) const OVERRIDE;
+  virtual bool IsHandledProtocol(const std::string& scheme) const OVERRIDE;
+  virtual bool IsHandledURL(const GURL& url) const OVERRIDE;
+
+ private:
+  typedef std::map<std::string, ProtocolHandler*> ProtocolHandlerMap;
+  typedef std::vector<Interceptor*> InterceptorList;
+
+  // Owned pointers; deleted in the destructor.
+  ProtocolHandlerMap protocol_handler_map_;
+  InterceptorList interceptors_;
+
+  DISALLOW_COPY_AND_ASSIGN(URLRequestJobFactoryImpl);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_JOB_FACTORY_IMPL_H_
diff --git a/src/net/url_request/url_request_job_factory_impl_unittest.cc b/src/net/url_request/url_request_job_factory_impl_unittest.cc
new file mode 100644
index 0000000..232b21a
--- /dev/null
+++ b/src/net/url_request/url_request_job_factory_impl_unittest.cc
@@ -0,0 +1,218 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_job_factory_impl.h"
+
+#include "base/bind.h"
+#include "base/memory/weak_ptr.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_job.h"
+#include "net/url_request/url_request_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace net {
+
+namespace {
+
+// Test job that completes asynchronously with the URLRequestStatus it was
+// constructed with, never touching the network.
+class MockURLRequestJob : public URLRequestJob {
+ public:
+  MockURLRequestJob(URLRequest* request,
+                    NetworkDelegate* network_delegate,
+                    const URLRequestStatus& status)
+      : URLRequestJob(request, network_delegate),
+        status_(status),
+        ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) {}
+
+  virtual void Start() OVERRIDE {
+    // Start reading asynchronously so that all error reporting and data
+    // callbacks happen as they would for network requests.
+    MessageLoop::current()->PostTask(
+        FROM_HERE,
+        base::Bind(&MockURLRequestJob::StartAsync,
+                   weak_factory_.GetWeakPtr()));
+  }
+
+ protected:
+  virtual ~MockURLRequestJob() {}
+
+ private:
+  // Posted by Start(); reports the canned status and completes the headers.
+  void StartAsync() {
+    SetStatus(status_);
+    NotifyHeadersComplete();
+  }
+
+  URLRequestStatus status_;
+  base::WeakPtrFactory<MockURLRequestJob> weak_factory_;
+};
+
+// ProtocolHandler that always produces a job which succeeds with OK.
+class DummyProtocolHandler : public URLRequestJobFactory::ProtocolHandler {
+ public:
+  virtual URLRequestJob* MaybeCreateJob(
+      URLRequest* request, NetworkDelegate* network_delegate) const OVERRIDE {
+    return new MockURLRequestJob(
+        request,
+        network_delegate,
+        URLRequestStatus(URLRequestStatus::SUCCESS, OK));
+  }
+};
+
+// Interceptor that always hijacks MaybeIntercept with a job failing with
+// ERR_FAILED, records that it did so, and optionally claims every protocol.
+class DummyInterceptor : public URLRequestJobFactory::Interceptor {
+ public:
+  DummyInterceptor()
+      : did_intercept_(false),
+        handle_all_protocols_(false) {
+  }
+
+  virtual URLRequestJob* MaybeIntercept(
+      URLRequest* request, NetworkDelegate* network_delegate) const OVERRIDE {
+    did_intercept_ = true;
+    return new MockURLRequestJob(
+        request,
+        network_delegate,
+        URLRequestStatus(URLRequestStatus::FAILED, ERR_FAILED));
+  }
+
+  virtual URLRequestJob* MaybeInterceptRedirect(
+      const GURL&                       /* location */,
+      URLRequest*                       /* request */,
+      NetworkDelegate* network_delegate /* network delegate */) const OVERRIDE {
+    return NULL;
+  }
+
+  virtual URLRequestJob* MaybeInterceptResponse(
+      URLRequest*                       /* request */,
+      NetworkDelegate* network_delegate /* network delegate */) const OVERRIDE {
+    return NULL;
+  }
+
+  virtual bool WillHandleProtocol(
+      const std::string& /* protocol */) const OVERRIDE {
+    return handle_all_protocols_;
+  }
+
+  // mutable so the const interception hooks can record/read them in tests.
+  mutable bool did_intercept_;
+  mutable bool handle_all_protocols_;
+};
+
+// A scheme with no handler registered must fail with ERR_UNKNOWN_URL_SCHEME.
+TEST(URLRequestJobFactoryTest, NoProtocolHandler) {
+  TestDelegate delegate;
+  TestURLRequestContext request_context;
+  TestURLRequest request(GURL("foo://bar"), &delegate, &request_context);
+  request.Start();
+
+  MessageLoop::current()->Run();
+  EXPECT_EQ(URLRequestStatus::FAILED, request.status().status());
+  EXPECT_EQ(ERR_UNKNOWN_URL_SCHEME, request.status().error());
+}
+
+// A registered ProtocolHandler services its scheme and the request succeeds.
+TEST(URLRequestJobFactoryTest, BasicProtocolHandler) {
+  TestDelegate delegate;
+  URLRequestJobFactoryImpl job_factory;
+  TestURLRequestContext request_context;
+  request_context.set_job_factory(&job_factory);
+  job_factory.SetProtocolHandler("foo", new DummyProtocolHandler);
+  TestURLRequest request(GURL("foo://bar"), &delegate, &request_context);
+  request.Start();
+
+  MessageLoop::current()->Run();
+  EXPECT_EQ(URLRequestStatus::SUCCESS, request.status().status());
+  EXPECT_EQ(OK, request.status().error());
+}
+
+// Registering then removing (via NULL) a handler must not leak or crash.
+TEST(URLRequestJobFactoryTest, DeleteProtocolHandler) {
+  URLRequestJobFactoryImpl job_factory;
+  TestURLRequestContext request_context;
+  request_context.set_job_factory(&job_factory);
+  job_factory.SetProtocolHandler("foo", new DummyProtocolHandler);
+  job_factory.SetProtocolHandler("foo", NULL);
+}
+
+// An interceptor can hijack a request on a normally-supported scheme (http).
+TEST(URLRequestJobFactoryTest, BasicInterceptor) {
+  TestDelegate delegate;
+  URLRequestJobFactoryImpl job_factory;
+  TestURLRequestContext request_context;
+  request_context.set_job_factory(&job_factory);
+  job_factory.AddInterceptor(new DummyInterceptor);
+  TestURLRequest request(GURL("http://bar"), &delegate, &request_context);
+  request.Start();
+
+  MessageLoop::current()->Run();
+  EXPECT_EQ(URLRequestStatus::FAILED, request.status().status());
+  EXPECT_EQ(ERR_FAILED, request.status().error());
+}
+
+// An interceptor that doesn't claim the protocol can't rescue an unknown
+// scheme; the request still fails with ERR_UNKNOWN_URL_SCHEME.
+TEST(URLRequestJobFactoryTest, InterceptorNeedsValidSchemeStill) {
+  TestDelegate delegate;
+  URLRequestJobFactoryImpl job_factory;
+  TestURLRequestContext request_context;
+  request_context.set_job_factory(&job_factory);
+  job_factory.AddInterceptor(new DummyInterceptor);
+  TestURLRequest request(GURL("foo://bar"), &delegate, &request_context);
+  request.Start();
+
+  MessageLoop::current()->Run();
+  EXPECT_EQ(URLRequestStatus::FAILED, request.status().status());
+  EXPECT_EQ(ERR_UNKNOWN_URL_SCHEME, request.status().error());
+}
+
+// Interceptors run before ProtocolHandlers: the failing interceptor job wins
+// over the succeeding handler job.
+TEST(URLRequestJobFactoryTest, InterceptorOverridesProtocolHandler) {
+  TestDelegate delegate;
+  URLRequestJobFactoryImpl job_factory;
+  TestURLRequestContext request_context;
+  request_context.set_job_factory(&job_factory);
+  job_factory.SetProtocolHandler("foo", new DummyProtocolHandler);
+  job_factory.AddInterceptor(new DummyInterceptor);
+  TestURLRequest request(GURL("foo://bar"), &delegate, &request_context);
+  request.Start();
+
+  MessageLoop::current()->Run();
+  EXPECT_EQ(URLRequestStatus::FAILED, request.status().status());
+  EXPECT_EQ(ERR_FAILED, request.status().error());
+}
+
+// Unknown-scheme requests are rejected before interceptors ever see them.
+TEST(URLRequestJobFactoryTest, InterceptorDoesntInterceptUnknownProtocols) {
+  TestDelegate delegate;
+  URLRequestJobFactoryImpl job_factory;
+  TestURLRequestContext request_context;
+  request_context.set_job_factory(&job_factory);
+  DummyInterceptor* interceptor = new DummyInterceptor;
+  job_factory.AddInterceptor(interceptor);
+  TestURLRequest request(GURL("foo://bar"), &delegate, &request_context);
+  request.Start();
+
+  MessageLoop::current()->Run();
+  EXPECT_FALSE(interceptor->did_intercept_);
+}
+
+// When the interceptor claims all protocols (WillHandleProtocol == true), it
+// does get to intercept an otherwise-unknown scheme.
+TEST(URLRequestJobFactoryTest, InterceptorInterceptsHandledUnknownProtocols) {
+  TestDelegate delegate;
+  URLRequestJobFactoryImpl job_factory;
+  TestURLRequestContext request_context;
+  request_context.set_job_factory(&job_factory);
+  DummyInterceptor* interceptor = new DummyInterceptor;
+  interceptor->handle_all_protocols_ = true;
+  job_factory.AddInterceptor(interceptor);
+  TestURLRequest request(GURL("foo://bar"), &delegate, &request_context);
+  request.Start();
+
+  MessageLoop::current()->Run();
+  EXPECT_TRUE(interceptor->did_intercept_);
+  EXPECT_EQ(URLRequestStatus::FAILED, request.status().status());
+  EXPECT_EQ(ERR_FAILED, request.status().error());
+}
+
+// IsHandledProtocol must reflect WillHandleProtocol answers from interceptors.
+TEST(URLRequestJobFactoryTest, InterceptorAffectsIsHandledProtocol) {
+  DummyInterceptor* interceptor = new DummyInterceptor;
+  URLRequestJobFactoryImpl job_factory;
+  job_factory.AddInterceptor(interceptor);
+  EXPECT_FALSE(interceptor->WillHandleProtocol("anything"));
+  EXPECT_FALSE(job_factory.IsHandledProtocol("anything"));
+  interceptor->handle_all_protocols_ = true;
+  EXPECT_TRUE(interceptor->WillHandleProtocol("anything"));
+  EXPECT_TRUE(job_factory.IsHandledProtocol("anything"));
+}
+
+}  // namespace
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_job_manager.cc b/src/net/url_request/url_request_job_manager.cc
new file mode 100644
index 0000000..a03e77c
--- /dev/null
+++ b/src/net/url_request/url_request_job_manager.cc
@@ -0,0 +1,290 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_job_manager.h"
+
+#include <algorithm>
+
+#include "base/memory/singleton.h"
+#include "build/build_config.h"
+#include "base/string_util.h"
+#include "net/base/load_flags.h"
+#include "net/base/net_errors.h"
+#include "net/base/network_delegate.h"
+#include "net/url_request/url_request_about_job.h"
+#include "net/url_request/url_request_context.h"
+#include "net/url_request/url_request_data_job.h"
+#include "net/url_request/url_request_error_job.h"
+#include "net/url_request/url_request_file_job.h"
+#include "net/url_request/url_request_ftp_job.h"
+#include "net/url_request/url_request_http_job.h"
+#include "net/url_request/url_request_job_factory.h"
+
+namespace net {
+
+// The built-in set of protocol factories
+namespace {
+
+// Maps a URL scheme to the legacy factory function that builds its job.
+struct SchemeToFactory {
+  const char* scheme;
+  URLRequest::ProtocolFactory* factory;
+};
+
+}  // namespace
+
+// Cobalt builds trim the table to https/data, with http and file:// only
+// present behind their respective build flags; other builds get the full
+// Chromium set (http, https, optional ftp, file, about, data).
+#if defined(COBALT)
+static const SchemeToFactory kBuiltinFactories[] = {
+  {"https", URLRequestHttpJob::Factory},
+  {"data", URLRequestDataJob::Factory},
+#if !defined(COBALT_FORCE_HTTPS)
+  { "http", URLRequestHttpJob::Factory },
+#endif
+#if defined(COBALT_ENABLE_FILE_SCHEME)
+  { "file", URLRequestFileJob::Factory },
+#endif
+};
+#else
+static const SchemeToFactory kBuiltinFactories[] = {
+  { "http", URLRequestHttpJob::Factory },
+  { "https", URLRequestHttpJob::Factory },
+#if !defined(DISABLE_FTP_SUPPORT)
+  { "ftp", URLRequestFtpJob::Factory },
+#endif
+  { "file", URLRequestFileJob::Factory },
+  { "about", URLRequestAboutJob::Factory },
+  { "data", URLRequestDataJob::Factory },
+};
+#endif  // defined(COBALT)
+
+// static
+// Returns the lazily-created, process-wide singleton instance.
+URLRequestJobManager* URLRequestJobManager::GetInstance() {
+  return Singleton<URLRequestJobManager>::get();
+}
+
+// Selection order: job-factory interceptors, legacy URLRequest::Interceptors,
+// job-factory ProtocolHandlers, factories registered via
+// RegisterProtocolFactory, and finally the built-in factory table.  Always
+// returns a job -- an URLRequestErrorJob when nothing can handle the request.
+URLRequestJob* URLRequestJobManager::CreateJob(
+    URLRequest* request, NetworkDelegate* network_delegate) const {
+  DCHECK(IsAllowedThread());
+
+  // If we are given an invalid URL, then don't even try to inspect the scheme.
+  if (!request->url().is_valid())
+    return new URLRequestErrorJob(request, network_delegate, ERR_INVALID_URL);
+
+  // We do this here to avoid asking interceptors about unsupported schemes.
+  const URLRequestJobFactory* job_factory = NULL;
+  job_factory = request->context()->job_factory();
+
+  const std::string& scheme = request->url().scheme();  // already lowercase
+  if (job_factory) {
+    if (!job_factory->IsHandledProtocol(scheme)) {
+      return new URLRequestErrorJob(
+          request, network_delegate, ERR_UNKNOWN_URL_SCHEME);
+    }
+  } else if (!SupportsScheme(scheme)) {
+    return new URLRequestErrorJob(
+        request, network_delegate, ERR_UNKNOWN_URL_SCHEME);
+  }
+
+  // THREAD-SAFETY NOTICE:
+  //   We do not need to acquire the lock here since we are only reading our
+  //   data structures.  They should only be modified on the current thread.
+
+  // See if the request should be intercepted.
+  //
+
+  if (job_factory) {
+    URLRequestJob* job = job_factory->MaybeCreateJobWithInterceptor(
+        request, network_delegate);
+    if (job)
+      return job;
+  }
+
+  // TODO(willchan): Remove this in favor of URLRequestJobFactory::Interceptor.
+  if (!(request->load_flags() & LOAD_DISABLE_INTERCEPT)) {
+    InterceptorList::const_iterator i;
+    for (i = interceptors_.begin(); i != interceptors_.end(); ++i) {
+      URLRequestJob* job = (*i)->MaybeIntercept(request, network_delegate);
+      if (job)
+        return job;
+    }
+  }
+
+  if (job_factory) {
+    URLRequestJob* job = job_factory->MaybeCreateJobWithProtocolHandler(
+        scheme, request, network_delegate);
+    if (job)
+      return job;
+  }
+
+  // TODO(willchan): Remove this in favor of
+  // URLRequestJobFactory::ProtocolHandler.
+  // See if the request should be handled by a registered protocol factory.
+  // If the registered factory returns null, then we want to fall-back to the
+  // built-in protocol factory.
+  FactoryMap::const_iterator i = factories_.find(scheme);
+  if (i != factories_.end()) {
+    URLRequestJob* job = i->second(request, network_delegate, scheme);
+    if (job)
+      return job;
+  }
+
+  // See if the request should be handled by a built-in protocol factory.
+  for (size_t i = 0; i < arraysize(kBuiltinFactories); ++i) {
+    if (scheme == kBuiltinFactories[i].scheme) {
+      URLRequestJob* job = (kBuiltinFactories[i].factory)(
+          request, network_delegate, scheme);
+      DCHECK(job);  // The built-in factories are not expected to fail!
+      return job;
+    }
+  }
+
+  // If we reached here, then it means that a registered protocol factory
+  // wasn't interested in handling the URL.  That is fairly unexpected, and we
+  // don't have a specific error to report here :-(
+  LOG(WARNING) << "Failed to map: " << request->url().spec();
+  return new URLRequestErrorJob(request, network_delegate, ERR_FAILED);
+}
+
+// Offers a redirect to |location| first to the context's job factory, then to
+// the legacy interceptor list.  Bails out early (NULL) for invalid URLs,
+// LOAD_DISABLE_INTERCEPT, canceled requests, or unhandled schemes.
+URLRequestJob* URLRequestJobManager::MaybeInterceptRedirect(
+    URLRequest* request,
+    NetworkDelegate* network_delegate,
+    const GURL& location) const {
+  DCHECK(IsAllowedThread());
+  if (!request->url().is_valid() ||
+      request->load_flags() & LOAD_DISABLE_INTERCEPT ||
+      request->status().status() == URLRequestStatus::CANCELED) {
+    return NULL;
+  }
+
+  const URLRequestJobFactory* job_factory = NULL;
+  job_factory = request->context()->job_factory();
+
+  const std::string& scheme = request->url().scheme();  // already lowercase
+  if (job_factory) {
+    if (!job_factory->IsHandledProtocol(scheme)) {
+      return NULL;
+    }
+  } else if (!SupportsScheme(scheme)) {
+    return NULL;
+  }
+
+  URLRequestJob* job = NULL;
+  if (job_factory)
+    job = job_factory->MaybeInterceptRedirect(
+        location, request, network_delegate);
+  if (job)
+    return job;
+
+  InterceptorList::const_iterator i;
+  for (i = interceptors_.begin(); i != interceptors_.end(); ++i) {
+    job = (*i)->MaybeInterceptRedirect(request, network_delegate, location);
+    if (job)
+      return job;
+  }
+  return NULL;
+}
+
+// Offers the response first to the context's job factory, then to the legacy
+// interceptor list.  Same early-out conditions as MaybeInterceptRedirect.
+URLRequestJob* URLRequestJobManager::MaybeInterceptResponse(
+    URLRequest* request, NetworkDelegate* network_delegate) const {
+  DCHECK(IsAllowedThread());
+  if (!request->url().is_valid() ||
+      request->load_flags() & LOAD_DISABLE_INTERCEPT ||
+      request->status().status() == URLRequestStatus::CANCELED) {
+    return NULL;
+  }
+
+  const URLRequestJobFactory* job_factory = NULL;
+  job_factory = request->context()->job_factory();
+
+  const std::string& scheme = request->url().scheme();  // already lowercase
+  if (job_factory) {
+    if (!job_factory->IsHandledProtocol(scheme)) {
+      return NULL;
+    }
+  } else if (!SupportsScheme(scheme)) {
+    return NULL;
+  }
+
+  URLRequestJob* job = NULL;
+  if (job_factory)
+    job = job_factory->MaybeInterceptResponse(request, network_delegate);
+  if (job)
+    return job;
+
+  InterceptorList::const_iterator i;
+  for (i = interceptors_.begin(); i != interceptors_.end(); ++i) {
+    job = (*i)->MaybeInterceptResponse(request, network_delegate);
+    if (job)
+      return job;
+  }
+  return NULL;
+}
+
+// Safe to call from any thread: registered factories are checked under the
+// lock, and the built-in table is immutable.  Note the registered-factory
+// lookup is an exact map match while the built-in comparison is
+// case-insensitive.
+bool URLRequestJobManager::SupportsScheme(const std::string& scheme) const {
+  // The set of registered factories may change on another thread.
+  {
+    base::AutoLock locked(lock_);
+    if (factories_.find(scheme) != factories_.end())
+      return true;
+  }
+
+  for (size_t i = 0; i < arraysize(kBuiltinFactories); ++i)
+    if (LowerCaseEqualsASCII(scheme, kBuiltinFactories[i].scheme))
+      return true;
+
+  return false;
+}
+
+// Installs |factory| for |scheme| (or uninstalls with NULL) and returns the
+// previously registered factory, or NULL if there was none.
+URLRequest::ProtocolFactory* URLRequestJobManager::RegisterProtocolFactory(
+    const std::string& scheme,
+    URLRequest::ProtocolFactory* factory) {
+  DCHECK(IsAllowedThread());
+
+  base::AutoLock locked(lock_);
+
+  URLRequest::ProtocolFactory* old_factory;
+  FactoryMap::iterator i = factories_.find(scheme);
+  if (i != factories_.end()) {
+    old_factory = i->second;
+  } else {
+    old_factory = NULL;
+  }
+  if (factory) {
+    factories_[scheme] = factory;
+  } else if (i != factories_.end()) {  // uninstall any old one
+    factories_.erase(i);
+  }
+  return old_factory;
+}
+
+// Appends |interceptor| to the list; double registration is a DCHECK failure.
+void URLRequestJobManager::RegisterRequestInterceptor(
+    URLRequest::Interceptor* interceptor) {
+  DCHECK(IsAllowedThread());
+
+  base::AutoLock locked(lock_);
+
+  DCHECK(std::find(interceptors_.begin(), interceptors_.end(), interceptor) ==
+         interceptors_.end());
+  interceptors_.push_back(interceptor);
+}
+
+// Removes |interceptor|; it must have been previously registered (DCHECK).
+void URLRequestJobManager::UnregisterRequestInterceptor(
+    URLRequest::Interceptor* interceptor) {
+  DCHECK(IsAllowedThread());
+
+  base::AutoLock locked(lock_);
+
+  InterceptorList::iterator i =
+      std::find(interceptors_.begin(), interceptors_.end(), interceptor);
+  DCHECK(i != interceptors_.end());
+  interceptors_.erase(i);
+}
+
+// The allowed-thread id is captured lazily by IsAllowedThread(), so the
+// constructor only zero-initializes the bookkeeping members.
+URLRequestJobManager::URLRequestJobManager()
+    : allowed_thread_(0),
+      allowed_thread_initialized_(false) {
+}
+
+URLRequestJobManager::~URLRequestJobManager() {}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_job_manager.h b/src/net/url_request/url_request_job_manager.h
new file mode 100644
index 0000000..ea441be
--- /dev/null
+++ b/src/net/url_request/url_request_job_manager.h
@@ -0,0 +1,116 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_JOB_MANAGER_H_
+#define NET_URL_REQUEST_URL_REQUEST_JOB_MANAGER_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+#include "net/url_request/url_request.h"
+
+template <typename T> struct DefaultSingletonTraits;
+
+namespace net {
+
+// This class is responsible for managing the set of protocol factories and
+// request interceptors that determine how an URLRequestJob gets created to
+// handle an URLRequest.
+//
+// MULTI-THREADING NOTICE:
+//   URLRequest is designed to have all consumers on a single thread, and
+//   so no attempt is made to support ProtocolFactory or Interceptor instances
+//   being registered/unregistered or in any way poked on multiple threads.
+//   However, we do support checking for supported schemes FROM ANY THREAD
+//   (i.e., it is safe to call SupportsScheme on any thread).
+//
+class URLRequestJobManager {
+ public:
+  // Returns the singleton instance.
+  static URLRequestJobManager* GetInstance();
+
+  // Instantiate an URLRequestJob implementation based on the registered
+  // interceptors and protocol factories.  This will always succeed in
+  // returning a job unless we are--in the extreme case--out of memory.
+  URLRequestJob* CreateJob(URLRequest* request,
+                           NetworkDelegate* network_delegate) const;
+
+  // Allows interceptors to hijack the request after examining the new location
+  // of a redirect. Returns NULL if no interceptor intervenes.
+  URLRequestJob* MaybeInterceptRedirect(URLRequest* request,
+                                        NetworkDelegate* network_delegate,
+                                        const GURL& location) const;
+
+  // Allows interceptors to hijack the request after examining the response
+  // status and headers. This is also called when there is no server response
+  // at all to allow interception of failed requests due to network errors.
+  // Returns NULL if no interceptor intervenes.
+  URLRequestJob* MaybeInterceptResponse(
+      URLRequest* request, NetworkDelegate* network_delegate) const;
+
+  // Returns true if there is a protocol factory registered for the given
+  // scheme.  Note: also returns true if there is a built-in handler for the
+  // given scheme.
+  bool SupportsScheme(const std::string& scheme) const;
+
+  // Register a protocol factory associated with the given scheme.  The factory
+  // parameter may be null to clear any existing association.  Returns the
+  // previously registered protocol factory if any.
+  URLRequest::ProtocolFactory* RegisterProtocolFactory(
+      const std::string& scheme, URLRequest::ProtocolFactory* factory);
+
+  // Register/unregister a request interceptor.
+  void RegisterRequestInterceptor(URLRequest::Interceptor* interceptor);
+  void UnregisterRequestInterceptor(URLRequest::Interceptor* interceptor);
+
+ private:
+  typedef std::map<std::string, URLRequest::ProtocolFactory*> FactoryMap;
+  typedef std::vector<URLRequest::Interceptor*> InterceptorList;
+  friend struct DefaultSingletonTraits<URLRequestJobManager>;
+
+  URLRequestJobManager();
+  ~URLRequestJobManager();
+
+  // The first guy to call this function sets the allowed thread.  This way we
+  // avoid needing to define that thread externally.  Since we expect all
+  // callers to be on the same thread, we don't worry about threads racing to
+  // set the allowed thread.
+  bool IsAllowedThread() const {
+    // NOTE(review): re-enabling the disabled branch below would not compile
+    // as-is -- the '#if 0' arm is missing the function's closing brace, and
+    // the allowed_thread_ member declarations only exist in the '#else' arm.
+#if 0
+    if (!allowed_thread_initialized_) {
+      allowed_thread_ = base::PlatformThread::CurrentId();
+      allowed_thread_initialized_ = true;
+    }
+    return allowed_thread_ == base::PlatformThread::CurrentId();
+#else
+    // The previous version of this check used GetCurrentThread on Windows to
+    // get thread handles to compare. Unfortunately, GetCurrentThread returns
+    // a constant pseudo-handle (0xFFFFFFFE), and therefore IsAllowedThread
+    // always returned true. The above code that's turned off is the correct
+    // code, but causes the tree to turn red because some caller isn't
+    // respecting our thread requirements. We're turning off the check for now;
+    // bug http://b/issue?id=1338969 has been filed to fix things and turn the
+    // check back on.
+    return true;
+  }
+
+  // We use this to assert that CreateJob and the registration functions all
+  // run on the same thread.
+  mutable base::PlatformThreadId allowed_thread_;
+  mutable bool allowed_thread_initialized_;
+#endif
+
+  // lock_ guards factories_ so SupportsScheme can run on any thread.
+  mutable base::Lock lock_;
+  FactoryMap factories_;
+  InterceptorList interceptors_;
+
+  DISALLOW_COPY_AND_ASSIGN(URLRequestJobManager);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_JOB_MANAGER_H_
diff --git a/src/net/url_request/url_request_job_unittest.cc b/src/net/url_request/url_request_job_unittest.cc
new file mode 100644
index 0000000..19c2395
--- /dev/null
+++ b/src/net/url_request/url_request_job_unittest.cc
@@ -0,0 +1,79 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_job.h"
+
+#include "net/http/http_transaction_unittest.h"
+#include "net/url_request/url_request_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// This is a header that signals the end of the data.
+// NOTE(review): presumably a gzip stream header/trailer fragment; confirm the
+// bytes against RFC 1952 if this fixture ever needs changing.
+const char kGzipGata[] = "\x1f\x08b\x08\0\0\0\0\0\0\3\3\0\0\0\0\0\0\0\0";
+
+// Mock transaction handler: every response body is the kGzipGata bytes.
+void GZipServer(const net::HttpRequestInfo* request,
+                std::string* response_status, std::string* response_headers,
+                std::string* response_data) {
+  response_data->assign(kGzipGata, sizeof(kGzipGata));
+}
+
+// Cacheable gzip-encoded response whose Content-Length deliberately disagrees
+// with the actual body, to exercise completion/done-reading notification.
+const MockTransaction kGZip_Transaction = {
+  "http://www.google.com/gzyp",
+  "GET",
+  base::Time(),
+  "",
+  net::LOAD_NORMAL,
+  "HTTP/1.1 200 OK",
+  "Cache-Control: max-age=10000\n"
+  "Content-Encoding: gzip\n"
+  "Content-Length: 30\n",  // Intentionally wrong.
+  base::Time(),
+  "",
+  TEST_MODE_NORMAL,
+  &GZipServer,
+  0
+};
+
+}  // namespace
+
+// An async mock transaction must see DoneReading() once the job completes.
+TEST(URLRequestJob, TransactionNotifiedWhenDone) {
+  MockNetworkLayer network_layer;
+  net::TestURLRequestContext context;
+  context.set_http_transaction_factory(&network_layer);
+
+  net::TestDelegate d;
+  net::TestURLRequest req(GURL(kGZip_Transaction.url), &d, &context);
+  AddMockTransaction(&kGZip_Transaction);
+
+  req.set_method("GET");
+  req.Start();
+
+  MessageLoop::current()->Run();
+
+  EXPECT_TRUE(network_layer.done_reading_called());
+
+  RemoveMockTransaction(&kGZip_Transaction);
+}
+
+// Same as above, but with the mock transaction operating synchronously.
+TEST(URLRequestJob, SyncTransactionNotifiedWhenDone) {
+  MockNetworkLayer network_layer;
+  net::TestURLRequestContext context;
+  context.set_http_transaction_factory(&network_layer);
+
+  net::TestDelegate d;
+  net::TestURLRequest req(GURL(kGZip_Transaction.url), &d, &context);
+  MockTransaction transaction(kGZip_Transaction);
+  transaction.test_mode = TEST_MODE_SYNC_ALL;
+  AddMockTransaction(&transaction);
+
+  req.set_method("GET");
+  req.Start();
+
+  MessageLoop::current()->Run();
+
+  EXPECT_TRUE(network_layer.done_reading_called());
+
+  RemoveMockTransaction(&transaction);
+}
diff --git a/src/net/url_request/url_request_netlog_params.cc b/src/net/url_request/url_request_netlog_params.cc
new file mode 100644
index 0000000..dbc5357
--- /dev/null
+++ b/src/net/url_request/url_request_netlog_params.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_netlog_params.h"
+
+#include "base/string_number_conversions.h"
+#include "base/values.h"
+#include "googleurl/src/gurl.h"
+
+namespace net {
+
+// Builds the NetLog parameter dictionary for a URLRequest start event; the
+// caller takes ownership of the returned value. |upload_id| is recorded only
+// when non-negative (i.e. when the request has an upload body). |log_level|
+// is accepted for callback-signature compatibility but unused.
+Value* NetLogURLRequestStartCallback(const GURL* url,
+                                     const std::string* method,
+                                     int load_flags,
+                                     RequestPriority priority,
+                                     int64 upload_id,
+                                     NetLog::LogLevel /* log_level */) {
+  DictionaryValue* dict = new DictionaryValue();
+  // possibly_invalid_spec() keeps the log entry useful even when the URL
+  // failed to parse.
+  dict->SetString("url", url->possibly_invalid_spec());
+  dict->SetString("method", *method);
+  dict->SetInteger("load_flags", load_flags);
+  dict->SetInteger("priority", static_cast<int>(priority));
+  if (upload_id > -1)
+    dict->SetString("upload_id", base::Int64ToString(upload_id));
+  return dict;
+}
+
+// Extracts "load_flags" from a dictionary produced by
+// NetLogURLRequestStartCallback(). Returns false -- and zeroes |load_flags|
+// -- when |event_params| is not a dictionary or lacks the key.
+bool StartEventLoadFlagsFromEventParams(const Value* event_params,
+                                        int* load_flags) {
+  const DictionaryValue* dict;
+  if (!event_params->GetAsDictionary(&dict) ||
+      !dict->GetInteger("load_flags", load_flags)) {
+    *load_flags = 0;
+    return false;
+  }
+  return true;
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_netlog_params.h b/src/net/url_request/url_request_netlog_params.h
new file mode 100644
index 0000000..f2e0dce
--- /dev/null
+++ b/src/net/url_request/url_request_netlog_params.h
@@ -0,0 +1,40 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_NETLOG_PARAMS_H_
+#define NET_URL_REQUEST_URL_REQUEST_NETLOG_PARAMS_H_
+
+#include <string>
+
+#include "net/base/net_export.h"
+#include "net/base/net_log.h"
+#include "net/base/request_priority.h"
+
+class GURL;
+
+namespace base {
+class Value;
+}
+
+namespace net {
+
+// Returns a Value containing NetLog parameters for starting a URLRequest.
+NET_EXPORT base::Value* NetLogURLRequestStartCallback(
+    const GURL* url,
+    const std::string* method,
+    int load_flags,
+    RequestPriority priority,
+    int64 upload_id,
+    NetLog::LogLevel /* log_level */);
+
+// Attempts to extract the load flags from a Value created by the above
+// function.  On success, sets |load_flags| accordingly and returns true.
+// On failure, sets |load_flags| to 0.
+NET_EXPORT bool StartEventLoadFlagsFromEventParams(
+    const base::Value* event_params,
+    int* load_flags);
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_NETLOG_PARAMS_H_
diff --git a/src/net/url_request/url_request_redirect_job.cc b/src/net/url_request/url_request_redirect_job.cc
new file mode 100644
index 0000000..e05377f
--- /dev/null
+++ b/src/net/url_request/url_request_redirect_job.cc
@@ -0,0 +1,42 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_redirect_job.h"
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/message_loop.h"
+
+namespace net {
+
+// Stores the redirect target and status code; IsRedirectResponse() reports
+// them back to the URLRequest machinery once headers "arrive" in StartAsync.
+URLRequestRedirectJob::URLRequestRedirectJob(URLRequest* request,
+                                             NetworkDelegate* network_delegate,
+                                             const GURL& redirect_destination,
+                                             StatusCode http_status_code)
+    : URLRequestJob(request, network_delegate),
+      redirect_destination_(redirect_destination),
+      http_status_code_(http_status_code),
+      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) {}
+
+void URLRequestRedirectJob::Start() {
+  // Complete asynchronously so callers observe the same event ordering as a
+  // real network redirect; the weak pointer makes the posted task a no-op if
+  // the job is destroyed first.
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(&URLRequestRedirectJob::StartAsync,
+                 weak_factory_.GetWeakPtr()));
+}
+
+// Always reports a redirect, using the destination and status code supplied
+// at construction.
+bool URLRequestRedirectJob::IsRedirectResponse(GURL* location,
+                                               int* http_status_code) {
+  *location = redirect_destination_;
+  *http_status_code = http_status_code_;
+  return true;
+}
+
+URLRequestRedirectJob::~URLRequestRedirectJob() {}
+
+void URLRequestRedirectJob::StartAsync() {
+  // Announcing headers causes the request to query IsRedirectResponse(),
+  // which triggers the redirect.
+  NotifyHeadersComplete();
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_redirect_job.h b/src/net/url_request/url_request_redirect_job.h
new file mode 100644
index 0000000..198ee76
--- /dev/null
+++ b/src/net/url_request/url_request_redirect_job.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_REDIRECT_JOB_H_
+#define NET_URL_REQUEST_URL_REQUEST_REDIRECT_JOB_H_
+
+#include "base/memory/weak_ptr.h"
+#include "net/base/net_export.h"
+#include "net/url_request/url_request_job.h"
+
+class GURL;
+
+namespace net {
+
+// A URLRequestJob that will redirect the request to the specified
+// URL.  This is useful to restart a request at a different URL based
+// on the result of another job.
+class NET_EXPORT URLRequestRedirectJob : public URLRequestJob {
+ public:
+  // Valid status codes for the redirect job. Other 30x codes are theoretically
+  // valid, but unused so far.  Both 302 and 307 are temporary redirects, with
+  // the difference being that 302 converts POSTs to GETs and removes upload
+  // data.
+  enum StatusCode {
+    REDIRECT_302_FOUND = 302,
+    REDIRECT_307_TEMPORARY_REDIRECT = 307,
+  };
+
+  // Constructs a job that redirects to the specified URL.
+  URLRequestRedirectJob(URLRequest* request,
+                        NetworkDelegate* network_delegate,
+                        const GURL& redirect_destination,
+                        StatusCode http_status_code);
+
+  virtual void Start() OVERRIDE;
+  virtual bool IsRedirectResponse(GURL* location,
+                                  int* http_status_code) OVERRIDE;
+
+ private:
+  virtual ~URLRequestRedirectJob();
+
+  // Posted from Start() so the redirect is reported asynchronously.
+  void StartAsync();
+
+  const GURL redirect_destination_;
+  const int http_status_code_;  // stored as int; set from StatusCode
+
+  // Declared last so outstanding weak pointers are invalidated before the
+  // other members are destroyed.
+  base::WeakPtrFactory<URLRequestRedirectJob> weak_factory_;
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_REDIRECT_JOB_H_
diff --git a/src/net/url_request/url_request_simple_job.cc b/src/net/url_request/url_request_simple_job.cc
new file mode 100644
index 0000000..fda1012
--- /dev/null
+++ b/src/net/url_request/url_request_simple_job.cc
@@ -0,0 +1,75 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_simple_job.h"
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/message_loop.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/url_request/url_request_status.h"
+
+namespace net {
+
+URLRequestSimpleJob::URLRequestSimpleJob(
+    URLRequest* request, NetworkDelegate* network_delegate)
+    : URLRequestJob(request, network_delegate),
+      data_offset_(0),
+      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) {}
+
+void URLRequestSimpleJob::Start() {
+  // Start reading asynchronously so that all error reporting and data
+  // callbacks happen as they would for network requests.
+  MessageLoop::current()->PostTask(
+      FROM_HERE,
+      base::Bind(&URLRequestSimpleJob::StartAsync,
+                 weak_factory_.GetWeakPtr()));
+}
+
+// Reports the MIME type captured by GetData(); meaningful after StartAsync().
+bool URLRequestSimpleJob::GetMimeType(std::string* mime_type) const {
+  *mime_type = mime_type_;
+  return true;
+}
+
+// Reports the charset captured by GetData(); meaningful after StartAsync().
+bool URLRequestSimpleJob::GetCharset(std::string* charset) {
+  *charset = charset_;
+  return true;
+}
+
+URLRequestSimpleJob::~URLRequestSimpleJob() {}
+
+// Copies the next chunk of |data_| into |buf|. Always completes
+// synchronously and returns true; *bytes_read == 0 signals end-of-data.
+// Assumes buf_size >= 0 and data_offset_ <= data_.size().
+bool URLRequestSimpleJob::ReadRawData(IOBuffer* buf, int buf_size,
+                                      int* bytes_read) {
+  DCHECK(bytes_read);
+  int remaining = static_cast<int>(data_.size()) - data_offset_;
+  if (buf_size > remaining)
+    buf_size = remaining;
+  memcpy(buf->data(), data_.data() + data_offset_, buf_size);
+  data_offset_ += buf_size;
+  *bytes_read = buf_size;
+  return true;
+}
+
+void URLRequestSimpleJob::StartAsync() {
+  // The request may have gone away (e.g. cancelled) before this task ran.
+  if (!request_)
+    return;
+
+  // GetData() either completes synchronously (any code but ERR_IO_PENDING)
+  // or later invokes OnGetDataCompleted through the weak pointer.
+  int result = GetData(&mime_type_, &charset_, &data_,
+                       base::Bind(&URLRequestSimpleJob::OnGetDataCompleted,
+                                  weak_factory_.GetWeakPtr()));
+  if (result != ERR_IO_PENDING)
+    OnGetDataCompleted(result);
+}
+
+// Completion for GetData(): on success the response "headers" become
+// available; otherwise the job fails with the given net error code.
+void URLRequestSimpleJob::OnGetDataCompleted(int result) {
+  if (result == OK) {
+    // Notify that the headers are complete
+    NotifyHeadersComplete();
+  } else {
+    NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
+  }
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_simple_job.h b/src/net/url_request/url_request_simple_job.h
new file mode 100644
index 0000000..7577151
--- /dev/null
+++ b/src/net/url_request/url_request_simple_job.h
@@ -0,0 +1,63 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_SIMPLE_JOB_H_
+#define NET_URL_REQUEST_URL_REQUEST_SIMPLE_JOB_H_
+
+#include <string>
+
+#include "base/memory/weak_ptr.h"
+#include "net/base/completion_callback.h"
+#include "net/base/net_export.h"
+#include "net/url_request/url_request_job.h"
+
+namespace net {
+
+class URLRequest;
+
+class NET_EXPORT URLRequestSimpleJob : public URLRequestJob {
+ public:
+  URLRequestSimpleJob(URLRequest* request, NetworkDelegate* network_delegate);
+
+  virtual void Start() OVERRIDE;
+  virtual bool ReadRawData(IOBuffer* buf,
+                           int buf_size,
+                           int *bytes_read) OVERRIDE;
+  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
+  virtual bool GetCharset(std::string* charset) OVERRIDE;
+
+ protected:
+  virtual ~URLRequestSimpleJob();
+
+  // Subclasses must override the way response data is determined.
+  // The return value should be:
+  //  - OK if data is obtained;
+  //  - ERR_IO_PENDING if async processing is needed to finish obtaining data.
+  //    This is the only case when |callback| should be called after
+  //    completion of the operation. In other situations |callback| should
+  //    never be called;
+  //  - any other ERR_* code to indicate an error. This code will be used
+  //    as the error code in the URLRequestStatus when the URLRequest
+  //    is finished.
+  virtual int GetData(std::string* mime_type,
+                      std::string* charset,
+                      std::string* data,
+                      const CompletionCallback& callback) const = 0;
+
+ protected:
+  // NOTE(review): this second "protected:" is redundant (the section above
+  // is already protected); harmless, but could be dropped upstream.
+  void StartAsync();
+
+ private:
+  void OnGetDataCompleted(int result);
+
+  // Response metadata and body, filled in by GetData() during StartAsync().
+  std::string mime_type_;
+  std::string charset_;
+  std::string data_;
+  // Read cursor into |data_|, advanced by ReadRawData().
+  int data_offset_;
+  base::WeakPtrFactory<URLRequestSimpleJob> weak_factory_;
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_SIMPLE_JOB_H_
diff --git a/src/net/url_request/url_request_status.h b/src/net/url_request/url_request_status.h
new file mode 100644
index 0000000..521a3d4
--- /dev/null
+++ b/src/net/url_request/url_request_status.h
@@ -0,0 +1,62 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file's dependencies should be kept to a minimum so that it can be
+// included in WebKit code that doesn't rely on much of common.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_STATUS_H_
+#define NET_URL_REQUEST_URL_REQUEST_STATUS_H_
+
+namespace net {
+
+// Represents the result of a URL request. It encodes errors and various
+// types of success.
+class URLRequestStatus {
+ public:
+  enum Status {
+    // Request succeeded, |error_| will be 0.
+    SUCCESS = 0,
+
+    // An IO request is pending, and the caller will be informed when it is
+    // completed.
+    IO_PENDING,
+
+    // Request was cancelled programmatically.
+    CANCELED,
+
+    // The request failed for some reason. |error_| may have more information.
+    FAILED,
+  };
+
+  URLRequestStatus() : status_(SUCCESS), error_(0) {}
+  URLRequestStatus(Status s, int e) : status_(s), error_(e) {}
+
+  Status status() const { return status_; }
+  void set_status(Status s) { status_ = s; }
+
+  int error() const { return error_; }
+  void set_error(int e) { error_ = e; }
+
+  // Returns true if the status is success, which makes some calling code more
+  // convenient because this is the most common test.  Note that IO_PENDING
+  // counts as success here: the request has not failed (yet).
+  bool is_success() const {
+    return status_ == SUCCESS || status_ == IO_PENDING;
+  }
+
+  // Returns true if the request is waiting for IO.
+  bool is_io_pending() const {
+    return status_ == IO_PENDING;
+  }
+
+ private:
+  // Application level status.
+  Status status_;
+
+  // Error code from the network layer if an error was encountered.
+  int error_;
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_STATUS_H_
diff --git a/src/net/url_request/url_request_test_job.cc b/src/net/url_request/url_request_test_job.cc
new file mode 100644
index 0000000..05b562b
--- /dev/null
+++ b/src/net/url_request/url_request_test_job.cc
@@ -0,0 +1,303 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_test_job.h"
+
+#include <algorithm>
+#include <list>
+
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/lazy_instance.h"
+#include "base/message_loop.h"
+#include "base/string_util.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/http/http_response_headers.h"
+
+namespace net {
+
+namespace {
+
+// Process-wide FIFO of jobs awaiting a ProcessOnePendingMessage() call.
+// Only jobs created without auto_advance are queued here (see AdvanceJob).
+typedef std::list<URLRequestTestJob*> URLRequestJobList;
+base::LazyInstance<URLRequestJobList>::Leaky
+    g_pending_jobs = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+// static getters for known URLs
+GURL URLRequestTestJob::test_url_1() {
+  return GURL("test:url1");
+}
+GURL URLRequestTestJob::test_url_2() {
+  return GURL("test:url2");
+}
+GURL URLRequestTestJob::test_url_3() {
+  return GURL("test:url3");
+}
+GURL URLRequestTestJob::test_url_error() {
+  return GURL("test:error");
+}
+
+// static getters for known URL responses
+std::string URLRequestTestJob::test_data_1() {
+  return std::string("<html><title>Test One</title></html>");
+}
+std::string URLRequestTestJob::test_data_2() {
+  return std::string("<html><title>Test Two Two</title></html>");
+}
+std::string URLRequestTestJob::test_data_3() {
+  return std::string("<html><title>Test Three Three Three</title></html>");
+}
+
+// static getter for simple response headers
+// The embedded '\0' bytes delimit the header lines -- the raw pre-parsed
+// form HttpResponseHeaders accepts -- and arraysize() deliberately includes
+// the array's terminating NUL.
+std::string URLRequestTestJob::test_headers() {
+  static const char kHeaders[] =
+      "HTTP/1.1 200 OK\0"
+      "Content-type: text/html\0"
+      "\0";
+  return std::string(kHeaders, arraysize(kHeaders));
+}
+
+// static getter for redirect response headers (same '\0'-delimited format)
+std::string URLRequestTestJob::test_redirect_headers() {
+  static const char kHeaders[] =
+      "HTTP/1.1 302 MOVED\0"
+      "Location: somewhere\0"
+      "\0";
+  return std::string(kHeaders, arraysize(kHeaders));
+}
+
+// static getter for error response headers (same '\0'-delimited format)
+std::string URLRequestTestJob::test_error_headers() {
+  static const char kHeaders[] =
+      "HTTP/1.1 500 BOO HOO\0"
+      "\0";
+  return std::string(kHeaders, arraysize(kHeaders));
+}
+
+// static
+// Protocol-factory entry point: |scheme| is ignored, and every request gets
+// a canned-response job with auto advance disabled.
+URLRequestJob* URLRequestTestJob::Factory(URLRequest* request,
+                                          NetworkDelegate* network_delegate,
+                                          const std::string& scheme) {
+  return new URLRequestTestJob(request, network_delegate);
+}
+
+// Canned-response job: headers and body are selected in StartAsync() from
+// the request URL. Auto advance is disabled.
+URLRequestTestJob::URLRequestTestJob(URLRequest* request,
+                                     NetworkDelegate* network_delegate)
+    : URLRequestJob(request, network_delegate),
+      auto_advance_(false),
+      stage_(WAITING),
+      offset_(0),
+      async_buf_(NULL),
+      async_buf_size_(0),
+      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) {
+}
+
+// Same as above, but with auto advance configurable.
+URLRequestTestJob::URLRequestTestJob(URLRequest* request,
+                                     NetworkDelegate* network_delegate,
+                                     bool auto_advance)
+    : URLRequestJob(request, network_delegate),
+      auto_advance_(auto_advance),
+      stage_(WAITING),
+      offset_(0),
+      async_buf_(NULL),
+      async_buf_size_(0),
+      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) {
+}
+
+// Explicit-response job: serves |response_headers|/|response_data| for any
+// request URL. Since response_headers_ is set here, StartAsync() skips the
+// canned-URL selection entirely.
+URLRequestTestJob::URLRequestTestJob(URLRequest* request,
+                                     NetworkDelegate* network_delegate,
+                                     const std::string& response_headers,
+                                     const std::string& response_data,
+                                     bool auto_advance)
+    : URLRequestJob(request, network_delegate),
+      auto_advance_(auto_advance),
+      stage_(WAITING),
+      response_headers_(new HttpResponseHeaders(response_headers)),
+      response_data_(response_data),
+      offset_(0),
+      async_buf_(NULL),
+      async_buf_size_(0),
+      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) {
+}
+
+URLRequestTestJob::~URLRequestTestJob() {
+  // Erase-remove idiom: drop every queued pointer to this job so a later
+  // ProcessOnePendingMessage() cannot touch freed memory.
+  g_pending_jobs.Get().erase(
+      std::remove(
+          g_pending_jobs.Get().begin(), g_pending_jobs.Get().end(), this),
+      g_pending_jobs.Get().end());
+}
+
+bool URLRequestTestJob::GetMimeType(std::string* mime_type) const {
+  DCHECK(mime_type);
+  // No headers yet (StartAsync() hasn't run) means no MIME type to report.
+  if (!response_headers_)
+    return false;
+  return response_headers_->GetMimeType(mime_type);
+}
+
+void URLRequestTestJob::Start() {
+  // Start reading asynchronously so that all error reporting and data
+  // callbacks happen as they would for network requests.
+  MessageLoop::current()->PostTask(
+      FROM_HERE, base::Bind(&URLRequestTestJob::StartAsync,
+                            weak_factory_.GetWeakPtr()));
+}
+
+// Selects the canned headers/body on first run (unless the explicit ctor
+// already provided them), then advances the job and announces headers.
+// Unrecognized URLs fail with ERR_INVALID_URL.
+void URLRequestTestJob::StartAsync() {
+  if (!response_headers_) {
+    response_headers_ = new HttpResponseHeaders(test_headers());
+    if (request_->url().spec() == test_url_1().spec()) {
+      response_data_ = test_data_1();
+      stage_ = DATA_AVAILABLE;  // Simulate a synchronous response for this one.
+    } else if (request_->url().spec() == test_url_2().spec()) {
+      response_data_ = test_data_2();
+    } else if (request_->url().spec() == test_url_3().spec()) {
+      response_data_ = test_data_3();
+    } else {
+      AdvanceJob();
+
+      // unexpected url, return error
+      // FIXME(brettw) we may want to use WININET errors or have some more types
+      // of errors
+      NotifyDone(URLRequestStatus(URLRequestStatus::FAILED,
+                                  ERR_INVALID_URL));
+      // FIXME(brettw): this should emulate a network error, and not just fail
+      // initiating a connection
+      return;
+    }
+  }
+
+  AdvanceJob();
+
+  this->NotifyHeadersComplete();
+}
+
+// In the WAITING stage, parks |buf| and reports IO_PENDING; the read is
+// performed later by ProcessNextOperation(). Otherwise copies the next chunk
+// of response_data_ synchronously; *bytes_read == 0 signals end-of-data.
+bool URLRequestTestJob::ReadRawData(IOBuffer* buf, int buf_size,
+                                    int *bytes_read) {
+  if (stage_ == WAITING) {
+    async_buf_ = buf;
+    async_buf_size_ = buf_size;
+    SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
+    return false;
+  }
+
+  DCHECK(bytes_read);
+  *bytes_read = 0;
+
+  if (offset_ >= static_cast<int>(response_data_.length())) {
+    return true;  // done reading
+  }
+
+  // Clamp the read to the bytes remaining in response_data_.
+  int to_read = buf_size;
+  if (to_read + offset_ > static_cast<int>(response_data_.length()))
+    to_read = static_cast<int>(response_data_.length()) - offset_;
+
+  memcpy(buf->data(), &response_data_.c_str()[offset_], to_read);
+  offset_ += to_read;
+
+  *bytes_read = to_read;
+  return true;
+}
+
+void URLRequestTestJob::GetResponseInfo(HttpResponseInfo* info) {
+  if (response_headers_)
+    info->headers = response_headers_;
+}
+
+// Returns the status code from the headers, or -1 before headers exist.
+int URLRequestTestJob::GetResponseCode() const {
+  if (response_headers_)
+    return response_headers_->response_code();
+  return -1;
+}
+
+// Reports a redirect iff the headers contain one; the Location value is
+// resolved relative to the request URL.
+bool URLRequestTestJob::IsRedirectResponse(GURL* location,
+                                           int* http_status_code) {
+  if (!response_headers_)
+    return false;
+
+  std::string value;
+  if (!response_headers_->IsRedirect(&value))
+    return false;
+
+  *location = request_->url().Resolve(value);
+  *http_status_code = response_headers_->response_code();
+  return true;
+}
+
+
+void URLRequestTestJob::Kill() {
+  stage_ = DONE;
+  URLRequestJob::Kill();
+  // Cancel any posted StartAsync/ProcessNextOperation tasks.
+  weak_factory_.InvalidateWeakPtrs();
+  // Erase-remove this job from the pending queue, as in the destructor.
+  g_pending_jobs.Get().erase(
+      std::remove(
+          g_pending_jobs.Get().begin(), g_pending_jobs.Get().end(), this),
+      g_pending_jobs.Get().end());
+}
+
+// Advances the job one stage, delivering a parked read if one is pending.
+// THIS MAY DELETE |this| (NotifyReadComplete may delete the job), so no
+// member access may follow that call.
+void URLRequestTestJob::ProcessNextOperation() {
+  switch (stage_) {
+    case WAITING:
+      // Must call AdvanceJob() prior to NotifyReadComplete() since that may
+      // delete |this|.
+      AdvanceJob();
+      stage_ = DATA_AVAILABLE;
+      // OK if ReadRawData wasn't called yet.
+      if (async_buf_) {
+        int bytes_read;
+        if (!ReadRawData(async_buf_, async_buf_size_, &bytes_read))
+          NOTREACHED() << "This should not return false in DATA_AVAILABLE.";
+        SetStatus(URLRequestStatus());  // clear the io pending flag
+        if (NextReadAsync()) {
+          // Make all future reads return io pending until the next
+          // ProcessNextOperation().
+          stage_ = WAITING;
+        }
+        NotifyReadComplete(bytes_read);
+      }
+      break;
+    case DATA_AVAILABLE:
+      AdvanceJob();
+      stage_ = ALL_DATA;  // done sending data
+      break;
+    case ALL_DATA:
+      stage_ = DONE;
+      return;
+    case DONE:
+      return;
+    default:
+      NOTREACHED() << "Invalid stage";
+      return;
+  }
+}
+
+// Base implementation: asynchronous reads complete on the next
+// ProcessNextOperation() rather than going back to WAITING (see header).
+bool URLRequestTestJob::NextReadAsync() {
+  return false;
+}
+
+void URLRequestTestJob::AdvanceJob() {
+  if (auto_advance_) {
+    // Self-drive: post the next stage transition instead of queueing for
+    // ProcessOnePendingMessage(); the weak pointer guards against deletion.
+    MessageLoop::current()->PostTask(
+        FROM_HERE, base::Bind(&URLRequestTestJob::ProcessNextOperation,
+                              weak_factory_.GetWeakPtr()));
+    return;
+  }
+  g_pending_jobs.Get().push_back(this);
+}
+
+// static
+// Pops the oldest queued job and advances it one stage. Returns false when
+// the queue is empty.
+bool URLRequestTestJob::ProcessOnePendingMessage() {
+  if (g_pending_jobs.Get().empty())
+    return false;
+
+  URLRequestTestJob* next_job(g_pending_jobs.Get().front());
+  g_pending_jobs.Get().pop_front();
+
+  DCHECK(!next_job->auto_advance());  // auto_advance jobs should not be in
+                                      // this queue; they self-post instead
+  next_job->ProcessNextOperation();
+  return true;
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_test_job.h b/src/net/url_request/url_request_test_job.h
new file mode 100644
index 0000000..0c46d1b
--- /dev/null
+++ b/src/net/url_request/url_request_test_job.h
@@ -0,0 +1,155 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_TEST_JOB_H_
+#define NET_URL_REQUEST_URL_REQUEST_TEST_JOB_H_
+
+#include <string>
+
+#include "base/memory/weak_ptr.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_job.h"
+
+namespace net {
+
+// This job type is designed to help with simple unit tests. To use, you
+// probably want to inherit from it to set up the state you want. Then install
+// it as the protocol handler for the "test" scheme.
+//
+// It will respond to three URLs, which you can retrieve using the test_url*
+// getters, which will in turn respond with the corresponding responses returned
+// by test_data*. Any other URLs that begin with "test:" will return an error,
+// which might also be useful; you can use test_url_error() to retrieve a
+// standard one.
+//
+// You can override the known URLs or the response data by overriding Start().
+//
+// Optionally, you can also construct test jobs to return the headers and data
+// provided to the constructor in response to any request url.
+//
+// When a job is created, it gets put on a queue of pending test jobs. To
+// process jobs on this queue, use ProcessOnePendingMessage, which will process
+// one step of the next job. If the job is incomplete, it will be added to the
+// end of the queue.
+//
+// Optionally, you can also construct test jobs that advance automatically
+// without having to call ProcessOnePendingMessage.
+class NET_EXPORT_PRIVATE URLRequestTestJob : public URLRequestJob {
+ public:
+  // Constructs a job to return one of the canned responses depending on the
+  // request url, with auto advance disabled.
+  URLRequestTestJob(URLRequest* request, NetworkDelegate* network_delegate);
+
+  // Constructs a job to return one of the canned responses depending on the
+  // request url, optionally with auto advance enabled.
+  URLRequestTestJob(URLRequest* request,
+                    NetworkDelegate* network_delegate,
+                    bool auto_advance);
+
+  // Constructs a job to return the given response regardless of the request
+  // url. The headers should include the HTTP status line and be formatted as
+  // expected by HttpResponseHeaders.
+  URLRequestTestJob(URLRequest* request,
+                    net::NetworkDelegate* network_delegate,
+                    const std::string& response_headers,
+                    const std::string& response_data,
+                    bool auto_advance);
+
+  // The three canned URLs this handler will respond to without having been
+  // explicitly initialized with response headers and data.
+  // FIXME(brettw): we should probably also have a redirect one
+  static GURL test_url_1();
+  static GURL test_url_2();
+  static GURL test_url_3();
+  static GURL test_url_error();
+
+  // The data that corresponds to each of the URLs above
+  static std::string test_data_1();
+  static std::string test_data_2();
+  static std::string test_data_3();
+
+  // The headers that correspond to each of the URLs above
+  static std::string test_headers();
+
+  // The headers for a redirect response
+  static std::string test_redirect_headers();
+
+  // The headers for a server error response
+  static std::string test_error_headers();
+
+  // Processes one pending message from the stack, returning true if any
+  // message was processed, or false if there are no more pending request
+  // notifications to send. This is not applicable when using auto_advance.
+  static bool ProcessOnePendingMessage();
+
+  // With auto advance enabled, the job will advance through the stages without
+  // the caller having to call ProcessOnePendingMessage. Auto advance depends
+  // on having a message loop running. The default is to not auto advance.
+  // Should not be altered after the job has started.
+  bool auto_advance() { return auto_advance_; }
+  void set_auto_advance(bool auto_advance) { auto_advance_ = auto_advance; }
+
+  // Factory method for protocol factory registration if callers don't subclass
+  static URLRequest::ProtocolFactory Factory;
+
+  // Job functions
+  virtual void Start() OVERRIDE;
+  virtual bool ReadRawData(IOBuffer* buf,
+                           int buf_size,
+                           int *bytes_read) OVERRIDE;
+  virtual void Kill() OVERRIDE;
+  virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
+  virtual void GetResponseInfo(HttpResponseInfo* info) OVERRIDE;
+  virtual int GetResponseCode() const OVERRIDE;
+  virtual bool IsRedirectResponse(GURL* location,
+                                  int* http_status_code) OVERRIDE;
+
+ protected:
+  // Override to specify whether the next read done from this job will
+  // return IO pending.  This controls whether or not the WAITING state will
+  // transition back to WAITING or to DATA_AVAILABLE after an asynchronous
+  // read is processed.
+  virtual bool NextReadAsync();
+
+  // This is what operation we are going to do next when this job is handled.
+  // When the stage is DONE, this job will not be put on the queue.
+  enum Stage { WAITING, DATA_AVAILABLE, ALL_DATA, DONE };
+
+  virtual ~URLRequestTestJob();
+
+  // Call to process the next operation, usually sending a notification, and
+  // advancing the stage if necessary. THIS MAY DELETE THE OBJECT.
+  void ProcessNextOperation();
+
+  // Call to move the job along to the next operation.
+  void AdvanceJob();
+
+  // Called via InvokeLater to cause callbacks to occur after Start() returns.
+  virtual void StartAsync();
+
+  bool auto_advance_;
+
+  Stage stage_;
+
+  // The headers the job should return, will be set in Start() if not provided
+  // in the explicit ctor.
+  scoped_refptr<HttpResponseHeaders> response_headers_;
+
+  // The data to send, will be set in Start() if not provided in the explicit
+  // ctor.
+  std::string response_data_;
+
+  // current offset within response_data_
+  int offset_;
+
+  // Holds the buffer for an asynchronous ReadRawData call
+  IOBuffer* async_buf_;
+  int async_buf_size_;
+
+  base::WeakPtrFactory<URLRequestTestJob> weak_factory_;
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_TEST_JOB_H_
diff --git a/src/net/url_request/url_request_test_util.cc b/src/net/url_request/url_request_test_util.cc
new file mode 100644
index 0000000..93c707d
--- /dev/null
+++ b/src/net/url_request/url_request_test_util.cc
@@ -0,0 +1,576 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_test_util.h"
+
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/message_loop.h"
+#include "base/threading/thread.h"
+#include "base/threading/worker_pool.h"
+#include "net/base/cert_verifier.h"
+#include "net/base/default_server_bound_cert_store.h"
+#include "net/base/host_port_pair.h"
+#include "net/base/mock_host_resolver.h"
+#include "net/base/server_bound_cert_service.h"
+#include "net/http/http_network_session.h"
+#include "net/http/http_server_properties_impl.h"
+#include "net/url_request/static_http_user_agent_settings.h"
+#include "net/url_request/url_request_job_factory_impl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace net {
+
namespace {

// Bit values used to order the NetworkDelegate events observed by
// TestNetworkDelegate. For each request id,
// |TestNetworkDelegate::next_states_| holds a bit-set of the stages that may
// legally occur next, so an out-of-order notification fails the test.
const int kStageBeforeURLRequest = 1 << 0;
const int kStageBeforeSendHeaders = 1 << 1;
const int kStageSendHeaders = 1 << 2;
const int kStageHeadersReceived = 1 << 3;
const int kStageAuthRequired = 1 << 4;
const int kStageBeforeRedirect = 1 << 5;
const int kStageResponseStarted = 1 << 6;
const int kStageCompletedSuccess = 1 << 7;
const int kStageCompletedError = 1 << 8;
const int kStageURLRequestDestroyed = 1 << 9;
const int kStageDestruction = 1 << 10;

}  // namespace
+
// Constructs a context that is immediately initialized with default (mock)
// components.
TestURLRequestContext::TestURLRequestContext()
    : initialized_(false),
      ALLOW_THIS_IN_INITIALIZER_LIST(context_storage_(this)) {
  Init();
}

// If |delay_initialization| is true the caller must configure the context
// and then call Init() manually before use.
TestURLRequestContext::TestURLRequestContext(bool delay_initialization)
    : initialized_(false),
      ALLOW_THIS_IN_INITIALIZER_LIST(context_storage_(this)) {
  if (!delay_initialization)
    Init();
}

TestURLRequestContext::~TestURLRequestContext() {
  // Destroying a context that never had Init() called on it is a test bug.
  DCHECK(initialized_);
}
+
// Fills in any component the caller did not configure (only possible with
// delayed initialization) with a mock or default implementation. May be
// called at most once.
void TestURLRequestContext::Init() {
  DCHECK(!initialized_);
  initialized_ = true;

#if !__LB_ENABLE_NATIVE_HTTP_STACK__
  // Software HTTP stack: provide mock/default network components.
  if (!host_resolver())
    context_storage_.set_host_resolver(
        scoped_ptr<HostResolver>(new MockCachingHostResolver()));
  if (!proxy_service())
    context_storage_.set_proxy_service(ProxyService::CreateDirect());
  if (!cert_verifier())
    context_storage_.set_cert_verifier(CertVerifier::CreateDefault());
  if (!ftp_transaction_factory()) {
#if !defined(DISABLE_FTP_SUPPORT)
    context_storage_.set_ftp_transaction_factory(
        new FtpNetworkLayer(host_resolver()));
#else
    context_storage_.set_ftp_transaction_factory(NULL);
#endif  // !defined(DISABLE_FTP_SUPPORT)
  }
  if (!ssl_config_service())
    context_storage_.set_ssl_config_service(new SSLConfigServiceDefaults);
  if (!http_auth_handler_factory()) {
    context_storage_.set_http_auth_handler_factory(
        HttpAuthHandlerFactory::CreateDefault(host_resolver()));
  }
  if (!http_server_properties()) {
    context_storage_.set_http_server_properties(
        new HttpServerPropertiesImpl);
  }
  if (!transport_security_state()) {
    context_storage_.set_transport_security_state(
        new TransportSecurityState());
  }
#endif
  // Session parameters are assembled from whatever components are now in
  // place (either caller-provided or the defaults set above).
  HttpNetworkSession::Params params;
  params.host_resolver = host_resolver();
  params.cert_verifier = cert_verifier();
  params.proxy_service = proxy_service();
  params.ssl_config_service = ssl_config_service();
  params.http_auth_handler_factory = http_auth_handler_factory();
  params.network_delegate = network_delegate();
  params.http_server_properties = http_server_properties();
  params.net_log = net_log();

#if !__LB_ENABLE_NATIVE_HTTP_STACK__
  if (!http_transaction_factory()) {
    // Zero-sized in-memory cache backend.
    context_storage_.set_http_transaction_factory(new HttpCache(
        new HttpNetworkSession(params),
        HttpCache::DefaultBackend::InMemory(0)));
  }
#endif
  // In-memory cookie store.
  if (!cookie_store())
    context_storage_.set_cookie_store(new CookieMonster(NULL, NULL));
  // In-memory origin bound cert service.
  if (!server_bound_cert_service()) {
    context_storage_.set_server_bound_cert_service(
        new ServerBoundCertService(
            new DefaultServerBoundCertStore(NULL),
            base::WorkerPool::GetTaskRunner(true)));
  }
  if (!http_user_agent_settings()) {
    context_storage_.set_http_user_agent_settings(
        new StaticHttpUserAgentSettings(
            "en-us,fr", "iso-8859-1,*,utf-8", EmptyString()));
  }
  if (!job_factory())
    context_storage_.set_job_factory(new URLRequestJobFactoryImpl);
}
+
// Thin convenience wrapper: forwards straight to the URLRequest constructor.
TestURLRequest::TestURLRequest(const GURL& url,
                               Delegate* delegate,
                               TestURLRequestContext* context)
    : URLRequest(url, delegate, context) {
}

TestURLRequest::~TestURLRequest() {
}
+
TestURLRequestContextGetter::TestURLRequestContextGetter(
    const scoped_refptr<base::SingleThreadTaskRunner>& network_task_runner)
    : network_task_runner_(network_task_runner) {
  DCHECK(network_task_runner_);
}

// Variant that takes ownership of a pre-configured context instead of
// lazily creating a default one.
TestURLRequestContextGetter::TestURLRequestContextGetter(
    const scoped_refptr<base::SingleThreadTaskRunner>& network_task_runner,
    scoped_ptr<TestURLRequestContext> context)
    : network_task_runner_(network_task_runner), context_(context.Pass()) {
  DCHECK(network_task_runner_);
}

TestURLRequestContextGetter::~TestURLRequestContextGetter() {}

// Lazily creates a default TestURLRequestContext on first use.
TestURLRequestContext* TestURLRequestContextGetter::GetURLRequestContext() {
  if (!context_.get())
    context_.reset(new TestURLRequestContext);
  return context_.get();
}

scoped_refptr<base::SingleThreadTaskRunner>
TestURLRequestContextGetter::GetNetworkTaskRunner() const {
  return network_task_runner_;
}
+
// All cancellation/quit options default to off except |quit_on_complete_|,
// so by default a request runs to completion and then quits the loop.
TestDelegate::TestDelegate()
    : cancel_in_rr_(false),
      cancel_in_rs_(false),
      cancel_in_rd_(false),
      cancel_in_rd_pending_(false),
      quit_on_complete_(true),
      quit_on_redirect_(false),
      allow_certificate_errors_(false),
      response_started_count_(0),
      received_bytes_count_(0),
      received_redirect_count_(0),
      received_data_before_response_(false),
      request_failed_(false),
      have_certificate_errors_(false),
      certificate_errors_are_fatal_(false),
      auth_required_(false),
      buf_(new IOBuffer(kBufferSize)) {
}

TestDelegate::~TestDelegate() {}
+
void TestDelegate::OnReceivedRedirect(URLRequest* request,
                                      const GURL& new_url,
                                      bool* defer_redirect) {
  EXPECT_TRUE(request->is_redirecting());
  received_redirect_count_++;
  if (quit_on_redirect_) {
    // Leave the redirect pending so the test can inspect or resume it after
    // the message loop returns.
    *defer_redirect = true;
    MessageLoop::current()->PostTask(FROM_HERE, MessageLoop::QuitClosure());
  } else if (cancel_in_rr_) {
    request->Cancel();
  }
}
+
+void TestDelegate::OnAuthRequired(URLRequest* request,
+                                  AuthChallengeInfo* auth_info) {
+  auth_required_ = true;
+  if (!credentials_.Empty()) {
+    request->SetAuth(credentials_);
+  } else {
+    request->CancelAuth();
+  }
+}
+
void TestDelegate::OnSSLCertificateError(URLRequest* request,
                                         const SSLInfo& ssl_info,
                                         bool fatal) {
  // The caller can control whether it needs all SSL requests to go through,
  // independent of any possible errors, or whether it wants SSL errors to
  // cancel the request. |fatal| is recorded only as a post-condition for
  // tests; the continue/cancel decision uses |allow_certificate_errors_|.
  have_certificate_errors_ = true;
  certificate_errors_are_fatal_ = fatal;
  if (allow_certificate_errors_)
    request->ContinueDespiteLastError();
  else
    request->Cancel();
}
+
void TestDelegate::OnResponseStarted(URLRequest* request) {
  // It doesn't make sense for the request to have IO pending at this point.
  DCHECK(!request->status().is_io_pending());
  EXPECT_FALSE(request->is_redirecting());

  response_started_count_++;
  if (cancel_in_rs_) {
    request->Cancel();
    // Cancel() is sync, so we can complete here rather than wait for a
    // notification that will never come.
    OnResponseCompleted(request);
  } else if (!request->status().is_success()) {
    DCHECK(request->status().status() == URLRequestStatus::FAILED ||
           request->status().status() == URLRequestStatus::CANCELED);
    request_failed_ = true;
    OnResponseCompleted(request);
  } else {
    // Initiate the first read. A synchronous result is handled inline;
    // anything that is neither data nor IO-pending means we are done.
    int bytes_read = 0;
    if (request->Read(buf_, kBufferSize, &bytes_read))
      OnReadCompleted(request, bytes_read);
    else if (!request->status().is_io_pending())
      OnResponseCompleted(request);
  }
}
+
void TestDelegate::OnReadCompleted(URLRequest* request, int bytes_read) {
  // It doesn't make sense for the request to have IO pending at this point.
  DCHECK(!request->status().is_io_pending());

  // Data arriving before OnResponseStarted() would be a protocol violation;
  // tests assert on this flag.
  if (response_started_count_ == 0)
    received_data_before_response_ = true;

  // Note: cancellation happens before consuming |bytes_read|, so the data
  // from this read is still recorded below.
  if (cancel_in_rd_)
    request->Cancel();

  if (bytes_read >= 0) {
    // There is data to read.
    received_bytes_count_ += bytes_read;

    // consume the data
    data_received_.append(buf_->data(), bytes_read);
  }

  // If it was not end of stream, request to read more. Synchronous reads are
  // drained in a loop; an async read exits via the is_io_pending() path.
  if (request->status().is_success() && bytes_read > 0) {
    bytes_read = 0;
    while (request->Read(buf_, kBufferSize, &bytes_read)) {
      if (bytes_read > 0) {
        data_received_.append(buf_->data(), bytes_read);
        received_bytes_count_ += bytes_read;
      } else {
        break;
      }
    }
  }
  if (!request->status().is_io_pending())
    OnResponseCompleted(request);
  else if (cancel_in_rd_pending_)
    // Cancel while a read is outstanding, to exercise that code path.
    request->Cancel();
}
+
// Called once the request reaches a terminal state; optionally quits the
// message loop so the test body can resume.
void TestDelegate::OnResponseCompleted(URLRequest* request) {
  if (quit_on_complete_)
    MessageLoop::current()->PostTask(FROM_HERE, MessageLoop::QuitClosure());
}
+
TestNetworkDelegate::TestNetworkDelegate()
    : last_error_(0),
      error_count_(0),
      created_requests_(0),
      destroyed_requests_(0),
      completed_requests_(0),
      cookie_options_bit_mask_(0),
      blocked_get_cookies_count_(0),
      blocked_set_cookie_count_(0),
      set_cookie_count_(0) {
}

TestNetworkDelegate::~TestNetworkDelegate() {
  // Every tracked request must have reached the destruction stage; anything
  // else means a URLRequest outlived its delegate or skipped a callback.
  for (std::map<int, int>::iterator i = next_states_.begin();
       i != next_states_.end(); ++i) {
    event_order_[i->first] += "~TestNetworkDelegate\n";
    EXPECT_TRUE(i->second & kStageDestruction) << event_order_[i->first];
  }
}
+
+void TestNetworkDelegate::InitRequestStatesIfNew(int request_id) {
+  if (next_states_.find(request_id) == next_states_.end()) {
+    next_states_[request_id] = kStageBeforeURLRequest;
+    event_order_[request_id] = "";
+  }
+}
+
+int TestNetworkDelegate::OnBeforeURLRequest(
+    URLRequest* request,
+    const CompletionCallback& callback,
+    GURL* new_url ) {
+  int req_id = request->identifier();
+  InitRequestStatesIfNew(req_id);
+  event_order_[req_id] += "OnBeforeURLRequest\n";
+  EXPECT_TRUE(next_states_[req_id] & kStageBeforeURLRequest) <<
+      event_order_[req_id];
+  next_states_[req_id] =
+      kStageBeforeSendHeaders |
+      kStageResponseStarted |  // data: URLs do not trigger sending headers
+      kStageBeforeRedirect |  // a delegate can trigger a redirection
+      kStageCompletedError |  // request canceled by delegate
+      kStageAuthRequired;  // Auth can come next for FTP requests
+  created_requests_++;
+  return OK;
+}
+
int TestNetworkDelegate::OnBeforeSendHeaders(
    URLRequest* request,
    const CompletionCallback& callback,
    HttpRequestHeaders* headers) {
  int req_id = request->identifier();
  InitRequestStatesIfNew(req_id);
  event_order_[req_id] += "OnBeforeSendHeaders\n";
  EXPECT_TRUE(next_states_[req_id] & kStageBeforeSendHeaders) <<
      event_order_[req_id];
  next_states_[req_id] =
      kStageSendHeaders |
      kStageCompletedError;  // request canceled by delegate

  return OK;
}
+
void TestNetworkDelegate::OnSendHeaders(
    URLRequest* request,
    const HttpRequestHeaders& headers) {
  int req_id = request->identifier();
  InitRequestStatesIfNew(req_id);
  event_order_[req_id] += "OnSendHeaders\n";
  EXPECT_TRUE(next_states_[req_id] & kStageSendHeaders) <<
      event_order_[req_id];
  next_states_[req_id] =
      kStageHeadersReceived |
      kStageCompletedError;  // e.g. connection failure before a response
}
+
+int TestNetworkDelegate::OnHeadersReceived(
+    URLRequest* request,
+    const CompletionCallback& callback,
+    const HttpResponseHeaders* original_response_headers,
+    scoped_refptr<HttpResponseHeaders>* override_response_headers) {
+  int req_id = request->identifier();
+  event_order_[req_id] += "OnHeadersReceived\n";
+  InitRequestStatesIfNew(req_id);
+  EXPECT_TRUE(next_states_[req_id] & kStageHeadersReceived) <<
+      event_order_[req_id];
+  next_states_[req_id] =
+      kStageBeforeRedirect |
+      kStageResponseStarted |
+      kStageAuthRequired |
+      kStageCompletedError;  // e.g. proxy resolution problem
+
+  // Basic authentication sends a second request from the URLRequestHttpJob
+  // layer before the URLRequest reports that a response has started.
+  next_states_[req_id] |= kStageBeforeSendHeaders;
+
+  return OK;
+}
+
void TestNetworkDelegate::OnBeforeRedirect(URLRequest* request,
                                           const GURL& new_location) {
  int req_id = request->identifier();
  InitRequestStatesIfNew(req_id);
  event_order_[req_id] += "OnBeforeRedirect\n";
  EXPECT_TRUE(next_states_[req_id] & kStageBeforeRedirect) <<
      event_order_[req_id];
  next_states_[req_id] =
      kStageBeforeURLRequest |  // HTTP redirects trigger this.
      kStageBeforeSendHeaders |  // Redirects from the network delegate do not
                                 // trigger onBeforeURLRequest.
      kStageCompletedError;

  // A redirect can lead to a file or a data URL. In this case, we do not send
  // headers.
  next_states_[req_id] |= kStageResponseStarted;
}
+
void TestNetworkDelegate::OnResponseStarted(URLRequest* request) {
  int req_id = request->identifier();
  InitRequestStatesIfNew(req_id);
  event_order_[req_id] += "OnResponseStarted\n";
  EXPECT_TRUE(next_states_[req_id] & kStageResponseStarted) <<
      event_order_[req_id];
  next_states_[req_id] = kStageCompletedSuccess | kStageCompletedError;
  // Track failures reported at response start (e.g. connection errors).
  if (request->status().status() == URLRequestStatus::FAILED) {
    error_count_++;
    last_error_ = request->status().error();
  }
}
+
// Intentionally a no-op; byte counting is not part of the ordering checks.
void TestNetworkDelegate::OnRawBytesRead(const URLRequest& request,
                                         int bytes_read) {
}
+
void TestNetworkDelegate::OnCompleted(URLRequest* request, bool started) {
  int req_id = request->identifier();
  InitRequestStatesIfNew(req_id);
  event_order_[req_id] += "OnCompleted\n";
  // Expect "Success -> (next_states_ & kStageCompletedSuccess)"
  // is logically identical to
  // Expect "!(Success) || (next_states_ & kStageCompletedSuccess)"
  EXPECT_TRUE(!request->status().is_success() ||
              (next_states_[req_id] & kStageCompletedSuccess)) <<
      event_order_[req_id];
  EXPECT_TRUE(request->status().is_success() ||
              (next_states_[req_id] & kStageCompletedError)) <<
      event_order_[req_id];
  // After completion, only destruction of the URLRequest may follow.
  next_states_[req_id] = kStageURLRequestDestroyed;
  completed_requests_++;
  if (request->status().status() == URLRequestStatus::FAILED) {
    error_count_++;
    last_error_ = request->status().error();
  }
}
+
void TestNetworkDelegate::OnURLRequestDestroyed(URLRequest* request) {
  int req_id = request->identifier();
  InitRequestStatesIfNew(req_id);
  event_order_[req_id] += "OnURLRequestDestroyed\n";
  EXPECT_TRUE(next_states_[req_id] & kStageURLRequestDestroyed) <<
      event_order_[req_id];
  // Terminal stage; verified for all requests in ~TestNetworkDelegate().
  next_states_[req_id] = kStageDestruction;
  destroyed_requests_++;
}
+
// Intentionally a no-op; PAC errors are not part of the ordering checks.
void TestNetworkDelegate::OnPACScriptError(int line_number,
                                           const string16& error) {
}
+
// Logs and validates the auth event, then declines to handle the challenge
// so the URLRequest::Delegate gets it instead.
NetworkDelegate::AuthRequiredResponse TestNetworkDelegate::OnAuthRequired(
    URLRequest* request,
    const AuthChallengeInfo& auth_info,
    const AuthCallback& callback,
    AuthCredentials* credentials) {
  int req_id = request->identifier();
  InitRequestStatesIfNew(req_id);
  event_order_[req_id] += "OnAuthRequired\n";
  EXPECT_TRUE(next_states_[req_id] & kStageAuthRequired) <<
      event_order_[req_id];
  next_states_[req_id] = kStageBeforeSendHeaders |
      kStageHeadersReceived |  // Request canceled by delegate simulates empty
                               // response.
      kStageResponseStarted |  // data: URLs do not trigger sending headers
      kStageBeforeRedirect |   // a delegate can trigger a redirection
      kStageCompletedError;    // request canceled before callback
  return NetworkDelegate::AUTH_REQUIRED_RESPONSE_NO_ACTION;
}
+
+bool TestNetworkDelegate::OnCanGetCookies(const URLRequest& request,
+                                          const CookieList& cookie_list) {
+  bool allow = true;
+  if (cookie_options_bit_mask_ & NO_GET_COOKIES)
+    allow = false;
+
+  if (!allow) {
+    blocked_get_cookies_count_++;
+  }
+
+  return allow;
+}
+
+bool TestNetworkDelegate::OnCanSetCookie(const URLRequest& request,
+                                         const std::string& cookie_line,
+                                         CookieOptions* options) {
+  bool allow = true;
+  if (cookie_options_bit_mask_ & NO_SET_COOKIE)
+    allow = false;
+
+  if (!allow) {
+    blocked_set_cookie_count_++;
+  } else {
+    set_cookie_count_++;
+  }
+
+  return allow;
+}
+
// The remaining overrides are permissive no-ops: file access and throttling
// are always allowed, socket stream connects always proceed, and wait-state
// changes are ignored.
bool TestNetworkDelegate::OnCanAccessFile(const URLRequest& request,
                                          const FilePath& path) const {
  return true;
}

bool TestNetworkDelegate::OnCanThrottleRequest(
    const URLRequest& request) const {
  return true;
}

int TestNetworkDelegate::OnBeforeSocketStreamConnect(
    SocketStream* socket,
    const CompletionCallback& callback) {
  return OK;
}

void TestNetworkDelegate::OnRequestWaitStateChange(
    const URLRequest& request,
    RequestWaitState state) {
}
+
// static
std::string ScopedCustomUrlRequestTestHttpHost::value_("127.0.0.1");

// Saves the current host, then installs |new_value| for the scope's
// lifetime. Instances must be strictly nested (checked in the destructor).
ScopedCustomUrlRequestTestHttpHost::ScopedCustomUrlRequestTestHttpHost(
  const std::string& new_value)
    : old_value_(value_),
      new_value_(new_value) {
  value_ = new_value_;
}

ScopedCustomUrlRequestTestHttpHost::~ScopedCustomUrlRequestTestHttpHost() {
  // If this fires, scopes overlapped without nesting.
  DCHECK_EQ(value_, new_value_);
  value_ = old_value_;
}

// static
const std::string& ScopedCustomUrlRequestTestHttpHost::value() {
  return value_;
}
+
// Starts with no job stashed; see set_main_intercept_job().
TestJobInterceptor::TestJobInterceptor() : main_intercept_job_(NULL) {
}
+
+URLRequestJob* TestJobInterceptor::MaybeIntercept(
+      URLRequest* request,
+      NetworkDelegate* network_delegate) const {
+  URLRequestJob* job = main_intercept_job_;
+  main_intercept_job_ = NULL;
+  return job;
+}
+
// Redirect and response interception are never exercised by this test
// helper; only the main request is intercepted.
URLRequestJob* TestJobInterceptor::MaybeInterceptRedirect(
      const GURL& location,
      URLRequest* request,
      NetworkDelegate* network_delegate) const {
  return NULL;
}

URLRequestJob* TestJobInterceptor::MaybeInterceptResponse(
      URLRequest* request,
      NetworkDelegate* network_delegate) const {
  return NULL;
}

// Stashes |job| to be returned (once) by the next MaybeIntercept() call.
// Ownership passes to the URLRequest machinery when the job is handed out.
void TestJobInterceptor::set_main_intercept_job(URLRequestJob* job) {
  main_intercept_job_ = job;
}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_test_util.h b/src/net/url_request/url_request_test_util.h
new file mode 100644
index 0000000..323efb2
--- /dev/null
+++ b/src/net/url_request/url_request_test_util.h
@@ -0,0 +1,326 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_TEST_UTIL_H_
+#define NET_URL_REQUEST_URL_REQUEST_TEST_UTIL_H_
+
+#include <stdlib.h>
+
+#include <map>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop_proxy.h"
+#include "base/path_service.h"
+#include "base/process_util.h"
+#include "base/string16.h"
+#include "base/string_util.h"
+#include "base/time.h"
+#include "base/utf_string_conversions.h"
+#include "googleurl/src/url_util.h"
+#include "net/base/cert_verifier.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/base/network_delegate.h"
+#include "net/base/ssl_config_service_defaults.h"
+#include "net/cookies/cookie_monster.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/ftp/ftp_network_layer.h"
+#include "net/http/http_auth_handler_factory.h"
+#include "net/http/http_cache.h"
+#include "net/http/http_network_layer.h"
+#include "net/proxy/proxy_service.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_context.h"
+#include "net/url_request/url_request_context_getter.h"
+#include "net/url_request/url_request_context_storage.h"
+#include "net/url_request/url_request_job_factory.h"
+
+using base::TimeDelta;
+
+namespace net {
+
+//-----------------------------------------------------------------------------
+
// A URLRequestContext for tests that fills in any unset components with
// mock/default implementations.
class TestURLRequestContext : public URLRequestContext {
 public:
  TestURLRequestContext();
  // Like TestURLRequestContext() but does not call Init() in case
  // |delay_initialization| is true. This allows modifying the
  // URLRequestContext before it is constructed completely. If
  // |delay_initialization| is true, Init() needs to be called manually.
  explicit TestURLRequestContext(bool delay_initialization);
  virtual ~TestURLRequestContext();

  // Populates any unset components; must be called exactly once before use.
  void Init();

 private:
  // True once Init() has run; DCHECKed in the destructor.
  bool initialized_;

 protected:
  URLRequestContextStorage context_storage_;
};
+
+//-----------------------------------------------------------------------------
+
+// Used to return a dummy context, which lives on the message loop
+// given in the constructor.
// Used to return a dummy context, which lives on the message loop
// given in the constructor.
class TestURLRequestContextGetter : public URLRequestContextGetter {
 public:
  // |network_task_runner| must not be NULL.
  explicit TestURLRequestContextGetter(
      const scoped_refptr<base::SingleThreadTaskRunner>& network_task_runner);

  // Use to pass a pre-initialized |context|; takes ownership.
  TestURLRequestContextGetter(
      const scoped_refptr<base::SingleThreadTaskRunner>& network_task_runner,
      scoped_ptr<TestURLRequestContext> context);

  // URLRequestContextGetter implementation. GetURLRequestContext() lazily
  // creates a default TestURLRequestContext if none was supplied.
  virtual TestURLRequestContext* GetURLRequestContext() OVERRIDE;
  virtual scoped_refptr<base::SingleThreadTaskRunner>
      GetNetworkTaskRunner() const OVERRIDE;

 protected:
  virtual ~TestURLRequestContextGetter();

 private:
  const scoped_refptr<base::SingleThreadTaskRunner> network_task_runner_;
  scoped_ptr<TestURLRequestContext> context_;
};
+
+//-----------------------------------------------------------------------------
+
// Thin URLRequest subclass typed against TestURLRequestContext for
// convenience in tests.
class TestURLRequest : public URLRequest {
 public:
  TestURLRequest(
      const GURL& url, Delegate* delegate, TestURLRequestContext* context);
  virtual ~TestURLRequest();
};
+
+//-----------------------------------------------------------------------------
+
// A URLRequest::Delegate for tests: drives reads to completion, records
// everything it observes (bytes, redirects, errors, auth challenges), and
// can be configured to cancel or quit the message loop at various points.
class TestDelegate : public URLRequest::Delegate {
 public:
  TestDelegate();
  virtual ~TestDelegate();

  // Cancel the request from within the named callback.
  void set_cancel_in_received_redirect(bool val) { cancel_in_rr_ = val; }
  void set_cancel_in_response_started(bool val) { cancel_in_rs_ = val; }
  void set_cancel_in_received_data(bool val) { cancel_in_rd_ = val; }
  // Cancel while an asynchronous read is still pending.
  void set_cancel_in_received_data_pending(bool val) {
    cancel_in_rd_pending_ = val;
  }
  // Post a MessageLoop quit task when the request completes (default: true).
  void set_quit_on_complete(bool val) { quit_on_complete_ = val; }
  // Defer redirects and quit the message loop instead of following them.
  void set_quit_on_redirect(bool val) { quit_on_redirect_ = val; }
  // Continue (rather than cancel) when an SSL certificate error occurs.
  void set_allow_certificate_errors(bool val) {
    allow_certificate_errors_ = val;
  }
  // Credentials used to answer auth challenges; if empty, auth is canceled.
  void set_credentials(const AuthCredentials& credentials) {
    credentials_ = credentials;
  }

  // query state
  const std::string& data_received() const { return data_received_; }
  int bytes_received() const { return static_cast<int>(data_received_.size()); }
  int response_started_count() const { return response_started_count_; }
  int received_redirect_count() const { return received_redirect_count_; }
  bool received_data_before_response() const {
    return received_data_before_response_;
  }
  bool request_failed() const { return request_failed_; }
  bool have_certificate_errors() const { return have_certificate_errors_; }
  bool certificate_errors_are_fatal() const {
    return certificate_errors_are_fatal_;
  }
  bool auth_required_called() const { return auth_required_; }

  // URLRequest::Delegate:
  virtual void OnReceivedRedirect(URLRequest* request, const GURL& new_url,
                                  bool* defer_redirect) OVERRIDE;
  virtual void OnAuthRequired(URLRequest* request,
                              AuthChallengeInfo* auth_info) OVERRIDE;
  // NOTE: |fatal| causes |certificate_errors_are_fatal_| to be set to true.
  // (Unit tests use this as a post-condition.) But for policy, this method
  // consults |allow_certificate_errors_|.
  virtual void OnSSLCertificateError(URLRequest* request,
                                     const SSLInfo& ssl_info,
                                     bool fatal) OVERRIDE;
  virtual void OnResponseStarted(URLRequest* request) OVERRIDE;
  virtual void OnReadCompleted(URLRequest* request,
                               int bytes_read) OVERRIDE;

 private:
  // Size of the read buffer used for each Read() call.
  static const int kBufferSize = 4096;

  // Invoked once the request reaches a terminal state.
  virtual void OnResponseCompleted(URLRequest* request);

  // options for controlling behavior
  bool cancel_in_rr_;
  bool cancel_in_rs_;
  bool cancel_in_rd_;
  bool cancel_in_rd_pending_;
  bool quit_on_complete_;
  bool quit_on_redirect_;
  bool allow_certificate_errors_;
  AuthCredentials credentials_;

  // tracks status of callbacks
  int response_started_count_;
  int received_bytes_count_;
  int received_redirect_count_;
  bool received_data_before_response_;
  bool request_failed_;
  bool have_certificate_errors_;
  bool certificate_errors_are_fatal_;
  bool auth_required_;
  std::string data_received_;

  // our read buffer
  scoped_refptr<IOBuffer> buf_;
};
+
+//-----------------------------------------------------------------------------
+
// A NetworkDelegate for tests that verifies its callbacks fire in a legal
// order for each request (via a per-request bit-set of permitted next
// stages) and counts requests, errors, and cookie decisions.
class TestNetworkDelegate : public NetworkDelegate {
 public:
  // Bit flags for set_cookie_options() controlling cookie access.
  enum Options {
    NO_GET_COOKIES = 1 << 0,
    NO_SET_COOKIE  = 1 << 1,
  };

  TestNetworkDelegate();
  virtual ~TestNetworkDelegate();

  // |o| is a bitwise OR of Options values.
  void set_cookie_options(int o) {cookie_options_bit_mask_ = o; }

  // Accessors for the counters accumulated while requests ran.
  int last_error() const { return last_error_; }
  int error_count() const { return error_count_; }
  int created_requests() const { return created_requests_; }
  int destroyed_requests() const { return destroyed_requests_; }
  int completed_requests() const { return completed_requests_; }
  int blocked_get_cookies_count() const { return blocked_get_cookies_count_; }
  int blocked_set_cookie_count() const { return blocked_set_cookie_count_; }
  int set_cookie_count() const { return set_cookie_count_; }

 protected:
  // NetworkDelegate:
  virtual int OnBeforeURLRequest(URLRequest* request,
                                 const CompletionCallback& callback,
                                 GURL* new_url) OVERRIDE;
  virtual int OnBeforeSendHeaders(URLRequest* request,
                                  const CompletionCallback& callback,
                                  HttpRequestHeaders* headers) OVERRIDE;
  virtual void OnSendHeaders(URLRequest* request,
                             const HttpRequestHeaders& headers) OVERRIDE;
  virtual int OnHeadersReceived(
      URLRequest* request,
      const CompletionCallback& callback,
      const HttpResponseHeaders* original_response_headers,
      scoped_refptr<HttpResponseHeaders>* override_response_headers) OVERRIDE;
  virtual void OnBeforeRedirect(URLRequest* request,
                                const GURL& new_location) OVERRIDE;
  virtual void OnResponseStarted(URLRequest* request) OVERRIDE;
  virtual void OnRawBytesRead(const URLRequest& request,
                              int bytes_read) OVERRIDE;
  virtual void OnCompleted(URLRequest* request, bool started) OVERRIDE;
  virtual void OnURLRequestDestroyed(URLRequest* request) OVERRIDE;
  virtual void OnPACScriptError(int line_number,
                                const string16& error) OVERRIDE;
  virtual NetworkDelegate::AuthRequiredResponse OnAuthRequired(
      URLRequest* request,
      const AuthChallengeInfo& auth_info,
      const AuthCallback& callback,
      AuthCredentials* credentials) OVERRIDE;
  virtual bool OnCanGetCookies(const URLRequest& request,
                               const CookieList& cookie_list) OVERRIDE;
  virtual bool OnCanSetCookie(const URLRequest& request,
                              const std::string& cookie_line,
                              CookieOptions* options) OVERRIDE;
  virtual bool OnCanAccessFile(const URLRequest& request,
                               const FilePath& path) const OVERRIDE;
  virtual bool OnCanThrottleRequest(
      const URLRequest& request) const OVERRIDE;
  virtual int OnBeforeSocketStreamConnect(
      SocketStream* stream,
      const CompletionCallback& callback) OVERRIDE;
  virtual void OnRequestWaitStateChange(const URLRequest& request,
                                        RequestWaitState state) OVERRIDE;

  // Sets up tracking state the first time |request_id| is seen.
  void InitRequestStatesIfNew(int request_id);

  int last_error_;
  int error_count_;
  int created_requests_;
  int destroyed_requests_;
  int completed_requests_;
  int cookie_options_bit_mask_;
  int blocked_get_cookies_count_;
  int blocked_set_cookie_count_;
  int set_cookie_count_;

  // NetworkDelegate callbacks happen in a particular order (e.g.
  // OnBeforeURLRequest is always called before OnBeforeSendHeaders).
  // This bit-set indicates for each request id (key) what events may be sent
  // next.
  std::map<int, int> next_states_;

  // A log that records for each request id (key) the order in which On...
  // functions were called.
  std::map<int, std::string> event_order_;
};
+
+// Overrides the host used by the LocalHttpTestServer in
+// url_request_unittest.cc . This is used by the chrome_frame_net_tests due to
+// a mysterious bug when tests execute over the loopback adapter. See
+// http://crbug.com/114369 .
// Overrides the host used by the LocalHttpTestServer in
// url_request_unittest.cc . This is used by the chrome_frame_net_tests due
// to a mysterious bug when tests execute over the loopback adapter. See
// http://crbug.com/114369 .
class ScopedCustomUrlRequestTestHttpHost {
 public:
  // Sets the host name to be used. The previous hostname will be stored and
  // restored upon destruction. Note that if the lifetimes of two or more
  // instances of this class overlap, they must be strictly nested.
  explicit ScopedCustomUrlRequestTestHttpHost(const std::string& new_value);

  ~ScopedCustomUrlRequestTestHttpHost();

  // Returns the current value to be used by HTTP tests in
  // url_request_unittest.cc .
  static const std::string& value();

 private:
  // Process-wide current host value; defined in the .cc file.
  static std::string value_;
  const std::string old_value_;
  const std::string new_value_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCustomUrlRequestTestHttpHost);
};
+
+//-----------------------------------------------------------------------------
+
+// A simple Interceptor that returns a pre-built URLRequestJob only once.
+class TestJobInterceptor : public URLRequestJobFactory::Interceptor {
+ public:
+  TestJobInterceptor();
+
+  virtual URLRequestJob* MaybeIntercept(
+      URLRequest* request,
+      NetworkDelegate* network_delegate) const OVERRIDE;
+  virtual URLRequestJob* MaybeInterceptRedirect(
+      const GURL& location,
+      URLRequest* request,
+      NetworkDelegate* network_delegate) const OVERRIDE;
+  virtual URLRequestJob* MaybeInterceptResponse(
+      URLRequest* request,
+      NetworkDelegate* network_delegate) const OVERRIDE;
+  void set_main_intercept_job(URLRequestJob* job);
+
+ private:
+  mutable URLRequestJob* main_intercept_job_;
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_TEST_UTIL_H_
diff --git a/src/net/url_request/url_request_throttler_entry.cc b/src/net/url_request/url_request_throttler_entry.cc
new file mode 100644
index 0000000..0899ed4
--- /dev/null
+++ b/src/net/url_request/url_request_throttler_entry.cc
@@ -0,0 +1,321 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_throttler_entry.h"
+
+#include <cmath>
+
+#include "base/logging.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram.h"
+#include "base/rand_util.h"
+#include "base/string_number_conversions.h"
+#include "base/values.h"
+#include "net/base/load_flags.h"
+#include "net/base/net_log.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_context.h"
+#include "net/url_request/url_request_throttler_header_interface.h"
+#include "net/url_request/url_request_throttler_manager.h"
+
+namespace net {
+
+const int URLRequestThrottlerEntry::kDefaultSlidingWindowPeriodMs = 2000;
+const int URLRequestThrottlerEntry::kDefaultMaxSendThreshold = 20;
+
+// This set of back-off parameters will (at maximum values, i.e. without
+// the reduction caused by jitter) add 0-41% (distributed uniformly
+// in that range) to the "perceived downtime" of the remote server, once
+// exponential back-off kicks in and is throttling requests for more than
+// about a second at a time.  Once the maximum back-off is reached, the added
+// perceived downtime decreases rapidly, percentage-wise.
+//
+// Another way to put it is that the maximum additional perceived downtime
+// with these numbers is a couple of seconds shy of 15 minutes, and such
+// a delay would not occur until the remote server has been actually
+// unavailable at the end of each back-off period for a total of about
+// 48 minutes.
+//
+// Ignoring the first couple of errors is just a conservative measure to
+// avoid false positives.  It should help avoid back-off from kicking in e.g.
+// on flaky connections.
+const int URLRequestThrottlerEntry::kDefaultNumErrorsToIgnore = 2;
+const int URLRequestThrottlerEntry::kDefaultInitialDelayMs = 700;
+const double URLRequestThrottlerEntry::kDefaultMultiplyFactor = 1.4;
+const double URLRequestThrottlerEntry::kDefaultJitterFactor = 0.4;
+const int URLRequestThrottlerEntry::kDefaultMaximumBackoffMs = 15 * 60 * 1000;
+const int URLRequestThrottlerEntry::kDefaultEntryLifetimeMs = 2 * 60 * 1000;
+const char URLRequestThrottlerEntry::kExponentialThrottlingHeader[] =
+    "X-Chrome-Exponential-Throttling";
+const char URLRequestThrottlerEntry::kExponentialThrottlingDisableValue[] =
+    "disable";
+
+// Returns NetLog parameters when a request is rejected by throttling.
+Value* NetLogRejectedRequestCallback(const std::string* url_id,
+                                     int num_failures,
+                                     int release_after_ms,
+                                     NetLog::LogLevel /* log_level */) {
+  DictionaryValue* dict = new DictionaryValue();
+  dict->SetString("url", *url_id);
+  dict->SetInteger("num_failures", num_failures);
+  dict->SetInteger("release_after_ms", release_after_ms);
+  return dict;
+}
+
+URLRequestThrottlerEntry::URLRequestThrottlerEntry(
+    URLRequestThrottlerManager* manager,
+    const std::string& url_id)
+    : sliding_window_period_(
+          base::TimeDelta::FromMilliseconds(kDefaultSlidingWindowPeriodMs)),
+      max_send_threshold_(kDefaultMaxSendThreshold),
+      is_backoff_disabled_(false),
+      backoff_entry_(&backoff_policy_),
+      manager_(manager),
+      url_id_(url_id),
+      net_log_(BoundNetLog::Make(
+          manager->net_log(), NetLog::SOURCE_EXPONENTIAL_BACKOFF_THROTTLING)) {
+  DCHECK(manager_);
+  Initialize();
+}
+
+URLRequestThrottlerEntry::URLRequestThrottlerEntry(
+    URLRequestThrottlerManager* manager,
+    const std::string& url_id,
+    int sliding_window_period_ms,
+    int max_send_threshold,
+    int initial_backoff_ms,
+    double multiply_factor,
+    double jitter_factor,
+    int maximum_backoff_ms)
+    : sliding_window_period_(
+          base::TimeDelta::FromMilliseconds(sliding_window_period_ms)),
+      max_send_threshold_(max_send_threshold),
+      is_backoff_disabled_(false),
+      backoff_entry_(&backoff_policy_),
+      manager_(manager),
+      url_id_(url_id) {
+  DCHECK_GT(sliding_window_period_ms, 0);
+  DCHECK_GT(max_send_threshold_, 0);
+  DCHECK_GE(initial_backoff_ms, 0);
+  DCHECK_GT(multiply_factor, 0);
+  DCHECK_GE(jitter_factor, 0.0);
+  DCHECK_LT(jitter_factor, 1.0);
+  DCHECK_GE(maximum_backoff_ms, 0);
+  DCHECK(manager_);
+
+  Initialize();
+  backoff_policy_.initial_delay_ms = initial_backoff_ms;
+  backoff_policy_.multiply_factor = multiply_factor;
+  backoff_policy_.jitter_factor = jitter_factor;
+  backoff_policy_.maximum_backoff_ms = maximum_backoff_ms;
+  backoff_policy_.entry_lifetime_ms = -1;
+  backoff_policy_.num_errors_to_ignore = 0;
+  backoff_policy_.always_use_initial_delay = false;
+}
+
+bool URLRequestThrottlerEntry::IsEntryOutdated() const {
+  // This function is called by the URLRequestThrottlerManager to determine
+  // whether entries should be discarded from its url_entries_ map.  We
+  // want to ensure that it does not remove entries from the map while there
+  // are clients (objects other than the manager) holding references to
+  // the entry, otherwise separate clients could end up holding separate
+  // entries for a request to the same URL, which is undesirable.  Therefore,
+  // if an entry has more than one reference (the map will always hold one),
+  // it should not be considered outdated.
+  //
+  // We considered whether to make URLRequestThrottlerEntry objects
+  // non-refcounted, but since any means of knowing whether they are
+  // currently in use by others than the manager would be more or less
+  // equivalent to a refcount, we kept them refcounted.
+  if (!HasOneRef())
+    return false;
+
+  // If there are send events in the sliding window period, we still need this
+  // entry.
+  if (!send_log_.empty() &&
+      send_log_.back() + sliding_window_period_ > ImplGetTimeNow()) {
+    return false;
+  }
+
+  return GetBackoffEntry()->CanDiscard();
+}
+
+void URLRequestThrottlerEntry::DisableBackoffThrottling() {
+  is_backoff_disabled_ = true;
+}
+
+void URLRequestThrottlerEntry::DetachManager() {
+  manager_ = NULL;
+}
+
+bool URLRequestThrottlerEntry::ShouldRejectRequest(
+    const URLRequest& request) const {
+  bool reject_request = false;
+  if (!is_backoff_disabled_ && !ExplicitUserRequest(request.load_flags()) &&
+      (!request.context()->network_delegate() ||
+       request.context()->network_delegate()->CanThrottleRequest(request)) &&
+      GetBackoffEntry()->ShouldRejectRequest()) {
+    int num_failures = GetBackoffEntry()->failure_count();
+    int release_after_ms =
+        GetBackoffEntry()->GetTimeUntilRelease().InMilliseconds();
+
+    net_log_.AddEvent(
+        NetLog::TYPE_THROTTLING_REJECTED_REQUEST,
+        base::Bind(&NetLogRejectedRequestCallback,
+                   &url_id_, num_failures, release_after_ms));
+
+    reject_request = true;
+  }
+
+  int reject_count = reject_request ? 1 : 0;
+  UMA_HISTOGRAM_ENUMERATION(
+      "Throttling.RequestThrottled", reject_count, 2);
+
+  return reject_request;
+}
+
+int64 URLRequestThrottlerEntry::ReserveSendingTimeForNextRequest(
+    const base::TimeTicks& earliest_time) {
+  base::TimeTicks now = ImplGetTimeNow();
+
+  // If a lot of requests were successfully made recently,
+  // sliding_window_release_time_ may be greater than
+  // exponential_backoff_release_time_.
+  base::TimeTicks recommended_sending_time =
+      std::max(std::max(now, earliest_time),
+               std::max(GetBackoffEntry()->GetReleaseTime(),
+                        sliding_window_release_time_));
+
+  DCHECK(send_log_.empty() ||
+         recommended_sending_time >= send_log_.back());
+  // Log the new send event.
+  send_log_.push(recommended_sending_time);
+
+  sliding_window_release_time_ = recommended_sending_time;
+
+  // Drop the out-of-date events in the event list.
+  // We don't need to worry that the queue may become empty during this
+  // operation, since the last element is sliding_window_release_time_.
+  while ((send_log_.front() + sliding_window_period_ <=
+          sliding_window_release_time_) ||
+         send_log_.size() > static_cast<unsigned>(max_send_threshold_)) {
+    send_log_.pop();
+  }
+
+  // Check if there are too many send events in recent time.
+  if (send_log_.size() == static_cast<unsigned>(max_send_threshold_))
+    sliding_window_release_time_ = send_log_.front() + sliding_window_period_;
+
+  return (recommended_sending_time - now).InMillisecondsRoundedUp();
+}
+
+base::TimeTicks
+    URLRequestThrottlerEntry::GetExponentialBackoffReleaseTime() const {
+  // If a site opts out, it's likely because they have problems that trigger
+  // the back-off mechanism when it shouldn't be triggered, in which case
+  // returning the calculated back-off release time would probably be the
+  // wrong thing to do (i.e. it would likely be too long).  Therefore, we
+  // return "now" so that retries are not delayed.
+  if (is_backoff_disabled_)
+    return ImplGetTimeNow();
+
+  return GetBackoffEntry()->GetReleaseTime();
+}
+
+void URLRequestThrottlerEntry::UpdateWithResponse(
+    const std::string& host,
+    const URLRequestThrottlerHeaderInterface* response) {
+  if (IsConsideredError(response->GetResponseCode())) {
+    GetBackoffEntry()->InformOfRequest(false);
+  } else {
+    GetBackoffEntry()->InformOfRequest(true);
+
+    std::string throttling_header = response->GetNormalizedValue(
+        kExponentialThrottlingHeader);
+    if (!throttling_header.empty())
+      HandleThrottlingHeader(throttling_header, host);
+  }
+}
+
+void URLRequestThrottlerEntry::ReceivedContentWasMalformed(int response_code) {
+  // A malformed body can only occur when the request to fetch a resource
+  // was successful.  Therefore, in such a situation, we will receive one
+  // call to ReceivedContentWasMalformed() and one call to
+  // UpdateWithResponse() with a response categorized as "good".  To end
+  // up counting one failure, we need to count two failures here against
+  // the one success in UpdateWithResponse().
+  //
+  // We do nothing for a response that is already being considered an error
+  // based on its status code (otherwise we would count 3 errors instead of 1).
+  if (!IsConsideredError(response_code)) {
+    GetBackoffEntry()->InformOfRequest(false);
+    GetBackoffEntry()->InformOfRequest(false);
+  }
+}
+
+URLRequestThrottlerEntry::~URLRequestThrottlerEntry() {
+}
+
+void URLRequestThrottlerEntry::Initialize() {
+  sliding_window_release_time_ = base::TimeTicks::Now();
+  backoff_policy_.num_errors_to_ignore = kDefaultNumErrorsToIgnore;
+  backoff_policy_.initial_delay_ms = kDefaultInitialDelayMs;
+  backoff_policy_.multiply_factor = kDefaultMultiplyFactor;
+  backoff_policy_.jitter_factor = kDefaultJitterFactor;
+  backoff_policy_.maximum_backoff_ms = kDefaultMaximumBackoffMs;
+  backoff_policy_.entry_lifetime_ms = kDefaultEntryLifetimeMs;
+  backoff_policy_.always_use_initial_delay = false;
+}
+
+bool URLRequestThrottlerEntry::IsConsideredError(int response_code) {
+  // We throttle only for the status codes most likely to indicate the server
+  // is failing because it is too busy or otherwise are likely to be
+  // because of DDoS.
+  //
+  // 500 is the generic error when no better message is suitable, and
+  //     as such does not necessarily indicate a temporary state, but
+  //     other status codes cover most of the permanent error states.
+  // 503 is explicitly documented as a temporary state where the server
+  //     is either overloaded or down for maintenance.
+  // 509 is the (non-standard but widely implemented) Bandwidth Limit Exceeded
+  //     status code, which might indicate DDoS.
+  //
+  // We do not back off on 502 or 504, which are reported by gateways
+  // (proxies) on timeouts or failures, because in many cases these requests
+  // have not made it to the destination server and so we do not actually
+  // know that it is down or busy.  One degenerate case could be a proxy on
+  // localhost, where you are not actually connected to the network.
+  return (response_code == 500 ||
+          response_code == 503 ||
+          response_code == 509);
+}
+
+base::TimeTicks URLRequestThrottlerEntry::ImplGetTimeNow() const {
+  return base::TimeTicks::Now();
+}
+
+void URLRequestThrottlerEntry::HandleThrottlingHeader(
+    const std::string& header_value,
+    const std::string& host) {
+  if (header_value == kExponentialThrottlingDisableValue) {
+    DisableBackoffThrottling();
+    if (manager_)
+      manager_->AddToOptOutList(host);
+  }
+}
+
+const BackoffEntry* URLRequestThrottlerEntry::GetBackoffEntry() const {
+  return &backoff_entry_;
+}
+
+BackoffEntry* URLRequestThrottlerEntry::GetBackoffEntry() {
+  return &backoff_entry_;
+}
+
+// static
+bool URLRequestThrottlerEntry::ExplicitUserRequest(const int load_flags) {
+  return (load_flags & LOAD_MAYBE_USER_GESTURE) != 0;
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_throttler_entry.h b/src/net/url_request/url_request_throttler_entry.h
new file mode 100644
index 0000000..66a8ce9
--- /dev/null
+++ b/src/net/url_request/url_request_throttler_entry.h
@@ -0,0 +1,175 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_THROTTLER_ENTRY_H_
+#define NET_URL_REQUEST_URL_REQUEST_THROTTLER_ENTRY_H_
+
+#include <queue>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/time.h"
+#include "net/base/backoff_entry.h"
+#include "net/base/net_log.h"
+#include "net/url_request/url_request_throttler_entry_interface.h"
+
+namespace net {
+
+class URLRequestThrottlerManager;
+
+// URLRequestThrottlerEntry represents an entry of URLRequestThrottlerManager.
+// It analyzes requests of a specific URL over some period of time, in order to
+// deduce the back-off time for every request.
+// The back-off algorithm consists of two parts. Firstly, exponential back-off
+// is used when receiving 5XX server errors or malformed response bodies.
+// The exponential back-off rule is enforced by URLRequestHttpJob. Any
+// request sent during the back-off period will be cancelled.
+// Secondly, a sliding window is used to count recent requests to a given
+// destination and provide guidance (to the application level only) on whether
+// too many requests have been sent and when a good time to send the next one
+// would be. This is never used to deny requests at the network level.
+class NET_EXPORT URLRequestThrottlerEntry
+    : public URLRequestThrottlerEntryInterface {
+ public:
+  // Sliding window period.
+  static const int kDefaultSlidingWindowPeriodMs;
+
+  // Maximum number of requests allowed in sliding window period.
+  static const int kDefaultMaxSendThreshold;
+
+  // Number of initial errors to ignore before starting exponential back-off.
+  static const int kDefaultNumErrorsToIgnore;
+
+  // Initial delay for exponential back-off.
+  static const int kDefaultInitialDelayMs;
+
+  // Factor by which the waiting time will be multiplied.
+  static const double kDefaultMultiplyFactor;
+
+  // Fuzzing percentage. ex: 10% will spread requests randomly
+  // between 90%-100% of the calculated time.
+  static const double kDefaultJitterFactor;
+
+  // Maximum amount of time we are willing to delay our request.
+  static const int kDefaultMaximumBackoffMs;
+
+  // Time after which the entry is considered outdated.
+  static const int kDefaultEntryLifetimeMs;
+
+  // Name of the header that sites can use to opt out of exponential back-off
+  // throttling.
+  static const char kExponentialThrottlingHeader[];
+
+  // Value for exponential throttling header that can be used to opt out of
+  // exponential back-off throttling.
+  static const char kExponentialThrottlingDisableValue[];
+
+  // The manager object's lifetime must enclose the lifetime of this object.
+  URLRequestThrottlerEntry(URLRequestThrottlerManager* manager,
+                           const std::string& url_id);
+
+  // The life span of instances created with this constructor is set to
+  // infinite, and the number of initial errors to ignore is set to 0.
+  // It is only used by unit tests.
+  URLRequestThrottlerEntry(URLRequestThrottlerManager* manager,
+                           const std::string& url_id,
+                           int sliding_window_period_ms,
+                           int max_send_threshold,
+                           int initial_backoff_ms,
+                           double multiply_factor,
+                           double jitter_factor,
+                           int maximum_backoff_ms);
+
+  // Used by the manager, returns true if the entry needs to be garbage
+  // collected.
+  bool IsEntryOutdated() const;
+
+  // Causes this entry to never reject requests due to back-off.
+  void DisableBackoffThrottling();
+
+  // Causes this entry to NULL its manager pointer.
+  void DetachManager();
+
+  // Implementation of URLRequestThrottlerEntryInterface.
+  virtual bool ShouldRejectRequest(const URLRequest& request) const OVERRIDE;
+  virtual int64 ReserveSendingTimeForNextRequest(
+      const base::TimeTicks& earliest_time) OVERRIDE;
+  virtual base::TimeTicks GetExponentialBackoffReleaseTime() const OVERRIDE;
+  virtual void UpdateWithResponse(
+      const std::string& host,
+      const URLRequestThrottlerHeaderInterface* response) OVERRIDE;
+  virtual void ReceivedContentWasMalformed(int response_code) OVERRIDE;
+
+ protected:
+  virtual ~URLRequestThrottlerEntry();
+
+  void Initialize();
+
+  // Returns true if the given response code is considered an error for
+  // throttling purposes.
+  bool IsConsideredError(int response_code);
+
+  // Equivalent to TimeTicks::Now(), virtual to be mockable for testing purpose.
+  virtual base::TimeTicks ImplGetTimeNow() const;
+
+  // Used internally to handle the opt-out header.
+  void HandleThrottlingHeader(const std::string& header_value,
+                              const std::string& host);
+
+  // Retrieves the back-off entry object we're using. Used to enable a
+  // unit testing seam for dependency injection in tests.
+  virtual const BackoffEntry* GetBackoffEntry() const;
+  virtual BackoffEntry* GetBackoffEntry();
+
+  // Returns true if |load_flags| contains a flag that indicates an
+  // explicit request by the user to load the resource. We never
+  // throttle requests with such load flags.
+  static bool ExplicitUserRequest(const int load_flags);
+
+  // Used by tests.
+  base::TimeTicks sliding_window_release_time() const {
+    return sliding_window_release_time_;
+  }
+
+  // Used by tests.
+  void set_sliding_window_release_time(const base::TimeTicks& release_time) {
+    sliding_window_release_time_ = release_time;
+  }
+
+  // Valid and immutable after construction time.
+  BackoffEntry::Policy backoff_policy_;
+
+ private:
+  // Timestamp calculated by the sliding window algorithm for when we advise
+  // clients the next request should be made, at the earliest. Advisory only,
+  // not used to deny requests.
+  base::TimeTicks sliding_window_release_time_;
+
+  // A list of the recent send events. We use them to decide whether there are
+  // too many requests sent in sliding window.
+  std::queue<base::TimeTicks> send_log_;
+
+  const base::TimeDelta sliding_window_period_;
+  const int max_send_threshold_;
+
+  // True if DisableBackoffThrottling() has been called on this object.
+  bool is_backoff_disabled_;
+
+  // Access it through GetBackoffEntry() to allow a unit test seam.
+  BackoffEntry backoff_entry_;
+
+  // Weak back-reference to the manager object managing us.
+  URLRequestThrottlerManager* manager_;
+
+  // Canonicalized URL string that this entry is for; used for logging only.
+  std::string url_id_;
+
+  BoundNetLog net_log_;
+
+  DISALLOW_COPY_AND_ASSIGN(URLRequestThrottlerEntry);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_THROTTLER_ENTRY_H_
diff --git a/src/net/url_request/url_request_throttler_entry_interface.h b/src/net/url_request/url_request_throttler_entry_interface.h
new file mode 100644
index 0000000..6d975f4
--- /dev/null
+++ b/src/net/url_request/url_request_throttler_entry_interface.h
@@ -0,0 +1,73 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_THROTTLER_ENTRY_INTERFACE_H_
+#define NET_URL_REQUEST_URL_REQUEST_THROTTLER_ENTRY_INTERFACE_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "base/time.h"
+#include "net/base/net_export.h"
+
+namespace net {
+
+class URLRequest;
+class URLRequestThrottlerHeaderInterface;
+
+// Interface provided on entries of the URL request throttler manager.
+class NET_EXPORT URLRequestThrottlerEntryInterface
+    : public base::RefCountedThreadSafe<URLRequestThrottlerEntryInterface> {
+ public:
+  URLRequestThrottlerEntryInterface() {}
+
+  // Returns true when we have encountered server errors and are doing
+  // exponential back-off, unless the request has load flags that mean
+  // it is likely to be user-initiated, or the NetworkDelegate returns
+  // false for |CanThrottleRequest(request)|.
+  //
+  // URLRequestHttpJob checks this method prior to every request; it
+  // cancels requests if this method returns true.
+  virtual bool ShouldRejectRequest(const URLRequest& request) const = 0;
+
+  // Calculates a recommended sending time for the next request and reserves it.
+  // The sending time is not earlier than the current exponential back-off
+  // release time or |earliest_time|. Moreover, the previous results of
+  // the method are taken into account, in order to make sure they are spread
+  // properly over time.
+  // Returns the recommended delay before sending the next request, in
+  // milliseconds. The return value is always positive or 0.
+  // Although it is not mandatory, respecting the value returned by this method
+  // is helpful to avoid traffic overload.
+  virtual int64 ReserveSendingTimeForNextRequest(
+      const base::TimeTicks& earliest_time) = 0;
+
+  // Returns the time after which requests are allowed.
+  virtual base::TimeTicks GetExponentialBackoffReleaseTime() const = 0;
+
+  // This method needs to be called each time a response is received.
+  virtual void UpdateWithResponse(
+      const std::string& host,
+      const URLRequestThrottlerHeaderInterface* response) = 0;
+
+  // Lets higher-level modules, that know how to parse particular response
+  // bodies, notify of receiving malformed content for the given URL. This will
+  // be handled by the throttler as if an HTTP 503 response had been received to
+  // the request, i.e. it will count as a failure, unless the HTTP response code
+  // indicated is already one of those that will be counted as an error.
+  virtual void ReceivedContentWasMalformed(int response_code) = 0;
+
+ protected:
+  friend class base::RefCountedThreadSafe<URLRequestThrottlerEntryInterface>;
+  virtual ~URLRequestThrottlerEntryInterface() {}
+
+ private:
+  friend class base::RefCounted<URLRequestThrottlerEntryInterface>;
+  DISALLOW_COPY_AND_ASSIGN(URLRequestThrottlerEntryInterface);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_THROTTLER_ENTRY_INTERFACE_H_
diff --git a/src/net/url_request/url_request_throttler_header_adapter.cc b/src/net/url_request/url_request_throttler_header_adapter.cc
new file mode 100644
index 0000000..51bbc74
--- /dev/null
+++ b/src/net/url_request/url_request_throttler_header_adapter.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_throttler_header_adapter.h"
+
+#include "net/http/http_response_headers.h"
+
+namespace net {
+
+URLRequestThrottlerHeaderAdapter::URLRequestThrottlerHeaderAdapter(
+    HttpResponseHeaders* headers)
+    : response_header_(headers) {
+}
+
+URLRequestThrottlerHeaderAdapter::~URLRequestThrottlerHeaderAdapter() {}
+
+std::string URLRequestThrottlerHeaderAdapter::GetNormalizedValue(
+    const std::string& key) const {
+  std::string return_value;
+  response_header_->GetNormalizedHeader(key, &return_value);
+  return return_value;
+}
+
+int URLRequestThrottlerHeaderAdapter::GetResponseCode() const {
+  return response_header_->response_code();
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_throttler_header_adapter.h b/src/net/url_request/url_request_throttler_header_adapter.h
new file mode 100644
index 0000000..17a13a1
--- /dev/null
+++ b/src/net/url_request/url_request_throttler_header_adapter.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_THROTTLER_HEADER_ADAPTER_H_
+#define NET_URL_REQUEST_URL_REQUEST_THROTTLER_HEADER_ADAPTER_H_
+
+#include <string>
+
+#include "base/compiler_specific.h"
+#include "base/memory/ref_counted.h"
+#include "net/url_request/url_request_throttler_header_interface.h"
+
+namespace net {
+
+class HttpResponseHeaders;
+
+// Adapter for the HTTP header interface of the URL request throttler component.
+class URLRequestThrottlerHeaderAdapter
+    : public URLRequestThrottlerHeaderInterface {
+ public:
+  explicit URLRequestThrottlerHeaderAdapter(HttpResponseHeaders* headers);
+  virtual ~URLRequestThrottlerHeaderAdapter();
+
+  // Implementation of URLRequestThrottlerHeaderInterface
+  virtual std::string GetNormalizedValue(const std::string& key) const OVERRIDE;
+  virtual int GetResponseCode() const OVERRIDE;
+
+ private:
+  const scoped_refptr<HttpResponseHeaders> response_header_;
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_THROTTLER_HEADER_ADAPTER_H_
diff --git a/src/net/url_request/url_request_throttler_header_interface.h b/src/net/url_request/url_request_throttler_header_interface.h
new file mode 100644
index 0000000..c69d185
--- /dev/null
+++ b/src/net/url_request/url_request_throttler_header_interface.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_THROTTLER_HEADER_INTERFACE_H_
+#define NET_URL_REQUEST_URL_REQUEST_THROTTLER_HEADER_INTERFACE_H_
+
+#include <string>
+
+namespace net {
+
+// Interface to an HTTP header to enforce we have the methods we need.
+class URLRequestThrottlerHeaderInterface {
+ public:
+  virtual ~URLRequestThrottlerHeaderInterface() {}
+
+  // Method that enables us to fetch the header value by its key.
+  // ex: location: www.example.com -> key = "location" value = "www.example.com"
+  // If the key does not exist, it returns an empty string.
+  virtual std::string GetNormalizedValue(const std::string& key) const = 0;
+
+  // Returns the HTTP response code associated with the request.
+  virtual int GetResponseCode() const = 0;
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_THROTTLER_HEADER_INTERFACE_H_
diff --git a/src/net/url_request/url_request_throttler_manager.cc b/src/net/url_request/url_request_throttler_manager.cc
new file mode 100644
index 0000000..fa04a00
--- /dev/null
+++ b/src/net/url_request/url_request_throttler_manager.cc
@@ -0,0 +1,203 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_throttler_manager.h"
+
+#include "base/logging.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram.h"
+#include "base/string_util.h"
+#include "net/base/net_log.h"
+#include "net/base/net_util.h"
+
+namespace net {
+
+const unsigned int URLRequestThrottlerManager::kMaximumNumberOfEntries = 1500;
+const unsigned int URLRequestThrottlerManager::kRequestsBetweenCollecting = 200;
+
+URLRequestThrottlerManager::URLRequestThrottlerManager()
+    : requests_since_last_gc_(0),
+      enable_thread_checks_(false),
+      logged_for_localhost_disabled_(false),
+      registered_from_thread_(base::kInvalidThreadId) {
+  url_id_replacements_.ClearPassword();
+  url_id_replacements_.ClearUsername();
+  url_id_replacements_.ClearQuery();
+  url_id_replacements_.ClearRef();
+
+  NetworkChangeNotifier::AddIPAddressObserver(this);
+  NetworkChangeNotifier::AddConnectionTypeObserver(this);
+}
+
+URLRequestThrottlerManager::~URLRequestThrottlerManager() {
+  NetworkChangeNotifier::RemoveIPAddressObserver(this);
+  NetworkChangeNotifier::RemoveConnectionTypeObserver(this);
+
+  // Since the manager object might conceivably go away before the
+  // entries, detach the entries' back-pointer to the manager.
+  UrlEntryMap::iterator i = url_entries_.begin();
+  while (i != url_entries_.end()) {
+    if (i->second != NULL) {
+      i->second->DetachManager();
+    }
+    ++i;
+  }
+
+  // Delete all entries.
+  url_entries_.clear();
+}
+
+scoped_refptr<URLRequestThrottlerEntryInterface>
+    URLRequestThrottlerManager::RegisterRequestUrl(const GURL &url) {
+  DCHECK(!enable_thread_checks_ || CalledOnValidThread());
+
+  // Normalize the url.
+  std::string url_id = GetIdFromUrl(url);
+
+  // Periodically garbage collect old entries.
+  GarbageCollectEntriesIfNecessary();
+
+  // Find the entry in the map or create a new NULL entry.
+  scoped_refptr<URLRequestThrottlerEntry>& entry = url_entries_[url_id];
+
+  // If the entry exists but could be garbage collected at this point, we
+  // start with a fresh entry so that we possibly back off a bit less
+  // aggressively (i.e. this resets the error count when the entry's URL
+  // hasn't been requested in long enough).
+  if (entry.get() && entry->IsEntryOutdated()) {
+    entry = NULL;
+  }
+
+  // Create the entry if needed.
+  if (entry.get() == NULL) {
+    entry = new URLRequestThrottlerEntry(this, url_id);
+
+    // We only disable back-off throttling on an entry that we have
+    // just constructed.  This is to allow unit tests to explicitly override
+    // the entry for localhost URLs.  Given that we do not attempt to
+    // disable throttling for entries already handed out (see comment
+    // in AddToOptOutList), this is not a problem.
+    std::string host = url.host();
+    if (opt_out_hosts_.find(host) != opt_out_hosts_.end() ||
+        IsLocalhost(host)) {
+      if (!logged_for_localhost_disabled_ && IsLocalhost(host)) {
+        logged_for_localhost_disabled_ = true;
+        net_log_.AddEvent(NetLog::TYPE_THROTTLING_DISABLED_FOR_HOST,
+                          NetLog::StringCallback("host", &host));
+      }
+
+      // TODO(joi): Once sliding window is separate from back-off throttling,
+      // we can simply return a dummy implementation of
+      // URLRequestThrottlerEntryInterface here that never blocks anything (and
+      // not keep entries in url_entries_ for opted-out sites).
+      entry->DisableBackoffThrottling();
+    }
+  }
+
+  return entry;
+}
+
+void URLRequestThrottlerManager::AddToOptOutList(const std::string& host) {
+  // There is an edge case here that we are not handling, to keep things
+  // simple.  If a host starts adding the opt-out header to its responses
+  // after there are already one or more entries in url_entries_ for that
+  // host, the pre-existing entries may still perform back-off throttling.
+  // In practice, this would almost never occur.
+  if (opt_out_hosts_.find(host) == opt_out_hosts_.end()) {
+    UMA_HISTOGRAM_COUNTS("Throttling.SiteOptedOut", 1);
+
+    net_log_.EndEvent(NetLog::TYPE_THROTTLING_DISABLED_FOR_HOST,
+                      NetLog::StringCallback("host", &host));
+    opt_out_hosts_.insert(host);
+  }
+}
+
+void URLRequestThrottlerManager::OverrideEntryForTests(
+    const GURL& url,
+    URLRequestThrottlerEntry* entry) {
+  // Normalize the url.
+  std::string url_id = GetIdFromUrl(url);
+
+  // Periodically garbage collect old entries.
+  GarbageCollectEntriesIfNecessary();
+
+  url_entries_[url_id] = entry;
+}
+
+void URLRequestThrottlerManager::EraseEntryForTests(const GURL& url) {
+  // Normalize the url.
+  std::string url_id = GetIdFromUrl(url);
+  url_entries_.erase(url_id);
+}
+
+void URLRequestThrottlerManager::set_enable_thread_checks(bool enable) {
+  enable_thread_checks_ = enable;
+}
+
+bool URLRequestThrottlerManager::enable_thread_checks() const {
+  return enable_thread_checks_;
+}
+
+void URLRequestThrottlerManager::set_net_log(NetLog* net_log) {
+  DCHECK(net_log);
+  net_log_ = BoundNetLog::Make(net_log,
+                               NetLog::SOURCE_EXPONENTIAL_BACKOFF_THROTTLING);
+}
+
+NetLog* URLRequestThrottlerManager::net_log() const {
+  return net_log_.net_log();
+}
+
+void URLRequestThrottlerManager::OnIPAddressChanged() {
+  OnNetworkChange();
+}
+
+void URLRequestThrottlerManager::OnConnectionTypeChanged(
+    NetworkChangeNotifier::ConnectionType type) {
+  OnNetworkChange();
+}
+
+std::string URLRequestThrottlerManager::GetIdFromUrl(const GURL& url) const {
+  if (!url.is_valid())
+    return url.possibly_invalid_spec();
+
+  GURL id = url.ReplaceComponents(url_id_replacements_);
+  return StringToLowerASCII(id.spec()).c_str();
+}
+
+void URLRequestThrottlerManager::GarbageCollectEntriesIfNecessary() {
+  requests_since_last_gc_++;
+  if (requests_since_last_gc_ < kRequestsBetweenCollecting)
+    return;
+  requests_since_last_gc_ = 0;
+
+  GarbageCollectEntries();
+}
+
+void URLRequestThrottlerManager::GarbageCollectEntries() {
+  UrlEntryMap::iterator i = url_entries_.begin();
+  while (i != url_entries_.end()) {
+    if ((i->second)->IsEntryOutdated()) {
+      url_entries_.erase(i++);
+    } else {
+      ++i;
+    }
+  }
+
+  // In case something broke we want to make sure not to grow indefinitely.
+  while (url_entries_.size() > kMaximumNumberOfEntries) {
+    url_entries_.erase(url_entries_.begin());
+  }
+}
+
+void URLRequestThrottlerManager::OnNetworkChange() {
+  // Remove all entries.  Any entries that in-flight requests have a reference
+  // to will live until those requests end, and these entries may be
+  // inconsistent with new entries for the same URLs, but since what we
+  // want is a clean slate for the new connection type, this is OK.
+  url_entries_.clear();
+  requests_since_last_gc_ = 0;
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_throttler_manager.h b/src/net/url_request/url_request_throttler_manager.h
new file mode 100644
index 0000000..9623834
--- /dev/null
+++ b/src/net/url_request/url_request_throttler_manager.h
@@ -0,0 +1,166 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_THROTTLER_MANAGER_H_
+#define NET_URL_REQUEST_URL_REQUEST_THROTTLER_MANAGER_H_
+
+#include <map>
+#include <set>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/memory/ref_counted.h"
+#include "base/threading/non_thread_safe.h"
+#include "base/threading/platform_thread.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/net_export.h"
+#include "net/base/network_change_notifier.h"
+#include "net/url_request/url_request_throttler_entry.h"
+
+namespace net {
+
+class BoundNetLog;
+class NetLog;
+
+// Class that registers URL request throttler entries for URLs being accessed
+// in order to supervise traffic. URL requests for HTTP contents should
+// register their URLs in this manager on each request.
+//
+// URLRequestThrottlerManager maintains a map of URL IDs to URL request
+// throttler entries. It creates URL request throttler entries when new URLs
+// are registered, and does garbage collection from time to time in order to
+// clean out outdated entries. URL ID consists of lowercased scheme, host, port
+// and path. All URLs converted to the same ID will share the same entry.
+class NET_EXPORT URLRequestThrottlerManager
+    : NON_EXPORTED_BASE(public base::NonThreadSafe),
+      public NetworkChangeNotifier::IPAddressObserver,
+      public NetworkChangeNotifier::ConnectionTypeObserver {
+ public:
+  URLRequestThrottlerManager();
+  virtual ~URLRequestThrottlerManager();
+
+  // Must be called for every request, returns the URL request throttler entry
+  // associated with the URL. The caller must inform this entry of some events.
+  // Please refer to url_request_throttler_entry_interface.h for further
+  // information.
+  scoped_refptr<URLRequestThrottlerEntryInterface> RegisterRequestUrl(
+      const GURL& url);
+
+  // Adds the given host to a list of sites for which exponential back-off
+  // throttling will be disabled.  Subdomains are not included, so they
+  // must be added separately.
+  void AddToOptOutList(const std::string& host);
+
+  // Registers a new entry in this service and overrides the existing entry (if
+  // any) for the URL. The service will hold a reference to the entry.
+  // It is only used by unit tests.
+  void OverrideEntryForTests(const GURL& url, URLRequestThrottlerEntry* entry);
+
+  // Explicitly erases an entry.
+  // This is useful to remove those entries which have got infinite lifetime and
+  // thus won't be garbage collected.
+  // It is only used by unit tests.
+  void EraseEntryForTests(const GURL& url);
+
+  // Turns threading model verification on or off.  Any code that correctly
+  // uses the network stack should preferably call this function to enable
+  // verification of correct adherence to the network stack threading model.
+  void set_enable_thread_checks(bool enable);
+  bool enable_thread_checks() const;
+
+  // Whether throttling is enabled or not.
+  void set_enforce_throttling(bool enforce);
+  bool enforce_throttling();
+
+  // Sets the NetLog instance to use.
+  void set_net_log(NetLog* net_log);
+  NetLog* net_log() const;
+
+  // IPAddressObserver interface.
+  virtual void OnIPAddressChanged() OVERRIDE;
+
+  // ConnectionTypeObserver interface.
+  virtual void OnConnectionTypeChanged(
+      NetworkChangeNotifier::ConnectionType type) OVERRIDE;
+
+  // Method that allows us to transform a URL into an ID that can be used in our
+  // map. Resulting IDs will be lowercase and consist of the scheme, host, port
+  // and path (without query string, fragment, etc.).
+  // If the URL is invalid, the invalid spec will be returned, without any
+  // transformation.
+  std::string GetIdFromUrl(const GURL& url) const;
+
+  // Method that ensures the map gets cleaned from time to time. The period at
+  // which garbage collecting happens is adjustable with the
+  // kRequestsBetweenCollecting constant.
+  void GarbageCollectEntriesIfNecessary();
+
+  // Method that does the actual work of garbage collecting.
+  void GarbageCollectEntries();
+
+  // When we switch from online to offline or change IP addresses, we
+  // clear all back-off history. This is a precaution in case the change in
+  // online state now lets us communicate without error with servers that
+  // we were previously getting 500 or 503 responses from (perhaps the
+  // responses are from a badly-written proxy that should have returned a
+  // 502 or 504 because its upstream connection was down or it had no route
+  // to the server).
+  void OnNetworkChange();
+
+  // Used by tests.  Note: truncates the map's size_t size to int, which is
+  // fine for test-sized maps.
+  int GetNumberOfEntriesForTests() const { return url_entries_.size(); }
+
+ private:
+  // From each URL we generate an ID composed of the scheme, host, port and path
+  // that allows us to uniquely map an entry to it.
+  typedef std::map<std::string, scoped_refptr<URLRequestThrottlerEntry> >
+      UrlEntryMap;
+
+  // We maintain a set of hosts that have opted out of exponential
+  // back-off throttling.
+  typedef std::set<std::string> OptOutHosts;
+
+  // Maximum number of entries that we are willing to collect in our map.
+  static const unsigned int kMaximumNumberOfEntries;
+  // Number of requests that will be made between garbage collection.
+  static const unsigned int kRequestsBetweenCollecting;
+
+  // Map that contains a list of URL ID and their matching
+  // URLRequestThrottlerEntry.
+  UrlEntryMap url_entries_;
+
+  // Set of hosts that have opted out.
+  OptOutHosts opt_out_hosts_;
+
+  // This keeps track of how many requests have been made. Used with
+  // GarbageCollectEntries.
+  unsigned int requests_since_last_gc_;
+
+  // Valid after construction.
+  GURL::Replacements url_id_replacements_;
+
+  // Certain tests do not obey the net component's threading policy, so we
+  // keep track of whether we're being used by tests, and turn off certain
+  // checks.
+  //
+  // TODO(joi): See if we can fix the offending unit tests and remove this
+  // workaround.
+  bool enable_thread_checks_;
+
+  // Initially false, switches to true once we have logged because of back-off
+  // being disabled for localhost.
+  bool logged_for_localhost_disabled_;
+
+  // NetLog to use, if configured.
+  BoundNetLog net_log_;
+
+  // Valid once we've registered for network notifications.
+  base::PlatformThreadId registered_from_thread_;
+
+  DISALLOW_COPY_AND_ASSIGN(URLRequestThrottlerManager);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_THROTTLER_MANAGER_H_
diff --git a/src/net/url_request/url_request_throttler_simulation_unittest.cc b/src/net/url_request/url_request_throttler_simulation_unittest.cc
new file mode 100644
index 0000000..48b0319
--- /dev/null
+++ b/src/net/url_request/url_request_throttler_simulation_unittest.cc
@@ -0,0 +1,757 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The tests in this file attempt to verify the following through simulation:
+// a) That a server experiencing overload will actually benefit from the
+//    anti-DDoS throttling logic, i.e. that its traffic spike will subside
+//    and be distributed over a longer period of time;
+// b) That "well-behaved" clients of a server under DDoS attack actually
+//    benefit from the anti-DDoS throttling logic; and
+// c) That the approximate increase in "perceived downtime" introduced by
+//    anti-DDoS throttling for various different actual downtimes is what
+//    we expect it to be.
+
+#include <cmath>
+#include <limits>
+#include <vector>
+
+#include "base/environment.h"
+#include "base/memory/scoped_vector.h"
+#include "base/rand_util.h"
+#include "base/time.h"
+#include "net/url_request/url_request_test_util.h"
+#include "net/url_request/url_request_throttler_manager.h"
+#include "net/url_request/url_request_throttler_test_support.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::TimeDelta;
+using base::TimeTicks;
+
+namespace net {
+namespace {
+
+// Set this variable in your environment if you want to see verbose results
+// of the simulation tests.
+const char kShowSimulationVariableName[] = "SHOW_SIMULATION_RESULTS";
+
+// Prints output only if a given environment variable is set. We use this
+// to not print any output for human evaluation when the test is run without
+// supervision.
+void VerboseOut(const char* format, ...) {
+  // The environment is consulted only once; the result is cached in function
+  // statics.  Not thread-safe, but these simulations run single-threaded.
+  static bool have_checked_environment = false;
+  static bool should_print = false;
+  if (!have_checked_environment) {
+    have_checked_environment = true;
+    scoped_ptr<base::Environment> env(base::Environment::Create());
+    if (env->HasVar(kShowSimulationVariableName))
+      should_print = true;
+  }
+
+  if (should_print) {
+    va_list arglist;
+    va_start(arglist, format);
+    vprintf(format, arglist);
+    va_end(arglist);
+  }
+}
+
+// A simple two-phase discrete time simulation. Actors are added in the order
+// they should take action at every tick of the clock. Ticks of the clock
+// are two-phase:
+// - Phase 1 advances every actor's time to a new absolute time.
+// - Phase 2 asks each actor to perform their action.
+class DiscreteTimeSimulation {
+ public:
+  // Interface for anything that participates in the simulation.
+  class Actor {
+   public:
+    virtual ~Actor() {}
+    // Phase 1: move the actor's notion of "now" to |absolute_time|.
+    virtual void AdvanceTime(const TimeTicks& absolute_time) = 0;
+    // Phase 2: act based on the time set during phase 1.
+    virtual void PerformAction() = 0;
+  };
+
+  DiscreteTimeSimulation() {}
+
+  // Adds an |actor| to the simulation. The client of the simulation maintains
+  // ownership of |actor| and must ensure its lifetime exceeds that of the
+  // simulation. Actors should be added in the order you wish for them to
+  // act at each tick of the simulation.
+  void AddActor(Actor* actor) {
+    actors_.push_back(actor);
+  }
+
+  // Runs the simulation, pretending |time_between_ticks| passes from one
+  // tick to the next. The start time is the zero TimeTicks. The
+  // simulation will stop when the simulated duration is equal to or greater
+  // than |maximum_simulated_duration|.
+  void RunSimulation(const TimeDelta& maximum_simulated_duration,
+                     const TimeDelta& time_between_ticks) {
+    TimeTicks start_time = TimeTicks();
+    TimeTicks now = start_time;
+    while ((now - start_time) <= maximum_simulated_duration) {
+      // Phase 1: advance every actor to the new absolute time.
+      for (std::vector<Actor*>::iterator it = actors_.begin();
+           it != actors_.end();
+           ++it) {
+        (*it)->AdvanceTime(now);
+      }
+
+      // Phase 2: let every actor act, in registration order.
+      for (std::vector<Actor*>::iterator it = actors_.begin();
+           it != actors_.end();
+           ++it) {
+        (*it)->PerformAction();
+      }
+
+      now += time_between_ticks;
+    }
+  }
+
+ private:
+  std::vector<Actor*> actors_;
+
+  DISALLOW_COPY_AND_ASSIGN(DiscreteTimeSimulation);
+};
+
+// Represents a web server in a simulation of a server under attack by
+// a lot of clients. Must be added to the simulation's list of actors
+// after all |Requester| objects.
+class Server : public DiscreteTimeSimulation::Actor {
+ public:
+  // |max_queries_per_tick| is the load above which the server becomes
+  // overloaded; while failing, each request is answered with a 503 with
+  // probability |request_drop_ratio|.
+  Server(int max_queries_per_tick,
+         double request_drop_ratio)
+      : max_queries_per_tick_(max_queries_per_tick),
+        request_drop_ratio_(request_drop_ratio),
+        num_overloaded_ticks_remaining_(0),
+        num_current_tick_queries_(0),
+        num_overloaded_ticks_(0),
+        max_experienced_queries_per_tick_(0),
+        mock_request_(GURL(), NULL, &context_) {
+  }
+
+  // Schedules a maintenance window: requests arriving strictly between
+  // |start_time| and |start_time| + |duration| receive a 503.
+  void SetDowntime(const TimeTicks& start_time, const TimeDelta& duration) {
+    start_downtime_ = start_time;
+    end_downtime_ = start_time + duration;
+  }
+
+  virtual void AdvanceTime(const TimeTicks& absolute_time) OVERRIDE {
+    now_ = absolute_time;
+  }
+
+  virtual void PerformAction() OVERRIDE {
+    // We are inserted at the end of the actor's list, so all Requester
+    // instances have already done their bit.
+    if (num_current_tick_queries_ > max_experienced_queries_per_tick_)
+      max_experienced_queries_per_tick_ = num_current_tick_queries_;
+
+    if (num_current_tick_queries_ > max_queries_per_tick_) {
+      // We pretend the server fails for the next several ticks after it
+      // gets overloaded.
+      num_overloaded_ticks_remaining_ = 5;
+      ++num_overloaded_ticks_;
+    } else if (num_overloaded_ticks_remaining_ > 0) {
+      --num_overloaded_ticks_remaining_;
+    }
+
+    requests_per_tick_.push_back(num_current_tick_queries_);
+    num_current_tick_queries_ = 0;
+  }
+
+  // This is called by Requester. It returns the response code from
+  // the server.
+  int HandleRequest() {
+    ++num_current_tick_queries_;
+    if (!start_downtime_.is_null() &&
+        start_downtime_ < now_ && now_ < end_downtime_) {
+      // For the simulation measuring the increase in perceived
+      // downtime, it might be interesting to count separately the
+      // queries seen by the server (assuming a front-end reverse proxy
+      // is what actually serves up the 503s in this case) so that we could
+      // visualize the traffic spike seen by the server when it comes up,
+      // which would in many situations be ameliorated by the anti-DDoS
+      // throttling.
+      return 503;
+    }
+
+    if ((num_overloaded_ticks_remaining_ > 0 ||
+         num_current_tick_queries_ > max_queries_per_tick_) &&
+        base::RandDouble() < request_drop_ratio_) {
+      return 503;
+    }
+
+    return 200;
+  }
+
+  int num_overloaded_ticks() const {
+    return num_overloaded_ticks_;
+  }
+
+  int max_experienced_queries_per_tick() const {
+    return max_experienced_queries_per_tick_;
+  }
+
+  const URLRequest& mock_request() const {
+    return mock_request_;
+  }
+
+  // Renders the traffic history as an ASCII graph no wider than
+  // |terminal_width| columns.
+  std::string VisualizeASCII(int terminal_width) {
+    // Account for | characters we place at left of graph.
+    terminal_width -= 1;
+
+    // size() is a size_t; it must be cast to match %d, otherwise the
+    // varargs read is undefined where sizeof(size_t) != sizeof(int).
+    VerboseOut("Overloaded for %d of %d ticks.\n",
+               num_overloaded_ticks_,
+               static_cast<int>(requests_per_tick_.size()));
+    VerboseOut("Got maximum of %d requests in a tick.\n\n",
+               max_experienced_queries_per_tick_);
+
+    VerboseOut("Traffic graph:\n\n");
+
+    // Printing the graph like this is a bit overkill, but was very useful
+    // while developing the various simulations to see if they were testing
+    // the corner cases we want to simulate.
+
+    // Find the smallest number of whole ticks we need to group into a
+    // column that will let all ticks fit into the column width we have.
+    int num_ticks = requests_per_tick_.size();
+    double ticks_per_column_exact =
+        static_cast<double>(num_ticks) / static_cast<double>(terminal_width);
+    int ticks_per_column = std::ceil(ticks_per_column_exact);
+    DCHECK_GE(ticks_per_column * terminal_width, num_ticks);
+
+    // Sum up the column values.
+    int num_columns = num_ticks / ticks_per_column;
+    if (num_ticks % ticks_per_column)
+      ++num_columns;
+    DCHECK_LE(num_columns, terminal_width);
+    scoped_array<int> columns(new int[num_columns]);
+    for (int tx = 0; tx < num_ticks; ++tx) {
+      int cx = tx / ticks_per_column;
+      if (tx % ticks_per_column == 0)
+        columns[cx] = 0;
+      columns[cx] += requests_per_tick_[tx];
+    }
+
+    // Find the lowest integer divisor that will let the column values
+    // be represented in a graph of maximum height 50.
+    int max_value = 0;
+    for (int cx = 0; cx < num_columns; ++cx)
+      max_value = std::max(max_value, columns[cx]);
+    const int kNumRows = 50;
+    double row_divisor_exact = max_value / static_cast<double>(kNumRows);
+    int row_divisor = std::ceil(row_divisor_exact);
+    DCHECK_GE(row_divisor * kNumRows, max_value);
+
+    // To show the overload line, we calculate the appropriate value.
+    int overload_value = max_queries_per_tick_ * ticks_per_column;
+
+    // When num_ticks is not a whole multiple of ticks_per_column, the last
+    // column includes fewer ticks than the others. In this case, don't
+    // print it so that we don't show an inconsistent value.
+    int num_printed_columns = num_columns;
+    if (num_ticks % ticks_per_column)
+      --num_printed_columns;
+
+    // This is a top-to-bottom traversal of rows, left-to-right per row.
+    std::string output;
+    for (int rx = 0; rx < kNumRows; ++rx) {
+      int range_min = (kNumRows - rx) * row_divisor;
+      int range_max = range_min + row_divisor;
+      if (range_min == 0)
+        range_min = -1;  // Make 0 values fit in the bottom range.
+      output.append("|");
+      for (int cx = 0; cx < num_printed_columns; ++cx) {
+        char block = ' ';
+        // Show the overload line.
+        if (range_min < overload_value && overload_value <= range_max)
+          block = '-';
+
+        // Preferentially, show the graph line.
+        if (range_min < columns[cx] && columns[cx] <= range_max)
+          block = '#';
+
+        output.append(1, block);
+      }
+      output.append("\n");
+    }
+    output.append("|");
+    output.append(num_printed_columns, '=');
+
+    return output;
+  }
+
+ private:
+  TimeTicks now_;
+  TimeTicks start_downtime_;  // Can be 0 to say "no downtime".
+  TimeTicks end_downtime_;
+  const int max_queries_per_tick_;
+  const double request_drop_ratio_;  // Ratio of requests to 503 when failing.
+  int num_overloaded_ticks_remaining_;
+  int num_current_tick_queries_;
+  int num_overloaded_ticks_;
+  int max_experienced_queries_per_tick_;
+  std::vector<int> requests_per_tick_;
+
+  TestURLRequestContext context_;
+  TestURLRequest mock_request_;
+
+  DISALLOW_COPY_AND_ASSIGN(Server);
+};
+
+// Mock throttler entry used by Requester class.
+class MockURLRequestThrottlerEntry : public URLRequestThrottlerEntry {
+ public:
+  // The empty string registers the entry without tying it to a URL ID.
+  explicit MockURLRequestThrottlerEntry(
+      URLRequestThrottlerManager* manager)
+      : URLRequestThrottlerEntry(manager, ""),
+        mock_backoff_entry_(&backoff_policy_) {
+  }
+
+  // Routes back-off bookkeeping through a MockBackoffEntry whose notion of
+  // "now" is controlled by SetFakeNow().
+  virtual const BackoffEntry* GetBackoffEntry() const OVERRIDE {
+    return &mock_backoff_entry_;
+  }
+
+  virtual BackoffEntry* GetBackoffEntry() OVERRIDE {
+    return &mock_backoff_entry_;
+  }
+
+  virtual TimeTicks ImplGetTimeNow() const OVERRIDE {
+    return fake_now_;
+  }
+
+  // Sets the simulated time for both this entry and its backoff entry.
+  void SetFakeNow(const TimeTicks& fake_time) {
+    fake_now_ = fake_time;
+    mock_backoff_entry_.set_fake_now(fake_time);
+  }
+
+  TimeTicks fake_now() const {
+    return fake_now_;
+  }
+
+ protected:
+  // Held via scoped_refptr by Requester; destruction goes through the
+  // ref-counted base, so the destructor stays non-public.
+  virtual ~MockURLRequestThrottlerEntry() {}
+
+ private:
+  TimeTicks fake_now_;
+  MockBackoffEntry mock_backoff_entry_;
+};
+
+// Registry of results for a class of |Requester| objects (e.g. attackers vs.
+// regular clients).
+// Note: access label indented one space to match Chromium style (and every
+// other class in this file).
+class RequesterResults {
+ public:
+  RequesterResults()
+      : num_attempts_(0), num_successful_(0), num_failed_(0), num_blocked_(0) {
+  }
+
+  // Records a request that reached the server and got a 200.
+  void AddSuccess() {
+    ++num_attempts_;
+    ++num_successful_;
+  }
+
+  // Records a request that reached the server and got a 5xx response.
+  void AddFailure() {
+    ++num_attempts_;
+    ++num_failed_;
+  }
+
+  // Records a request the throttler rejected before it reached the server.
+  void AddBlocked() {
+    ++num_attempts_;
+    ++num_blocked_;
+  }
+
+  int num_attempts() const { return num_attempts_; }
+  int num_successful() const { return num_successful_; }
+  int num_failed() const { return num_failed_; }
+  int num_blocked() const { return num_blocked_; }
+
+  // Ratio of blocked requests to total attempts.  Must not be called before
+  // at least one attempt has been recorded.
+  double GetBlockedRatio() const {
+    DCHECK(num_attempts_);
+    return static_cast<double>(num_blocked_) /
+        static_cast<double>(num_attempts_);
+  }
+
+  // Ratio of successful requests to total attempts.  Must not be called
+  // before at least one attempt has been recorded.
+  double GetSuccessRatio() const {
+    DCHECK(num_attempts_);
+    return static_cast<double>(num_successful_) /
+        static_cast<double>(num_attempts_);
+  }
+
+  // Dumps a human-readable summary of the tallies via VerboseOut().
+  void PrintResults(const char* class_description) {
+    if (num_attempts_ == 0) {
+      VerboseOut("No data for %s\n", class_description);
+      return;
+    }
+
+    VerboseOut("Requester results for %s\n", class_description);
+    VerboseOut("  %d attempts\n", num_attempts_);
+    VerboseOut("  %d successes\n", num_successful_);
+    VerboseOut("  %d 5xx responses\n", num_failed_);
+    VerboseOut("  %d requests blocked\n", num_blocked_);
+    VerboseOut("  %.2f success ratio\n", GetSuccessRatio());
+    VerboseOut("  %.2f blocked ratio\n", GetBlockedRatio());
+    VerboseOut("\n");
+  }
+
+ private:
+  int num_attempts_;
+  int num_successful_;
+  int num_failed_;
+  int num_blocked_;
+};
+
+// Represents an Requester in a simulated DDoS situation, that periodically
+// requests a specific resource.
+class Requester : public DiscreteTimeSimulation::Actor {
+ public:
+  // |throttler_entry| supplies both throttling decisions and the fake clock;
+  // |results| may be NULL if the caller does not want tallies.
+  Requester(MockURLRequestThrottlerEntry* throttler_entry,
+            const TimeDelta& time_between_requests,
+            Server* server,
+            RequesterResults* results)
+      : throttler_entry_(throttler_entry),
+        time_between_requests_(time_between_requests),
+        last_attempt_was_failure_(false),
+        server_(server),
+        results_(results) {
+    DCHECK(server_);
+  }
+
+  // DiscreteTimeSimulation::Actor implementation.  Explicit |virtual| for
+  // consistency with the other Actor subclasses in this file.
+  virtual void AdvanceTime(const TimeTicks& absolute_time) OVERRIDE {
+    if (time_of_last_success_.is_null())
+      time_of_last_success_ = absolute_time;
+
+    throttler_entry_->SetFakeNow(absolute_time);
+  }
+
+  virtual void PerformAction() OVERRIDE {
+    // Jitter the inter-request delay by up to |request_jitter_| in either
+    // direction, choosing the direction at random.
+    TimeDelta effective_delay = time_between_requests_;
+    TimeDelta current_jitter = TimeDelta::FromMilliseconds(
+        request_jitter_.InMilliseconds() * base::RandDouble());
+    if (base::RandInt(0, 1)) {
+      effective_delay -= current_jitter;
+    } else {
+      effective_delay += current_jitter;
+    }
+
+    if (throttler_entry_->fake_now() - time_of_last_attempt_ >
+        effective_delay) {
+      if (!throttler_entry_->ShouldRejectRequest(server_->mock_request())) {
+        int status_code = server_->HandleRequest();
+        MockURLRequestThrottlerHeaderAdapter response_headers(status_code);
+        throttler_entry_->UpdateWithResponse("", &response_headers);
+
+        if (status_code == 200) {
+          if (results_)
+            results_->AddSuccess();
+
+          if (last_attempt_was_failure_) {
+            last_downtime_duration_ =
+                throttler_entry_->fake_now() - time_of_last_success_;
+          }
+
+          time_of_last_success_ = throttler_entry_->fake_now();
+          last_attempt_was_failure_ = false;
+        } else {
+          if (results_)
+            results_->AddFailure();
+          last_attempt_was_failure_ = true;
+        }
+      } else {
+        // The throttler blocked the request before it reached the server.
+        if (results_)
+          results_->AddBlocked();
+        last_attempt_was_failure_ = true;
+      }
+
+      time_of_last_attempt_ = throttler_entry_->fake_now();
+    }
+  }
+
+  // Adds a delay until the first request, equal to a uniformly distributed
+  // value between now and now + max_delay.
+  void SetStartupJitter(const TimeDelta& max_delay) {
+    int delay_ms = base::RandInt(0, max_delay.InMilliseconds());
+    time_of_last_attempt_ = TimeTicks() +
+        TimeDelta::FromMilliseconds(delay_ms) - time_between_requests_;
+  }
+
+  void SetRequestJitter(const TimeDelta& request_jitter) {
+    request_jitter_ = request_jitter;
+  }
+
+  TimeDelta last_downtime_duration() const { return last_downtime_duration_; }
+
+ private:
+  scoped_refptr<MockURLRequestThrottlerEntry> throttler_entry_;
+  const TimeDelta time_between_requests_;
+  TimeDelta request_jitter_;
+  TimeTicks time_of_last_attempt_;
+  TimeTicks time_of_last_success_;
+  bool last_attempt_was_failure_;
+  TimeDelta last_downtime_duration_;
+  Server* const server_;
+  RequesterResults* const results_;  // May be NULL.
+
+  DISALLOW_COPY_AND_ASSIGN(Requester);
+};
+
+// Runs a six-minute simulated DDoS against |server| with 50 attackers and 50
+// well-behaved clients, recording per-class outcomes in the given results
+// objects.  |enable_throttling| toggles the anti-DDoS back-off logic.
+void SimulateAttack(Server* server,
+                    RequesterResults* attacker_results,
+                    RequesterResults* client_results,
+                    bool enable_throttling) {
+  const size_t kNumAttackers = 50;
+  const size_t kNumClients = 50;
+  DiscreteTimeSimulation simulation;
+  URLRequestThrottlerManager manager;
+  ScopedVector<Requester> requesters;
+  for (size_t i = 0; i < kNumAttackers; ++i) {
+    // Use a tiny time_between_requests so the attackers will ping the
+    // server at every tick of the simulation.
+    scoped_refptr<MockURLRequestThrottlerEntry> throttler_entry(
+        new MockURLRequestThrottlerEntry(&manager));
+    if (!enable_throttling)
+      throttler_entry->DisableBackoffThrottling();
+
+    // (Indentation fixed: this statement is not conditional on the check
+    // above.)
+    Requester* attacker = new Requester(throttler_entry.get(),
+                                        TimeDelta::FromMilliseconds(1),
+                                        server,
+                                        attacker_results);
+    attacker->SetStartupJitter(TimeDelta::FromSeconds(120));
+    requesters.push_back(attacker);
+    simulation.AddActor(attacker);
+  }
+  for (size_t i = 0; i < kNumClients; ++i) {
+    // Normal clients only make requests every 2 minutes, plus/minus 1 minute.
+    scoped_refptr<MockURLRequestThrottlerEntry> throttler_entry(
+        new MockURLRequestThrottlerEntry(&manager));
+    if (!enable_throttling)
+      throttler_entry->DisableBackoffThrottling();
+
+    Requester* client = new Requester(throttler_entry.get(),
+                                      TimeDelta::FromMinutes(2),
+                                      server,
+                                      client_results);
+    client->SetStartupJitter(TimeDelta::FromSeconds(120));
+    client->SetRequestJitter(TimeDelta::FromMinutes(1));
+    requesters.push_back(client);
+    simulation.AddActor(client);
+  }
+  // The server must act last so it sees the full per-tick request count.
+  simulation.AddActor(server);
+
+  simulation.RunSimulation(TimeDelta::FromMinutes(6),
+                           TimeDelta::FromSeconds(1));
+}
+
+TEST(URLRequestThrottlerSimulation, HelpsInAttack) {
+  Server unprotected_server(30, 1.0);
+  RequesterResults unprotected_attacker_results;
+  RequesterResults unprotected_client_results;
+  Server protected_server(30, 1.0);
+  RequesterResults protected_attacker_results;
+  RequesterResults protected_client_results;
+  SimulateAttack(&unprotected_server,
+                 &unprotected_attacker_results,
+                 &unprotected_client_results,
+                 false);
+  SimulateAttack(&protected_server,
+                 &protected_attacker_results,
+                 &protected_client_results,
+                 true);
+
+  // These assert that the DDoS protection actually benefits the
+  // server. Manual inspection of the traffic graphs will show this
+  // even more clearly.
+  EXPECT_GT(unprotected_server.num_overloaded_ticks(),
+            protected_server.num_overloaded_ticks());
+  EXPECT_GT(unprotected_server.max_experienced_queries_per_tick(),
+            protected_server.max_experienced_queries_per_tick());
+
+  // These assert that the DDoS protection actually benefits non-malicious
+  // (and non-degenerate/accidentally DDoSing) users.
+  EXPECT_LT(protected_client_results.GetBlockedRatio(),
+            protected_attacker_results.GetBlockedRatio());
+  EXPECT_GT(protected_client_results.GetSuccessRatio(),
+            unprotected_client_results.GetSuccessRatio());
+
+  // The rest is just for optional manual evaluation of the results;
+  // in particular the traffic pattern is interesting.
+
+  // Pass the graphs through "%s" rather than as the format string itself,
+  // so that a '%' in the data can never be misinterpreted as a conversion
+  // specifier.
+  VerboseOut("\nUnprotected server's results:\n\n");
+  VerboseOut("%s", unprotected_server.VisualizeASCII(132).c_str());
+  VerboseOut("\n\n");
+  VerboseOut("Protected server's results:\n\n");
+  VerboseOut("%s", protected_server.VisualizeASCII(132).c_str());
+  VerboseOut("\n\n");
+
+  unprotected_attacker_results.PrintResults(
+      "attackers attacking unprotected server.");
+  unprotected_client_results.PrintResults(
+      "normal clients making requests to unprotected server.");
+  protected_attacker_results.PrintResults(
+      "attackers attacking protected server.");
+  protected_client_results.PrintResults(
+      "normal clients making requests to protected server.");
+}
+
+// Returns the downtime perceived by the client, as a ratio of the
+// actual downtime.
+// Simulates a single client polling a server that goes down for |duration|
+// in the middle of the run, and returns the downtime the client perceived
+// as a ratio of the actual downtime.
+double SimulateDowntime(const TimeDelta& duration,
+                        const TimeDelta& average_client_interval,
+                        bool enable_throttling) {
+  // 200 ticks per simulated downtime, regardless of its absolute length.
+  TimeDelta time_between_ticks = duration / 200;
+  TimeTicks start_downtime = TimeTicks() + (duration / 2);
+
+  // A server that never rejects requests, but will go down for maintenance.
+  Server server(std::numeric_limits<int>::max(), 1.0);
+  server.SetDowntime(start_downtime, duration);
+
+  URLRequestThrottlerManager manager;
+  scoped_refptr<MockURLRequestThrottlerEntry> throttler_entry(
+      new MockURLRequestThrottlerEntry(&manager));
+  if (!enable_throttling)
+    throttler_entry->DisableBackoffThrottling();
+
+  // No results object: only the perceived downtime is of interest here.
+  Requester requester(
+      throttler_entry.get(), average_client_interval, &server, NULL);
+  requester.SetStartupJitter(duration / 3);
+  requester.SetRequestJitter(average_client_interval);
+
+  DiscreteTimeSimulation simulation;
+  simulation.AddActor(&requester);
+  simulation.AddActor(&server);
+
+  simulation.RunSimulation(duration * 2, time_between_ticks);
+
+  return static_cast<double>(
+      requester.last_downtime_duration().InMilliseconds()) /
+      static_cast<double>(duration.InMilliseconds());
+}
+
+TEST(URLRequestThrottlerSimulation, PerceivedDowntimeRatio) {
+  struct Stats {
+    // Expected interval that we expect the ratio of downtime when anti-DDoS
+    // is enabled and downtime when anti-DDoS is not enabled to fall within.
+    //
+    // The expected interval depends on two things:  The exponential back-off
+    // policy encoded in URLRequestThrottlerEntry, and the test or set of
+    // tests that the Stats object is tracking (e.g. a test where the client
+    // retries very rapidly on a very long downtime will tend to increase the
+    // number).
+    //
+    // To determine an appropriate new interval when parameters have changed,
+    // run the test a few times (you may have to Ctrl-C out of it after a few
+    // seconds) and choose an interval that the test converges quickly and
+    // reliably to.  Then set the new interval, and run the test e.g. 20 times
+    // in succession to make sure it never takes an obscenely long time to
+    // converge to this interval.
+    double expected_min_increase;
+    double expected_max_increase;
+
+    size_t num_runs;
+    double total_ratio_unprotected;
+    double total_ratio_protected;
+
+    // Returns true once the averaged increase ratio falls inside the
+    // expected interval; always writes the current ratio if the out
+    // parameter is non-NULL.
+    bool DidConverge(double* increase_ratio_out) {
+      double unprotected_ratio = total_ratio_unprotected / num_runs;
+      double protected_ratio = total_ratio_protected / num_runs;
+      double increase_ratio = protected_ratio / unprotected_ratio;
+      if (increase_ratio_out)
+        *increase_ratio_out = increase_ratio;
+      return expected_min_increase <= increase_ratio &&
+          increase_ratio <= expected_max_increase;
+    }
+
+    void ReportTrialResult(double increase_ratio) {
+      VerboseOut(
+          "  Perceived downtime with throttling is %.4f times without.\n",
+          increase_ratio);
+      // num_runs is a size_t; cast to match %d, otherwise the varargs read
+      // is undefined where sizeof(size_t) != sizeof(int).
+      VerboseOut("  Test result after %d trials.\n",
+                 static_cast<int>(num_runs));
+    }
+  };
+
+  // Remaining members are zero-initialized by aggregate initialization.
+  Stats global_stats = { 1.08, 1.15 };
+
+  struct Trial {
+    TimeDelta duration;
+    TimeDelta average_client_interval;
+    Stats stats;
+
+    void PrintTrialDescription() {
+      double duration_minutes =
+          static_cast<double>(duration.InSeconds()) / 60.0;
+      double interval_minutes =
+          static_cast<double>(average_client_interval.InSeconds()) / 60.0;
+      VerboseOut("Trial with %.2f min downtime, avg. interval %.2f min.\n",
+                 duration_minutes, interval_minutes);
+    }
+  };
+
+  // We don't set or check expected ratio intervals on individual
+  // experiments as this might make the test too fragile, but we
+  // print them out at the end for manual evaluation (we want to be
+  // able to make claims about the expected ratios depending on the
+  // type of behavior of the client and the downtime, e.g. the difference
+  // in behavior between a client making requests every few minutes vs.
+  // one that makes a request every 15 seconds).
+  Trial trials[] = {
+    { TimeDelta::FromSeconds(10), TimeDelta::FromSeconds(3) },
+    { TimeDelta::FromSeconds(30), TimeDelta::FromSeconds(7) },
+    { TimeDelta::FromMinutes(5), TimeDelta::FromSeconds(30) },
+    { TimeDelta::FromMinutes(10), TimeDelta::FromSeconds(20) },
+    { TimeDelta::FromMinutes(20), TimeDelta::FromSeconds(15) },
+    { TimeDelta::FromMinutes(20), TimeDelta::FromSeconds(50) },
+    { TimeDelta::FromMinutes(30), TimeDelta::FromMinutes(2) },
+    { TimeDelta::FromMinutes(30), TimeDelta::FromMinutes(5) },
+    { TimeDelta::FromMinutes(40), TimeDelta::FromMinutes(7) },
+    { TimeDelta::FromMinutes(40), TimeDelta::FromMinutes(2) },
+    { TimeDelta::FromMinutes(40), TimeDelta::FromSeconds(15) },
+    { TimeDelta::FromMinutes(60), TimeDelta::FromMinutes(7) },
+    { TimeDelta::FromMinutes(60), TimeDelta::FromMinutes(2) },
+    { TimeDelta::FromMinutes(60), TimeDelta::FromSeconds(15) },
+    { TimeDelta::FromMinutes(80), TimeDelta::FromMinutes(20) },
+    { TimeDelta::FromMinutes(80), TimeDelta::FromMinutes(3) },
+    { TimeDelta::FromMinutes(80), TimeDelta::FromSeconds(15) },
+
+    // Most brutal?
+    { TimeDelta::FromMinutes(45), TimeDelta::FromMilliseconds(500) },
+  };
+
+  // If things don't converge by the time we've done 100K trials, then
+  // clearly one or more of the expected intervals are wrong.
+  while (global_stats.num_runs < 100000) {
+    for (size_t i = 0; i < ARRAYSIZE_UNSAFE(trials); ++i) {
+      ++global_stats.num_runs;
+      ++trials[i].stats.num_runs;
+      double ratio_unprotected = SimulateDowntime(
+          trials[i].duration, trials[i].average_client_interval, false);
+      double ratio_protected = SimulateDowntime(
+          trials[i].duration, trials[i].average_client_interval, true);
+      global_stats.total_ratio_unprotected += ratio_unprotected;
+      global_stats.total_ratio_protected += ratio_protected;
+      trials[i].stats.total_ratio_unprotected += ratio_unprotected;
+      trials[i].stats.total_ratio_protected += ratio_protected;
+    }
+
+    double increase_ratio;
+    if (global_stats.DidConverge(&increase_ratio))
+      break;
+
+    if (global_stats.num_runs > 200) {
+      VerboseOut("Test has not yet converged on expected interval.\n");
+      global_stats.ReportTrialResult(increase_ratio);
+    }
+  }
+
+  double average_increase_ratio;
+  EXPECT_TRUE(global_stats.DidConverge(&average_increase_ratio));
+
+  // Print individual trial results for optional manual evaluation.
+  double max_increase_ratio = 0.0;
+  for (size_t i = 0; i < ARRAYSIZE_UNSAFE(trials); ++i) {
+    double increase_ratio;
+    trials[i].stats.DidConverge(&increase_ratio);
+    max_increase_ratio = std::max(max_increase_ratio, increase_ratio);
+    trials[i].PrintTrialDescription();
+    trials[i].stats.ReportTrialResult(increase_ratio);
+  }
+
+  VerboseOut("Average increase ratio was %.4f\n", average_increase_ratio);
+  VerboseOut("Maximum increase ratio was %.4f\n", max_increase_ratio);
+}
+
+}  // namespace
+}  // namespace net
diff --git a/src/net/url_request/url_request_throttler_test_support.cc b/src/net/url_request/url_request_throttler_test_support.cc
new file mode 100644
index 0000000..b6088ec
--- /dev/null
+++ b/src/net/url_request/url_request_throttler_test_support.cc
@@ -0,0 +1,58 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_throttler_test_support.h"
+
+#include "net/url_request/url_request_throttler_entry.h"
+
+namespace net {
+
+MockBackoffEntry::MockBackoffEntry(const BackoffEntry::Policy* const policy)
+    : BackoffEntry(policy) {
+}
+
+MockBackoffEntry::~MockBackoffEntry() {
+}
+
+base::TimeTicks MockBackoffEntry::ImplGetTimeNow() const {
+  return fake_now_;
+}
+
+void MockBackoffEntry::set_fake_now(const base::TimeTicks& now) {
+  fake_now_ = now;
+}
+
+MockURLRequestThrottlerHeaderAdapter::MockURLRequestThrottlerHeaderAdapter(
+    int response_code)
+    : fake_response_code_(response_code) {
+}
+
+MockURLRequestThrottlerHeaderAdapter::MockURLRequestThrottlerHeaderAdapter(
+    const std::string& retry_value,
+    const std::string& opt_out_value,
+    int response_code)
+    : fake_retry_value_(retry_value),
+      fake_opt_out_value_(opt_out_value),
+      fake_response_code_(response_code) {
+}
+
+MockURLRequestThrottlerHeaderAdapter::~MockURLRequestThrottlerHeaderAdapter() {
+}
+
+std::string MockURLRequestThrottlerHeaderAdapter::GetNormalizedValue(
+    const std::string& key) const {
+  if (key ==
+      URLRequestThrottlerEntry::kExponentialThrottlingHeader &&
+      !fake_opt_out_value_.empty()) {
+    return fake_opt_out_value_;
+  }
+
+  return "";
+}
+
+int MockURLRequestThrottlerHeaderAdapter::GetResponseCode() const {
+  return fake_response_code_;
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_throttler_test_support.h b/src/net/url_request/url_request_throttler_test_support.h
new file mode 100644
index 0000000..cad041c
--- /dev/null
+++ b/src/net/url_request/url_request_throttler_test_support.h
@@ -0,0 +1,61 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_URL_REQUEST_THROTTLER_TEST_SUPPORT_H_
+#define NET_URL_REQUEST_URL_REQUEST_THROTTLER_TEST_SUPPORT_H_
+
+#include <string>
+
+#include "base/time.h"
+#include "net/base/backoff_entry.h"
+#include "net/url_request/url_request_throttler_header_interface.h"
+
+namespace net {
+
+class MockBackoffEntry : public BackoffEntry {
+ public:
+  explicit MockBackoffEntry(const BackoffEntry::Policy* const policy);
+  virtual ~MockBackoffEntry();
+
+  // BackoffEntry overrides.
+  virtual base::TimeTicks ImplGetTimeNow() const OVERRIDE;
+
+  void set_fake_now(const base::TimeTicks& now);
+
+ private:
+  base::TimeTicks fake_now_;
+};
+
+// Mocks the URLRequestThrottlerHeaderInterface, allowing testing code to
+// pass arbitrary fake headers to the throttling code.
+class MockURLRequestThrottlerHeaderAdapter
+    : public URLRequestThrottlerHeaderInterface {
+ public:
+  // Constructs mock response headers with the given |response_code| and no
+  // custom response header fields.
+  explicit MockURLRequestThrottlerHeaderAdapter(int response_code);
+
+  // Constructs mock response headers with the given |response_code| and
+  // with a custom-retry header value |retry_value| if it is non-empty, and
+  // a custom opt-out header value |opt_out_value| if it is non-empty.
+  MockURLRequestThrottlerHeaderAdapter(const std::string& retry_value,
+                                       const std::string& opt_out_value,
+                                       int response_code);
+  virtual ~MockURLRequestThrottlerHeaderAdapter();
+
+  // URLRequestThrottlerHeaderInterface overrides.
+  virtual std::string GetNormalizedValue(const std::string& key) const OVERRIDE;
+  virtual int GetResponseCode() const OVERRIDE;
+
+ private:
+  std::string fake_retry_value_;
+  std::string fake_opt_out_value_;
+  int fake_response_code_;
+
+  DISALLOW_COPY_AND_ASSIGN(MockURLRequestThrottlerHeaderAdapter);
+};
+
+}  // namespace net
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_THROTTLER_TEST_SUPPORT_H_
diff --git a/src/net/url_request/url_request_throttler_unittest.cc b/src/net/url_request/url_request_throttler_unittest.cc
new file mode 100644
index 0000000..221d0c8
--- /dev/null
+++ b/src/net/url_request/url_request_throttler_unittest.cc
@@ -0,0 +1,563 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/url_request_throttler_manager.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/pickle.h"
+#include "base/stl_util.h"
+#include "base/stringprintf.h"
+#include "base/string_number_conversions.h"
+#include "base/time.h"
+#include "net/base/load_flags.h"
+#include "net/base/test_completion_callback.h"
+#include "net/url_request/url_request_context.h"
+#include "net/url_request/url_request_test_util.h"
+#include "net/url_request/url_request_throttler_header_interface.h"
+#include "net/url_request/url_request_throttler_test_support.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::TimeDelta;
+using base::TimeTicks;
+
+namespace net {
+
+namespace {
+
+using base::Histogram;
+using base::HistogramSamples;
+using base::StatisticsRecorder;
+
+class MockURLRequestThrottlerEntry : public URLRequestThrottlerEntry {
+ public:
+  explicit MockURLRequestThrottlerEntry(
+      net::URLRequestThrottlerManager* manager)
+      : net::URLRequestThrottlerEntry(manager, ""),
+        mock_backoff_entry_(&backoff_policy_) {
+    InitPolicy();
+  }
+  MockURLRequestThrottlerEntry(
+      net::URLRequestThrottlerManager* manager,
+      const TimeTicks& exponential_backoff_release_time,
+      const TimeTicks& sliding_window_release_time,
+      const TimeTicks& fake_now)
+      : net::URLRequestThrottlerEntry(manager, ""),
+        fake_time_now_(fake_now),
+        mock_backoff_entry_(&backoff_policy_) {
+    InitPolicy();
+
+    mock_backoff_entry_.set_fake_now(fake_now);
+    set_exponential_backoff_release_time(exponential_backoff_release_time);
+    set_sliding_window_release_time(sliding_window_release_time);
+  }
+
+  void InitPolicy() {
+    // Some tests become flaky if we have jitter.
+    backoff_policy_.jitter_factor = 0.0;
+
+    // This lets us avoid having to make multiple failures initially (this
+    // logic is already tested in the BackoffEntry unit tests).
+    backoff_policy_.num_errors_to_ignore = 0;
+  }
+
+  const BackoffEntry* GetBackoffEntry() const {
+    return &mock_backoff_entry_;
+  }
+
+  BackoffEntry* GetBackoffEntry() {
+    return &mock_backoff_entry_;
+  }
+
+  static bool ExplicitUserRequest(int load_flags) {
+    return URLRequestThrottlerEntry::ExplicitUserRequest(load_flags);
+  }
+
+  void ResetToBlank(const TimeTicks& time_now) {
+    fake_time_now_ = time_now;
+    mock_backoff_entry_.set_fake_now(time_now);
+
+    GetBackoffEntry()->Reset();
+    GetBackoffEntry()->SetCustomReleaseTime(time_now);
+    set_sliding_window_release_time(time_now);
+  }
+
+  // Overridden for tests.
+  virtual TimeTicks ImplGetTimeNow() const OVERRIDE { return fake_time_now_; }
+
+  void set_exponential_backoff_release_time(
+      const base::TimeTicks& release_time) {
+    GetBackoffEntry()->SetCustomReleaseTime(release_time);
+  }
+
+  base::TimeTicks sliding_window_release_time() const {
+    return URLRequestThrottlerEntry::sliding_window_release_time();
+  }
+
+  void set_sliding_window_release_time(
+      const base::TimeTicks& release_time) {
+    URLRequestThrottlerEntry::set_sliding_window_release_time(
+        release_time);
+  }
+
+  TimeTicks fake_time_now_;
+  MockBackoffEntry mock_backoff_entry_;
+
+ protected:
+  virtual ~MockURLRequestThrottlerEntry() {}
+};
+
+class MockURLRequestThrottlerManager : public URLRequestThrottlerManager {
+ public:
+  MockURLRequestThrottlerManager() : create_entry_index_(0) {}
+
+  // Method to process the URL using URLRequestThrottlerManager protected
+  // method.
+  std::string DoGetUrlIdFromUrl(const GURL& url) { return GetIdFromUrl(url); }
+
+  // Method to use the garbage collecting method of URLRequestThrottlerManager.
+  void DoGarbageCollectEntries() { GarbageCollectEntries(); }
+
+  // Returns the number of entries in the map.
+  int GetNumberOfEntries() const { return GetNumberOfEntriesForTests(); }
+
+  void CreateEntry(bool is_outdated) {
+    TimeTicks time = TimeTicks::Now();
+    if (is_outdated) {
+      time -= TimeDelta::FromMilliseconds(
+          MockURLRequestThrottlerEntry::kDefaultEntryLifetimeMs + 1000);
+    }
+    std::string fake_url_string("http://www.fakeurl.com/");
+    fake_url_string.append(base::IntToString(create_entry_index_++));
+    GURL fake_url(fake_url_string);
+    OverrideEntryForTests(
+        fake_url,
+        new MockURLRequestThrottlerEntry(this, time, TimeTicks::Now(),
+                                         TimeTicks::Now()));
+  }
+
+ private:
+  int create_entry_index_;
+};
+
+struct TimeAndBool {
+  TimeAndBool(const TimeTicks& time_value, bool expected, int line_num) {
+    time = time_value;
+    result = expected;
+    line = line_num;
+  }
+  TimeTicks time;
+  bool result;
+  int line;
+};
+
+struct GurlAndString {
+  GurlAndString(const GURL& url_value,
+                const std::string& expected,
+                int line_num) {
+    url = url_value;
+    result = expected;
+    line = line_num;
+  }
+  GURL url;
+  std::string result;
+  int line;
+};
+
+}  // namespace
+
+class URLRequestThrottlerEntryTest : public testing::Test {
+ protected:
+  URLRequestThrottlerEntryTest() : request_(GURL(), NULL, &context_) {
+  }
+
+  virtual void SetUp();
+  virtual void TearDown();
+
+  // After calling this function, histogram snapshots in |samples_| contain
+  // only the delta caused by the test case currently running.
+  void CalculateHistogramDeltas();
+
+  TimeTicks now_;
+  MockURLRequestThrottlerManager manager_;  // Dummy object, not used.
+  scoped_refptr<MockURLRequestThrottlerEntry> entry_;
+
+  std::map<std::string, HistogramSamples*> original_samples_;
+  std::map<std::string, HistogramSamples*> samples_;
+
+  TestURLRequestContext context_;
+  TestURLRequest request_;
+};
+
+// List of all histograms we care about in these unit tests.
+const char* kHistogramNames[] = {
+  "Throttling.FailureCountAtSuccess",
+  "Throttling.PerceivedDowntime",
+  "Throttling.RequestThrottled",
+  "Throttling.SiteOptedOut",
+};
+
+void URLRequestThrottlerEntryTest::SetUp() {
+  request_.set_load_flags(0);
+
+  now_ = TimeTicks::Now();
+  entry_ = new MockURLRequestThrottlerEntry(&manager_);
+  entry_->ResetToBlank(now_);
+
+  for (size_t i = 0; i < arraysize(kHistogramNames); ++i) {
+    // Must retrieve original samples for each histogram for comparison
+    // as other tests may affect them.
+    const char* name = kHistogramNames[i];
+    Histogram* histogram = StatisticsRecorder::FindHistogram(name);
+    if (histogram) {
+      original_samples_[name] = histogram->SnapshotSamples().release();
+    } else {
+      original_samples_[name] = NULL;
+    }
+  }
+}
+
+void URLRequestThrottlerEntryTest::TearDown() {
+  STLDeleteValues(&original_samples_);
+  STLDeleteValues(&samples_);
+}
+
+void URLRequestThrottlerEntryTest::CalculateHistogramDeltas() {
+  for (size_t i = 0; i < arraysize(kHistogramNames); ++i) {
+    const char* name = kHistogramNames[i];
+    HistogramSamples* original = original_samples_[name];
+
+    Histogram* histogram = StatisticsRecorder::FindHistogram(name);
+    if (histogram) {
+      ASSERT_EQ(Histogram::kUmaTargetedHistogramFlag, histogram->flags());
+
+      scoped_ptr<HistogramSamples> samples(histogram->SnapshotSamples());
+      if (original)
+        samples->Subtract(*original);
+      samples_[name] = samples.release();
+    }
+  }
+
+  // Ensure we don't accidentally use the originals in our tests.
+  STLDeleteValues(&original_samples_);
+  original_samples_.clear();
+}
+
+std::ostream& operator<<(std::ostream& out, const base::TimeTicks& time) {
+  return out << time.ToInternalValue();
+}
+
+TEST_F(URLRequestThrottlerEntryTest, InterfaceDuringExponentialBackoff) {
+  entry_->set_exponential_backoff_release_time(
+      entry_->fake_time_now_ + TimeDelta::FromMilliseconds(1));
+  EXPECT_TRUE(entry_->ShouldRejectRequest(request_));
+
+  // Also end-to-end test the load flags exceptions.
+  request_.set_load_flags(LOAD_MAYBE_USER_GESTURE);
+  EXPECT_FALSE(entry_->ShouldRejectRequest(request_));
+
+  CalculateHistogramDeltas();
+  ASSERT_EQ(1, samples_["Throttling.RequestThrottled"]->GetCount(0));
+  ASSERT_EQ(1, samples_["Throttling.RequestThrottled"]->GetCount(1));
+}
+
+TEST_F(URLRequestThrottlerEntryTest, InterfaceNotDuringExponentialBackoff) {
+  entry_->set_exponential_backoff_release_time(entry_->fake_time_now_);
+  EXPECT_FALSE(entry_->ShouldRejectRequest(request_));
+  entry_->set_exponential_backoff_release_time(
+      entry_->fake_time_now_ - TimeDelta::FromMilliseconds(1));
+  EXPECT_FALSE(entry_->ShouldRejectRequest(request_));
+
+  CalculateHistogramDeltas();
+  ASSERT_EQ(2, samples_["Throttling.RequestThrottled"]->GetCount(0));
+  ASSERT_EQ(0, samples_["Throttling.RequestThrottled"]->GetCount(1));
+}
+
+TEST_F(URLRequestThrottlerEntryTest, InterfaceUpdateFailure) {
+  MockURLRequestThrottlerHeaderAdapter failure_response(503);
+  entry_->UpdateWithResponse("", &failure_response);
+  EXPECT_GT(entry_->GetExponentialBackoffReleaseTime(), entry_->fake_time_now_)
+      << "A failure should increase the release_time";
+}
+
+TEST_F(URLRequestThrottlerEntryTest, InterfaceUpdateSuccess) {
+  MockURLRequestThrottlerHeaderAdapter success_response(200);
+  entry_->UpdateWithResponse("", &success_response);
+  EXPECT_EQ(entry_->GetExponentialBackoffReleaseTime(), entry_->fake_time_now_)
+      << "A success should not add any delay";
+}
+
+TEST_F(URLRequestThrottlerEntryTest, InterfaceUpdateSuccessThenFailure) {
+  MockURLRequestThrottlerHeaderAdapter failure_response(503);
+  MockURLRequestThrottlerHeaderAdapter success_response(200);
+  entry_->UpdateWithResponse("", &success_response);
+  entry_->UpdateWithResponse("", &failure_response);
+  EXPECT_GT(entry_->GetExponentialBackoffReleaseTime(), entry_->fake_time_now_)
+      << "This scenario should add delay";
+  entry_->UpdateWithResponse("", &success_response);
+}
+
+TEST_F(URLRequestThrottlerEntryTest, IsEntryReallyOutdated) {
+  TimeDelta lifetime = TimeDelta::FromMilliseconds(
+      MockURLRequestThrottlerEntry::kDefaultEntryLifetimeMs);
+  const TimeDelta kFiveMs = TimeDelta::FromMilliseconds(5);
+
+  TimeAndBool test_values[] = {
+      TimeAndBool(now_, false, __LINE__),
+      TimeAndBool(now_ - kFiveMs, false, __LINE__),
+      TimeAndBool(now_ + kFiveMs, false, __LINE__),
+      TimeAndBool(now_ - (lifetime - kFiveMs), false, __LINE__),
+      TimeAndBool(now_ - lifetime, true, __LINE__),
+      TimeAndBool(now_ - (lifetime + kFiveMs), true, __LINE__)};
+
+  for (unsigned int i = 0; i < arraysize(test_values); ++i) {
+    entry_->set_exponential_backoff_release_time(test_values[i].time);
+    EXPECT_EQ(entry_->IsEntryOutdated(), test_values[i].result) <<
+        "Test case #" << i << " line " << test_values[i].line << " failed";
+  }
+}
+
+TEST_F(URLRequestThrottlerEntryTest, MaxAllowedBackoff) {
+  for (int i = 0; i < 30; ++i) {
+    MockURLRequestThrottlerHeaderAdapter response_adapter(503);
+    entry_->UpdateWithResponse("", &response_adapter);
+  }
+
+  TimeDelta delay = entry_->GetExponentialBackoffReleaseTime() - now_;
+  EXPECT_EQ(delay.InMilliseconds(),
+            MockURLRequestThrottlerEntry::kDefaultMaximumBackoffMs);
+}
+
+TEST_F(URLRequestThrottlerEntryTest, MalformedContent) {
+  MockURLRequestThrottlerHeaderAdapter response_adapter(503);
+  for (int i = 0; i < 5; ++i)
+    entry_->UpdateWithResponse("", &response_adapter);
+
+  TimeTicks release_after_failures = entry_->GetExponentialBackoffReleaseTime();
+
+  // Inform the entry that a response body was malformed, which is supposed to
+  // increase the back-off time.  Note that we also submit a successful
+  // UpdateWithResponse to pair with ReceivedContentWasMalformed() since that
+  // is what happens in practice (if a body is received, then a non-500
+  // response must also have been received).
+  entry_->ReceivedContentWasMalformed(200);
+  MockURLRequestThrottlerHeaderAdapter success_adapter(200);
+  entry_->UpdateWithResponse("", &success_adapter);
+  EXPECT_GT(entry_->GetExponentialBackoffReleaseTime(), release_after_failures);
+}
+
+TEST_F(URLRequestThrottlerEntryTest, SlidingWindow) {
+  int max_send = URLRequestThrottlerEntry::kDefaultMaxSendThreshold;
+  int sliding_window =
+      URLRequestThrottlerEntry::kDefaultSlidingWindowPeriodMs;
+
+  TimeTicks time_1 = entry_->fake_time_now_ +
+      TimeDelta::FromMilliseconds(sliding_window / 3);
+  TimeTicks time_2 = entry_->fake_time_now_ +
+      TimeDelta::FromMilliseconds(2 * sliding_window / 3);
+  TimeTicks time_3 = entry_->fake_time_now_ +
+      TimeDelta::FromMilliseconds(sliding_window);
+  TimeTicks time_4 = entry_->fake_time_now_ +
+      TimeDelta::FromMilliseconds(sliding_window + 2 * sliding_window / 3);
+
+  entry_->set_exponential_backoff_release_time(time_1);
+
+  for (int i = 0; i < max_send / 2; ++i) {
+    EXPECT_EQ(2 * sliding_window / 3,
+              entry_->ReserveSendingTimeForNextRequest(time_2));
+  }
+  EXPECT_EQ(time_2, entry_->sliding_window_release_time());
+
+  entry_->fake_time_now_ = time_3;
+
+  for (int i = 0; i < (max_send + 1) / 2; ++i)
+    EXPECT_EQ(0, entry_->ReserveSendingTimeForNextRequest(TimeTicks()));
+
+  EXPECT_EQ(time_4, entry_->sliding_window_release_time());
+}
+
+TEST_F(URLRequestThrottlerEntryTest, ExplicitUserRequest) {
+  ASSERT_FALSE(MockURLRequestThrottlerEntry::ExplicitUserRequest(0));
+  ASSERT_TRUE(MockURLRequestThrottlerEntry::ExplicitUserRequest(
+      LOAD_MAYBE_USER_GESTURE));
+  ASSERT_FALSE(MockURLRequestThrottlerEntry::ExplicitUserRequest(
+      ~LOAD_MAYBE_USER_GESTURE));
+}
+
+class URLRequestThrottlerManagerTest : public testing::Test {
+ protected:
+  URLRequestThrottlerManagerTest()
+      : request_(GURL(), NULL, &context_) {
+  }
+
+  virtual void SetUp() {
+    request_.set_load_flags(0);
+  }
+
+  // context_ must be declared before request_.
+  TestURLRequestContext context_;
+  TestURLRequest request_;
+};
+
+TEST_F(URLRequestThrottlerManagerTest, IsUrlStandardised) {
+  MockURLRequestThrottlerManager manager;
+  GurlAndString test_values[] = {
+      GurlAndString(GURL("http://www.example.com"),
+                    std::string("http://www.example.com/"),
+                    __LINE__),
+      GurlAndString(GURL("http://www.Example.com"),
+                    std::string("http://www.example.com/"),
+                    __LINE__),
+      GurlAndString(GURL("http://www.ex4mple.com/Pr4c71c41"),
+                    std::string("http://www.ex4mple.com/pr4c71c41"),
+                    __LINE__),
+      GurlAndString(GURL("http://www.example.com/0/token/false"),
+                    std::string("http://www.example.com/0/token/false"),
+                    __LINE__),
+      GurlAndString(GURL("http://www.example.com/index.php?code=javascript"),
+                    std::string("http://www.example.com/index.php"),
+                    __LINE__),
+      GurlAndString(GURL("http://www.example.com/index.php?code=1#superEntry"),
+                    std::string("http://www.example.com/index.php"),
+                    __LINE__),
+      GurlAndString(GURL("http://www.example.com/index.php#superEntry"),
+                    std::string("http://www.example.com/index.php"),
+                    __LINE__),
+      GurlAndString(GURL("http://www.example.com:1234/"),
+                    std::string("http://www.example.com:1234/"),
+                    __LINE__)};
+
+  for (unsigned int i = 0; i < arraysize(test_values); ++i) {
+    std::string temp = manager.DoGetUrlIdFromUrl(test_values[i].url);
+    EXPECT_EQ(temp, test_values[i].result) <<
+        "Test case #" << i << " line " << test_values[i].line << " failed";
+  }
+}
+
+TEST_F(URLRequestThrottlerManagerTest, AreEntriesBeingCollected) {
+  MockURLRequestThrottlerManager manager;
+
+  manager.CreateEntry(true);  // true = Entry is outdated.
+  manager.CreateEntry(true);
+  manager.CreateEntry(true);
+  manager.DoGarbageCollectEntries();
+  EXPECT_EQ(0, manager.GetNumberOfEntries());
+
+  manager.CreateEntry(false);
+  manager.CreateEntry(false);
+  manager.CreateEntry(false);
+  manager.CreateEntry(true);
+  manager.DoGarbageCollectEntries();
+  EXPECT_EQ(3, manager.GetNumberOfEntries());
+}
+
+TEST_F(URLRequestThrottlerManagerTest, IsHostBeingRegistered) {
+  MockURLRequestThrottlerManager manager;
+
+  manager.RegisterRequestUrl(GURL("http://www.example.com/"));
+  manager.RegisterRequestUrl(GURL("http://www.google.com/"));
+  manager.RegisterRequestUrl(GURL("http://www.google.com/index/0"));
+  manager.RegisterRequestUrl(GURL("http://www.google.com/index/0?code=1"));
+  manager.RegisterRequestUrl(GURL("http://www.google.com/index/0#lolsaure"));
+
+  EXPECT_EQ(3, manager.GetNumberOfEntries());
+}
+
+void ExpectEntryAllowsAllOnErrorIfOptedOut(
+    net::URLRequestThrottlerEntryInterface* entry,
+    bool opted_out,
+    const URLRequest& request) {
+  EXPECT_FALSE(entry->ShouldRejectRequest(request));
+  MockURLRequestThrottlerHeaderAdapter failure_adapter(503);
+  for (int i = 0; i < 10; ++i) {
+    // Host doesn't really matter in this scenario so we skip it.
+    entry->UpdateWithResponse("", &failure_adapter);
+  }
+  EXPECT_NE(opted_out, entry->ShouldRejectRequest(request));
+
+  if (opted_out) {
+    // We're not mocking out GetTimeNow() in this scenario
+    // so add a 100 ms buffer to avoid flakiness (that should always
+    // give enough time to get from the TimeTicks::Now() call here
+    // to the TimeTicks::Now() call in the entry class).
+    EXPECT_GT(TimeTicks::Now() + TimeDelta::FromMilliseconds(100),
+              entry->GetExponentialBackoffReleaseTime());
+  } else {
+    // As above, add 100 ms.
+    EXPECT_LT(TimeTicks::Now() + TimeDelta::FromMilliseconds(100),
+              entry->GetExponentialBackoffReleaseTime());
+  }
+}
+
+TEST_F(URLRequestThrottlerManagerTest, OptOutHeader) {
+  MockURLRequestThrottlerManager manager;
+  scoped_refptr<net::URLRequestThrottlerEntryInterface> entry =
+      manager.RegisterRequestUrl(GURL("http://www.google.com/yodude"));
+
+  // Fake a response with the opt-out header.
+  MockURLRequestThrottlerHeaderAdapter response_adapter(
+      "",
+      MockURLRequestThrottlerEntry::kExponentialThrottlingDisableValue,
+      200);
+  entry->UpdateWithResponse("www.google.com", &response_adapter);
+
+  // Ensure that the same entry on error always allows everything.
+  ExpectEntryAllowsAllOnErrorIfOptedOut(entry, true, request_);
+
+  // Ensure that a freshly created entry (for a different URL on an
+  // already opted-out host) also gets "always allow" behavior.
+  scoped_refptr<net::URLRequestThrottlerEntryInterface> other_entry =
+      manager.RegisterRequestUrl(GURL("http://www.google.com/bingobob"));
+  ExpectEntryAllowsAllOnErrorIfOptedOut(other_entry, true, request_);
+
+  // Fake a response with the opt-out header incorrectly specified.
+  scoped_refptr<net::URLRequestThrottlerEntryInterface> no_opt_out_entry =
+      manager.RegisterRequestUrl(GURL("http://www.nike.com/justdoit"));
+  MockURLRequestThrottlerHeaderAdapter wrong_adapter("", "yesplease", 200);
+  no_opt_out_entry->UpdateWithResponse("www.nike.com", &wrong_adapter);
+  ExpectEntryAllowsAllOnErrorIfOptedOut(no_opt_out_entry, false, request_);
+
+  // A localhost entry should always be opted out.
+  scoped_refptr<net::URLRequestThrottlerEntryInterface> localhost_entry =
+      manager.RegisterRequestUrl(GURL("http://localhost/hello"));
+  ExpectEntryAllowsAllOnErrorIfOptedOut(localhost_entry, true, request_);
+}
+
+TEST_F(URLRequestThrottlerManagerTest, ClearOnNetworkChange) {
+  for (int i = 0; i < 3; ++i) {
+    MockURLRequestThrottlerManager manager;
+    scoped_refptr<net::URLRequestThrottlerEntryInterface> entry_before =
+        manager.RegisterRequestUrl(GURL("http://www.example.com/"));
+    MockURLRequestThrottlerHeaderAdapter failure_adapter(503);
+    for (int j = 0; j < 10; ++j) {
+      // Host doesn't really matter in this scenario so we skip it.
+      entry_before->UpdateWithResponse("", &failure_adapter);
+    }
+    EXPECT_TRUE(entry_before->ShouldRejectRequest(request_));
+
+    switch (i) {
+      case 0:
+        manager.OnIPAddressChanged();
+        break;
+      case 1:
+        manager.OnConnectionTypeChanged(
+            net::NetworkChangeNotifier::CONNECTION_UNKNOWN);
+        break;
+      case 2:
+        manager.OnConnectionTypeChanged(
+            net::NetworkChangeNotifier::CONNECTION_NONE);
+        break;
+      default:
+        FAIL();
+    }
+
+    scoped_refptr<net::URLRequestThrottlerEntryInterface> entry_after =
+        manager.RegisterRequestUrl(GURL("http://www.example.com/"));
+    EXPECT_FALSE(entry_after->ShouldRejectRequest(request_));
+  }
+}
+
+}  // namespace net
diff --git a/src/net/url_request/url_request_unittest.cc b/src/net/url_request/url_request_unittest.cc
new file mode 100644
index 0000000..fb17ec9
--- /dev/null
+++ b/src/net/url_request/url_request_unittest.cc
@@ -0,0 +1,5197 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include <shlobj.h>
+#endif
+
+#include <algorithm>
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/compiler_specific.h"
+#include "base/file_util.h"
+#include "base/format_macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop.h"
+#include "base/path_service.h"
+#include "base/process_util.h"
+#include "base/string_number_conversions.h"
+#include "base/string_piece.h"
+#include "base/string_split.h"
+#include "base/string_util.h"
+#include "base/stringprintf.h"
+#include "base/utf_string_conversions.h"
+#include "net/base/cert_test_util.h"
+#include "net/base/ev_root_ca_metadata.h"
+#include "net/base/load_flags.h"
+#include "net/base/mock_host_resolver.h"
+#include "net/base/net_errors.h"
+#include "net/base/net_log.h"
+#include "net/base/net_log_unittest.h"
+#include "net/base/net_module.h"
+#include "net/base/net_util.h"
+#include "net/base/ssl_connection_status_flags.h"
+#include "net/base/test_data_directory.h"
+#include "net/base/test_root_certs.h"
+#include "net/base/upload_bytes_element_reader.h"
+#include "net/base/upload_data_stream.h"
+#include "net/base/upload_file_element_reader.h"
+#include "net/cookies/cookie_monster.h"
+#include "net/cookies/cookie_store_test_helpers.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/ftp/ftp_network_layer.h"
+#include "net/http/http_cache.h"
+#include "net/http/http_network_layer.h"
+#include "net/http/http_network_session.h"
+#include "net/http/http_request_headers.h"
+#include "net/http/http_response_headers.h"
+#include "net/ocsp/nss_ocsp.h"
+#include "net/proxy/proxy_service.h"
+#include "net/socket/ssl_client_socket.h"
+#include "net/test/test_server.h"
+#include "net/url_request/ftp_protocol_handler.h"
+#include "net/url_request/static_http_user_agent_settings.h"
+#include "net/url_request/url_request.h"
+#include "net/url_request/url_request_file_dir_job.h"
+#include "net/url_request/url_request_http_job.h"
+#include "net/url_request/url_request_job_factory_impl.h"
+#include "net/url_request/url_request_redirect_job.h"
+#include "net/url_request/url_request_test_job.h"
+#include "net/url_request/url_request_test_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/platform_test.h"
+
+#if defined(OS_WIN)
+#include "base/win/scoped_com_initializer.h"
+#include "base/win/windows_version.h"
+#endif
+
+using base::Time;
+
+namespace net {
+
+namespace {
+
+const string16 kChrome(ASCIIToUTF16("chrome"));
+const string16 kSecret(ASCIIToUTF16("secret"));
+const string16 kUser(ASCIIToUTF16("user"));
+
+base::StringPiece TestNetResourceProvider(int key) {
+  return "header";
+}
+
+// Do a case-insensitive search through |haystack| for |needle|.
+bool ContainsString(const std::string& haystack, const char* needle) {
+  std::string::const_iterator it =
+      std::search(haystack.begin(),
+                  haystack.end(),
+                  needle,
+                  needle + strlen(needle),
+                  base::CaseInsensitiveCompare<char>());
+  return it != haystack.end();
+}
+
+void FillBuffer(char* buffer, size_t len) {
+  static bool called = false;
+  if (!called) {
+    called = true;
+    int seed = static_cast<int>(Time::Now().ToInternalValue());
+    srand(seed);
+  }
+
+  for (size_t i = 0; i < len; i++) {
+    buffer[i] = static_cast<char>(rand());
+    if (!buffer[i])
+      buffer[i] = 'g';
+  }
+}
+
+UploadDataStream* CreateSimpleUploadData(const char* data) {
+  scoped_ptr<UploadElementReader> reader(
+      new UploadBytesElementReader(data, strlen(data)));
+  return UploadDataStream::CreateWithReader(reader.Pass(), 0);
+}
+
+// Verify that the SSLInfo of a successful SSL connection has valid values.
+void CheckSSLInfo(const SSLInfo& ssl_info) {
+  // Allow ChromeFrame fake SSLInfo to get through.
+  if (ssl_info.cert.get() &&
+      ssl_info.cert.get()->issuer().GetDisplayName() == "Chrome Internal") {
+    // -1 means unknown.
+    EXPECT_EQ(ssl_info.security_bits, -1);
+    return;
+  }
+
+  // -1 means unknown.  0 means no encryption.
+  EXPECT_GT(ssl_info.security_bits, 0);
+
+  // The cipher suite TLS_NULL_WITH_NULL_NULL (0) must not be negotiated.
+  int cipher_suite = SSLConnectionStatusToCipherSuite(
+      ssl_info.connection_status);
+  EXPECT_NE(0, cipher_suite);
+}
+
+bool FingerprintsEqual(const HashValueVector& a, const HashValueVector& b) {
+  size_t size = a.size();
+
+  if (size != b.size())
+    return false;
+
+  for (size_t i = 0; i < size; ++i) {
+    if (!a[i].Equals(b[i]))
+      return false;
+  }
+
+  return true;
+}
+
+// A network delegate that allows the user to choose a subset of request stages
+// to block in. When blocking, the delegate can do one of the following:
+//  * synchronously return a pre-specified error code, or
+//  * asynchronously return that value via an automatically called callback,
+//    or
+//  * block and wait for the user to do a callback.
+// Additionally, the user may also specify a redirect URL -- then each request
+// with the current URL different from the redirect target will be redirected
+// to that target, in the on-before-URL-request stage, independent of whether
+// the delegate blocks in ON_BEFORE_URL_REQUEST or not.
+class BlockingNetworkDelegate : public TestNetworkDelegate {
+ public:
+  // Stages in which the delegate can block.  Values are single bits so they
+  // can be OR-ed together and stored in the |block_on_| mask.
+  enum Stage {
+    NOT_BLOCKED = 0,
+    ON_BEFORE_URL_REQUEST = 1 << 0,
+    ON_BEFORE_SEND_HEADERS = 1 << 1,
+    ON_HEADERS_RECEIVED = 1 << 2,
+    ON_AUTH_REQUIRED = 1 << 3
+  };
+
+  // Behavior during blocked stages.  During other stages, just
+  // returns net::OK or NetworkDelegate::AUTH_REQUIRED_RESPONSE_NO_ACTION.
+  enum BlockMode {
+    SYNCHRONOUS,    // No callback, returns specified return values.
+    AUTO_CALLBACK,  // |this| posts a task to run the callback using the
+                    // specified return codes.
+    USER_CALLBACK,  // User takes care of doing a callback.  |retval_| and
+                    // |auth_retval_| are ignored. In every blocking stage the
+                    // message loop is quit.
+  };
+
+  // Creates a delegate which does not block at all.
+  explicit BlockingNetworkDelegate(BlockMode block_mode);
+
+  // For users to trigger a callback returning |response|.
+  // Side-effects: resets |stage_blocked_for_callback_| and stored callbacks.
+  // Only call if |block_mode_| == USER_CALLBACK.
+  void DoCallback(int response);
+  void DoAuthCallback(NetworkDelegate::AuthRequiredResponse response);
+
+  // Setters.
+  void set_retval(int retval) {
+    ASSERT_NE(USER_CALLBACK, block_mode_);
+    ASSERT_NE(ERR_IO_PENDING, retval);
+    ASSERT_NE(OK, retval);
+    retval_ = retval;
+  }
+
+  // If |auth_retval| == AUTH_REQUIRED_RESPONSE_SET_AUTH, then
+  // |auth_credentials_| will be passed with the response.
+  void set_auth_retval(AuthRequiredResponse auth_retval) {
+    ASSERT_NE(USER_CALLBACK, block_mode_);
+    ASSERT_NE(AUTH_REQUIRED_RESPONSE_IO_PENDING, auth_retval);
+    auth_retval_ = auth_retval;
+  }
+  void set_auth_credentials(const AuthCredentials& auth_credentials) {
+    auth_credentials_ = auth_credentials;
+  }
+
+  void set_redirect_url(const GURL& url) {
+    redirect_url_ = url;
+  }
+
+  // |block_on| is a bit mask of Stage values.
+  void set_block_on(int block_on) {
+    block_on_ = block_on;
+  }
+
+  // Allows the user to check in which state did we block.
+  Stage stage_blocked_for_callback() const {
+    EXPECT_EQ(USER_CALLBACK, block_mode_);
+    return stage_blocked_for_callback_;
+  }
+
+ private:
+  void RunCallback(int response, const CompletionCallback& callback);
+  void RunAuthCallback(AuthRequiredResponse response,
+                       const AuthCallback& callback);
+
+  // TestNetworkDelegate implementation.
+  virtual int OnBeforeURLRequest(URLRequest* request,
+                                 const CompletionCallback& callback,
+                                 GURL* new_url) OVERRIDE;
+
+  virtual int OnBeforeSendHeaders(URLRequest* request,
+                                  const CompletionCallback& callback,
+                                  HttpRequestHeaders* headers) OVERRIDE;
+
+  virtual int OnHeadersReceived(
+      URLRequest* request,
+      const CompletionCallback& callback,
+      const HttpResponseHeaders* original_response_headers,
+      scoped_refptr<HttpResponseHeaders>* override_response_headers) OVERRIDE;
+
+  virtual NetworkDelegate::AuthRequiredResponse OnAuthRequired(
+      URLRequest* request,
+      const AuthChallengeInfo& auth_info,
+      const AuthCallback& callback,
+      AuthCredentials* credentials) OVERRIDE;
+
+  // Resets the callbacks and |stage_blocked_for_callback_|.
+  void Reset();
+
+  // Checks whether we should block in |stage|. If yes, returns an error code
+  // and optionally sets up callback based on |block_mode_|. If no, returns OK.
+  int MaybeBlockStage(Stage stage, const CompletionCallback& callback);
+
+  // Configuration parameters, can be adjusted by public methods:
+  const BlockMode block_mode_;
+
+  // Values returned on blocking stages when mode is SYNCHRONOUS or
+  // AUTO_CALLBACK. For USER_CALLBACK these are set automatically to IO_PENDING.
+  int retval_;  // To be returned in non-auth stages.
+  AuthRequiredResponse auth_retval_;
+
+  GURL redirect_url_;  // Used if non-empty.
+  int block_on_;  // Bit mask: in which stages to block.
+
+  // |auth_credentials_| will be copied to |*target_auth_credential_| on
+  // callback.
+  AuthCredentials auth_credentials_;
+  AuthCredentials* target_auth_credentials_;
+
+  // Internal variables, not set by the user:
+  // Last blocked stage waiting for user callback (unused if |block_mode_| !=
+  // USER_CALLBACK).
+  Stage stage_blocked_for_callback_;
+
+  // Callback objects stored during blocking stages.
+  CompletionCallback callback_;
+  AuthCallback auth_callback_;
+
+  base::WeakPtrFactory<BlockingNetworkDelegate> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(BlockingNetworkDelegate);
+};
+
+// Constructs a delegate that blocks in no stage (|block_on_| == 0);
+// tests opt in to blocking via set_block_on().
+BlockingNetworkDelegate::BlockingNetworkDelegate(BlockMode block_mode)
+    : block_mode_(block_mode),
+      retval_(OK),
+      auth_retval_(AUTH_REQUIRED_RESPONSE_NO_ACTION),
+      block_on_(0),
+      target_auth_credentials_(NULL),
+      stage_blocked_for_callback_(NOT_BLOCKED),
+      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) {
+}
+
+// Completes a non-auth blocked stage on the user's behalf with |response|.
+void BlockingNetworkDelegate::DoCallback(int response) {
+  ASSERT_EQ(USER_CALLBACK, block_mode_);
+  ASSERT_NE(NOT_BLOCKED, stage_blocked_for_callback_);
+  ASSERT_NE(ON_AUTH_REQUIRED, stage_blocked_for_callback_);
+  // Copy the stored callback first: Reset() clears |callback_|.
+  CompletionCallback callback = callback_;
+  Reset();
+  RunCallback(response, callback);
+}
+
+// Completes a blocked ON_AUTH_REQUIRED stage on the user's behalf.
+void BlockingNetworkDelegate::DoAuthCallback(
+    NetworkDelegate::AuthRequiredResponse response) {
+  ASSERT_EQ(USER_CALLBACK, block_mode_);
+  ASSERT_EQ(ON_AUTH_REQUIRED, stage_blocked_for_callback_);
+  // Copy the stored callback first: Reset() clears |auth_callback_|.
+  AuthCallback auth_callback = auth_callback_;
+  Reset();
+  RunAuthCallback(response, auth_callback);
+}
+
+// Invokes |callback| with |response|.  Also used as the target of the task
+// posted in AUTO_CALLBACK mode.
+void BlockingNetworkDelegate::RunCallback(int response,
+                                          const CompletionCallback& callback) {
+  callback.Run(response);
+}
+
+// Invokes |callback| with |response|, first copying |auth_credentials_| to
+// the caller-provided credentials slot when the configured auth response is
+// AUTH_REQUIRED_RESPONSE_SET_AUTH.
+void BlockingNetworkDelegate::RunAuthCallback(AuthRequiredResponse response,
+                                              const AuthCallback& callback) {
+  if (auth_retval_ == AUTH_REQUIRED_RESPONSE_SET_AUTH) {
+    ASSERT_TRUE(target_auth_credentials_ != NULL);
+    *target_auth_credentials_ = auth_credentials_;
+  }
+  callback.Run(response);
+}
+
+int BlockingNetworkDelegate::OnBeforeURLRequest(
+    URLRequest* request,
+    const CompletionCallback& callback,
+    GURL* new_url) {
+  if (redirect_url_ == request->url())
+    return OK;  // We've already seen this request and redirected elsewhere.
+
+  TestNetworkDelegate::OnBeforeURLRequest(request, callback, new_url);
+
+  // The redirect is applied regardless of whether this stage blocks.
+  if (!redirect_url_.is_empty())
+    *new_url = redirect_url_;
+
+  return MaybeBlockStage(ON_BEFORE_URL_REQUEST, callback);
+}
+
+// Forwards to the base class for bookkeeping, then blocks in this stage if
+// ON_BEFORE_SEND_HEADERS is set in |block_on_|.
+int BlockingNetworkDelegate::OnBeforeSendHeaders(
+    URLRequest* request,
+    const CompletionCallback& callback,
+    HttpRequestHeaders* headers) {
+  TestNetworkDelegate::OnBeforeSendHeaders(request, callback, headers);
+
+  return MaybeBlockStage(ON_BEFORE_SEND_HEADERS, callback);
+}
+
+// Forwards to the base class for bookkeeping, then blocks in this stage if
+// ON_HEADERS_RECEIVED is set in |block_on_|.
+int BlockingNetworkDelegate::OnHeadersReceived(
+    URLRequest* request,
+    const CompletionCallback& callback,
+    const HttpResponseHeaders* original_response_headers,
+    scoped_refptr<HttpResponseHeaders>* override_response_headers) {
+  TestNetworkDelegate::OnHeadersReceived(
+      request, callback, original_response_headers,
+      override_response_headers);
+
+  return MaybeBlockStage(ON_HEADERS_RECEIVED, callback);
+}
+
+// Auth analogue of MaybeBlockStage(): depending on |block_mode_| either
+// answers synchronously, posts a task to answer later, or parks the callback
+// for the user and quits the message loop.
+NetworkDelegate::AuthRequiredResponse BlockingNetworkDelegate::OnAuthRequired(
+    URLRequest* request,
+    const AuthChallengeInfo& auth_info,
+    const AuthCallback& callback,
+    AuthCredentials* credentials) {
+  TestNetworkDelegate::OnAuthRequired(request, auth_info, callback,
+                                      credentials);
+  // Check that the user has provided callback for the previous blocked stage.
+  EXPECT_EQ(NOT_BLOCKED, stage_blocked_for_callback_);
+
+  if ((block_on_ & ON_AUTH_REQUIRED) == 0) {
+    return AUTH_REQUIRED_RESPONSE_NO_ACTION;
+  }
+
+  // Remember where to write credentials if SET_AUTH is later returned.
+  target_auth_credentials_ = credentials;
+
+  switch (block_mode_) {
+    case SYNCHRONOUS:
+      if (auth_retval_ == AUTH_REQUIRED_RESPONSE_SET_AUTH)
+        *target_auth_credentials_ = auth_credentials_;
+      return auth_retval_;
+
+    case AUTO_CALLBACK:
+      MessageLoop::current()->PostTask(
+          FROM_HERE,
+          base::Bind(&BlockingNetworkDelegate::RunAuthCallback,
+                     weak_factory_.GetWeakPtr(), auth_retval_, callback));
+      return AUTH_REQUIRED_RESPONSE_IO_PENDING;
+
+    case USER_CALLBACK:
+      auth_callback_ = callback;
+      stage_blocked_for_callback_ = ON_AUTH_REQUIRED;
+      // Quit the loop so the test body can call DoAuthCallback().
+      MessageLoop::current()->PostTask(FROM_HERE, MessageLoop::QuitClosure());
+      return AUTH_REQUIRED_RESPONSE_IO_PENDING;
+  }
+  NOTREACHED();
+  return AUTH_REQUIRED_RESPONSE_NO_ACTION;  // Dummy value.
+}
+
+// Clears the blocked-stage marker and both stored callbacks.  Expects that
+// some stage was actually blocked when called.
+void BlockingNetworkDelegate::Reset() {
+  EXPECT_NE(NOT_BLOCKED, stage_blocked_for_callback_);
+  stage_blocked_for_callback_ = NOT_BLOCKED;
+  callback_.Reset();
+  auth_callback_.Reset();
+}
+
+int BlockingNetworkDelegate::MaybeBlockStage(
+    BlockingNetworkDelegate::Stage stage,
+    const CompletionCallback& callback) {
+  // Check that the user has provided callback for the previous blocked stage.
+  EXPECT_EQ(NOT_BLOCKED, stage_blocked_for_callback_);
+
+  if ((block_on_ & stage) == 0) {
+    return OK;
+  }
+
+  switch (block_mode_) {
+    case SYNCHRONOUS:
+      EXPECT_NE(OK, retval_);
+      return retval_;
+
+    case AUTO_CALLBACK:
+      // Answer with |retval_| on the next message-loop iteration.
+      MessageLoop::current()->PostTask(
+          FROM_HERE,
+          base::Bind(&BlockingNetworkDelegate::RunCallback,
+                     weak_factory_.GetWeakPtr(), retval_, callback));
+      return ERR_IO_PENDING;
+
+    case USER_CALLBACK:
+      // Park the callback and quit the loop so the test body can call
+      // DoCallback().
+      callback_ = callback;
+      stage_blocked_for_callback_ = stage;
+      MessageLoop::current()->PostTask(FROM_HERE, MessageLoop::QuitClosure());
+      return ERR_IO_PENDING;
+  }
+  NOTREACHED();
+  return 0;
+}
+
+// A TestURLRequestContext configured with a fixed proxy and a caller-owned
+// network delegate.
+class TestURLRequestContextWithProxy : public TestURLRequestContext {
+ public:
+  // Does not own |delegate|.
+  TestURLRequestContextWithProxy(const std::string& proxy,
+                                 NetworkDelegate* delegate)
+      : TestURLRequestContext(true) {
+    context_storage_.set_proxy_service(ProxyService::CreateFixed(proxy));
+    set_network_delegate(delegate);
+    Init();
+  }
+  virtual ~TestURLRequestContextWithProxy() {}
+};
+
+}  // namespace
+
+// Inherit PlatformTest since we require the autorelease pool on Mac OS X.
+class URLRequestTest : public PlatformTest {
+ public:
+  URLRequestTest() : default_context_(true) {
+    default_context_.set_network_delegate(&default_network_delegate_);
+    default_context_.Init();
+  }
+
+  // Adds the TestJobInterceptor to the default context.
+  // Ownership of the returned interceptor is taken by |job_factory_|
+  // (presumably, via AddInterceptor) -- TODO confirm against
+  // URLRequestJobFactoryImpl.
+  TestJobInterceptor* AddTestInterceptor() {
+    TestJobInterceptor* interceptor = new TestJobInterceptor();
+    default_context_.set_job_factory(&job_factory_);
+    job_factory_.AddInterceptor(interceptor);
+    return interceptor;
+  }
+
+ protected:
+  TestNetworkDelegate default_network_delegate_;  // Must outlive URLRequest.
+  URLRequestJobFactoryImpl job_factory_;
+  TestURLRequestContext default_context_;
+};
+
+// A request for about:blank should complete with zero bytes and no remote
+// socket address.
+TEST_F(URLRequestTest, AboutBlankTest) {
+  TestDelegate d;
+  {
+    URLRequest r(GURL("about:blank"), &d, &default_context_);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(!r.is_pending());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_EQ(d.bytes_received(), 0);
+    EXPECT_EQ("", r.GetSocketAddress().host());
+    EXPECT_EQ(0, r.GetSocketAddress().port());
+  }
+}
+
+// A base64 data: URL should decode to the expected number of bytes (911
+// here) with no remote socket address.
+TEST_F(URLRequestTest, DataURLImageTest) {
+  TestDelegate d;
+  {
+    // Use our nice little Chrome logo.
+    URLRequest r(GURL(
+        "data:image/png;base64,"
+        "iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAADVklEQVQ4jX2TfUwUBBjG3"
+        "w1y+HGcd9dxhXR8T4awOccJGgOSWclHImznLkTlSw0DDQXkrmgYgbUYnlQTqQxIEVxitD"
+        "5UMCATRA1CEEg+Qjw3bWDxIauJv/5oumqs39/P827vnucRmYN0gyF01GI5MpCVdW0gO7t"
+        "vNC+vqSEtbZefk5NuLv1jdJ46p/zw0HeH4+PHr3h7c1mjoV2t5rKzMx1+fg9bAgK6zHq9"
+        "cU5z+LpA3xOtx34+vTeT21onRuzssC3zxbbSwC13d/pFuC7CkIMDxQpF7r/MWq12UctI1"
+        "dWWm99ypqSYmRUBdKem8MkrO/kgaTt1O7YzlpzE5GIVd0WYUqt57yWf2McHTObYPbVD+Z"
+        "wbtlLTVMZ3BW+TnLyXLaWtmEq6WJVbT3HBh3Svj2HQQcm43XwmtoYM6vVKleh0uoWvnzW"
+        "3v3MpidruPTQPf0bia7sJOtBM0ufTWNvus/nkDFHF9ZS+uYVjRUasMeHUmyLYtcklTvzW"
+        "GFZnNOXczThvpKIzjcahSqIzkvDLayDq6D3eOjtBbNUEIZYyqsvj4V4wY92eNJ4IoyhTb"
+        "xXX1T5xsV9tm9r4TQwHLiZw/pdDZJea8TKmsmR/K0uLh/GwnCHghTja6lPhphezPfO5/5"
+        "MrVvMzNaI3+ERHfrFzPKQukrQGI4d/3EFD/3E2mVNYvi4at7CXWREaxZGD+3hg28zD3gV"
+        "Md6q5c8GdosynKmSeRuGzpjyl1/9UDGtPR5HeaKT8Wjo17WXk579BXVUhN64ehF9fhRtq"
+        "/uxxZKzNiZFGD0wRC3NFROZ5mwIPL/96K/rKMMLrIzF9uhHr+/sYH7DAbwlgC4J+R2Z7F"
+        "Ux1qLnV7MGF40smVSoJ/jvHRfYhQeUJd/SnYtGWhPHR0Sz+GE2F2yth0B36Vcz2KpnufB"
+        "JbsysjjW4kblBUiIjiURUWqJY65zxbnTy57GQyH58zgy0QBtTQv5gH15XMdKkYu+TGaJM"
+        "nlm2O34uI4b9tflqp1+QEFGzoW/ulmcofcpkZCYJhDfSpme7QcrHa+Xfji8paEQkTkSfm"
+        "moRWRNZr/F1KfVMjW+IKEnv2FwZfKdzt0BQR6lClcZR0EfEXEfv/G6W9iLiIyCoReV5En"
+        "hORIBHx+ufPj/gLB/zGI/G4Bk0AAAAASUVORK5CYII="),
+        &d,
+        &default_context_);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(!r.is_pending());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_EQ(d.bytes_received(), 911);
+    EXPECT_EQ("", r.GetSocketAddress().host());
+    EXPECT_EQ(0, r.GetSocketAddress().port());
+  }
+}
+
+// A file:// request for the test binary itself should deliver exactly the
+// file's size in bytes.
+TEST_F(URLRequestTest, FileTest) {
+  FilePath app_path;
+  PathService::Get(base::FILE_EXE, &app_path);
+  GURL app_url = FilePathToFileURL(app_path);
+
+  TestDelegate d;
+  {
+    URLRequest r(app_url, &d, &default_context_);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    int64 file_size = -1;
+    EXPECT_TRUE(file_util::GetFileSize(app_path, &file_size));
+
+    EXPECT_TRUE(!r.is_pending());
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_EQ(d.bytes_received(), static_cast<int>(file_size));
+    EXPECT_EQ("", r.GetSocketAddress().host());
+    EXPECT_EQ(0, r.GetSocketAddress().port());
+  }
+}
+
+// Cancelling a just-started file request and then destroying it must not
+// crash when pending async work runs afterwards.
+TEST_F(URLRequestTest, FileTestCancel) {
+  FilePath app_path;
+  PathService::Get(base::FILE_EXE, &app_path);
+  GURL app_url = FilePathToFileURL(app_path);
+
+  TestDelegate d;
+  {
+    URLRequest r(app_url, &d, &default_context_);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+    r.Cancel();
+  }
+  // Async cancelation should be safe even when URLRequest has been already
+  // destroyed.
+  MessageLoop::current()->RunUntilIdle();
+}
+
+// A file request with a fully specified Range: bytes=first-last header must
+// deliver exactly the bytes [first, last] of the file.
+TEST_F(URLRequestTest, FileTestFullSpecifiedRange) {
+  const size_t buffer_size = 4000;
+  scoped_array<char> buffer(new char[buffer_size]);
+  FillBuffer(buffer.get(), buffer_size);
+
+  FilePath temp_path;
+  EXPECT_TRUE(file_util::CreateTemporaryFile(&temp_path));
+  GURL temp_url = FilePathToFileURL(temp_path);
+  EXPECT_TRUE(file_util::WriteFile(temp_path, buffer.get(), buffer_size));
+
+  int64 file_size;
+  EXPECT_TRUE(file_util::GetFileSize(temp_path, &file_size));
+
+  const size_t first_byte_position = 500;
+  const size_t last_byte_position = buffer_size - first_byte_position;
+  const size_t content_length = last_byte_position - first_byte_position + 1;
+  // Expected body: bytes [first, last] inclusive.
+  std::string partial_buffer_string(buffer.get() + first_byte_position,
+                                    buffer.get() + last_byte_position + 1);
+
+  TestDelegate d;
+  {
+    URLRequest r(temp_url, &d, &default_context_);
+
+    HttpRequestHeaders headers;
+    headers.SetHeader(HttpRequestHeaders::kRange,
+                      base::StringPrintf(
+                           "bytes=%" PRIuS "-%" PRIuS,
+                           first_byte_position, last_byte_position));
+    r.SetExtraRequestHeaders(headers);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+    EXPECT_TRUE(!r.is_pending());
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_EQ(static_cast<int>(content_length), d.bytes_received());
+    // Don't use EXPECT_EQ, it will print out a lot of garbage if check failed.
+    EXPECT_TRUE(partial_buffer_string == d.data_received());
+  }
+
+  EXPECT_TRUE(file_util::Delete(temp_path, false));
+}
+
+// A file request with an open-ended Range: bytes=first- header must deliver
+// everything from |first| to the end of the file.
+TEST_F(URLRequestTest, FileTestHalfSpecifiedRange) {
+  const size_t buffer_size = 4000;
+  scoped_array<char> buffer(new char[buffer_size]);
+  FillBuffer(buffer.get(), buffer_size);
+
+  FilePath temp_path;
+  EXPECT_TRUE(file_util::CreateTemporaryFile(&temp_path));
+  GURL temp_url = FilePathToFileURL(temp_path);
+  EXPECT_TRUE(file_util::WriteFile(temp_path, buffer.get(), buffer_size));
+
+  int64 file_size;
+  EXPECT_TRUE(file_util::GetFileSize(temp_path, &file_size));
+
+  const size_t first_byte_position = 500;
+  const size_t last_byte_position = buffer_size - 1;
+  const size_t content_length = last_byte_position - first_byte_position + 1;
+  // Expected body: bytes [first, end-of-file].
+  std::string partial_buffer_string(buffer.get() + first_byte_position,
+                                    buffer.get() + last_byte_position + 1);
+
+  TestDelegate d;
+  {
+    URLRequest r(temp_url, &d, &default_context_);
+
+    HttpRequestHeaders headers;
+    headers.SetHeader(HttpRequestHeaders::kRange,
+                      base::StringPrintf("bytes=%" PRIuS "-",
+                                         first_byte_position));
+    r.SetExtraRequestHeaders(headers);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+    EXPECT_TRUE(!r.is_pending());
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_EQ(static_cast<int>(content_length), d.bytes_received());
+    // Don't use EXPECT_EQ, it will print out a lot of garbage if check failed.
+    EXPECT_TRUE(partial_buffer_string == d.data_received());
+  }
+
+  EXPECT_TRUE(file_util::Delete(temp_path, false));
+}
+
+// A file request with multiple byte ranges in one Range: header is expected
+// to fail (the test only asserts request failure, not a specific error).
+TEST_F(URLRequestTest, FileTestMultipleRanges) {
+  const size_t buffer_size = 400000;
+  scoped_array<char> buffer(new char[buffer_size]);
+  FillBuffer(buffer.get(), buffer_size);
+
+  FilePath temp_path;
+  EXPECT_TRUE(file_util::CreateTemporaryFile(&temp_path));
+  GURL temp_url = FilePathToFileURL(temp_path);
+  EXPECT_TRUE(file_util::WriteFile(temp_path, buffer.get(), buffer_size));
+
+  int64 file_size;
+  EXPECT_TRUE(file_util::GetFileSize(temp_path, &file_size));
+
+  TestDelegate d;
+  {
+    URLRequest r(temp_url, &d, &default_context_);
+
+    HttpRequestHeaders headers;
+    headers.SetHeader(HttpRequestHeaders::kRange,
+                      "bytes=0-0,10-200,200-300");
+    r.SetExtraRequestHeaders(headers);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+    EXPECT_TRUE(d.request_failed());
+  }
+
+  EXPECT_TRUE(file_util::Delete(temp_path, false));
+}
+
+// Starting a request with an unparsable URL should fail rather than crash.
+TEST_F(URLRequestTest, InvalidUrlTest) {
+  TestDelegate d;
+  {
+    URLRequest r(GURL("invalid url"), &d, &default_context_);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+    EXPECT_TRUE(d.request_failed());
+  }
+}
+
+#if defined(OS_WIN)
+// Windows only: a file:// request for a .lnk shortcut should redirect to and
+// deliver the contents of the shortcut's target file.
+TEST_F(URLRequestTest, ResolveShortcutTest) {
+  FilePath app_path;
+  PathService::Get(base::DIR_SOURCE_ROOT, &app_path);
+  app_path = app_path.AppendASCII("net");
+  app_path = app_path.AppendASCII("data");
+  app_path = app_path.AppendASCII("url_request_unittest");
+  app_path = app_path.AppendASCII("with-headers.html");
+
+  std::wstring lnk_path = app_path.value() + L".lnk";
+
+  base::win::ScopedCOMInitializer com_initializer;
+
+  // Temporarily create a shortcut for test
+  IShellLink* shell = NULL;
+  ASSERT_TRUE(SUCCEEDED(CoCreateInstance(
+      CLSID_ShellLink, NULL, CLSCTX_INPROC_SERVER, IID_IShellLink,
+      reinterpret_cast<LPVOID*>(&shell))));
+  IPersistFile* persist = NULL;
+  ASSERT_TRUE(SUCCEEDED(shell->QueryInterface(
+      IID_IPersistFile, reinterpret_cast<LPVOID*>(&persist))));
+  EXPECT_TRUE(SUCCEEDED(shell->SetPath(app_path.value().c_str())));
+  EXPECT_TRUE(SUCCEEDED(shell->SetDescription(L"ResolveShortcutTest")));
+  EXPECT_TRUE(SUCCEEDED(persist->Save(lnk_path.c_str(), TRUE)));
+  if (persist)
+    persist->Release();
+  if (shell)
+    shell->Release();
+
+  TestDelegate d;
+  {
+    URLRequest r(FilePathToFileURL(FilePath(lnk_path)), &d, &default_context_);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    // Read the target file directly via the Win32 API to compare against the
+    // data the URLRequest delivered.
+    WIN32_FILE_ATTRIBUTE_DATA data;
+    GetFileAttributesEx(app_path.value().c_str(),
+                        GetFileExInfoStandard, &data);
+    HANDLE file = CreateFile(app_path.value().c_str(), GENERIC_READ,
+                             FILE_SHARE_READ, NULL, OPEN_EXISTING,
+                             FILE_ATTRIBUTE_NORMAL, NULL);
+    EXPECT_NE(INVALID_HANDLE_VALUE, file);
+    scoped_array<char> buffer(new char[data.nFileSizeLow]);
+    DWORD read_size;
+    BOOL result;
+    result = ReadFile(file, buffer.get(), data.nFileSizeLow,
+                      &read_size, NULL);
+    std::string content(buffer.get(), read_size);
+    CloseHandle(file);
+
+    EXPECT_TRUE(!r.is_pending());
+    EXPECT_EQ(1, d.received_redirect_count());
+    EXPECT_EQ(content, d.data_received());
+  }
+
+  // Clean the shortcut
+  DeleteFile(lnk_path.c_str());
+}
+#endif  // defined(OS_WIN)
+
+// Cancelling a directory listing request mid-stream (from the delegate's
+// data-pending callback) must not crash.
+TEST_F(URLRequestTest, FileDirCancelTest) {
+  // Put in mock resource provider.
+  NetModule::SetResourceProvider(TestNetResourceProvider);
+
+  TestDelegate d;
+  {
+    FilePath file_path;
+    PathService::Get(base::DIR_SOURCE_ROOT, &file_path);
+    file_path = file_path.Append(FILE_PATH_LITERAL("net"));
+    file_path = file_path.Append(FILE_PATH_LITERAL("data"));
+
+    URLRequest req(FilePathToFileURL(file_path), &d, &default_context_);
+    req.Start();
+    EXPECT_TRUE(req.is_pending());
+
+    d.set_cancel_in_received_data_pending(true);
+
+    MessageLoop::current()->Run();
+  }
+
+  // Take out mock resource provider.
+  NetModule::SetResourceProvider(NULL);
+}
+
+TEST_F(URLRequestTest, FileDirRedirectNoCrash) {
+  // There is an implicit redirect when loading a file path that matches a
+  // directory and does not end with a slash.  Ensure that following such
+  // redirects does not crash.  See http://crbug.com/18686.
+
+  FilePath path;
+  PathService::Get(base::DIR_SOURCE_ROOT, &path);
+  path = path.Append(FILE_PATH_LITERAL("net"));
+  path = path.Append(FILE_PATH_LITERAL("data"));
+  path = path.Append(FILE_PATH_LITERAL("url_request_unittest"));
+
+  TestDelegate d;
+  URLRequest req(FilePathToFileURL(path), &d, &default_context_);
+  req.Start();
+  MessageLoop::current()->Run();
+
+  // Exactly one redirect (dir -> dir/), some listing data, and success.
+  ASSERT_EQ(1, d.received_redirect_count());
+  ASSERT_LT(0, d.bytes_received());
+  ASSERT_FALSE(d.request_failed());
+  ASSERT_TRUE(req.status().is_success());
+}
+
+#if defined(OS_WIN)
+// Don't accept the url "file:///" on windows. See http://crbug.com/1474.
+TEST_F(URLRequestTest, FileDirRedirectSingleSlash) {
+  TestDelegate d;
+  URLRequest req(GURL("file:///"), &d, &default_context_);
+  req.Start();
+  MessageLoop::current()->Run();
+
+  // The redirect is still issued, but the request must not succeed.
+  ASSERT_EQ(1, d.received_redirect_count());
+  ASSERT_FALSE(req.status().is_success());
+}
+#endif
+
+// Custom URLRequestJobs for use with interceptor tests
+
+// A job that, when started, immediately asks for the request to be restarted.
+class RestartTestJob : public URLRequestTestJob {
+ public:
+  RestartTestJob(URLRequest* request, NetworkDelegate* network_delegate)
+    : URLRequestTestJob(request, network_delegate, true) {}
+ protected:
+  // Annotated OVERRIDE for consistency with the rest of this file, so the
+  // compiler verifies this actually overrides URLRequestTestJob::StartAsync.
+  virtual void StartAsync() OVERRIDE {
+    this->NotifyRestartRequired();
+  }
+ private:
+  ~RestartTestJob() {}
+};
+
+// A job that, when started, cancels its own request.
+class CancelTestJob : public URLRequestTestJob {
+ public:
+  explicit CancelTestJob(URLRequest* request, NetworkDelegate* network_delegate)
+    : URLRequestTestJob(request, network_delegate, true) {}
+ protected:
+  // Annotated OVERRIDE for consistency with the rest of this file, so the
+  // compiler verifies this actually overrides URLRequestTestJob::StartAsync.
+  virtual void StartAsync() OVERRIDE {
+    request_->Cancel();
+  }
+ private:
+  ~CancelTestJob() {}
+};
+
+// A job that cancels its request and then asks for a restart, exercising the
+// cancel-during-restart path.
+class CancelThenRestartTestJob : public URLRequestTestJob {
+ public:
+  explicit CancelThenRestartTestJob(URLRequest* request,
+                                    NetworkDelegate* network_delegate)
+      : URLRequestTestJob(request, network_delegate, true) {
+  }
+ protected:
+  // Annotated OVERRIDE for consistency with the rest of this file, so the
+  // compiler verifies this actually overrides URLRequestTestJob::StartAsync.
+  virtual void StartAsync() OVERRIDE {
+    request_->Cancel();
+    this->NotifyRestartRequired();
+  }
+ private:
+  ~CancelThenRestartTestJob() {}
+};
+
+// An Interceptor for use with interceptor tests.  Registers itself on
+// construction and unregisters on destruction.  Public flags configure which
+// request phases to intercept and what canned response (or cancel/restart
+// action) to produce; did_* flags record what actually happened.
+//
+// Fixes relative to the original: inheritance is now explicitly public
+// (it was implicitly private, relying on member-scope access for the
+// this-to-base conversion in the constructor), and the virtual overrides are
+// annotated OVERRIDE for consistency with the rest of this file.
+class TestInterceptor : public URLRequest::Interceptor {
+ public:
+  TestInterceptor()
+      : intercept_main_request_(false), restart_main_request_(false),
+        cancel_main_request_(false), cancel_then_restart_main_request_(false),
+        simulate_main_network_error_(false),
+        intercept_redirect_(false), cancel_redirect_request_(false),
+        intercept_final_response_(false), cancel_final_request_(false),
+        did_intercept_main_(false), did_restart_main_(false),
+        did_cancel_main_(false), did_cancel_then_restart_main_(false),
+        did_simulate_error_main_(false),
+        did_intercept_redirect_(false), did_cancel_redirect_(false),
+        did_intercept_final_(false), did_cancel_final_(false) {
+    URLRequest::Deprecated::RegisterRequestInterceptor(this);
+  }
+
+  ~TestInterceptor() {
+    URLRequest::Deprecated::UnregisterRequestInterceptor(this);
+  }
+
+  // Consumes the one-shot action flags (each is reset to false once used) and
+  // returns the corresponding job, or NULL to let the request proceed.
+  virtual URLRequestJob* MaybeIntercept(
+      URLRequest* request, NetworkDelegate* network_delegate) OVERRIDE {
+    if (restart_main_request_) {
+      restart_main_request_ = false;
+      did_restart_main_ = true;
+      return new RestartTestJob(request, network_delegate);
+    }
+    if (cancel_main_request_) {
+      cancel_main_request_ = false;
+      did_cancel_main_ = true;
+      return new CancelTestJob(request, network_delegate);
+    }
+    if (cancel_then_restart_main_request_) {
+      cancel_then_restart_main_request_ = false;
+      did_cancel_then_restart_main_ = true;
+      return new CancelThenRestartTestJob(request, network_delegate);
+    }
+    if (simulate_main_network_error_) {
+      simulate_main_network_error_ = false;
+      did_simulate_error_main_ = true;
+      // will error since the requested url is not one of its canned urls
+      return new URLRequestTestJob(request, network_delegate, true);
+    }
+    if (!intercept_main_request_)
+      return NULL;
+    intercept_main_request_ = false;
+    did_intercept_main_ = true;
+    return new URLRequestTestJob(request,
+                                 network_delegate,
+                                 main_headers_,
+                                 main_data_,
+                                 true);
+  }
+
+  virtual URLRequestJob* MaybeInterceptRedirect(
+      URLRequest* request,
+      NetworkDelegate* network_delegate,
+      const GURL& location) OVERRIDE {
+    if (cancel_redirect_request_) {
+      cancel_redirect_request_ = false;
+      did_cancel_redirect_ = true;
+      return new CancelTestJob(request, network_delegate);
+    }
+    if (!intercept_redirect_)
+      return NULL;
+    intercept_redirect_ = false;
+    did_intercept_redirect_ = true;
+    return new URLRequestTestJob(request,
+                                 network_delegate,
+                                 redirect_headers_,
+                                 redirect_data_,
+                                 true);
+  }
+
+  virtual URLRequestJob* MaybeInterceptResponse(
+      URLRequest* request, NetworkDelegate* network_delegate) OVERRIDE {
+    if (cancel_final_request_) {
+      cancel_final_request_ = false;
+      did_cancel_final_ = true;
+      return new CancelTestJob(request, network_delegate);
+    }
+    if (!intercept_final_response_)
+      return NULL;
+    intercept_final_response_ = false;
+    did_intercept_final_ = true;
+    return new URLRequestTestJob(request,
+                                 network_delegate,
+                                 final_headers_,
+                                 final_data_,
+                                 true);
+  }
+
+  // Whether to intercept the main request, and if so the response to return.
+  bool intercept_main_request_;
+  std::string main_headers_;
+  std::string main_data_;
+
+  // Other actions we take at MaybeIntercept time
+  bool restart_main_request_;
+  bool cancel_main_request_;
+  bool cancel_then_restart_main_request_;
+  bool simulate_main_network_error_;
+
+  // Whether to intercept redirects, and if so the response to return.
+  bool intercept_redirect_;
+  std::string redirect_headers_;
+  std::string redirect_data_;
+
+  // Other actions we can take at MaybeInterceptRedirect time
+  bool cancel_redirect_request_;
+
+  // Whether to intercept final response, and if so the response to return.
+  bool intercept_final_response_;
+  std::string final_headers_;
+  std::string final_data_;
+
+  // Other actions we can take at MaybeInterceptResponse time
+  bool cancel_final_request_;
+
+  // If we did something or not
+  bool did_intercept_main_;
+  bool did_restart_main_;
+  bool did_cancel_main_;
+  bool did_cancel_then_restart_main_;
+  bool did_simulate_error_main_;
+  bool did_intercept_redirect_;
+  bool did_cancel_redirect_;
+  bool did_intercept_final_;
+  bool did_cancel_final_;
+
+  // Static getters for canned response header and data strings
+
+  static std::string ok_data() {
+    return URLRequestTestJob::test_data_1();
+  }
+
+  static std::string ok_headers() {
+    return URLRequestTestJob::test_headers();
+  }
+
+  static std::string redirect_data() {
+    return std::string();
+  }
+
+  static std::string redirect_headers() {
+    return URLRequestTestJob::test_redirect_headers();
+  }
+
+  static std::string error_data() {
+    return std::string("ohhh nooooo mr. bill!");
+  }
+
+  static std::string error_headers() {
+    return URLRequestTestJob::test_error_headers();
+  }
+};
+
+// Intercepting the main request should produce the canned OK response, and
+// per-request user data must round-trip through SetUserData/GetUserData.
+TEST_F(URLRequestTest, Intercept) {
+  TestInterceptor interceptor;
+
+  // intercept the main request and respond with a simple response
+  interceptor.intercept_main_request_ = true;
+  interceptor.main_headers_ = TestInterceptor::ok_headers();
+  interceptor.main_data_ = TestInterceptor::ok_data();
+
+  TestDelegate d;
+  URLRequest req(GURL("http://test_intercept/foo"), &d, &default_context_);
+  base::SupportsUserData::Data* user_data0 = new base::SupportsUserData::Data();
+  base::SupportsUserData::Data* user_data1 = new base::SupportsUserData::Data();
+  base::SupportsUserData::Data* user_data2 = new base::SupportsUserData::Data();
+  // The addresses of the local pointers serve as distinct user-data keys.
+  req.SetUserData(NULL, user_data0);
+  req.SetUserData(&user_data1, user_data1);
+  req.SetUserData(&user_data2, user_data2);
+  req.set_method("GET");
+  req.Start();
+  MessageLoop::current()->Run();
+
+  // Make sure we can retrieve our specific user data
+  EXPECT_EQ(user_data0, req.GetUserData(NULL));
+  EXPECT_EQ(user_data1, req.GetUserData(&user_data1));
+  EXPECT_EQ(user_data2, req.GetUserData(&user_data2));
+
+  // Check the interceptor got called as expected
+  EXPECT_TRUE(interceptor.did_intercept_main_);
+
+  // Check we got one good response
+  EXPECT_TRUE(req.status().is_success());
+  EXPECT_EQ(200, req.response_headers()->response_code());
+  EXPECT_EQ(TestInterceptor::ok_data(), d.data_received());
+  EXPECT_EQ(1, d.response_started_count());
+  EXPECT_EQ(0, d.received_redirect_count());
+}
+
+// Intercepting the main request with a redirect, then intercepting that
+// redirect with an OK response, should surface only the final response (no
+// redirect is reported to the delegate).
+TEST_F(URLRequestTest, InterceptRedirect) {
+  TestInterceptor interceptor;
+
+  // intercept the main request and respond with a redirect
+  interceptor.intercept_main_request_ = true;
+  interceptor.main_headers_ = TestInterceptor::redirect_headers();
+  interceptor.main_data_ = TestInterceptor::redirect_data();
+
+  // intercept that redirect and respond a final OK response
+  interceptor.intercept_redirect_ = true;
+  interceptor.redirect_headers_ =  TestInterceptor::ok_headers();
+  interceptor.redirect_data_ = TestInterceptor::ok_data();
+
+  TestDelegate d;
+  URLRequest req(GURL("http://test_intercept/foo"), &d, &default_context_);
+  req.set_method("GET");
+  req.Start();
+  MessageLoop::current()->Run();
+
+  // Check the interceptor got called as expected
+  EXPECT_TRUE(interceptor.did_intercept_main_);
+  EXPECT_TRUE(interceptor.did_intercept_redirect_);
+
+  // Check we got one good response
+  EXPECT_TRUE(req.status().is_success());
+  if (req.status().is_success()) {
+    EXPECT_EQ(200, req.response_headers()->response_code());
+  }
+  EXPECT_EQ(TestInterceptor::ok_data(), d.data_received());
+  EXPECT_EQ(1, d.response_started_count());
+  EXPECT_EQ(0, d.received_redirect_count());
+}
+
+// Intercepting the main request with an error response, then intercepting
+// the final response with an OK response, should surface the OK response.
+TEST_F(URLRequestTest, InterceptServerError) {
+  TestInterceptor interceptor;
+
+  // intercept the main request to generate a server error response
+  interceptor.intercept_main_request_ = true;
+  interceptor.main_headers_ = TestInterceptor::error_headers();
+  interceptor.main_data_ = TestInterceptor::error_data();
+
+  // intercept that error and respond with an OK response
+  interceptor.intercept_final_response_ = true;
+  interceptor.final_headers_ = TestInterceptor::ok_headers();
+  interceptor.final_data_ = TestInterceptor::ok_data();
+
+  TestDelegate d;
+  URLRequest req(GURL("http://test_intercept/foo"), &d, &default_context_);
+  req.set_method("GET");
+  req.Start();
+  MessageLoop::current()->Run();
+
+  // Check the interceptor got called as expected
+  EXPECT_TRUE(interceptor.did_intercept_main_);
+  EXPECT_TRUE(interceptor.did_intercept_final_);
+
+  // Check we got one good response
+  EXPECT_TRUE(req.status().is_success());
+  EXPECT_EQ(200, req.response_headers()->response_code());
+  EXPECT_EQ(TestInterceptor::ok_data(), d.data_received());
+  EXPECT_EQ(1, d.response_started_count());
+  EXPECT_EQ(0, d.received_redirect_count());
+}
+
+TEST_F(URLRequestTest, InterceptNetworkError) {
+  TestInterceptor interceptor;
+
+  // Intercept the main request to simulate a network error.
+  interceptor.simulate_main_network_error_ = true;
+
+  // Intercept that error and respond with an OK response.
+  interceptor.intercept_final_response_ = true;
+  interceptor.final_headers_ = TestInterceptor::ok_headers();
+  interceptor.final_data_ = TestInterceptor::ok_data();
+
+  TestDelegate d;
+  URLRequest req(GURL("http://test_intercept/foo"), &d, &default_context_);
+  req.set_method("GET");
+  req.Start();
+  MessageLoop::current()->Run();
+
+  // Check the interceptor got called as expected.
+  EXPECT_TRUE(interceptor.did_simulate_error_main_);
+  EXPECT_TRUE(interceptor.did_intercept_final_);
+
+  // Check we received one good response.
+  EXPECT_TRUE(req.status().is_success());
+  EXPECT_EQ(200, req.response_headers()->response_code());
+  EXPECT_EQ(TestInterceptor::ok_data(), d.data_received());
+  EXPECT_EQ(1, d.response_started_count());
+  EXPECT_EQ(0, d.received_redirect_count());
+}
+
+TEST_F(URLRequestTest, InterceptRestartRequired) {
+  TestInterceptor interceptor;
+
+  // Restart the main request.
+  interceptor.restart_main_request_ = true;
+
+  // Then intercept the new main request and respond with an OK response.
+  interceptor.intercept_main_request_ = true;
+  interceptor.main_headers_ = TestInterceptor::ok_headers();
+  interceptor.main_data_ = TestInterceptor::ok_data();
+
+  TestDelegate d;
+  URLRequest req(GURL("http://test_intercept/foo"), &d, &default_context_);
+  req.set_method("GET");
+  req.Start();
+  MessageLoop::current()->Run();
+
+  // Check the interceptor got called as expected.
+  EXPECT_TRUE(interceptor.did_restart_main_);
+  EXPECT_TRUE(interceptor.did_intercept_main_);
+
+  // Check we received one good response.
+  EXPECT_TRUE(req.status().is_success());
+  if (req.status().is_success()) {
+    EXPECT_EQ(200, req.response_headers()->response_code());
+  }
+  EXPECT_EQ(TestInterceptor::ok_data(), d.data_received());
+  EXPECT_EQ(1, d.response_started_count());
+  EXPECT_EQ(0, d.received_redirect_count());
+}
+
+TEST_F(URLRequestTest, InterceptRespectsCancelMain) {
+  TestInterceptor interceptor;
+
+  // Intercept the main request and cancel from within the restarted job.
+  interceptor.cancel_main_request_ = true;
+
+  // Set up to intercept the final response and override it with an OK response.
+  interceptor.intercept_final_response_ = true;
+  interceptor.final_headers_ = TestInterceptor::ok_headers();
+  interceptor.final_data_ = TestInterceptor::ok_data();
+
+  TestDelegate d;
+  URLRequest req(GURL("http://test_intercept/foo"), &d, &default_context_);
+  req.set_method("GET");
+  req.Start();
+  MessageLoop::current()->Run();
+
+  // Check the interceptor got called as expected.
+  EXPECT_TRUE(interceptor.did_cancel_main_);
+  EXPECT_FALSE(interceptor.did_intercept_final_);
+
+  // Check we see a canceled request.
+  EXPECT_FALSE(req.status().is_success());
+  EXPECT_EQ(URLRequestStatus::CANCELED, req.status().status());
+}
+
+TEST_F(URLRequestTest, InterceptRespectsCancelRedirect) {
+  TestInterceptor interceptor;
+
+  // Intercept the main request and respond with a redirect.
+  interceptor.intercept_main_request_ = true;
+  interceptor.main_headers_ = TestInterceptor::redirect_headers();
+  interceptor.main_data_ = TestInterceptor::redirect_data();
+
+  // Intercept the redirect and cancel from within that job.
+  interceptor.cancel_redirect_request_ = true;
+
+  // Set up to intercept the final response and override it with an OK response.
+  interceptor.intercept_final_response_ = true;
+  interceptor.final_headers_ = TestInterceptor::ok_headers();
+  interceptor.final_data_ = TestInterceptor::ok_data();
+
+  TestDelegate d;
+  URLRequest req(GURL("http://test_intercept/foo"), &d, &default_context_);
+  req.set_method("GET");
+  req.Start();
+  MessageLoop::current()->Run();
+
+  // Check the interceptor got called as expected.
+  EXPECT_TRUE(interceptor.did_intercept_main_);
+  EXPECT_TRUE(interceptor.did_cancel_redirect_);
+  EXPECT_FALSE(interceptor.did_intercept_final_);
+
+  // Check we see a canceled request.
+  EXPECT_FALSE(req.status().is_success());
+  EXPECT_EQ(URLRequestStatus::CANCELED, req.status().status());
+}
+
+TEST_F(URLRequestTest, InterceptRespectsCancelFinal) {
+  TestInterceptor interceptor;
+
+  // Intercept the main request to simulate a network error.
+  interceptor.simulate_main_network_error_ = true;
+
+  // Set up to intercept the final response and cancel from within that job.
+  interceptor.cancel_final_request_ = true;
+
+  TestDelegate d;
+  URLRequest req(GURL("http://test_intercept/foo"), &d, &default_context_);
+  req.set_method("GET");
+  req.Start();
+  MessageLoop::current()->Run();
+
+  // Check the interceptor got called as expected.
+  EXPECT_TRUE(interceptor.did_simulate_error_main_);
+  EXPECT_TRUE(interceptor.did_cancel_final_);
+
+  // Check we see a canceled request.
+  EXPECT_FALSE(req.status().is_success());
+  EXPECT_EQ(URLRequestStatus::CANCELED, req.status().status());
+}
+
+TEST_F(URLRequestTest, InterceptRespectsCancelInRestart) {
+  TestInterceptor interceptor;
+
+  // Intercept the main request and cancel, then restart from within that job.
+  interceptor.cancel_then_restart_main_request_ = true;
+
+  // Set up to intercept the final response and override it with an OK response.
+  interceptor.intercept_final_response_ = true;
+  interceptor.final_headers_ = TestInterceptor::ok_headers();
+  interceptor.final_data_ = TestInterceptor::ok_data();
+
+  TestDelegate d;
+  URLRequest req(GURL("http://test_intercept/foo"), &d, &default_context_);
+  req.set_method("GET");
+  req.Start();
+  MessageLoop::current()->Run();
+
+  // Check the interceptor got called as expected.
+  EXPECT_TRUE(interceptor.did_cancel_then_restart_main_);
+  EXPECT_FALSE(interceptor.did_intercept_final_);
+
+  // Check we see a canceled request.
+  EXPECT_FALSE(req.status().is_success());
+  EXPECT_EQ(URLRequestStatus::CANCELED, req.status().status());
+}
+
+// Check that two different URL requests are assigned distinct identifiers.
+TEST_F(URLRequestTest, Identifiers) {
+  TestDelegate d;
+  TestURLRequestContext context;
+  TestURLRequest req(GURL("http://example.com"), &d, &context);
+  TestURLRequest other_req(GURL("http://example.com"), &d, &context);
+
+  ASSERT_NE(req.identifier(), other_req.identifier());
+}
+
+// Check that a failure to connect to the proxy is reported to the network
+// delegate.
+TEST_F(URLRequestTest, NetworkDelegateProxyError) {
+  MockHostResolver host_resolver;
+  host_resolver.rules()->AddSimulatedFailure("*");
+
+  TestNetworkDelegate network_delegate;  // Must outlive URLRequests.
+  TestURLRequestContextWithProxy context("myproxy:70", &network_delegate);
+
+  TestDelegate d;
+  URLRequest req(GURL("http://example.com"), &d, &context);
+  req.set_method("GET");
+
+  req.Start();
+  MessageLoop::current()->Run();
+
+  // Check we see a failed request reporting the proxy connection error.
+  EXPECT_FALSE(req.status().is_success());
+  EXPECT_EQ(URLRequestStatus::FAILED, req.status().status());
+  EXPECT_EQ(ERR_PROXY_CONNECTION_FAILED, req.status().error());
+
+  EXPECT_EQ(1, network_delegate.error_count());
+  EXPECT_EQ(ERR_PROXY_CONNECTION_FAILED, network_delegate.last_error());
+  EXPECT_EQ(1, network_delegate.completed_requests());
+}
+
+// Make sure that net::NetworkDelegate::NotifyCompleted is called even if
+// the response content is empty.
+TEST_F(URLRequestTest, RequestCompletionForEmptyResponse) {
+  TestDelegate d;
+  URLRequest req(GURL("data:,"), &d, &default_context_);
+  req.Start();
+  MessageLoop::current()->Run();
+  EXPECT_EQ("", d.data_received());
+  EXPECT_EQ(1, default_network_delegate_.completed_requests());
+}
+
+// TODO(droger): Support TestServer on iOS (see http://crbug.com/148666).
+#if !defined(OS_IOS)
+// A subclass of TestServer that uses a statically-configured hostname. This is
+// to work around mysterious failures in chrome_frame_net_tests. See:
+// http://crbug.com/114369
+class LocalHttpTestServer : public TestServer {
+ public:
+  explicit LocalHttpTestServer(const FilePath& document_root)
+      : TestServer(TestServer::TYPE_HTTP,
+                   ScopedCustomUrlRequestTestHttpHost::value(),
+                   document_root) {}
+  LocalHttpTestServer()
+      : TestServer(TestServer::TYPE_HTTP,
+                   ScopedCustomUrlRequestTestHttpHost::value(),
+                   FilePath()) {}  // No document root.
+};
+
+TEST_F(URLRequestTest, FLAKY_DelayedCookieCallback) {
+  LocalHttpTestServer test_server;
+  ASSERT_TRUE(test_server.Start());
+
+  TestURLRequestContext context;
+  scoped_refptr<DelayedCookieMonster> delayed_cm =
+      new DelayedCookieMonster();
+  scoped_refptr<CookieStore> cookie_store = delayed_cm;  // NOTE(review): appears unused — verify.
+  context.set_cookie_store(delayed_cm);
+
+  // Set up a cookie.
+  {
+    TestNetworkDelegate network_delegate;
+    context.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(
+        test_server.GetURL("set-cookie?CookieToNotSend=1"), &d, &context);
+    req.Start();
+    MessageLoop::current()->Run();
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+    EXPECT_EQ(1, network_delegate.set_cookie_count());
+  }
+
+  // Verify that the cookie was set and is sent back.
+  {
+    TestNetworkDelegate network_delegate;
+    context.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(test_server.GetURL("echoheader?Cookie"), &d, &context);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("CookieToNotSend=1")
+                != std::string::npos);
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+  }
+}
+
+TEST_F(URLRequestTest, DoNotSendCookies) {
+  LocalHttpTestServer test_server;
+  ASSERT_TRUE(test_server.Start());
+
+  // Set up a cookie.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(test_server.GetURL("set-cookie?CookieToNotSend=1"),
+                   &d,
+                   &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+  }
+
+  // Verify that the cookie was set and is sent by default.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(
+        test_server.GetURL("echoheader?Cookie"), &d, &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("CookieToNotSend=1")
+                != std::string::npos);
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+  }
+
+  // Verify that the cookie isn't sent when LOAD_DO_NOT_SEND_COOKIES is set.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(
+        test_server.GetURL("echoheader?Cookie"), &d, &default_context_);
+    req.set_load_flags(LOAD_DO_NOT_SEND_COOKIES);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("Cookie: CookieToNotSend=1")
+                == std::string::npos);
+
+    // LOAD_DO_NOT_SEND_COOKIES does not trigger OnGetCookies.
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+  }
+}
+
+TEST_F(URLRequestTest, DoNotSaveCookies) {
+  LocalHttpTestServer test_server;
+  ASSERT_TRUE(test_server.Start());
+
+  // Set up a cookie.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(test_server.GetURL("set-cookie?CookieToNotUpdate=2"),
+                   &d,
+                   &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+    EXPECT_EQ(1, network_delegate.set_cookie_count());
+  }
+
+  // Try to set up another cookie and update the previous cookie.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(
+        test_server.GetURL("set-cookie?CookieToNotSave=1&CookieToNotUpdate=1"),
+        &d,
+        &default_context_);
+    req.set_load_flags(LOAD_DO_NOT_SAVE_COOKIES);
+    req.Start();
+
+    MessageLoop::current()->Run();
+
+    // LOAD_DO_NOT_SAVE_COOKIES does not trigger OnSetCookie.
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+    EXPECT_EQ(0, network_delegate.set_cookie_count());
+  }
+
+  // Verify the cookies weren't saved or updated.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(
+        test_server.GetURL("echoheader?Cookie"), &d, &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("CookieToNotSave=1")
+                == std::string::npos);
+    EXPECT_TRUE(d.data_received().find("CookieToNotUpdate=2")
+                != std::string::npos);
+
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+    EXPECT_EQ(0, network_delegate.set_cookie_count());
+  }
+}
+
+TEST_F(URLRequestTest, DoNotSendCookies_ViaPolicy) {
+  LocalHttpTestServer test_server;
+  ASSERT_TRUE(test_server.Start());
+
+  // Set up a cookie.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(test_server.GetURL("set-cookie?CookieToNotSend=1"),
+                   &d,
+                   &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+  }
+
+  // Verify that the cookie was set and is sent by default.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(
+        test_server.GetURL("echoheader?Cookie"), &d, &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("CookieToNotSend=1")
+                != std::string::npos);
+
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+  }
+
+  // Verify that the cookie isn't sent when the delegate blocks GetCookies.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    network_delegate.set_cookie_options(TestNetworkDelegate::NO_GET_COOKIES);
+    URLRequest req(
+        test_server.GetURL("echoheader?Cookie"), &d, &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("Cookie: CookieToNotSend=1")
+                == std::string::npos);
+
+    EXPECT_EQ(1, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+  }
+}
+
+TEST_F(URLRequestTest, DoNotSaveCookies_ViaPolicy) {
+  LocalHttpTestServer test_server;
+  ASSERT_TRUE(test_server.Start());
+
+  // Set up a cookie.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(test_server.GetURL("set-cookie?CookieToNotUpdate=2"),
+                   &d,
+                   &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+  }
+
+  // Try to set up another cookie and update the previous cookie.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    network_delegate.set_cookie_options(TestNetworkDelegate::NO_SET_COOKIE);
+    URLRequest req(
+        test_server.GetURL("set-cookie?CookieToNotSave=1&CookieToNotUpdate=1"),
+        &d,
+        &default_context_);
+    req.Start();
+
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(2, network_delegate.blocked_set_cookie_count());
+  }
+
+  // Verify the cookies weren't saved or updated.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(
+        test_server.GetURL("echoheader?Cookie"), &d, &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("CookieToNotSave=1")
+                == std::string::npos);
+    EXPECT_TRUE(d.data_received().find("CookieToNotUpdate=2")
+                != std::string::npos);
+
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+  }
+}
+
+TEST_F(URLRequestTest, DoNotSaveEmptyCookies) {
+  LocalHttpTestServer test_server;
+  ASSERT_TRUE(test_server.Start());
+
+  // Set up an empty cookie.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(test_server.GetURL("set-cookie"), &d, &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+    EXPECT_EQ(0, network_delegate.set_cookie_count());  // Empty cookie not stored.
+  }
+}
+
+TEST_F(URLRequestTest, DoNotSendCookies_ViaPolicy_Async) {
+  LocalHttpTestServer test_server;
+  ASSERT_TRUE(test_server.Start());
+
+  // Set up a cookie.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(test_server.GetURL("set-cookie?CookieToNotSend=1"),
+                   &d,
+                   &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+  }
+
+  // Verify that the cookie was set and is sent by default.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(
+        test_server.GetURL("echoheader?Cookie"), &d, &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("CookieToNotSend=1")
+                != std::string::npos);
+
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+  }
+
+  // Verify that the cookie isn't sent when the delegate blocks GetCookies.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    network_delegate.set_cookie_options(TestNetworkDelegate::NO_GET_COOKIES);
+    URLRequest req(
+        test_server.GetURL("echoheader?Cookie"), &d, &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("Cookie: CookieToNotSend=1")
+                == std::string::npos);
+
+    EXPECT_EQ(1, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+  }
+}
+
+TEST_F(URLRequestTest, DoNotSaveCookies_ViaPolicy_Async) {
+  LocalHttpTestServer test_server;
+  ASSERT_TRUE(test_server.Start());
+
+  // Set up a cookie.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(test_server.GetURL("set-cookie?CookieToNotUpdate=2"),
+                   &d,
+                   &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+  }
+
+  // Try to set up another cookie and update the previous cookie.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    network_delegate.set_cookie_options(TestNetworkDelegate::NO_SET_COOKIE);
+    URLRequest req(
+        test_server.GetURL("set-cookie?CookieToNotSave=1&CookieToNotUpdate=1"),
+        &d,
+        &default_context_);
+    req.Start();
+
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(2, network_delegate.blocked_set_cookie_count());
+  }
+
+  // Verify the cookies weren't saved or updated.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(
+        test_server.GetURL("echoheader?Cookie"), &d, &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("CookieToNotSave=1")
+                == std::string::npos);
+    EXPECT_TRUE(d.data_received().find("CookieToNotUpdate=2")
+                != std::string::npos);
+
+    EXPECT_EQ(0, network_delegate.blocked_get_cookies_count());
+    EXPECT_EQ(0, network_delegate.blocked_set_cookie_count());
+  }
+}
+
+// FixedDateNetworkDelegate swaps out the server's HTTP Date response header
+// value for the |fixed_date| argument given to the constructor.
+class FixedDateNetworkDelegate : public TestNetworkDelegate {
+ public:
+  explicit FixedDateNetworkDelegate(const std::string& fixed_date)
+      : fixed_date_(fixed_date) {}
+  virtual ~FixedDateNetworkDelegate() {}
+
+  // net::NetworkDelegate implementation.
+  virtual int OnHeadersReceived(
+      net::URLRequest* request,
+      const net::CompletionCallback& callback,
+      const net::HttpResponseHeaders* original_response_headers,
+      scoped_refptr<net::HttpResponseHeaders>* override_response_headers)
+      OVERRIDE;
+
+ private:
+  std::string fixed_date_;  // Value substituted into the Date header.
+
+  DISALLOW_COPY_AND_ASSIGN(FixedDateNetworkDelegate);
+};
+
+int FixedDateNetworkDelegate::OnHeadersReceived(
+    net::URLRequest* request,
+    const net::CompletionCallback& callback,
+    const net::HttpResponseHeaders* original_response_headers,
+    scoped_refptr<net::HttpResponseHeaders>* override_response_headers) {
+  net::HttpResponseHeaders* new_response_headers =
+      new net::HttpResponseHeaders(original_response_headers->raw_headers());  // Writable copy.
+
+  new_response_headers->RemoveHeader("Date");
+  new_response_headers->AddHeader("Date: " + fixed_date_);  // Inject the fixed date.
+
+  *override_response_headers = new_response_headers;
+  return TestNetworkDelegate::OnHeadersReceived(request,
+                                                callback,
+                                                original_response_headers,
+                                                override_response_headers);
+}
+
+// Test that cookie expiration times are adjusted for server/client clock
+// skew and that we handle incorrect timezone specifier "UTC" in HTTP Date
+// headers by defaulting to GMT. (crbug.com/135131)
+TEST_F(URLRequestTest, AcceptClockSkewCookieWithWrongDateTimezone) {
+  LocalHttpTestServer test_server;
+  ASSERT_TRUE(test_server.Start());
+
+  // Set up an expired cookie.
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(test_server.GetURL(
+        "set-cookie?StillGood=1;expires=Mon,18-Apr-1977,22:50:13,GMT"),
+        &d,
+        &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+  }
+  // Verify that the cookie is not set (it was already expired).
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(
+        test_server.GetURL("echoheader?Cookie"), &d, &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("StillGood=1") == std::string::npos);
+  }
+  // Set up a cookie with clock skew and "UTC" HTTP Date timezone specifier.
+  {
+    FixedDateNetworkDelegate network_delegate("18-Apr-1977 22:49:13 UTC");
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(test_server.GetURL(
+        "set-cookie?StillGood=1;expires=Mon,18-Apr-1977,22:50:13,GMT"),
+        &d,
+        &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+  }
+  // Verify that the cookie is set (skew adjustment keeps it alive).
+  {
+    TestNetworkDelegate network_delegate;
+    default_context_.set_network_delegate(&network_delegate);
+    TestDelegate d;
+    URLRequest req(
+        test_server.GetURL("echoheader?Cookie"), &d, &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("StillGood=1") != std::string::npos);
+  }
+}
+
+
+// Check that it is impossible to change the referrer in the extra headers of
+// a URLRequest.
+TEST_F(URLRequestTest, DoNotOverrideReferrer) {
+  LocalHttpTestServer test_server;
+  ASSERT_TRUE(test_server.Start());
+
+  // If extra headers contain referer and the request contains a referer,
+  // only the latter shall be respected.
+  {
+    TestDelegate d;
+    URLRequest req(
+        test_server.GetURL("echoheader?Referer"), &d, &default_context_);
+    req.set_referrer("http://foo.com/");
+
+    HttpRequestHeaders headers;
+    headers.SetHeader(HttpRequestHeaders::kReferer, "http://bar.com/");
+    req.SetExtraRequestHeaders(headers);
+
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ("http://foo.com/", d.data_received());
+  }
+
+  // If extra headers contain a referer but the request does not, no referer
+  // shall be sent in the header.
+  {
+    TestDelegate d;
+    URLRequest req(
+        test_server.GetURL("echoheader?Referer"), &d, &default_context_);
+
+    HttpRequestHeaders headers;
+    headers.SetHeader(HttpRequestHeaders::kReferer, "http://bar.com/");
+    req.SetExtraRequestHeaders(headers);
+    req.set_load_flags(LOAD_VALIDATE_CACHE);  // NOTE(review): flag seems unrelated — verify.
+
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ("None", d.data_received());
+  }
+}
+
+class URLRequestTestHTTP : public URLRequestTest {
+ public:
+  URLRequestTestHTTP()
+      : test_server_(FilePath(FILE_PATH_LITERAL(
+                                  "net/data/url_request_unittest"))) {
+  }
+
+ protected:
+  // Requests |redirect_url|, which must return an HTTP 3xx redirect.
+  // |request_method| is the method to use for the initial request.
+  // |redirect_method| is the method that is expected to be used for the second
+  // request, after redirection.
+  // If |include_data| is true, data is uploaded with the request.  The
+  // response body is expected to match it exactly, if and only if
+  // |request_method| == |redirect_method|.
+  void HTTPRedirectMethodTest(const GURL& redirect_url,
+                              const std::string& request_method,
+                              const std::string& redirect_method,
+                              bool include_data) {
+    static const char kData[] = "hello world";
+    TestDelegate d;
+    URLRequest req(redirect_url, &d, &default_context_);
+    req.set_method(request_method);
+    if (include_data) {
+      req.set_upload(make_scoped_ptr(CreateSimpleUploadData(kData)));
+      HttpRequestHeaders headers;
+      headers.SetHeader(HttpRequestHeaders::kContentLength,
+                        base::UintToString(arraysize(kData) - 1));
+      req.SetExtraRequestHeaders(headers);
+    }
+    req.Start();
+    MessageLoop::current()->Run();
+    EXPECT_EQ(redirect_method, req.method());
+    EXPECT_EQ(URLRequestStatus::SUCCESS, req.status().status());
+    EXPECT_EQ(OK, req.status().error());
+    if (include_data) {
+      if (request_method == redirect_method) {
+        EXPECT_EQ(kData, d.data_received());
+      } else {
+        EXPECT_NE(kData, d.data_received());
+      }
+    }
+    if (HasFailure())
+      LOG(WARNING) << "Request method was: " << request_method;
+  }
+
+  void HTTPUploadDataOperationTest(const std::string& method) {
+    const int kMsgSize = 20000;  // Must be a multiple of 10.
+    const int kIterations = 50;
+    char* uploadBytes = new char[kMsgSize+1];  // NOTE(review): raw new; freed below.
+    char* ptr = uploadBytes;
+    char marker = 'a';
+    for (int idx = 0; idx < kMsgSize/10; idx++) {
+      memcpy(ptr, "----------", 10);
+      ptr += 10;
+      if (idx % 100 == 0) {
+        ptr--;
+        *ptr++ = marker;
+        if (++marker > 'z')
+          marker = 'a';
+      }
+    }
+    uploadBytes[kMsgSize] = '\0';
+
+    for (int i = 0; i < kIterations; ++i) {
+      TestDelegate d;
+      URLRequest r(test_server_.GetURL("echo"), &d, &default_context_);
+      r.set_method(method.c_str());
+
+      r.set_upload(make_scoped_ptr(CreateSimpleUploadData(uploadBytes)));
+
+      r.Start();
+      EXPECT_TRUE(r.is_pending());
+
+      MessageLoop::current()->Run();
+
+      ASSERT_EQ(1, d.response_started_count())
+          << "request failed: " << r.status().status()
+          << ", os error: " << r.status().error();
+
+      EXPECT_FALSE(d.received_data_before_response());
+      EXPECT_EQ(uploadBytes, d.data_received());
+    }
+    delete[] uploadBytes;
+  }
+
+  // Queues a fixed sequence of chunks on |r|; the final chunk ends the upload.
+  void AddChunksToUpload(URLRequest* r) {
+    r->AppendChunkToUpload("a", 1, false);
+    r->AppendChunkToUpload("bcd", 3, false);
+    r->AppendChunkToUpload("this is a longer chunk than before.", 35, false);
+    r->AppendChunkToUpload("\r\n\r\n", 4, false);
+    r->AppendChunkToUpload("0", 1, false);
+    r->AppendChunkToUpload("2323", 4, true);
+  }
+
+  void VerifyReceivedDataMatchesChunks(URLRequest* r, TestDelegate* d) {
+    // This should match the chunks sent by AddChunksToUpload().
+    const std::string expected_data =
+        "abcdthis is a longer chunk than before.\r\n\r\n02323";
+
+    ASSERT_EQ(1, d->response_started_count())
+        << "request failed: " << r->status().status()
+        << ", os error: " << r->status().error();
+
+    EXPECT_FALSE(d->received_data_before_response());
+
+    EXPECT_EQ(expected_data.size(), static_cast<size_t>(d->bytes_received()));
+    EXPECT_EQ(expected_data, d->data_received());
+  }
+
+  // Returns true if the server accepted |num_cookies| Set-Cookie headers.
+  bool DoManyCookiesRequest(int num_cookies) {
+    TestDelegate d;
+    URLRequest r(test_server_.GetURL("set-many-cookies?" +
+                                     base::IntToString(num_cookies)),
+                                     &d,
+                                     &default_context_);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    bool is_success = r.status().is_success();
+
+    if (!is_success) {
+      // Requests handled by ChromeFrame send a less precise error message,
+      // ERR_CONNECTION_ABORTED.
+      EXPECT_TRUE(r.status().error() == ERR_RESPONSE_HEADERS_TOO_BIG ||
+                  r.status().error() == ERR_CONNECTION_ABORTED);
+      // The test server appears to be unable to handle subsequent requests
+      // after this error is triggered. Force it to restart.
+      EXPECT_TRUE(test_server_.Stop());
+      EXPECT_TRUE(test_server_.Start());
+    }
+
+    return is_success;
+  }
+
+  LocalHttpTestServer test_server_;
+};
+
+// In this unit test, we're using the HTTPTestServer as a proxy server and
+// issuing a CONNECT request with the magic host name "www.redirect.com".
+// The HTTPTestServer will return a 302 response, which we should not
+// follow.
+// flaky: crbug.com/96594
+TEST_F(URLRequestTestHTTP, FLAKY_ProxyTunnelRedirectTest) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestNetworkDelegate network_delegate;  // Must outlive URLRequest.
+  // Route the request through the test server acting as an HTTP proxy.
+  TestURLRequestContextWithProxy context(
+      test_server_.host_port_pair().ToString(),
+      &network_delegate);
+
+  TestDelegate d;
+  {
+    URLRequest r(GURL("https://www.redirect.com/"), &d, &context);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    // The 302 answer to the CONNECT must surface as a tunnel failure.
+    EXPECT_EQ(URLRequestStatus::FAILED, r.status().status());
+    EXPECT_EQ(ERR_TUNNEL_CONNECTION_FAILED, r.status().error());
+    EXPECT_EQ(1, d.response_started_count());
+    // We should not have followed the redirect.
+    EXPECT_EQ(0, d.received_redirect_count());
+  }
+}
+
+// This is the same as the previous test, but checks that the network delegate
+// registers the error.
+// This test was disabled because it made chrome_frame_net_tests hang
+// (see bug 102991).
+TEST_F(URLRequestTestHTTP, DISABLED_NetworkDelegateTunnelConnectionFailed) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestNetworkDelegate network_delegate;  // Must outlive URLRequest.
+  TestURLRequestContextWithProxy context(
+      test_server_.host_port_pair().ToString(),
+      &network_delegate);
+
+  TestDelegate d;
+  {
+    URLRequest r(GURL("https://www.redirect.com/"), &d, &context);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(URLRequestStatus::FAILED, r.status().status());
+    EXPECT_EQ(ERR_TUNNEL_CONNECTION_FAILED, r.status().error());
+    EXPECT_EQ(1, d.response_started_count());
+    // We should not have followed the redirect.
+    EXPECT_EQ(0, d.received_redirect_count());
+
+    // Unlike the test above, also verify the network delegate observed the
+    // tunnel failure.
+    EXPECT_EQ(1, network_delegate.error_count());
+    EXPECT_EQ(ERR_TUNNEL_CONNECTION_FAILED, network_delegate.last_error());
+  }
+}
+
+// Tests that we can block and asynchronously return OK in various stages.
+TEST_F(URLRequestTestHTTP, NetworkDelegateBlockAsynchronously) {
+  // The three delegate stages, in the order they occur during a request.
+  static const BlockingNetworkDelegate::Stage blocking_stages[] = {
+    BlockingNetworkDelegate::ON_BEFORE_URL_REQUEST,
+    BlockingNetworkDelegate::ON_BEFORE_SEND_HEADERS,
+    BlockingNetworkDelegate::ON_HEADERS_RECEIVED
+  };
+  static const size_t blocking_stages_length = arraysize(blocking_stages);
+
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  // USER_CALLBACK: the delegate stays blocked until DoCallback() is invoked
+  // explicitly below.
+  BlockingNetworkDelegate network_delegate(
+      BlockingNetworkDelegate::USER_CALLBACK);
+  network_delegate.set_block_on(
+      BlockingNetworkDelegate::ON_BEFORE_URL_REQUEST |
+      BlockingNetworkDelegate::ON_BEFORE_SEND_HEADERS |
+      BlockingNetworkDelegate::ON_HEADERS_RECEIVED);
+
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+
+  {
+    URLRequest r(test_server_.GetURL("empty.html"), &d, &context);
+
+    r.Start();
+    // Unblock each stage in turn, verifying the delegate blocked at the
+    // expected stage before resuming it with OK.
+    for (size_t i = 0; i < blocking_stages_length; ++i) {
+      MessageLoop::current()->Run();
+      EXPECT_EQ(blocking_stages[i],
+                network_delegate.stage_blocked_for_callback());
+      network_delegate.DoCallback(OK);
+    }
+    // Final run lets the request complete normally.
+    MessageLoop::current()->Run();
+    EXPECT_EQ(200, r.GetResponseCode());
+    EXPECT_EQ(URLRequestStatus::SUCCESS, r.status().status());
+    EXPECT_EQ(1, network_delegate.created_requests());
+    EXPECT_EQ(0, network_delegate.destroyed_requests());
+  }
+  // |r| goes out of scope above; the delegate must see its destruction.
+  EXPECT_EQ(1, network_delegate.destroyed_requests());
+}
+
+// Tests that the network delegate can block and cancel a request.
+TEST_F(URLRequestTestHTTP, NetworkDelegateCancelRequest) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  // AUTO_CALLBACK: the delegate blocks and then automatically completes
+  // with the configured retval (ERR_EMPTY_RESPONSE), cancelling the
+  // request in ON_BEFORE_URL_REQUEST.
+  BlockingNetworkDelegate network_delegate(
+      BlockingNetworkDelegate::AUTO_CALLBACK);
+  network_delegate.set_block_on(BlockingNetworkDelegate::ON_BEFORE_URL_REQUEST);
+  network_delegate.set_retval(ERR_EMPTY_RESPONSE);
+
+  TestURLRequestContextWithProxy context(
+      test_server_.host_port_pair().ToString(),
+      &network_delegate);
+
+  {
+    URLRequest r(test_server_.GetURL(""), &d, &context);
+
+    r.Start();
+    MessageLoop::current()->Run();
+
+    // The delegate's error is reported as the request's failure status.
+    EXPECT_EQ(URLRequestStatus::FAILED, r.status().status());
+    EXPECT_EQ(ERR_EMPTY_RESPONSE, r.status().error());
+    EXPECT_EQ(1, network_delegate.created_requests());
+    EXPECT_EQ(0, network_delegate.destroyed_requests());
+  }
+  EXPECT_EQ(1, network_delegate.destroyed_requests());
+}
+
+// Helper function for NetworkDelegateCancelRequestAsynchronously and
+// NetworkDelegateCancelRequestSynchronously. Sets up a blocking network
+// delegate operating in |block_mode| and a request for |url|. It blocks the
+// request in |stage| and cancels it with ERR_BLOCKED_BY_CLIENT.
+void NetworkDelegateCancelRequest(BlockingNetworkDelegate::BlockMode block_mode,
+                                  BlockingNetworkDelegate::Stage stage,
+                                  const GURL& url) {
+  TestDelegate d;
+  BlockingNetworkDelegate network_delegate(block_mode);
+  network_delegate.set_retval(ERR_BLOCKED_BY_CLIENT);
+  network_delegate.set_block_on(stage);
+
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+
+  {
+    URLRequest r(url, &d, &context);
+
+    r.Start();
+    MessageLoop::current()->Run();
+
+    // Regardless of the stage, the request must end FAILED with the
+    // delegate's cancellation error.
+    EXPECT_EQ(URLRequestStatus::FAILED, r.status().status());
+    EXPECT_EQ(ERR_BLOCKED_BY_CLIENT, r.status().error());
+    EXPECT_EQ(1, network_delegate.created_requests());
+    EXPECT_EQ(0, network_delegate.destroyed_requests());
+  }
+  // |r|'s destruction above must be observed by the delegate.
+  EXPECT_EQ(1, network_delegate.destroyed_requests());
+}
+
+// The following 3 tests check that the network delegate can cancel a request
+// synchronously in various stages of the request.
+TEST_F(URLRequestTestHTTP, NetworkDelegateCancelRequestSynchronously1) {
+  ASSERT_TRUE(test_server_.Start());
+  // Synchronous cancel in ON_BEFORE_URL_REQUEST.
+  NetworkDelegateCancelRequest(BlockingNetworkDelegate::SYNCHRONOUS,
+                               BlockingNetworkDelegate::ON_BEFORE_URL_REQUEST,
+                               test_server_.GetURL(""));
+}
+
+TEST_F(URLRequestTestHTTP, NetworkDelegateCancelRequestSynchronously2) {
+  ASSERT_TRUE(test_server_.Start());
+  // Synchronous cancel in ON_BEFORE_SEND_HEADERS.
+  NetworkDelegateCancelRequest(BlockingNetworkDelegate::SYNCHRONOUS,
+                               BlockingNetworkDelegate::ON_BEFORE_SEND_HEADERS,
+                               test_server_.GetURL(""));
+}
+
+TEST_F(URLRequestTestHTTP, NetworkDelegateCancelRequestSynchronously3) {
+  ASSERT_TRUE(test_server_.Start());
+  // Synchronous cancel in ON_HEADERS_RECEIVED.
+  NetworkDelegateCancelRequest(BlockingNetworkDelegate::SYNCHRONOUS,
+                               BlockingNetworkDelegate::ON_HEADERS_RECEIVED,
+                               test_server_.GetURL(""));
+}
+
+// The following 3 tests check that the network delegate can cancel a request
+// asynchronously in various stages of the request.
+TEST_F(URLRequestTestHTTP, NetworkDelegateCancelRequestAsynchronously1) {
+  ASSERT_TRUE(test_server_.Start());
+  // Asynchronous (auto-callback) cancel in ON_BEFORE_URL_REQUEST.
+  NetworkDelegateCancelRequest(BlockingNetworkDelegate::AUTO_CALLBACK,
+                               BlockingNetworkDelegate::ON_BEFORE_URL_REQUEST,
+                               test_server_.GetURL(""));
+}
+
+TEST_F(URLRequestTestHTTP, NetworkDelegateCancelRequestAsynchronously2) {
+  ASSERT_TRUE(test_server_.Start());
+  // Asynchronous (auto-callback) cancel in ON_BEFORE_SEND_HEADERS.
+  NetworkDelegateCancelRequest(BlockingNetworkDelegate::AUTO_CALLBACK,
+                               BlockingNetworkDelegate::ON_BEFORE_SEND_HEADERS,
+                               test_server_.GetURL(""));
+}
+
+TEST_F(URLRequestTestHTTP, NetworkDelegateCancelRequestAsynchronously3) {
+  ASSERT_TRUE(test_server_.Start());
+  // Asynchronous (auto-callback) cancel in ON_HEADERS_RECEIVED.
+  NetworkDelegateCancelRequest(BlockingNetworkDelegate::AUTO_CALLBACK,
+                               BlockingNetworkDelegate::ON_HEADERS_RECEIVED,
+                               test_server_.GetURL(""));
+}
+
+// Tests that the network delegate can block and redirect a request to a new
+// URL.
+TEST_F(URLRequestTestHTTP, NetworkDelegateRedirectRequest) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  // Block asynchronously in ON_BEFORE_URL_REQUEST and redirect to
+  // simple.html from there.
+  BlockingNetworkDelegate network_delegate(
+      BlockingNetworkDelegate::AUTO_CALLBACK);
+  network_delegate.set_block_on(BlockingNetworkDelegate::ON_BEFORE_URL_REQUEST);
+  GURL redirect_url(test_server_.GetURL("simple.html"));
+  network_delegate.set_redirect_url(redirect_url);
+
+  TestURLRequestContextWithProxy context(
+      test_server_.host_port_pair().ToString(),
+      &network_delegate);
+
+  {
+    GURL original_url(test_server_.GetURL("empty.html"));
+    URLRequest r(original_url, &d, &context);
+
+    r.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(URLRequestStatus::SUCCESS, r.status().status());
+    EXPECT_EQ(0, r.status().error());
+    // The request ends up at the redirect target while remembering where
+    // it started; the chain holds both URLs.
+    EXPECT_EQ(redirect_url, r.url());
+    EXPECT_EQ(original_url, r.original_url());
+    EXPECT_EQ(2U, r.url_chain().size());
+    EXPECT_EQ(1, network_delegate.created_requests());
+    EXPECT_EQ(0, network_delegate.destroyed_requests());
+  }
+  EXPECT_EQ(1, network_delegate.destroyed_requests());
+}
+
+// Tests that the network delegate can block and redirect a request to a new
+// URL by setting a redirect_url and returning in OnBeforeURLRequest directly.
+TEST_F(URLRequestTestHTTP, NetworkDelegateRedirectRequestSynchronously) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  // Unlike the test above, no stage is blocked: the redirect URL is set
+  // and OnBeforeURLRequest returns synchronously.
+  BlockingNetworkDelegate network_delegate(
+      BlockingNetworkDelegate::SYNCHRONOUS);
+  GURL redirect_url(test_server_.GetURL("simple.html"));
+  network_delegate.set_redirect_url(redirect_url);
+
+  TestURLRequestContextWithProxy context(
+      test_server_.host_port_pair().ToString(),
+      &network_delegate);
+
+  {
+    GURL original_url(test_server_.GetURL("empty.html"));
+    URLRequest r(original_url, &d, &context);
+
+    r.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(URLRequestStatus::SUCCESS, r.status().status());
+    EXPECT_EQ(0, r.status().error());
+    EXPECT_EQ(redirect_url, r.url());
+    EXPECT_EQ(original_url, r.original_url());
+    EXPECT_EQ(2U, r.url_chain().size());
+    EXPECT_EQ(1, network_delegate.created_requests());
+    EXPECT_EQ(0, network_delegate.destroyed_requests());
+  }
+  EXPECT_EQ(1, network_delegate.destroyed_requests());
+}
+
+// Tests that redirects caused by the network delegate preserve POST data.
+TEST_F(URLRequestTestHTTP, NetworkDelegateRedirectRequestPost) {
+  ASSERT_TRUE(test_server_.Start());
+
+  const char kData[] = "hello world";
+
+  TestDelegate d;
+  // Redirect a POST to the server's "echo" handler, which sends the
+  // request body back, so we can verify the upload survived the redirect.
+  BlockingNetworkDelegate network_delegate(
+      BlockingNetworkDelegate::AUTO_CALLBACK);
+  network_delegate.set_block_on(BlockingNetworkDelegate::ON_BEFORE_URL_REQUEST);
+  GURL redirect_url(test_server_.GetURL("echo"));
+  network_delegate.set_redirect_url(redirect_url);
+
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+
+  {
+    GURL original_url(test_server_.GetURL("empty.html"));
+    URLRequest r(original_url, &d, &context);
+    r.set_method("POST");
+    r.set_upload(make_scoped_ptr(CreateSimpleUploadData(kData)));
+    HttpRequestHeaders headers;
+    // arraysize(kData) - 1 excludes the trailing NUL from the length.
+    headers.SetHeader(HttpRequestHeaders::kContentLength,
+                      base::UintToString(arraysize(kData) - 1));
+    r.SetExtraRequestHeaders(headers);
+    r.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(URLRequestStatus::SUCCESS, r.status().status());
+    EXPECT_EQ(0, r.status().error());
+    EXPECT_EQ(redirect_url, r.url());
+    EXPECT_EQ(original_url, r.original_url());
+    EXPECT_EQ(2U, r.url_chain().size());
+    EXPECT_EQ(1, network_delegate.created_requests());
+    EXPECT_EQ(0, network_delegate.destroyed_requests());
+    // Method and body must be preserved across the delegate redirect.
+    EXPECT_EQ("POST", r.method());
+    EXPECT_EQ(kData, d.data_received());
+  }
+  EXPECT_EQ(1, network_delegate.destroyed_requests());
+}
+
+// Tests that the network delegate can synchronously complete OnAuthRequired
+// by taking no action. This indicates that the NetworkDelegate does not want to
+// handle the challenge, and is passing the buck along to the
+// URLRequest::Delegate.
+TEST_F(URLRequestTestHTTP, NetworkDelegateOnAuthRequiredSyncNoAction) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  BlockingNetworkDelegate network_delegate(
+      BlockingNetworkDelegate::SYNCHRONOUS);
+
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+
+  // The URLRequest::Delegate supplies the credentials, since the network
+  // delegate takes no action on the auth challenge.
+  d.set_credentials(AuthCredentials(kUser, kSecret));
+
+  {
+    GURL url(test_server_.GetURL("auth-basic"));
+    URLRequest r(url, &d, &context);
+    r.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(URLRequestStatus::SUCCESS, r.status().status());
+    EXPECT_EQ(0, r.status().error());
+    EXPECT_EQ(200, r.GetResponseCode());
+    // The challenge reached the URLRequest::Delegate.
+    EXPECT_TRUE(d.auth_required_called());
+    EXPECT_EQ(1, network_delegate.created_requests());
+    EXPECT_EQ(0, network_delegate.destroyed_requests());
+  }
+  EXPECT_EQ(1, network_delegate.destroyed_requests());
+}
+
+// Tests that the network delegate can synchronously complete OnAuthRequired
+// by setting credentials.
+TEST_F(URLRequestTestHTTP, NetworkDelegateOnAuthRequiredSyncSetAuth) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  BlockingNetworkDelegate network_delegate(
+      BlockingNetworkDelegate::SYNCHRONOUS);
+  network_delegate.set_block_on(BlockingNetworkDelegate::ON_AUTH_REQUIRED);
+  network_delegate.set_auth_retval(
+      NetworkDelegate::AUTH_REQUIRED_RESPONSE_SET_AUTH);
+
+  // The network delegate itself supplies the credentials.
+  network_delegate.set_auth_credentials(AuthCredentials(kUser, kSecret));
+
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+
+  {
+    GURL url(test_server_.GetURL("auth-basic"));
+    URLRequest r(url, &d, &context);
+    r.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(URLRequestStatus::SUCCESS, r.status().status());
+    EXPECT_EQ(0, r.status().error());
+    EXPECT_EQ(200, r.GetResponseCode());
+    // Since the network delegate handled auth, the URLRequest::Delegate
+    // must never see the challenge.
+    EXPECT_FALSE(d.auth_required_called());
+    EXPECT_EQ(1, network_delegate.created_requests());
+    EXPECT_EQ(0, network_delegate.destroyed_requests());
+  }
+  EXPECT_EQ(1, network_delegate.destroyed_requests());
+}
+
+// Tests that the network delegate can synchronously complete OnAuthRequired
+// by cancelling authentication.
+TEST_F(URLRequestTestHTTP, NetworkDelegateOnAuthRequiredSyncCancel) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  BlockingNetworkDelegate network_delegate(
+      BlockingNetworkDelegate::SYNCHRONOUS);
+  network_delegate.set_block_on(BlockingNetworkDelegate::ON_AUTH_REQUIRED);
+  network_delegate.set_auth_retval(
+      NetworkDelegate::AUTH_REQUIRED_RESPONSE_CANCEL_AUTH);
+
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+
+  {
+    GURL url(test_server_.GetURL("auth-basic"));
+    URLRequest r(url, &d, &context);
+    r.Start();
+    MessageLoop::current()->Run();
+
+    // Cancelling auth is not a request failure: the 401 response is
+    // delivered as the final result.
+    EXPECT_EQ(URLRequestStatus::SUCCESS, r.status().status());
+    EXPECT_EQ(OK, r.status().error());
+    EXPECT_EQ(401, r.GetResponseCode());
+    EXPECT_FALSE(d.auth_required_called());
+    EXPECT_EQ(1, network_delegate.created_requests());
+    EXPECT_EQ(0, network_delegate.destroyed_requests());
+  }
+  EXPECT_EQ(1, network_delegate.destroyed_requests());
+}
+
+// Tests that the network delegate can asynchronously complete OnAuthRequired
+// by taking no action. This indicates that the NetworkDelegate does not want
+// to handle the challenge, and is passing the buck along to the
+// URLRequest::Delegate.
+TEST_F(URLRequestTestHTTP, NetworkDelegateOnAuthRequiredAsyncNoAction) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  // Async variant of SyncNoAction: the delegate blocks on the auth stage
+  // and then auto-completes without handling the challenge.
+  BlockingNetworkDelegate network_delegate(
+      BlockingNetworkDelegate::AUTO_CALLBACK);
+  network_delegate.set_block_on(BlockingNetworkDelegate::ON_AUTH_REQUIRED);
+
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+
+  d.set_credentials(AuthCredentials(kUser, kSecret));
+
+  {
+    GURL url(test_server_.GetURL("auth-basic"));
+    URLRequest r(url, &d, &context);
+    r.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(URLRequestStatus::SUCCESS, r.status().status());
+    EXPECT_EQ(0, r.status().error());
+    EXPECT_EQ(200, r.GetResponseCode());
+    // The challenge fell through to the URLRequest::Delegate.
+    EXPECT_TRUE(d.auth_required_called());
+    EXPECT_EQ(1, network_delegate.created_requests());
+    EXPECT_EQ(0, network_delegate.destroyed_requests());
+  }
+  EXPECT_EQ(1, network_delegate.destroyed_requests());
+}
+
+// Tests that the network delegate can asynchronously complete OnAuthRequired
+// by setting credentials.
+TEST_F(URLRequestTestHTTP, NetworkDelegateOnAuthRequiredAsyncSetAuth) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  BlockingNetworkDelegate network_delegate(
+      BlockingNetworkDelegate::AUTO_CALLBACK);
+  network_delegate.set_block_on(BlockingNetworkDelegate::ON_AUTH_REQUIRED);
+  network_delegate.set_auth_retval(
+      NetworkDelegate::AUTH_REQUIRED_RESPONSE_SET_AUTH);
+
+  // The network delegate supplies the credentials asynchronously.
+  AuthCredentials auth_credentials(kUser, kSecret);
+  network_delegate.set_auth_credentials(auth_credentials);
+
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+
+  {
+    GURL url(test_server_.GetURL("auth-basic"));
+    URLRequest r(url, &d, &context);
+    r.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(URLRequestStatus::SUCCESS, r.status().status());
+    EXPECT_EQ(0, r.status().error());
+
+    EXPECT_EQ(200, r.GetResponseCode());
+    // Auth was handled by the delegate, so the URLRequest::Delegate never
+    // saw the challenge.
+    EXPECT_FALSE(d.auth_required_called());
+    EXPECT_EQ(1, network_delegate.created_requests());
+    EXPECT_EQ(0, network_delegate.destroyed_requests());
+  }
+  EXPECT_EQ(1, network_delegate.destroyed_requests());
+}
+
+// Tests that the network delegate can asynchronously complete OnAuthRequired
+// by cancelling authentication.
+TEST_F(URLRequestTestHTTP, NetworkDelegateOnAuthRequiredAsyncCancel) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  BlockingNetworkDelegate network_delegate(
+      BlockingNetworkDelegate::AUTO_CALLBACK);
+  network_delegate.set_block_on(BlockingNetworkDelegate::ON_AUTH_REQUIRED);
+  network_delegate.set_auth_retval(
+      NetworkDelegate::AUTH_REQUIRED_RESPONSE_CANCEL_AUTH);
+
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+
+  {
+    GURL url(test_server_.GetURL("auth-basic"));
+    URLRequest r(url, &d, &context);
+    r.Start();
+    MessageLoop::current()->Run();
+
+    // As in the sync-cancel case, the 401 is delivered as a successful
+    // response rather than a request failure.
+    EXPECT_EQ(URLRequestStatus::SUCCESS, r.status().status());
+    EXPECT_EQ(OK, r.status().error());
+    EXPECT_EQ(401, r.GetResponseCode());
+    EXPECT_FALSE(d.auth_required_called());
+    EXPECT_EQ(1, network_delegate.created_requests());
+    EXPECT_EQ(0, network_delegate.destroyed_requests());
+  }
+  EXPECT_EQ(1, network_delegate.destroyed_requests());
+}
+
+// Tests that we can handle when a network request was canceled while we were
+// waiting for the network delegate.
+// Part 1: Request is cancelled while waiting for OnBeforeURLRequest callback.
+TEST_F(URLRequestTestHTTP, NetworkDelegateCancelWhileWaiting1) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  // USER_CALLBACK: the delegate stays blocked until DoCallback() is
+  // called, which this test deliberately never does.
+  BlockingNetworkDelegate network_delegate(
+      BlockingNetworkDelegate::USER_CALLBACK);
+  network_delegate.set_block_on(BlockingNetworkDelegate::ON_BEFORE_URL_REQUEST);
+
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+
+  {
+    URLRequest r(test_server_.GetURL(""), &d, &context);
+
+    r.Start();
+    MessageLoop::current()->Run();
+    EXPECT_EQ(BlockingNetworkDelegate::ON_BEFORE_URL_REQUEST,
+              network_delegate.stage_blocked_for_callback());
+    EXPECT_EQ(0, network_delegate.completed_requests());
+    // Cancel before callback.
+    r.Cancel();
+    // Ensure that network delegate is notified.
+    EXPECT_EQ(1, network_delegate.completed_requests());
+    EXPECT_EQ(URLRequestStatus::CANCELED, r.status().status());
+    EXPECT_EQ(ERR_ABORTED, r.status().error());
+    EXPECT_EQ(1, network_delegate.created_requests());
+    EXPECT_EQ(0, network_delegate.destroyed_requests());
+  }
+  EXPECT_EQ(1, network_delegate.destroyed_requests());
+}
+
+// Tests that we can handle when a network request was canceled while we were
+// waiting for the network delegate.
+// Part 2: Request is cancelled while waiting for OnBeforeSendHeaders callback.
+TEST_F(URLRequestTestHTTP, NetworkDelegateCancelWhileWaiting2) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  // Same as Part 1, but the delegate blocks in ON_BEFORE_SEND_HEADERS.
+  BlockingNetworkDelegate network_delegate(
+      BlockingNetworkDelegate::USER_CALLBACK);
+  network_delegate.set_block_on(
+      BlockingNetworkDelegate::ON_BEFORE_SEND_HEADERS);
+
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+
+  {
+    URLRequest r(test_server_.GetURL(""), &d, &context);
+
+    r.Start();
+    MessageLoop::current()->Run();
+    EXPECT_EQ(BlockingNetworkDelegate::ON_BEFORE_SEND_HEADERS,
+              network_delegate.stage_blocked_for_callback());
+    EXPECT_EQ(0, network_delegate.completed_requests());
+    // Cancel before callback.
+    r.Cancel();
+    // Ensure that network delegate is notified.
+    EXPECT_EQ(1, network_delegate.completed_requests());
+    EXPECT_EQ(URLRequestStatus::CANCELED, r.status().status());
+    EXPECT_EQ(ERR_ABORTED, r.status().error());
+    EXPECT_EQ(1, network_delegate.created_requests());
+    EXPECT_EQ(0, network_delegate.destroyed_requests());
+  }
+  EXPECT_EQ(1, network_delegate.destroyed_requests());
+}
+
+// Tests that we can handle when a network request was canceled while we were
+// waiting for the network delegate.
+// Part 3: Request is cancelled while waiting for OnHeadersReceived callback.
+TEST_F(URLRequestTestHTTP, NetworkDelegateCancelWhileWaiting3) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  // Same as Parts 1-2, but the delegate blocks in ON_HEADERS_RECEIVED.
+  BlockingNetworkDelegate network_delegate(
+      BlockingNetworkDelegate::USER_CALLBACK);
+  network_delegate.set_block_on(BlockingNetworkDelegate::ON_HEADERS_RECEIVED);
+
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+
+  {
+    URLRequest r(test_server_.GetURL(""), &d, &context);
+
+    r.Start();
+    MessageLoop::current()->Run();
+    EXPECT_EQ(BlockingNetworkDelegate::ON_HEADERS_RECEIVED,
+              network_delegate.stage_blocked_for_callback());
+    EXPECT_EQ(0, network_delegate.completed_requests());
+    // Cancel before callback.
+    r.Cancel();
+    // Ensure that network delegate is notified.
+    EXPECT_EQ(1, network_delegate.completed_requests());
+    EXPECT_EQ(URLRequestStatus::CANCELED, r.status().status());
+    EXPECT_EQ(ERR_ABORTED, r.status().error());
+    EXPECT_EQ(1, network_delegate.created_requests());
+    EXPECT_EQ(0, network_delegate.destroyed_requests());
+  }
+  EXPECT_EQ(1, network_delegate.destroyed_requests());
+}
+
+// Tests that we can handle when a network request was canceled while we were
+// waiting for the network delegate.
+// Part 4: Request is cancelled while waiting for OnAuthRequired callback.
+TEST_F(URLRequestTestHTTP, NetworkDelegateCancelWhileWaiting4) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  // Same as Parts 1-3, but the delegate blocks in ON_AUTH_REQUIRED, so
+  // the request targets the server's auth-basic handler.
+  BlockingNetworkDelegate network_delegate(
+      BlockingNetworkDelegate::USER_CALLBACK);
+  network_delegate.set_block_on(BlockingNetworkDelegate::ON_AUTH_REQUIRED);
+
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+
+  {
+    URLRequest r(test_server_.GetURL("auth-basic"), &d, &context);
+
+    r.Start();
+    MessageLoop::current()->Run();
+    EXPECT_EQ(BlockingNetworkDelegate::ON_AUTH_REQUIRED,
+              network_delegate.stage_blocked_for_callback());
+    EXPECT_EQ(0, network_delegate.completed_requests());
+    // Cancel before callback.
+    r.Cancel();
+    // Ensure that network delegate is notified.
+    EXPECT_EQ(1, network_delegate.completed_requests());
+    EXPECT_EQ(URLRequestStatus::CANCELED, r.status().status());
+    EXPECT_EQ(ERR_ABORTED, r.status().error());
+    EXPECT_EQ(1, network_delegate.created_requests());
+    EXPECT_EQ(0, network_delegate.destroyed_requests());
+  }
+  EXPECT_EQ(1, network_delegate.destroyed_requests());
+}
+
+// In this unit test, we're using the HTTPTestServer as a proxy server and
+// issuing a CONNECT request with the magic host name "www.server-auth.com".
+// The HTTPTestServer will return a 401 response, which we should balk at.
+TEST_F(URLRequestTestHTTP, UnexpectedServerAuthTest) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestNetworkDelegate network_delegate;  // Must outlive URLRequest.
+  TestURLRequestContextWithProxy context(
+      test_server_.host_port_pair().ToString(),
+      &network_delegate);
+
+  TestDelegate d;
+  {
+    URLRequest r(GURL("https://www.server-auth.com/"), &d, &context);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    // A 401 in response to CONNECT must be treated as a tunnel failure,
+    // not as an auth challenge to answer.
+    EXPECT_EQ(URLRequestStatus::FAILED, r.status().status());
+    EXPECT_EQ(ERR_TUNNEL_CONNECTION_FAILED, r.status().error());
+  }
+}
+
+// Simple GET against the test server root; verifies data arrives and the
+// socket address matches the server. (NOTE(review): nothing in this body
+// visibly disables caching despite the name — presumably the first fetch
+// in a fresh context is inherently uncached; confirm against the fixture.)
+TEST_F(URLRequestTestHTTP, GetTest_NoCache) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  {
+    URLRequest r(test_server_.GetURL(""), &d, &default_context_);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_NE(0, d.bytes_received());
+    EXPECT_EQ(test_server_.host_port_pair().host(),
+              r.GetSocketAddress().host());
+    EXPECT_EQ(test_server_.host_port_pair().port(),
+              r.GetSocketAddress().port());
+
+    // TODO(eroman): Add back the NetLog tests...
+  }
+}
+
+// This test has the server send a large number of cookies to the client.
+// To ensure that no number of cookies causes a crash, a galloping binary
+// search is used to estimate that maximum number of cookies that are accepted
+// by the browser. Beyond the maximum number, the request will fail with
+// ERR_RESPONSE_HEADERS_TOO_BIG.
+TEST_F(URLRequestTestHTTP, GetTest_ManyCookies) {
+  ASSERT_TRUE(test_server_.Start());
+
+  // [lower_bound, upper_bound] brackets the largest cookie count that
+  // still succeeds.
+  int lower_bound = 0;
+  int upper_bound = 1;
+
+  // Double the number of cookies until the response header limits are
+  // exceeded.
+  while (DoManyCookiesRequest(upper_bound)) {
+    lower_bound = upper_bound;
+    upper_bound *= 2;
+    // Safety valve: fail rather than loop forever if no limit is hit.
+    ASSERT_LT(upper_bound, 1000000);
+  }
+
+  // Stop the search once the bracket is within 0.5% (at least 2) cookies.
+  int tolerance = upper_bound * 0.005;
+  if (tolerance < 2)
+    tolerance = 2;
+
+  // Perform a binary search to find the highest possible number of cookies,
+  // within the desired tolerance.
+  while (upper_bound - lower_bound >= tolerance) {
+    int num_cookies = (lower_bound + upper_bound) / 2;
+
+    if (DoManyCookiesRequest(num_cookies))
+      lower_bound = num_cookies;
+    else
+      upper_bound = num_cookies;
+  }
+  // Success: the test did not crash.
+}
+
+// Basic GET against the test server root: the response must start exactly
+// once, deliver a nonzero number of bytes, and come from the test server's
+// own host:port.
+TEST_F(URLRequestTestHTTP, GetTest) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  {
+    URLRequest r(test_server_.GetURL(""), &d, &default_context_);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_NE(0, d.bytes_received());
+    EXPECT_EQ(test_server_.host_port_pair().host(),
+              r.GetSocketAddress().host());
+    EXPECT_EQ(test_server_.host_port_pair().port(),
+              r.GetSocketAddress().port());
+  }
+}
+
+// Fetches a gzipped file while the server advertises various (sometimes
+// wrong) Content-Length values, and checks which mismatches are tolerated.
+TEST_F(URLRequestTestHTTP, GetZippedTest) {
+  ASSERT_TRUE(test_server_.Start());
+
+  // Parameter that specifies the Content-Length field in the response:
+  // C - Compressed length.
+  // U - Uncompressed length.
+  // L - Large length (larger than both C & U).
+  // M - Medium length (between C & U).
+  // S - Small length (smaller than both C & U).
+  const char test_parameters[] = "CULMS";
+  const int num_tests = arraysize(test_parameters)- 1;  // Skip NULL.
+  // C & U should be OK.
+  // L & M are larger than the data sent, and show an error.
+  // S has too little data, but we seem to accept it.
+  const bool test_expect_success[num_tests] =
+      { true, true, false, false, true };
+
+  for (int i = 0; i < num_tests ; i++) {
+    TestDelegate d;
+    {
+      std::string test_file =
+          base::StringPrintf("compressedfiles/BullRunSpeech.txt?%c",
+                             test_parameters[i]);
+
+      // A fresh context per iteration keeps the runs independent.
+      TestNetworkDelegate network_delegate;  // Must outlive URLRequest.
+      TestURLRequestContext context(true);
+      context.set_network_delegate(&network_delegate);
+      context.Init();
+
+      URLRequest r(test_server_.GetURL(test_file), &d, &context);
+      r.Start();
+      EXPECT_TRUE(r.is_pending());
+
+      MessageLoop::current()->Run();
+
+      EXPECT_EQ(1, d.response_started_count());
+      EXPECT_FALSE(d.received_data_before_response());
+      VLOG(1) << " Received " << d.bytes_received() << " bytes"
+              << " status = " << r.status().status()
+              << " error = " << r.status().error();
+      if (test_expect_success[i]) {
+        EXPECT_EQ(URLRequestStatus::SUCCESS, r.status().status())
+            << " Parameter = \"" << test_file << "\"";
+      } else {
+        EXPECT_EQ(URLRequestStatus::FAILED, r.status().status());
+        EXPECT_EQ(ERR_CONTENT_LENGTH_MISMATCH, r.status().error())
+            << " Parameter = \"" << test_file << "\"";
+      }
+    }
+  }
+}
+
+// This test was disabled because it made chrome_frame_net_tests hang
+// (see bug 102991).
+TEST_F(URLRequestTestHTTP, DISABLED_HTTPSToHTTPRedirectNoRefererTest) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestServer https_test_server(TestServer::TYPE_HTTPS,
+                               TestServer::kLocalhost,
+                               FilePath(FILE_PATH_LITERAL("net/data/ssl")));
+  ASSERT_TRUE(https_test_server.Start());
+
+  // An https server is sent a request with an https referer,
+  // and responds with a redirect to an http url. The http
+  // server should not be sent the referer.
+  GURL http_destination = test_server_.GetURL("");
+  TestDelegate d;
+  URLRequest req(https_test_server.GetURL(
+      "server-redirect?" + http_destination.spec()), &d, &default_context_);
+  req.set_referrer("https://www.referrer.com/");
+  req.Start();
+  MessageLoop::current()->Run();
+
+  EXPECT_EQ(1, d.response_started_count());
+  EXPECT_EQ(1, d.received_redirect_count());
+  EXPECT_EQ(http_destination, req.url());
+  // The referrer must have been cleared on the HTTPS->HTTP downgrade.
+  EXPECT_EQ(std::string(), req.referrer());
+}
+
+// Chains two server-side redirects (original -> middle -> destination) and
+// verifies both redirects are followed and recorded in order in url_chain().
+TEST_F(URLRequestTestHTTP, MultipleRedirectTest) {
+  ASSERT_TRUE(test_server_.Start());
+
+  GURL destination_url = test_server_.GetURL("");
+  GURL middle_redirect_url = test_server_.GetURL(
+      "server-redirect?" + destination_url.spec());
+  GURL original_url = test_server_.GetURL(
+      "server-redirect?" + middle_redirect_url.spec());
+  TestDelegate d;
+  URLRequest req(original_url, &d, &default_context_);
+  req.Start();
+  MessageLoop::current()->Run();
+
+  EXPECT_EQ(1, d.response_started_count());
+  EXPECT_EQ(2, d.received_redirect_count());
+  EXPECT_EQ(destination_url, req.url());
+  EXPECT_EQ(original_url, req.original_url());
+  // url_chain() lists every hop from first to last.
+  ASSERT_EQ(3U, req.url_chain().size());
+  EXPECT_EQ(original_url, req.url_chain()[0]);
+  EXPECT_EQ(middle_redirect_url, req.url_chain()[1]);
+  EXPECT_EQ(destination_url, req.url_chain()[2]);
+}
+
+namespace {
+
+const char kExtraHeader[] = "Allow-Snafu";
+const char kExtraValue[] = "fubar";
+
+class RedirectWithAdditionalHeadersDelegate : public TestDelegate {
+  void OnReceivedRedirect(net::URLRequest* request,
+                          const GURL& new_url,
+                          bool* defer_redirect) {
+    TestDelegate::OnReceivedRedirect(request, new_url, defer_redirect);
+    request->SetExtraRequestHeaderByName(kExtraHeader, kExtraValue, false);
+  }
+};
+
+}  // namespace
+
+TEST_F(URLRequestTestHTTP, RedirectWithAdditionalHeadersTest) {
+  ASSERT_TRUE(test_server_.Start());
+
+  GURL destination_url = test_server_.GetURL(
+      "echoheader?" + std::string(kExtraHeader));
+  GURL original_url = test_server_.GetURL(
+      "server-redirect?" + destination_url.spec());
+  RedirectWithAdditionalHeadersDelegate d;
+  URLRequest req(original_url, &d, &default_context_);
+  req.Start();
+  MessageLoop::current()->Run();
+
+  std::string value;
+  const HttpRequestHeaders& headers = req.extra_request_headers();
+  EXPECT_TRUE(headers.GetHeader(kExtraHeader, &value));
+  EXPECT_EQ(kExtraValue, value);
+  EXPECT_FALSE(req.is_pending());
+  EXPECT_FALSE(req.is_redirecting());
+  EXPECT_EQ(kExtraValue, d.data_received());
+}
+
+namespace {
+
+const char kExtraHeaderToRemove[] = "To-Be-Removed";
+
+class RedirectWithHeaderRemovalDelegate : public TestDelegate {
+  void OnReceivedRedirect(net::URLRequest* request,
+                          const GURL& new_url,
+                          bool* defer_redirect) {
+    TestDelegate::OnReceivedRedirect(request, new_url, defer_redirect);
+    request->RemoveRequestHeaderByName(kExtraHeaderToRemove);
+  }
+};
+
+}  // namespace
+
+TEST_F(URLRequestTestHTTP, RedirectWithHeaderRemovalTest) {
+  ASSERT_TRUE(test_server_.Start());
+
+  GURL destination_url = test_server_.GetURL(
+      "echoheader?" + std::string(kExtraHeaderToRemove));
+  GURL original_url = test_server_.GetURL(
+      "server-redirect?" + destination_url.spec());
+  RedirectWithHeaderRemovalDelegate d;
+  URLRequest req(original_url, &d, &default_context_);
+  req.SetExtraRequestHeaderByName(kExtraHeaderToRemove, "dummy", false);
+  req.Start();
+  MessageLoop::current()->Run();
+
+  std::string value;
+  const HttpRequestHeaders& headers = req.extra_request_headers();
+  EXPECT_FALSE(headers.GetHeader(kExtraHeaderToRemove, &value));
+  EXPECT_FALSE(req.is_pending());
+  EXPECT_FALSE(req.is_redirecting());
+  EXPECT_EQ("None", d.data_received());
+}
+
+TEST_F(URLRequestTestHTTP, CancelTest) {
+  TestDelegate d;
+  {
+    URLRequest r(GURL("http://www.google.com/"), &d, &default_context_);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    r.Cancel();
+
+    MessageLoop::current()->Run();
+
+    // We expect to receive OnResponseStarted even though the request has been
+    // cancelled.
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_EQ(0, d.bytes_received());
+    EXPECT_FALSE(d.received_data_before_response());
+  }
+}
+
+TEST_F(URLRequestTestHTTP, CancelTest2) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  {
+    URLRequest r(test_server_.GetURL(""), &d, &default_context_);
+
+    d.set_cancel_in_response_started(true);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_EQ(0, d.bytes_received());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_EQ(URLRequestStatus::CANCELED, r.status().status());
+  }
+}
+
+TEST_F(URLRequestTestHTTP, CancelTest3) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  {
+    URLRequest r(test_server_.GetURL(""), &d, &default_context_);
+
+    d.set_cancel_in_received_data(true);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(1, d.response_started_count());
+    // There is no guarantee about how much data was received
+    // before the cancel was issued.  It could have been 0 bytes,
+    // or it could have been all the bytes.
+    // EXPECT_EQ(0, d.bytes_received());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_EQ(URLRequestStatus::CANCELED, r.status().status());
+  }
+}
+
+TEST_F(URLRequestTestHTTP, CancelTest4) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  {
+    URLRequest r(test_server_.GetURL(""), &d, &default_context_);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    // The request will be implicitly canceled when it is destroyed. The
+    // test delegate must not post a quit message when this happens because
+    // this test doesn't actually have a message loop. The quit message would
+    // get put on this thread's message queue and the next test would exit
+    // early, causing problems.
+    d.set_quit_on_complete(false);
+  }
+  // expect things to just cleanup properly.
+
+  // we won't actually get a received response here because we've never run the
+  // message loop
+  EXPECT_FALSE(d.received_data_before_response());
+  EXPECT_EQ(0, d.bytes_received());
+}
+
+TEST_F(URLRequestTestHTTP, CancelTest5) {
+  ASSERT_TRUE(test_server_.Start());
+
+  // populate cache
+  {
+    TestDelegate d;
+    URLRequest r(test_server_.GetURL("cachetime"), &d, &default_context_);
+    r.Start();
+    MessageLoop::current()->Run();
+    EXPECT_EQ(URLRequestStatus::SUCCESS, r.status().status());
+  }
+
+  // cancel read from cache (see bug 990242)
+  {
+    TestDelegate d;
+    URLRequest r(test_server_.GetURL("cachetime"), &d, &default_context_);
+    r.Start();
+    r.Cancel();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(URLRequestStatus::CANCELED, r.status().status());
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_EQ(0, d.bytes_received());
+    EXPECT_FALSE(d.received_data_before_response());
+  }
+}
+
+TEST_F(URLRequestTestHTTP, PostTest) {
+  ASSERT_TRUE(test_server_.Start());
+  HTTPUploadDataOperationTest("POST");
+}
+
+TEST_F(URLRequestTestHTTP, PutTest) {
+  ASSERT_TRUE(test_server_.Start());
+  HTTPUploadDataOperationTest("PUT");
+}
+
+TEST_F(URLRequestTestHTTP, PostEmptyTest) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  {
+    URLRequest r(test_server_.GetURL("echo"), &d, &default_context_);
+    r.set_method("POST");
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    ASSERT_EQ(1, d.response_started_count())
+        << "request failed: " << r.status().status()
+        << ", error: " << r.status().error();
+
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_TRUE(d.data_received().empty());
+  }
+}
+
+TEST_F(URLRequestTestHTTP, PostFileTest) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  {
+    URLRequest r(test_server_.GetURL("echo"), &d, &default_context_);
+    r.set_method("POST");
+
+#if !defined(OS_STARBOARD)
+    FilePath dir;
+    PathService::Get(base::DIR_EXE, &dir);
+    file_util::SetCurrentDirectory(dir);
+#endif
+
+    ScopedVector<UploadElementReader> element_readers;
+
+    FilePath path;
+    PathService::Get(base::DIR_SOURCE_ROOT, &path);
+    path = path.Append(FILE_PATH_LITERAL("net"));
+    path = path.Append(FILE_PATH_LITERAL("data"));
+    path = path.Append(FILE_PATH_LITERAL("url_request_unittest"));
+    path = path.Append(FILE_PATH_LITERAL("with-headers.html"));
+    element_readers.push_back(new UploadFileElementReader(
+        path, 0, kuint64max, base::Time()));
+
+    // This file should just be ignored in the upload stream.
+    element_readers.push_back(new UploadFileElementReader(
+        FilePath(FILE_PATH_LITERAL(
+            "c:\\path\\to\\non\\existant\\file.randomness.12345")),
+        0, kuint64max, base::Time()));
+    r.set_upload(make_scoped_ptr(new UploadDataStream(&element_readers, 0)));
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    int64 size = 0;
+    ASSERT_EQ(true, file_util::GetFileSize(path, &size));
+    scoped_array<char> buf(new char[size]);
+
+    ASSERT_EQ(size, file_util::ReadFile(path, buf.get(), size));
+
+    ASSERT_EQ(1, d.response_started_count())
+        << "request failed: " << r.status().status()
+        << ", error: " << r.status().error();
+
+    EXPECT_FALSE(d.received_data_before_response());
+
+    EXPECT_EQ(size, d.bytes_received());
+    EXPECT_EQ(std::string(&buf[0], size), d.data_received());
+  }
+}
+
+TEST_F(URLRequestTestHTTP, TestPostChunkedDataBeforeStart) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  {
+    URLRequest r(test_server_.GetURL("echo"), &d, &default_context_);
+    r.EnableChunkedUpload();
+    r.set_method("POST");
+    AddChunksToUpload(&r);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    VerifyReceivedDataMatchesChunks(&r, &d);
+  }
+}
+
+TEST_F(URLRequestTestHTTP, TestPostChunkedDataJustAfterStart) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  {
+    URLRequest r(test_server_.GetURL("echo"), &d, &default_context_);
+    r.EnableChunkedUpload();
+    r.set_method("POST");
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+    AddChunksToUpload(&r);
+    MessageLoop::current()->Run();
+
+    VerifyReceivedDataMatchesChunks(&r, &d);
+  }
+}
+
+TEST_F(URLRequestTestHTTP, TestPostChunkedDataAfterStart) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  {
+    URLRequest r(test_server_.GetURL("echo"), &d, &default_context_);
+    r.EnableChunkedUpload();
+    r.set_method("POST");
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->RunUntilIdle();
+    AddChunksToUpload(&r);
+    MessageLoop::current()->Run();
+
+    VerifyReceivedDataMatchesChunks(&r, &d);
+  }
+}
+
+TEST_F(URLRequestTestHTTP, ResponseHeadersTest) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  URLRequest req(
+      test_server_.GetURL("files/with-headers.html"), &d, &default_context_);
+  req.Start();
+  MessageLoop::current()->Run();
+
+  const HttpResponseHeaders* headers = req.response_headers();
+
+  // Simple sanity check that response_info() accesses the same data.
+  EXPECT_EQ(headers, req.response_info().headers.get());
+
+  std::string header;
+  EXPECT_TRUE(headers->GetNormalizedHeader("cache-control", &header));
+  EXPECT_EQ("private", header);
+
+  header.clear();
+  EXPECT_TRUE(headers->GetNormalizedHeader("content-type", &header));
+  EXPECT_EQ("text/html; charset=ISO-8859-1", header);
+
+  // The response has two "X-Multiple-Entries" headers.
+  // This verifies our output has them concatenated together.
+  header.clear();
+  EXPECT_TRUE(headers->GetNormalizedHeader("x-multiple-entries", &header));
+  EXPECT_EQ("a, b", header);
+}
+
+TEST_F(URLRequestTestHTTP, ProcessSTS) {
+  TestServer::SSLOptions ssl_options;
+  TestServer https_test_server(
+      TestServer::TYPE_HTTPS,
+      ssl_options,
+      FilePath(FILE_PATH_LITERAL("net/data/url_request_unittest")));
+  ASSERT_TRUE(https_test_server.Start());
+
+  TestDelegate d;
+  URLRequest request(
+      https_test_server.GetURL("files/hsts-headers.html"),
+      &d,
+      &default_context_);
+  request.Start();
+  MessageLoop::current()->Run();
+
+  TransportSecurityState* security_state =
+      default_context_.transport_security_state();
+  bool sni_available = true;
+  TransportSecurityState::DomainState domain_state;
+  EXPECT_TRUE(security_state->GetDomainState(
+      TestServer::kLocalhost, sni_available, &domain_state));
+  EXPECT_EQ(TransportSecurityState::DomainState::MODE_FORCE_HTTPS,
+            domain_state.upgrade_mode);
+  EXPECT_TRUE(domain_state.include_subdomains);
+}
+
+TEST_F(URLRequestTestHTTP, ProcessSTSOnce) {
+  TestServer::SSLOptions ssl_options;
+  TestServer https_test_server(
+      TestServer::TYPE_HTTPS,
+      ssl_options,
+      FilePath(FILE_PATH_LITERAL("net/data/url_request_unittest")));
+  ASSERT_TRUE(https_test_server.Start());
+
+  TestDelegate d;
+  URLRequest request(
+      https_test_server.GetURL("files/hsts-multiple-headers.html"),
+      &d,
+      &default_context_);
+  request.Start();
+  MessageLoop::current()->Run();
+
+  // We should have set parameters from the first header, not the second.
+  TransportSecurityState* security_state =
+      default_context_.transport_security_state();
+  bool sni_available = true;
+  TransportSecurityState::DomainState domain_state;
+  EXPECT_TRUE(security_state->GetDomainState(
+      TestServer::kLocalhost, sni_available, &domain_state));
+  EXPECT_EQ(TransportSecurityState::DomainState::MODE_FORCE_HTTPS,
+            domain_state.upgrade_mode);
+  EXPECT_FALSE(domain_state.include_subdomains);
+}
+
+TEST_F(URLRequestTestHTTP, ContentTypeNormalizationTest) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  URLRequest req(test_server_.GetURL(
+      "files/content-type-normalization.html"), &d, &default_context_);
+  req.Start();
+  MessageLoop::current()->Run();
+
+  std::string mime_type;
+  req.GetMimeType(&mime_type);
+  EXPECT_EQ("text/html", mime_type);
+
+  std::string charset;
+  req.GetCharset(&charset);
+  EXPECT_EQ("utf-8", charset);
+  req.Cancel();
+}
+
+TEST_F(URLRequestTestHTTP, RestrictRedirects) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  URLRequest req(test_server_.GetURL(
+      "files/redirect-to-file.html"), &d, &default_context_);
+  req.Start();
+  MessageLoop::current()->Run();
+
+  EXPECT_EQ(URLRequestStatus::FAILED, req.status().status());
+  EXPECT_EQ(ERR_UNSAFE_REDIRECT, req.status().error());
+}
+
+TEST_F(URLRequestTestHTTP, RedirectToInvalidURL) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  URLRequest req(test_server_.GetURL(
+      "files/redirect-to-invalid-url.html"), &d, &default_context_);
+  req.Start();
+  MessageLoop::current()->Run();
+
+  EXPECT_EQ(URLRequestStatus::FAILED, req.status().status());
+  EXPECT_EQ(ERR_INVALID_URL, req.status().error());
+}
+
+TEST_F(URLRequestTestHTTP, NoUserPassInReferrer) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  URLRequest req(
+      test_server_.GetURL("echoheader?Referer"), &d, &default_context_);
+  req.set_referrer("http://user:pass@foo.com/");
+  req.Start();
+  MessageLoop::current()->Run();
+
+  EXPECT_EQ(std::string("http://foo.com/"), d.data_received());
+}
+
+TEST_F(URLRequestTestHTTP, CancelRedirect) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  {
+    d.set_cancel_in_received_redirect(true);
+    URLRequest req(
+        test_server_.GetURL("files/redirect-test.html"), &d, &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_EQ(0, d.bytes_received());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_EQ(URLRequestStatus::CANCELED, req.status().status());
+  }
+}
+
+TEST_F(URLRequestTestHTTP, DeferredRedirect) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  {
+    d.set_quit_on_redirect(true);
+    URLRequest req(
+        test_server_.GetURL("files/redirect-test.html"), &d, &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(1, d.received_redirect_count());
+
+    req.FollowDeferredRedirect();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_EQ(URLRequestStatus::SUCCESS, req.status().status());
+
+    FilePath path;
+    PathService::Get(base::DIR_SOURCE_ROOT, &path);
+    path = path.Append(FILE_PATH_LITERAL("net"));
+    path = path.Append(FILE_PATH_LITERAL("data"));
+    path = path.Append(FILE_PATH_LITERAL("url_request_unittest"));
+    path = path.Append(FILE_PATH_LITERAL("with-headers.html"));
+
+    std::string contents;
+    EXPECT_TRUE(file_util::ReadFileToString(path, &contents));
+    EXPECT_EQ(contents, d.data_received());
+  }
+}
+
+TEST_F(URLRequestTestHTTP, CancelDeferredRedirect) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  {
+    d.set_quit_on_redirect(true);
+    URLRequest req(
+        test_server_.GetURL("files/redirect-test.html"), &d, &default_context_);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(1, d.received_redirect_count());
+
+    req.Cancel();
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_EQ(0, d.bytes_received());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_EQ(URLRequestStatus::CANCELED, req.status().status());
+  }
+}
+
+TEST_F(URLRequestTestHTTP, VaryHeader) {
+  ASSERT_TRUE(test_server_.Start());
+
+  // populate the cache
+  {
+    TestDelegate d;
+    URLRequest req(
+        test_server_.GetURL("echoheadercache?foo"), &d, &default_context_);
+    HttpRequestHeaders headers;
+    headers.SetHeader("foo", "1");
+    req.SetExtraRequestHeaders(headers);
+    req.Start();
+    MessageLoop::current()->Run();
+  }
+
+  // expect a cache hit
+  {
+    TestDelegate d;
+    URLRequest req(
+        test_server_.GetURL("echoheadercache?foo"), &d, &default_context_);
+    HttpRequestHeaders headers;
+    headers.SetHeader("foo", "1");
+    req.SetExtraRequestHeaders(headers);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(req.was_cached());
+  }
+
+  // expect a cache miss
+  {
+    TestDelegate d;
+    URLRequest req(
+        test_server_.GetURL("echoheadercache?foo"), &d, &default_context_);
+    HttpRequestHeaders headers;
+    headers.SetHeader("foo", "2");
+    req.SetExtraRequestHeaders(headers);
+    req.Start();
+    MessageLoop::current()->Run();
+
+    EXPECT_FALSE(req.was_cached());
+  }
+}
+
+TEST_F(URLRequestTestHTTP, BasicAuth) {
+  ASSERT_TRUE(test_server_.Start());
+
+  // populate the cache
+  {
+    TestDelegate d;
+    d.set_credentials(AuthCredentials(kUser, kSecret));
+
+    URLRequest r(test_server_.GetURL("auth-basic"), &d, &default_context_);
+    r.Start();
+
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("user/secret") != std::string::npos);
+  }
+
+  // repeat request with end-to-end validation.  since auth-basic results in a
+  // cacheable page, we expect this test to result in a 304.  in which case, the
+  // response should be fetched from the cache.
+  {
+    TestDelegate d;
+    d.set_credentials(AuthCredentials(kUser, kSecret));
+
+    URLRequest r(test_server_.GetURL("auth-basic"), &d, &default_context_);
+    r.set_load_flags(LOAD_VALIDATE_CACHE);
+    r.Start();
+
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("user/secret") != std::string::npos);
+
+    // Should be the same cached document.
+    EXPECT_TRUE(r.was_cached());
+  }
+}
+
+// Check that Set-Cookie headers in 401 responses are respected.
+// http://crbug.com/6450
+TEST_F(URLRequestTestHTTP, BasicAuthWithCookies) {
+  ASSERT_TRUE(test_server_.Start());
+
+  GURL url_requiring_auth =
+      test_server_.GetURL("auth-basic?set-cookie-if-challenged");
+
+  // Request a page that will give a 401 containing a Set-Cookie header.
+  // Verify that when the transaction is restarted, it includes the new cookie.
+  {
+    TestNetworkDelegate network_delegate;  // Must outlive URLRequest.
+    TestURLRequestContext context(true);
+    context.set_network_delegate(&network_delegate);
+    context.Init();
+
+    TestDelegate d;
+    d.set_credentials(AuthCredentials(kUser, kSecret));
+
+    URLRequest r(url_requiring_auth, &d, &context);
+    r.Start();
+
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("user/secret") != std::string::npos);
+
+    // Make sure we sent the cookie in the restarted transaction.
+    EXPECT_TRUE(d.data_received().find("Cookie: got_challenged=true")
+        != std::string::npos);
+  }
+
+  // Same test as above, except this time the restart is initiated earlier
+  // (without user intervention since identity is embedded in the URL).
+  {
+    TestNetworkDelegate network_delegate;  // Must outlive URLRequest.
+    TestURLRequestContext context(true);
+    context.set_network_delegate(&network_delegate);
+    context.Init();
+
+    TestDelegate d;
+
+    GURL::Replacements replacements;
+    std::string username("user2");
+    std::string password("secret");
+    replacements.SetUsernameStr(username);
+    replacements.SetPasswordStr(password);
+    GURL url_with_identity = url_requiring_auth.ReplaceComponents(replacements);
+
+    URLRequest r(url_with_identity, &d, &context);
+    r.Start();
+
+    MessageLoop::current()->Run();
+
+    EXPECT_TRUE(d.data_received().find("user2/secret") != std::string::npos);
+
+    // Make sure we sent the cookie in the restarted transaction.
+    EXPECT_TRUE(d.data_received().find("Cookie: got_challenged=true")
+        != std::string::npos);
+  }
+}
+
+// In this test, we do a POST which the server will 302 redirect.
+// The subsequent transaction should use GET, and should not send the
+// Content-Type header.
+// http://code.google.com/p/chromium/issues/detail?id=843
+TEST_F(URLRequestTestHTTP, Post302RedirectGet) {
+  ASSERT_TRUE(test_server_.Start());
+
+  const char kData[] = "hello world";
+
+  TestDelegate d;
+  URLRequest req(
+      test_server_.GetURL("files/redirect-to-echoall"), &d, &default_context_);
+  req.set_method("POST");
+  req.set_upload(make_scoped_ptr(CreateSimpleUploadData(kData)));
+
+  // Set headers (some of which are specific to the POST).
+  HttpRequestHeaders headers;
+  headers.AddHeadersFromString(
+    "Content-Type: multipart/form-data; "
+    "boundary=----WebKitFormBoundaryAADeAA+NAAWMAAwZ\r\n"
+    "Accept: text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,"
+    "text/plain;q=0.8,image/png,*/*;q=0.5\r\n"
+    "Accept-Language: en-US,en\r\n"
+    "Accept-Charset: ISO-8859-1,*,utf-8\r\n"
+    "Content-Length: 11\r\n"
+    "Origin: http://localhost:1337/");
+  req.SetExtraRequestHeaders(headers);
+  req.Start();
+  MessageLoop::current()->Run();
+
+  std::string mime_type;
+  req.GetMimeType(&mime_type);
+  EXPECT_EQ("text/html", mime_type);
+
+  const std::string& data = d.data_received();
+
+  // Check that the post-specific headers were stripped:
+  EXPECT_FALSE(ContainsString(data, "Content-Length:"));
+  EXPECT_FALSE(ContainsString(data, "Content-Type:"));
+  EXPECT_FALSE(ContainsString(data, "Origin:"));
+
+  // These extra request headers should not have been stripped.
+  EXPECT_TRUE(ContainsString(data, "Accept:"));
+  EXPECT_TRUE(ContainsString(data, "Accept-Language:"));
+  EXPECT_TRUE(ContainsString(data, "Accept-Charset:"));
+}
+
+// The following tests check that we handle mutating the request method for
+// HTTP redirects as expected.
+// See http://crbug.com/56373 and http://crbug.com/102130.
+
+TEST_F(URLRequestTestHTTP, Redirect301Tests) {
+  ASSERT_TRUE(test_server_.Start());
+
+  const GURL url = test_server_.GetURL("files/redirect301-to-echo");
+
+  HTTPRedirectMethodTest(url, "POST", "GET", true);
+  HTTPRedirectMethodTest(url, "PUT", "PUT", true);
+  HTTPRedirectMethodTest(url, "HEAD", "HEAD", false);
+}
+
+TEST_F(URLRequestTestHTTP, Redirect302Tests) {
+  ASSERT_TRUE(test_server_.Start());
+
+  const GURL url = test_server_.GetURL("files/redirect302-to-echo");
+
+  HTTPRedirectMethodTest(url, "POST", "GET", true);
+  HTTPRedirectMethodTest(url, "PUT", "PUT", true);
+  HTTPRedirectMethodTest(url, "HEAD", "HEAD", false);
+}
+
+TEST_F(URLRequestTestHTTP, Redirect303Tests) {
+  ASSERT_TRUE(test_server_.Start());
+
+  const GURL url = test_server_.GetURL("files/redirect303-to-echo");
+
+  HTTPRedirectMethodTest(url, "POST", "GET", true);
+  HTTPRedirectMethodTest(url, "PUT", "GET", true);
+  HTTPRedirectMethodTest(url, "HEAD", "HEAD", false);
+}
+
+TEST_F(URLRequestTestHTTP, Redirect307Tests) {
+  ASSERT_TRUE(test_server_.Start());
+
+  const GURL url = test_server_.GetURL("files/redirect307-to-echo");
+
+  HTTPRedirectMethodTest(url, "POST", "POST", true);
+  HTTPRedirectMethodTest(url, "PUT", "PUT", true);
+  HTTPRedirectMethodTest(url, "HEAD", "HEAD", false);
+}
+
+TEST_F(URLRequestTestHTTP, InterceptPost302RedirectGet) {
+  ASSERT_TRUE(test_server_.Start());
+
+  const char kData[] = "hello world";
+
+  TestDelegate d;
+  URLRequest req(test_server_.GetURL("empty.html"), &d, &default_context_);
+  req.set_method("POST");
+  req.set_upload(make_scoped_ptr(CreateSimpleUploadData(kData)));
+  HttpRequestHeaders headers;
+  headers.SetHeader(HttpRequestHeaders::kContentLength,
+                    base::UintToString(arraysize(kData) - 1));
+  req.SetExtraRequestHeaders(headers);
+
+  URLRequestRedirectJob* job = new URLRequestRedirectJob(
+      &req, default_context_.network_delegate(), test_server_.GetURL("echo"),
+      URLRequestRedirectJob::REDIRECT_302_FOUND);
+  AddTestInterceptor()->set_main_intercept_job(job);
+
+  req.Start();
+  MessageLoop::current()->Run();
+  EXPECT_EQ("GET", req.method());
+}
+
+TEST_F(URLRequestTestHTTP, InterceptPost307RedirectPost) {
+  ASSERT_TRUE(test_server_.Start());
+
+  const char kData[] = "hello world";
+
+  TestDelegate d;
+  URLRequest req(test_server_.GetURL("empty.html"), &d, &default_context_);
+  req.set_method("POST");
+  req.set_upload(make_scoped_ptr(CreateSimpleUploadData(kData)));
+  HttpRequestHeaders headers;
+  headers.SetHeader(HttpRequestHeaders::kContentLength,
+                    base::UintToString(arraysize(kData) - 1));
+  req.SetExtraRequestHeaders(headers);
+
+  URLRequestRedirectJob* job = new URLRequestRedirectJob(
+      &req, default_context_.network_delegate(), test_server_.GetURL("echo"),
+      URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT);
+  AddTestInterceptor()->set_main_intercept_job(job);
+
+  req.Start();
+  MessageLoop::current()->Run();
+  EXPECT_EQ("POST", req.method());
+  EXPECT_EQ(kData, d.data_received());
+}
+
+// Check that default A-L header is sent.
+TEST_F(URLRequestTestHTTP, DefaultAcceptLanguage) {
+  ASSERT_TRUE(test_server_.Start());
+
+  StaticHttpUserAgentSettings settings("en", EmptyString(), EmptyString());
+  TestNetworkDelegate network_delegate;  // Must outlive URLRequests.
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.set_http_user_agent_settings(&settings);
+  context.Init();
+
+  TestDelegate d;
+  URLRequest req(
+      test_server_.GetURL("echoheader?Accept-Language"), &d, &context);
+  req.Start();
+  MessageLoop::current()->Run();
+  EXPECT_EQ("en", d.data_received());
+}
+
+// Check that an empty A-L header is not sent. http://crbug.com/77365.
+TEST_F(URLRequestTestHTTP, EmptyAcceptLanguage) {
+  ASSERT_TRUE(test_server_.Start());
+
+  StaticHttpUserAgentSettings settings(
+      EmptyString(), EmptyString(), EmptyString());
+  TestNetworkDelegate network_delegate;  // Must outlive URLRequests.
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+  // We override the language after initialization because empty entries
+  // get overridden by Init().
+  context.set_http_user_agent_settings(&settings);
+
+  TestDelegate d;
+  URLRequest req(
+      test_server_.GetURL("echoheader?Accept-Language"), &d, &context);
+  req.Start();
+  MessageLoop::current()->Run();
+  EXPECT_EQ("None", d.data_received());
+}
+
+// Check that if request overrides the A-L header, the default is not appended.
+// See http://crbug.com/20894
+TEST_F(URLRequestTestHTTP, OverrideAcceptLanguage) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  URLRequest req(test_server_.GetURL("echoheader?Accept-Language"),
+                 &d,
+                 &default_context_);
+  HttpRequestHeaders headers;
+  headers.SetHeader(HttpRequestHeaders::kAcceptLanguage, "ru");
+  req.SetExtraRequestHeaders(headers);
+  req.Start();
+  MessageLoop::current()->Run();
+  EXPECT_EQ(std::string("ru"), d.data_received());
+}
+
+// Check that default A-E header is sent.
+TEST_F(URLRequestTestHTTP, DefaultAcceptEncoding) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  URLRequest req(test_server_.GetURL("echoheader?Accept-Encoding"),
+                 &d,
+                 &default_context_);
+  HttpRequestHeaders headers;
+  req.SetExtraRequestHeaders(headers);
+  req.Start();
+  MessageLoop::current()->Run();
+  EXPECT_TRUE(ContainsString(d.data_received(), "gzip"));
+}
+
+// Check that if request overrides the A-E header, the default is not appended.
+// See http://crbug.com/47381
+TEST_F(URLRequestTestHTTP, OverrideAcceptEncoding) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  URLRequest req(test_server_.GetURL("echoheader?Accept-Encoding"),
+                 &d,
+                 &default_context_);
+  HttpRequestHeaders headers;
+  headers.SetHeader(HttpRequestHeaders::kAcceptEncoding, "identity");
+  req.SetExtraRequestHeaders(headers);
+  req.Start();
+  MessageLoop::current()->Run();
+  EXPECT_FALSE(ContainsString(d.data_received(), "gzip"));
+  EXPECT_TRUE(ContainsString(d.data_received(), "identity"));
+}
+
+// Check that default A-C header is sent.
+TEST_F(URLRequestTestHTTP, DefaultAcceptCharset) {
+  ASSERT_TRUE(test_server_.Start());
+
+  StaticHttpUserAgentSettings settings(EmptyString(), "en", EmptyString());
+  TestNetworkDelegate network_delegate;  // Must outlive URLRequests.
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.set_http_user_agent_settings(&settings);
+  context.Init();
+
+  TestDelegate d;
+  URLRequest req(test_server_.GetURL("echoheader?Accept-Charset"),
+                 &d,
+                 &context);
+  req.Start();
+  MessageLoop::current()->Run();
+  EXPECT_EQ("en", d.data_received());
+}
+
+// Check that an empty A-C header is not sent. http://crbug.com/77365.
+TEST_F(URLRequestTestHTTP, EmptyAcceptCharset) {
+  ASSERT_TRUE(test_server_.Start());
+
+  StaticHttpUserAgentSettings settings(
+      EmptyString(), EmptyString(), EmptyString());
+  TestNetworkDelegate network_delegate;  // Must outlive URLRequests.
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+  // We override the accepted charset after initialization because empty
+  // entries get overridden otherwise.
+  context.set_http_user_agent_settings(&settings);
+
+  TestDelegate d;
+  URLRequest req(test_server_.GetURL("echoheader?Accept-Charset"),
+                 &d,
+                 &context);
+  req.Start();
+  MessageLoop::current()->Run();
+  EXPECT_EQ("None", d.data_received());
+}
+
+// Check that if request overrides the A-C header, the default is not appended.
+// See http://crbug.com/20894
+TEST_F(URLRequestTestHTTP, OverrideAcceptCharset) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  URLRequest req(test_server_.GetURL("echoheader?Accept-Charset"),
+                 &d,
+                 &default_context_);
+  HttpRequestHeaders headers;
+  headers.SetHeader(HttpRequestHeaders::kAcceptCharset, "koi-8r");
+  req.SetExtraRequestHeaders(headers);
+  req.Start();
+  MessageLoop::current()->Run();
+  EXPECT_EQ(std::string("koi-8r"), d.data_received());
+}
+
+// Check that default User-Agent header is sent.
+TEST_F(URLRequestTestHTTP, DefaultUserAgent) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  URLRequest req(test_server_.GetURL("echoheader?User-Agent"),
+                 &d,
+                 &default_context_);
+  req.Start();
+  MessageLoop::current()->Run();
+  EXPECT_EQ(req.context()->GetUserAgent(req.url()), d.data_received());
+}
+
+// Check that if request overrides the User-Agent header,
+// the default is not appended.
+TEST_F(URLRequestTestHTTP, OverrideUserAgent) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate d;
+  URLRequest req(test_server_.GetURL("echoheader?User-Agent"),
+                 &d,
+                 &default_context_);
+  HttpRequestHeaders headers;
+  headers.SetHeader(HttpRequestHeaders::kUserAgent, "Lynx (textmode)");
+  req.SetExtraRequestHeaders(headers);
+  req.Start();
+  MessageLoop::current()->Run();
+  // If the net tests are being run with ChromeFrame then we need to allow for
+  // the 'chromeframe' suffix which is added to the user agent before the
+  // closing parentheses.
+  EXPECT_TRUE(StartsWithASCII(d.data_received(), "Lynx (textmode", true));
+}
+
+// Check that a NULL HttpUserAgentSettings causes the corresponding empty
+// User-Agent header to be sent but does not send the Accept-Language and
+// Accept-Charset headers.
+TEST_F(URLRequestTestHTTP, EmptyHttpUserAgentSettings) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestNetworkDelegate network_delegate;  // Must outlive URLRequests.
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+  // We override the HttpUserAgentSettings after initialization because empty
+  // entries get overridden by Init().
+  context.set_http_user_agent_settings(NULL);
+
+  struct {
+    const char* request;
+    const char* expected_response;
+  } tests[] = { { "echoheader?Accept-Language", "None" },
+                { "echoheader?Accept-Charset", "None" },
+                { "echoheader?User-Agent", "" } };
+
+  for (size_t i = 0; i < ARRAYSIZE_UNSAFE(tests); i++) {
+    TestDelegate d;
+    URLRequest req(test_server_.GetURL(tests[i].request), &d, &context);
+    req.Start();
+    MessageLoop::current()->Run();
+    EXPECT_EQ(tests[i].expected_response, d.data_received())
+        << " Request = \"" << tests[i].request << "\"";
+  }
+}
+
+class HTTPSRequestTest : public testing::Test {
+ public:
+  HTTPSRequestTest() : default_context_(true) {
+    default_context_.set_network_delegate(&default_network_delegate_);
+    default_context_.Init();
+  }
+  virtual ~HTTPSRequestTest() {}
+
+ protected:
+  TestNetworkDelegate default_network_delegate_;  // Must outlive URLRequest.
+  TestURLRequestContext default_context_;
+};
+
+// This test was disabled because it made chrome_frame_net_tests hang
+// (see bug 102991).
+TEST_F(HTTPSRequestTest, DISABLED_HTTPSGetTest) {
+  TestServer test_server(TestServer::TYPE_HTTPS,
+                         TestServer::kLocalhost,
+                         FilePath(FILE_PATH_LITERAL("net/data/ssl")));
+  ASSERT_TRUE(test_server.Start());
+
+  TestDelegate d;
+  {
+    URLRequest r(test_server.GetURL(""), &d, &default_context_);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_NE(0, d.bytes_received());
+    CheckSSLInfo(r.ssl_info());
+    EXPECT_EQ(test_server.host_port_pair().host(),
+              r.GetSocketAddress().host());
+    EXPECT_EQ(test_server.host_port_pair().port(),
+              r.GetSocketAddress().port());
+  }
+}
+
+TEST_F(HTTPSRequestTest, HTTPSMismatchedTest) {
+  TestServer::SSLOptions ssl_options(
+      TestServer::SSLOptions::CERT_MISMATCHED_NAME);
+  TestServer test_server(TestServer::TYPE_HTTPS,
+                         ssl_options,
+                         FilePath(FILE_PATH_LITERAL("net/data/ssl")));
+  ASSERT_TRUE(test_server.Start());
+
+  bool err_allowed = true;
+  for (int i = 0; i < 2 ; i++, err_allowed = !err_allowed) {
+    TestDelegate d;
+    {
+      d.set_allow_certificate_errors(err_allowed);
+      URLRequest r(test_server.GetURL(""), &d, &default_context_);
+
+      r.Start();
+      EXPECT_TRUE(r.is_pending());
+
+      MessageLoop::current()->Run();
+
+      EXPECT_EQ(1, d.response_started_count());
+      EXPECT_FALSE(d.received_data_before_response());
+      EXPECT_TRUE(d.have_certificate_errors());
+      if (err_allowed) {
+        EXPECT_NE(0, d.bytes_received());
+        CheckSSLInfo(r.ssl_info());
+      } else {
+        EXPECT_EQ(0, d.bytes_received());
+      }
+    }
+  }
+}
+
+TEST_F(HTTPSRequestTest, HTTPSExpiredTest) {
+  TestServer::SSLOptions ssl_options(
+      TestServer::SSLOptions::CERT_EXPIRED);
+  TestServer test_server(TestServer::TYPE_HTTPS,
+                         ssl_options,
+                         FilePath(FILE_PATH_LITERAL("net/data/ssl")));
+  ASSERT_TRUE(test_server.Start());
+
+  // Iterate from false to true, just so that we do the opposite of the
+  // previous test in order to increase test coverage.
+  bool err_allowed = false;
+  for (int i = 0; i < 2 ; i++, err_allowed = !err_allowed) {
+    TestDelegate d;
+    {
+      d.set_allow_certificate_errors(err_allowed);
+      URLRequest r(test_server.GetURL(""), &d, &default_context_);
+
+      r.Start();
+      EXPECT_TRUE(r.is_pending());
+
+      MessageLoop::current()->Run();
+
+      EXPECT_EQ(1, d.response_started_count());
+      EXPECT_FALSE(d.received_data_before_response());
+      EXPECT_TRUE(d.have_certificate_errors());
+      if (err_allowed) {
+        EXPECT_NE(0, d.bytes_received());
+        CheckSSLInfo(r.ssl_info());
+      } else {
+        EXPECT_EQ(0, d.bytes_received());
+      }
+    }
+  }
+}
+
+// Tests TLSv1.1 -> TLSv1 fallback. Verifies that we don't fall back more
+// than necessary.
+TEST_F(HTTPSRequestTest, TLSv1Fallback) {
+  uint16 default_version_max = SSLConfigService::default_version_max();
+  // The OpenSSL library in use may not support TLS 1.1.
+#if !defined(USE_OPENSSL)
+  EXPECT_GT(default_version_max, SSL_PROTOCOL_VERSION_TLS1);
+#endif
+  if (default_version_max <= SSL_PROTOCOL_VERSION_TLS1)
+    return;
+
+  TestServer::SSLOptions ssl_options(
+      TestServer::SSLOptions::CERT_OK);
+  ssl_options.tls_intolerant =
+      TestServer::SSLOptions::TLS_INTOLERANT_TLS1_1;
+  TestServer test_server(TestServer::TYPE_HTTPS,
+                         ssl_options,
+                         FilePath(FILE_PATH_LITERAL("net/data/ssl")));
+  ASSERT_TRUE(test_server.Start());
+
+  TestDelegate d;
+  TestURLRequestContext context(true);
+  context.Init();
+  d.set_allow_certificate_errors(true);
+  URLRequest r(test_server.GetURL(""), &d, &context);
+  r.Start();
+
+  MessageLoop::current()->Run();
+
+  EXPECT_EQ(1, d.response_started_count());
+  EXPECT_NE(0, d.bytes_received());
+  EXPECT_EQ(static_cast<int>(SSL_CONNECTION_VERSION_TLS1),
+            SSLConnectionStatusToVersion(r.ssl_info().connection_status));
+  EXPECT_TRUE(r.ssl_info().connection_status & SSL_CONNECTION_VERSION_FALLBACK);
+}
+
+// This tests that a load of www.google.com with a certificate error sets
+// the |certificate_errors_are_fatal| flag correctly. This flag will cause
+// the interstitial to be fatal.
+TEST_F(HTTPSRequestTest, HTTPSPreloadedHSTSTest) {
+  TestServer::SSLOptions ssl_options(
+      TestServer::SSLOptions::CERT_MISMATCHED_NAME);
+  TestServer test_server(TestServer::TYPE_HTTPS,
+                         ssl_options,
+                         FilePath(FILE_PATH_LITERAL("net/data/ssl")));
+  ASSERT_TRUE(test_server.Start());
+
+  // We require that the URL be www.google.com in order to pick up the
+  // preloaded HSTS entries in the TransportSecurityState. This means that we
+  // have to use a MockHostResolver in order to direct www.google.com to the
+  // testserver. By default, MockHostResolver maps all hosts to 127.0.0.1.
+
+  MockHostResolver host_resolver;
+  TestNetworkDelegate network_delegate;  // Must outlive URLRequest.
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.set_host_resolver(&host_resolver);
+  TransportSecurityState transport_security_state;
+  context.set_transport_security_state(&transport_security_state);
+  context.Init();
+
+  TestDelegate d;
+  URLRequest r(GURL(StringPrintf("https://www.google.com:%d",
+                                 test_server.host_port_pair().port())),
+               &d,
+               &context);
+
+  r.Start();
+  EXPECT_TRUE(r.is_pending());
+
+  MessageLoop::current()->Run();
+
+  EXPECT_EQ(1, d.response_started_count());
+  EXPECT_FALSE(d.received_data_before_response());
+  EXPECT_TRUE(d.have_certificate_errors());
+  EXPECT_TRUE(d.certificate_errors_are_fatal());
+}
+
+// This tests that cached HTTPS page loads do not cause any updates to the
+// TransportSecurityState.
+TEST_F(HTTPSRequestTest, HTTPSErrorsNoClobberTSSTest) {
+  // The actual problem -- CERT_MISMATCHED_NAME in this case -- doesn't
+  // matter. It just has to be any error.
+  TestServer::SSLOptions ssl_options(
+      TestServer::SSLOptions::CERT_MISMATCHED_NAME);
+  TestServer test_server(TestServer::TYPE_HTTPS,
+                         ssl_options,
+                         FilePath(FILE_PATH_LITERAL("net/data/ssl")));
+  ASSERT_TRUE(test_server.Start());
+
+  // We require that the URL be www.google.com in order to pick up the
+  // preloaded and dynamic HSTS and public key pin entries in the
+  // TransportSecurityState. This means that we have to use a
+  // MockHostResolver in order to direct www.google.com to the testserver.
+  // By default, MockHostResolver maps all hosts to 127.0.0.1.
+
+  MockHostResolver host_resolver;
+  TestNetworkDelegate network_delegate;  // Must outlive URLRequest.
+  TestURLRequestContext context(true);
+  context.set_network_delegate(&network_delegate);
+  context.set_host_resolver(&host_resolver);
+  TransportSecurityState transport_security_state;
+  TransportSecurityState::DomainState domain_state;
+  EXPECT_TRUE(transport_security_state.GetDomainState("www.google.com", true,
+                                                      &domain_state));
+  context.set_transport_security_state(&transport_security_state);
+  context.Init();
+
+  TestDelegate d;
+  URLRequest r(GURL(StringPrintf("https://www.google.com:%d",
+                                 test_server.host_port_pair().port())),
+               &d,
+               &context);
+
+  r.Start();
+  EXPECT_TRUE(r.is_pending());
+
+  MessageLoop::current()->Run();
+
+  EXPECT_EQ(1, d.response_started_count());
+  EXPECT_FALSE(d.received_data_before_response());
+  EXPECT_TRUE(d.have_certificate_errors());
+  EXPECT_TRUE(d.certificate_errors_are_fatal());
+
+  // Get a fresh copy of the state, and check that it hasn't been updated.
+  TransportSecurityState::DomainState new_domain_state;
+  EXPECT_TRUE(transport_security_state.GetDomainState("www.google.com", true,
+                                                      &new_domain_state));
+  EXPECT_EQ(new_domain_state.upgrade_mode, domain_state.upgrade_mode);
+  EXPECT_EQ(new_domain_state.include_subdomains,
+            domain_state.include_subdomains);
+  EXPECT_TRUE(FingerprintsEqual(new_domain_state.static_spki_hashes,
+                                domain_state.static_spki_hashes));
+  EXPECT_TRUE(FingerprintsEqual(new_domain_state.dynamic_spki_hashes,
+                                domain_state.dynamic_spki_hashes));
+  EXPECT_TRUE(FingerprintsEqual(new_domain_state.bad_static_spki_hashes,
+                                domain_state.bad_static_spki_hashes));
+}
+
+// Make sure HSTS preserves a POST request's method and body.
+TEST_F(HTTPSRequestTest, HSTSPreservesPosts) {
+  static const char kData[] = "hello world";
+
+  TestServer::SSLOptions ssl_options(TestServer::SSLOptions::CERT_OK);
+  TestServer test_server(TestServer::TYPE_HTTPS,
+                         ssl_options,
+                         FilePath(FILE_PATH_LITERAL("net/data/ssl")));
+  ASSERT_TRUE(test_server.Start());
+
+
+  // Per spec, TransportSecurityState expects a domain name, rather than an IP
+  // address, so a MockHostResolver is needed to redirect www.somewhere.com to
+  // the TestServer.  By default, MockHostResolver maps all hosts to 127.0.0.1.
+  MockHostResolver host_resolver;
+
+  // Force https for www.somewhere.com.
+  TransportSecurityState transport_security_state;
+  net::TransportSecurityState::DomainState domain_state;
+  domain_state.upgrade_expiry =
+      domain_state.created + base::TimeDelta::FromDays(1000);
+  transport_security_state.EnableHost("www.somewhere.com", domain_state);
+
+  TestNetworkDelegate network_delegate;  // Must outlive URLRequest.
+
+  TestURLRequestContext context(true);
+  context.set_host_resolver(&host_resolver);
+  context.set_transport_security_state(&transport_security_state);
+  context.set_network_delegate(&network_delegate);
+  context.Init();
+
+  TestDelegate d;
+  // Navigating to https://www.somewhere.com instead of https://127.0.0.1 will
+  // cause a certificate error.  Ignore the error.
+  d.set_allow_certificate_errors(true);
+
+  URLRequest req(GURL(StringPrintf("http://www.somewhere.com:%d/echo",
+                                   test_server.host_port_pair().port())),
+                 &d,
+                 &context);
+  req.set_method("POST");
+  req.set_upload(make_scoped_ptr(CreateSimpleUploadData(kData)));
+
+  req.Start();
+  MessageLoop::current()->Run();
+
+  EXPECT_EQ("https", req.url().scheme());
+  EXPECT_EQ("POST", req.method());
+  EXPECT_EQ(kData, d.data_received());
+}
+
+TEST_F(HTTPSRequestTest, SSLv3Fallback) {
+  TestServer::SSLOptions ssl_options(
+      TestServer::SSLOptions::CERT_OK);
+  ssl_options.tls_intolerant = TestServer::SSLOptions::TLS_INTOLERANT_ALL;
+  TestServer test_server(TestServer::TYPE_HTTPS,
+                         ssl_options,
+                         FilePath(FILE_PATH_LITERAL("net/data/ssl")));
+  ASSERT_TRUE(test_server.Start());
+
+  TestDelegate d;
+  TestURLRequestContext context(true);
+  context.Init();
+  d.set_allow_certificate_errors(true);
+  URLRequest r(test_server.GetURL(""), &d, &context);
+  r.Start();
+
+  MessageLoop::current()->Run();
+
+  EXPECT_EQ(1, d.response_started_count());
+  EXPECT_NE(0, d.bytes_received());
+  EXPECT_EQ(static_cast<int>(SSL_CONNECTION_VERSION_SSL3),
+            SSLConnectionStatusToVersion(r.ssl_info().connection_status));
+  EXPECT_TRUE(r.ssl_info().connection_status & SSL_CONNECTION_VERSION_FALLBACK);
+}
+
+namespace {
+
+class SSLClientAuthTestDelegate : public TestDelegate {
+ public:
+  SSLClientAuthTestDelegate() : on_certificate_requested_count_(0) {
+  }
+  virtual void OnCertificateRequested(
+      URLRequest* request,
+      SSLCertRequestInfo* cert_request_info) {
+    on_certificate_requested_count_++;
+    MessageLoop::current()->Quit();
+  }
+  int on_certificate_requested_count() {
+    return on_certificate_requested_count_;
+  }
+ private:
+  int on_certificate_requested_count_;
+};
+
+}  // namespace
+
+// TODO(davidben): Test the rest of the code. Specifically,
+// - Filtering which certificates to select.
+// - Sending a certificate back.
+// - Getting a certificate request in an SSL renegotiation sending the
+//   HTTP request.
+TEST_F(HTTPSRequestTest, ClientAuthTest) {
+  TestServer::SSLOptions ssl_options;
+  ssl_options.request_client_certificate = true;
+  TestServer test_server(TestServer::TYPE_HTTPS,
+                         ssl_options,
+                         FilePath(FILE_PATH_LITERAL("net/data/ssl")));
+  ASSERT_TRUE(test_server.Start());
+
+  SSLClientAuthTestDelegate d;
+  {
+    URLRequest r(test_server.GetURL(""), &d, &default_context_);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(1, d.on_certificate_requested_count());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_EQ(0, d.bytes_received());
+
+    // Send no certificate.
+    // TODO(davidben): Get temporary client cert import (with keys) working on
+    // all platforms so we can test sending a cert as well.
+    r.ContinueWithCertificate(NULL);
+
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_NE(0, d.bytes_received());
+  }
+}
+
+TEST_F(HTTPSRequestTest, ResumeTest) {
+  // Test that we attempt a session resume when making two connections to the
+  // same host.
+  TestServer::SSLOptions ssl_options;
+  ssl_options.record_resume = true;
+  TestServer test_server(TestServer::TYPE_HTTPS,
+                         ssl_options,
+                         FilePath(FILE_PATH_LITERAL("net/data/ssl")));
+  ASSERT_TRUE(test_server.Start());
+
+  SSLClientSocket::ClearSessionCache();
+
+  {
+    TestDelegate d;
+    URLRequest r(
+        test_server.GetURL("ssl-session-cache"), &d, &default_context_);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(1, d.response_started_count());
+  }
+
+  reinterpret_cast<HttpCache*>(default_context_.http_transaction_factory())->
+    CloseAllConnections();
+
+  {
+    TestDelegate d;
+    URLRequest r(
+        test_server.GetURL("ssl-session-cache"), &d, &default_context_);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    // The response will look like:
+    //   insert abc
+    //   lookup abc
+    //   insert xyz
+    //
+    // With a newline at the end which makes the split think that there are
+    // four lines.
+
+    EXPECT_EQ(1, d.response_started_count());
+    std::vector<std::string> lines;
+    base::SplitString(d.data_received(), '\n', &lines);
+    ASSERT_EQ(4u, lines.size()) << d.data_received();
+
+    std::string session_id;
+
+    for (size_t i = 0; i < 2; i++) {
+      std::vector<std::string> parts;
+      base::SplitString(lines[i], '\t', &parts);
+      ASSERT_EQ(2u, parts.size());
+      if (i == 0) {
+        EXPECT_EQ("insert", parts[0]);
+        session_id = parts[1];
+      } else {
+        EXPECT_EQ("lookup", parts[0]);
+        EXPECT_EQ(session_id, parts[1]);
+      }
+    }
+  }
+}
+
+TEST_F(HTTPSRequestTest, SSLSessionCacheShardTest) {
+  // Test that sessions aren't resumed when the value of ssl_session_cache_shard
+  // differs.
+  TestServer::SSLOptions ssl_options;
+  ssl_options.record_resume = true;
+  TestServer test_server(TestServer::TYPE_HTTPS,
+                         ssl_options,
+                         FilePath(FILE_PATH_LITERAL("net/data/ssl")));
+  ASSERT_TRUE(test_server.Start());
+
+  SSLClientSocket::ClearSessionCache();
+
+  {
+    TestDelegate d;
+    URLRequest r(
+        test_server.GetURL("ssl-session-cache"), &d, &default_context_);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(1, d.response_started_count());
+  }
+
+  // Now create a new HttpCache with a different ssl_session_cache_shard value.
+  HttpNetworkSession::Params params;
+  params.host_resolver = default_context_.host_resolver();
+  params.cert_verifier = default_context_.cert_verifier();
+  params.proxy_service = default_context_.proxy_service();
+  params.ssl_config_service = default_context_.ssl_config_service();
+  params.http_auth_handler_factory =
+      default_context_.http_auth_handler_factory();
+  params.network_delegate = default_context_.network_delegate();
+  params.http_server_properties = default_context_.http_server_properties();
+  params.ssl_session_cache_shard = "alternate";
+
+  scoped_ptr<net::HttpCache> cache(new net::HttpCache(
+      new net::HttpNetworkSession(params),
+      net::HttpCache::DefaultBackend::InMemory(0)));
+
+  default_context_.set_http_transaction_factory(cache.get());
+
+  {
+    TestDelegate d;
+    URLRequest r(
+        test_server.GetURL("ssl-session-cache"), &d, &default_context_);
+
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    // The response will look like:
+    //   insert abc
+    //   insert xyz
+    //
+    // With a newline at the end which makes the split think that there are
+    // three lines.
+
+    EXPECT_EQ(1, d.response_started_count());
+    std::vector<std::string> lines;
+    base::SplitString(d.data_received(), '\n', &lines);
+    ASSERT_EQ(3u, lines.size());
+
+    std::string session_id;
+    for (size_t i = 0; i < 2; i++) {
+      std::vector<std::string> parts;
+      base::SplitString(lines[i], '\t', &parts);
+      ASSERT_EQ(2u, parts.size());
+      EXPECT_EQ("insert", parts[0]);
+      if (i == 0) {
+        session_id = parts[1];
+      } else {
+        EXPECT_NE(session_id, parts[1]);
+      }
+    }
+  }
+}
+
+class TestSSLConfigService : public SSLConfigService {
+ public:
+  TestSSLConfigService(bool ev_enabled, bool online_rev_checking)
+      : ev_enabled_(ev_enabled),
+        online_rev_checking_(online_rev_checking) {
+  }
+
+  // SSLConfigService:
+  virtual void GetSSLConfig(SSLConfig* config) OVERRIDE {
+    *config = SSLConfig();
+    config->rev_checking_enabled = online_rev_checking_;
+    config->verify_ev_cert = ev_enabled_;
+  }
+
+ protected:
+  virtual ~TestSSLConfigService() {}
+
+ private:
+  const bool ev_enabled_;
+  const bool online_rev_checking_;
+};
+
+// This is the fingerprint of the "Testing CA" certificate used by the
+// See net/data/ssl/certificates/ocsp-test-root.pem.
+static const SHA1HashValue kOCSPTestCertFingerprint =
+  { { 0xf1, 0xad, 0xf6, 0xce, 0x42, 0xac, 0xe7, 0xb4, 0xf4, 0x24,
+      0xdb, 0x1a, 0xf7, 0xa0, 0x9f, 0x09, 0xa1, 0xea, 0xf1, 0x5c } };
+
+// This is the policy OID contained in the certificates that testserver
+// generates.
+static const char kOCSPTestCertPolicy[] = "1.3.6.1.4.1.11129.2.4.1";
+
+class HTTPSOCSPTest : public HTTPSRequestTest {
+ public:
+  HTTPSOCSPTest()
+      : context_(true),
+        ev_test_policy_(
+            new ScopedTestEVPolicy(EVRootCAMetadata::GetInstance(),
+                                   kOCSPTestCertFingerprint,
+                                   kOCSPTestCertPolicy)) {
+  }
+
+  virtual void SetUp() OVERRIDE {
+    SetupContext(&context_);
+    context_.Init();
+
+    scoped_refptr<net::X509Certificate> root_cert =
+      ImportCertFromFile(GetTestCertsDirectory(), "ocsp-test-root.pem");
+    CHECK_NE(static_cast<X509Certificate*>(NULL), root_cert);
+    test_root_.reset(new ScopedTestRoot(root_cert));
+
+#if defined(USE_NSS) || defined(OS_IOS)
+    SetURLRequestContextForNSSHttpIO(&context_);
+    EnsureNSSHttpIOInit();
+#endif
+  }
+
+  void DoConnection(const TestServer::SSLOptions& ssl_options,
+                    CertStatus* out_cert_status) {
+    // We always overwrite out_cert_status.
+    *out_cert_status = 0;
+    TestServer test_server(TestServer::TYPE_HTTPS,
+                           ssl_options,
+                           FilePath(FILE_PATH_LITERAL("net/data/ssl")));
+    ASSERT_TRUE(test_server.Start());
+
+    TestDelegate d;
+    d.set_allow_certificate_errors(true);
+    URLRequest r(test_server.GetURL(""), &d, &context_);
+    r.Start();
+
+    MessageLoop::current()->Run();
+
+    EXPECT_EQ(1, d.response_started_count());
+    *out_cert_status = r.ssl_info().cert_status;
+  }
+
+  ~HTTPSOCSPTest() {
+#if defined(USE_NSS) || defined(OS_IOS)
+    ShutdownNSSHttpIO();
+#endif
+  }
+
+ protected:
+  // SetupContext configures the URLRequestContext that will be used for making
+  // connections to testserver. This can be overridden in test subclasses for
+  // different behaviour.
+  virtual void SetupContext(URLRequestContext* context) {
+    context->set_ssl_config_service(
+        new TestSSLConfigService(true /* check for EV */,
+                                 true /* online revocation checking */));
+  }
+
+  scoped_ptr<ScopedTestRoot> test_root_;
+  TestURLRequestContext context_;
+  scoped_ptr<ScopedTestEVPolicy> ev_test_policy_;
+};
+
+static CertStatus ExpectedCertStatusForFailedOnlineRevocationCheck() {
+#if defined(OS_WIN)
+  // Windows can return CERT_STATUS_UNABLE_TO_CHECK_REVOCATION but we don't
+  // have that ability on other platforms.
+  return CERT_STATUS_UNABLE_TO_CHECK_REVOCATION;
+#else
+  return 0;
+#endif
+}
+
+// SystemUsesChromiumEVMetadata returns true iff the current operating system
+// uses Chromium's EV metadata (i.e. EVRootCAMetadata). If it does not, then
+// several tests are affected because our testing EV certificate won't be
+// recognised as EV.
+static bool SystemUsesChromiumEVMetadata() {
+#if defined(USE_OPENSSL)
+  // http://crbug.com/117478 - OpenSSL does not support EV validation.
+  return false;
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+  // On OS X, we use the system to tell us whether a certificate is EV or not
+  // and the system won't recognise our testing root.
+  return false;
+#else
+  return true;
+#endif
+}
+
+static bool SystemSupportsOCSP() {
+#if defined(USE_OPENSSL)
+  // http://crbug.com/117478 - OpenSSL does not support OCSP.
+  return false;
+#elif defined(OS_WIN)
+  return base::win::GetVersion() >= base::win::VERSION_VISTA;
+#elif defined(OS_ANDROID)
+  // TODO(jnd): http://crbug.com/117478 - EV verification is not yet supported.
+  return false;
+#else
+  return true;
+#endif
+}
+
+TEST_F(HTTPSOCSPTest, Valid) {
+  if (!SystemSupportsOCSP()) {
+    LOG(WARNING) << "Skipping test because system doesn't support OCSP";
+    return;
+  }
+
+  TestServer::SSLOptions ssl_options(TestServer::SSLOptions::CERT_AUTO);
+  ssl_options.ocsp_status = TestServer::SSLOptions::OCSP_OK;
+
+  CertStatus cert_status;
+  DoConnection(ssl_options, &cert_status);
+
+  EXPECT_EQ(0u, cert_status & CERT_STATUS_ALL_ERRORS);
+
+  EXPECT_EQ(SystemUsesChromiumEVMetadata(),
+            static_cast<bool>(cert_status & CERT_STATUS_IS_EV));
+
+  EXPECT_TRUE(cert_status & CERT_STATUS_REV_CHECKING_ENABLED);
+}
+
+TEST_F(HTTPSOCSPTest, Revoked) {
+  if (!SystemSupportsOCSP()) {
+    LOG(WARNING) << "Skipping test because system doesn't support OCSP";
+    return;
+  }
+
+  TestServer::SSLOptions ssl_options(
+      TestServer::SSLOptions::CERT_AUTO);
+  ssl_options.ocsp_status = TestServer::SSLOptions::OCSP_REVOKED;
+
+  CertStatus cert_status;
+  DoConnection(ssl_options, &cert_status);
+
+#if !(defined(OS_MACOSX) && !defined(OS_IOS))
+  // Doesn't pass on OS X yet for reasons that need to be investigated.
+  EXPECT_EQ(CERT_STATUS_REVOKED, cert_status & CERT_STATUS_ALL_ERRORS);
+#endif
+  EXPECT_FALSE(cert_status & CERT_STATUS_IS_EV);
+  EXPECT_TRUE(cert_status & CERT_STATUS_REV_CHECKING_ENABLED);
+}
+
+TEST_F(HTTPSOCSPTest, Invalid) {
+  if (!SystemSupportsOCSP()) {
+    LOG(WARNING) << "Skipping test because system doesn't support OCSP";
+    return;
+  }
+
+  TestServer::SSLOptions ssl_options(
+      TestServer::SSLOptions::CERT_AUTO);
+  ssl_options.ocsp_status = TestServer::SSLOptions::OCSP_INVALID;
+
+  CertStatus cert_status;
+  DoConnection(ssl_options, &cert_status);
+
+  EXPECT_EQ(ExpectedCertStatusForFailedOnlineRevocationCheck(),
+            cert_status & CERT_STATUS_ALL_ERRORS);
+
+  // Without a positive OCSP response, we shouldn't show the EV status.
+  EXPECT_FALSE(cert_status & CERT_STATUS_IS_EV);
+  EXPECT_TRUE(cert_status & CERT_STATUS_REV_CHECKING_ENABLED);
+}
+
+class HTTPSEVCRLSetTest : public HTTPSOCSPTest {
+ protected:
+  virtual void SetupContext(URLRequestContext* context) OVERRIDE {
+    context->set_ssl_config_service(
+        new TestSSLConfigService(true /* check for EV */,
+                                 false /* online revocation checking */));
+  }
+};
+
+TEST_F(HTTPSEVCRLSetTest, MissingCRLSetAndInvalidOCSP) {
+  if (!SystemSupportsOCSP()) {
+    LOG(WARNING) << "Skipping test because system doesn't support OCSP";
+    return;
+  }
+
+  TestServer::SSLOptions ssl_options(
+      TestServer::SSLOptions::CERT_AUTO);
+  ssl_options.ocsp_status = TestServer::SSLOptions::OCSP_INVALID;
+  SSLConfigService::SetCRLSet(scoped_refptr<CRLSet>());
+
+  CertStatus cert_status;
+  DoConnection(ssl_options, &cert_status);
+
+  EXPECT_EQ(ExpectedCertStatusForFailedOnlineRevocationCheck(),
+            cert_status & CERT_STATUS_ALL_ERRORS);
+
+  EXPECT_FALSE(cert_status & CERT_STATUS_IS_EV);
+  EXPECT_EQ(SystemUsesChromiumEVMetadata(),
+            static_cast<bool>(cert_status & CERT_STATUS_REV_CHECKING_ENABLED));
+}
+
+TEST_F(HTTPSEVCRLSetTest, MissingCRLSetAndGoodOCSP) {
+  if (!SystemSupportsOCSP()) {
+    LOG(WARNING) << "Skipping test because system doesn't support OCSP";
+    return;
+  }
+
+  TestServer::SSLOptions ssl_options(
+      TestServer::SSLOptions::CERT_AUTO);
+  ssl_options.ocsp_status = TestServer::SSLOptions::OCSP_OK;
+  SSLConfigService::SetCRLSet(scoped_refptr<CRLSet>());
+
+  CertStatus cert_status;
+  DoConnection(ssl_options, &cert_status);
+
+  EXPECT_EQ(0u, cert_status & CERT_STATUS_ALL_ERRORS);
+
+  EXPECT_EQ(SystemUsesChromiumEVMetadata(),
+            static_cast<bool>(cert_status & CERT_STATUS_IS_EV));
+  EXPECT_EQ(SystemUsesChromiumEVMetadata(),
+            static_cast<bool>(cert_status & CERT_STATUS_REV_CHECKING_ENABLED));
+}
+
+TEST_F(HTTPSEVCRLSetTest, ExpiredCRLSet) {
+  if (!SystemSupportsOCSP()) {
+    LOG(WARNING) << "Skipping test because system doesn't support OCSP";
+    return;
+  }
+
+  TestServer::SSLOptions ssl_options(
+      TestServer::SSLOptions::CERT_AUTO);
+  ssl_options.ocsp_status = TestServer::SSLOptions::OCSP_INVALID;
+  SSLConfigService::SetCRLSet(
+      scoped_refptr<CRLSet>(CRLSet::ExpiredCRLSetForTesting()));
+
+  CertStatus cert_status;
+  DoConnection(ssl_options, &cert_status);
+
+  EXPECT_EQ(ExpectedCertStatusForFailedOnlineRevocationCheck(),
+            cert_status & CERT_STATUS_ALL_ERRORS);
+
+  EXPECT_FALSE(cert_status & CERT_STATUS_IS_EV);
+  EXPECT_EQ(SystemUsesChromiumEVMetadata(),
+            static_cast<bool>(cert_status & CERT_STATUS_REV_CHECKING_ENABLED));
+}
+
+TEST_F(HTTPSEVCRLSetTest, FreshCRLSet) {
+  TestServer::SSLOptions ssl_options(
+      TestServer::SSLOptions::CERT_AUTO);
+  ssl_options.ocsp_status = TestServer::SSLOptions::OCSP_INVALID;
+  SSLConfigService::SetCRLSet(
+      scoped_refptr<CRLSet>(CRLSet::EmptyCRLSetForTesting()));
+
+  CertStatus cert_status;
+  DoConnection(ssl_options, &cert_status);
+
+  // With a valid, fresh CRLSet the bad OCSP response shouldn't matter because
+  // we won't check it.
+  EXPECT_EQ(0u, cert_status & CERT_STATUS_ALL_ERRORS);
+
+  EXPECT_EQ(SystemUsesChromiumEVMetadata(),
+            static_cast<bool>(cert_status & CERT_STATUS_IS_EV));
+
+  EXPECT_FALSE(cert_status & CERT_STATUS_REV_CHECKING_ENABLED);
+}
+
+TEST_F(HTTPSEVCRLSetTest, ExpiredCRLSetAndRevokedNonEVCert) {
+  // Test that when EV verification is requested, but online revocation
+  // checking is disabled, and the leaf certificate is not in fact EV, that
+  // no revocation checking actually happens.
+  if (!SystemSupportsOCSP()) {
+    LOG(WARNING) << "Skipping test because system doesn't support OCSP";
+    return;
+  }
+
+  // Unmark the certificate's OID as EV, which should disable revocation
+  // checking (as per the user preference)
+  ev_test_policy_.reset();
+
+  TestServer::SSLOptions ssl_options(
+      TestServer::SSLOptions::CERT_AUTO);
+  ssl_options.ocsp_status = TestServer::SSLOptions::OCSP_REVOKED;
+  SSLConfigService::SetCRLSet(
+      scoped_refptr<CRLSet>(CRLSet::ExpiredCRLSetForTesting()));
+
+  CertStatus cert_status;
+  DoConnection(ssl_options, &cert_status);
+
+  EXPECT_EQ(0u, cert_status & CERT_STATUS_ALL_ERRORS);
+
+  EXPECT_FALSE(cert_status & CERT_STATUS_IS_EV);
+  EXPECT_FALSE(cert_status & CERT_STATUS_REV_CHECKING_ENABLED);
+}
+
+// Fixture for CRLSet tests that run with both EV verification and online
+// revocation checking disabled.
+class HTTPSCRLSetTest : public HTTPSOCSPTest {
+ protected:
+  // Installs an SSLConfigService with EV checking and online revocation
+  // checking both turned off.
+  virtual void SetupContext(URLRequestContext* context) OVERRIDE {
+    context->set_ssl_config_service(
+        new TestSSLConfigService(false /* check for EV */,
+                                 false /* online revocation checking */));
+  }
+};
+
+TEST_F(HTTPSCRLSetTest, ExpiredCRLSet) {
+  // Serve a bogus OCSP response; it should never be consulted because
+  // neither EV nor online revocation checking is enabled in this fixture.
+  TestServer::SSLOptions ssl_options(
+      TestServer::SSLOptions::CERT_AUTO);
+  ssl_options.ocsp_status = TestServer::SSLOptions::OCSP_INVALID;
+  SSLConfigService::SetCRLSet(
+      scoped_refptr<CRLSet>(CRLSet::ExpiredCRLSetForTesting()));
+
+  CertStatus cert_status;
+  DoConnection(ssl_options, &cert_status);
+
+  // If we're not trying EV verification then, even if the CRLSet has expired,
+  // we don't fall back to online revocation checks.
+  EXPECT_EQ(0u, cert_status & CERT_STATUS_ALL_ERRORS);
+  EXPECT_FALSE(cert_status & CERT_STATUS_IS_EV);
+  EXPECT_FALSE(cert_status & CERT_STATUS_REV_CHECKING_ENABLED);
+}
+#endif  // !defined(OS_IOS)
+
+#if !defined(DISABLE_FTP_SUPPORT)
+// Fixture that spins up a local FTP test server for URLRequest FTP tests.
+class URLRequestTestFTP : public URLRequestTest {
+ public:
+  URLRequestTestFTP()
+      : test_server_(TestServer::TYPE_FTP, TestServer::kLocalhost, FilePath()) {
+  }
+
+ protected:
+  // Local FTP server; each test must call Start() before issuing requests.
+  TestServer test_server_;
+};
+
+// Make sure an FTP request using an unsafe port fails.
+TEST_F(URLRequestTestFTP, FLAKY_UnsafePort) {
+  ASSERT_TRUE(test_server_.Start());
+
+  URLRequestJobFactoryImpl job_factory;
+
+  // Port 7 (echo) is on the restricted-port list, so the request should be
+  // rejected before any connection is attempted.
+  GURL url("ftp://127.0.0.1:7");
+  // NOTE: the original code also constructed an unused stack-local
+  // FtpProtocolHandler here; only the heap-allocated handler registered
+  // below is ever used, so the duplicate has been removed.
+  job_factory.SetProtocolHandler(
+      "ftp",
+      new FtpProtocolHandler(default_context_.ftp_transaction_factory(),
+                             default_context_.ftp_auth_cache()));
+  default_context_.set_job_factory(&job_factory);
+
+  TestDelegate d;
+  {
+    URLRequest r(url, &d, &default_context_);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    // The request must fail with ERR_UNSAFE_PORT without ever connecting.
+    EXPECT_FALSE(r.is_pending());
+    EXPECT_EQ(URLRequestStatus::FAILED, r.status().status());
+    EXPECT_EQ(ERR_UNSAFE_PORT, r.status().error());
+  }
+}
+
+// Flaky, see http://crbug.com/25045.
+TEST_F(URLRequestTestFTP, DISABLED_FTPDirectoryListing) {
+  ASSERT_TRUE(test_server_.Start());
+
+  TestDelegate delegate;
+  {
+    // Fetch the FTP root; a directory listing of nonzero length is expected.
+    URLRequest request(test_server_.GetURL("/"), &delegate, &default_context_);
+    request.Start();
+    EXPECT_TRUE(request.is_pending());
+
+    MessageLoop::current()->Run();
+
+    EXPECT_FALSE(request.is_pending());
+    EXPECT_EQ(1, delegate.response_started_count());
+    EXPECT_FALSE(delegate.received_data_before_response());
+    EXPECT_LT(0, delegate.bytes_received());
+    // The socket must point at the local test server.
+    EXPECT_EQ(test_server_.host_port_pair().host(),
+              request.GetSocketAddress().host());
+    EXPECT_EQ(test_server_.host_port_pair().port(),
+              request.GetSocketAddress().port());
+  }
+}
+
+// Flaky, see http://crbug.com/25045.
+TEST_F(URLRequestTestFTP, DISABLED_FTPGetTestAnonymous) {
+  ASSERT_TRUE(test_server_.Start());
+
+  // The served payload is the source-tree LICENSE file; its on-disk size is
+  // the number of bytes the delegate should receive.
+  FilePath license_path;
+  PathService::Get(base::DIR_SOURCE_ROOT, &license_path);
+  license_path = license_path.AppendASCII("LICENSE");
+  int64 expected_size = 0;
+  file_util::GetFileSize(license_path, &expected_size);
+
+  TestDelegate delegate;
+  {
+    URLRequest request(test_server_.GetURL("/LICENSE"), &delegate,
+                       &default_context_);
+    request.Start();
+    EXPECT_TRUE(request.is_pending());
+
+    MessageLoop::current()->Run();
+
+    EXPECT_FALSE(request.is_pending());
+    EXPECT_EQ(1, delegate.response_started_count());
+    EXPECT_FALSE(delegate.received_data_before_response());
+    EXPECT_EQ(delegate.bytes_received(), static_cast<int>(expected_size));
+    EXPECT_EQ(test_server_.host_port_pair().host(),
+              request.GetSocketAddress().host());
+    EXPECT_EQ(test_server_.host_port_pair().port(),
+              request.GetSocketAddress().port());
+  }
+}
+
+// Flaky, see http://crbug.com/25045.
+TEST_F(URLRequestTestFTP, DISABLED_FTPGetTest) {
+  ASSERT_TRUE(test_server_.Start());
+
+  // The served payload is the source-tree LICENSE file; its size is the
+  // number of bytes the delegate should receive.
+  FilePath app_path;
+  PathService::Get(base::DIR_SOURCE_ROOT, &app_path);
+  app_path = app_path.AppendASCII("LICENSE");
+  TestDelegate d;
+  {
+    // Fetch with correct credentials embedded directly in the URL.
+    URLRequest r(
+        test_server_.GetURLWithUserAndPassword("/LICENSE", "chrome", "chrome"),
+        &d,
+        &default_context_);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    int64 file_size = 0;
+    file_util::GetFileSize(app_path, &file_size);
+
+    EXPECT_FALSE(r.is_pending());
+    EXPECT_EQ(test_server_.host_port_pair().host(),
+              r.GetSocketAddress().host());
+    EXPECT_EQ(test_server_.host_port_pair().port(),
+              r.GetSocketAddress().port());
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_FALSE(d.received_data_before_response());
+    EXPECT_EQ(d.bytes_received(), static_cast<int>(file_size));
+  }
+}
+
+// Flaky, see http://crbug.com/25045.
+TEST_F(URLRequestTestFTP, DISABLED_FTPCheckWrongPassword) {
+  ASSERT_TRUE(test_server_.Start());
+
+  FilePath app_path;
+  PathService::Get(base::DIR_SOURCE_ROOT, &app_path);
+  app_path = app_path.AppendASCII("LICENSE");
+  TestDelegate d;
+  {
+    // Wrong password in the URL, and no fallback credentials on the
+    // delegate: the fetch should complete without delivering any body.
+    URLRequest r(
+        test_server_.GetURLWithUserAndPassword("/LICENSE",
+                                               "chrome",
+                                               "wrong_password"),
+        &d,
+        &default_context_);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    int64 file_size = 0;
+    file_util::GetFileSize(app_path, &file_size);
+
+    EXPECT_FALSE(r.is_pending());
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_FALSE(d.received_data_before_response());
+    // No bytes expected since authentication failed.
+    EXPECT_EQ(d.bytes_received(), 0);
+  }
+}
+
+// Flaky, see http://crbug.com/25045.
+TEST_F(URLRequestTestFTP, DISABLED_FTPCheckWrongPasswordRestart) {
+  ASSERT_TRUE(test_server_.Start());
+
+  FilePath app_path;
+  PathService::Get(base::DIR_SOURCE_ROOT, &app_path);
+  app_path = app_path.AppendASCII("LICENSE");
+  TestDelegate d;
+  // Set correct login credentials. The delegate will be asked for them when
+  // the initial login with wrong credentials will fail.
+  d.set_credentials(AuthCredentials(kChrome, kChrome));
+  {
+    URLRequest r(
+        test_server_.GetURLWithUserAndPassword("/LICENSE",
+                                               "chrome",
+                                               "wrong_password"),
+        &d,
+        &default_context_);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    int64 file_size = 0;
+    file_util::GetFileSize(app_path, &file_size);
+
+    EXPECT_FALSE(r.is_pending());
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_FALSE(d.received_data_before_response());
+    // After the auth restart with the delegate's credentials, the full file
+    // should have been downloaded.
+    EXPECT_EQ(d.bytes_received(), static_cast<int>(file_size));
+  }
+}
+
+// Flaky, see http://crbug.com/25045.
+TEST_F(URLRequestTestFTP, DISABLED_FTPCheckWrongUser) {
+  ASSERT_TRUE(test_server_.Start());
+
+  FilePath app_path;
+  PathService::Get(base::DIR_SOURCE_ROOT, &app_path);
+  app_path = app_path.AppendASCII("LICENSE");
+  TestDelegate d;
+  {
+    // Wrong username in the URL, and no fallback credentials on the
+    // delegate: the fetch should complete without delivering any body.
+    URLRequest r(
+        test_server_.GetURLWithUserAndPassword("/LICENSE",
+                                               "wrong_user",
+                                               "chrome"),
+        &d,
+        &default_context_);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    int64 file_size = 0;
+    file_util::GetFileSize(app_path, &file_size);
+
+    EXPECT_FALSE(r.is_pending());
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_FALSE(d.received_data_before_response());
+    // No bytes expected since authentication failed.
+    EXPECT_EQ(d.bytes_received(), 0);
+  }
+}
+
+// Flaky, see http://crbug.com/25045.
+TEST_F(URLRequestTestFTP, DISABLED_FTPCheckWrongUserRestart) {
+  ASSERT_TRUE(test_server_.Start());
+
+  FilePath app_path;
+  PathService::Get(base::DIR_SOURCE_ROOT, &app_path);
+  app_path = app_path.AppendASCII("LICENSE");
+  TestDelegate d;
+  // Set correct login credentials. The delegate will be asked for them when
+  // the initial login with wrong credentials will fail.
+  d.set_credentials(AuthCredentials(kChrome, kChrome));
+  {
+    URLRequest r(
+        test_server_.GetURLWithUserAndPassword("/LICENSE",
+                                               "wrong_user",
+                                               "chrome"),
+        &d,
+        &default_context_);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    int64 file_size = 0;
+    file_util::GetFileSize(app_path, &file_size);
+
+    EXPECT_FALSE(r.is_pending());
+    EXPECT_EQ(1, d.response_started_count());
+    EXPECT_FALSE(d.received_data_before_response());
+    // After the auth restart with the delegate's credentials, the full file
+    // should have been downloaded.
+    EXPECT_EQ(d.bytes_received(), static_cast<int>(file_size));
+  }
+}
+
+// Flaky, see http://crbug.com/25045.
+TEST_F(URLRequestTestFTP, DISABLED_FTPCacheURLCredentials) {
+  ASSERT_TRUE(test_server_.Start());
+
+  FilePath app_path;
+  PathService::Get(base::DIR_SOURCE_ROOT, &app_path);
+  app_path = app_path.AppendASCII("LICENSE");
+
+  scoped_ptr<TestDelegate> d(new TestDelegate);
+  {
+    // Pass correct login identity in the URL.
+    URLRequest r(
+        test_server_.GetURLWithUserAndPassword("/LICENSE",
+                                               "chrome",
+                                               "chrome"),
+        d.get(),
+        &default_context_);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    int64 file_size = 0;
+    file_util::GetFileSize(app_path, &file_size);
+
+    EXPECT_FALSE(r.is_pending());
+    EXPECT_EQ(1, d->response_started_count());
+    EXPECT_FALSE(d->received_data_before_response());
+    EXPECT_EQ(d->bytes_received(), static_cast<int>(file_size));
+  }
+
+  // Fresh delegate with no credentials of its own.
+  d.reset(new TestDelegate);
+  {
+    // This request should use cached identity from previous request.
+    URLRequest r(test_server_.GetURL("/LICENSE"), d.get(), &default_context_);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    int64 file_size = 0;
+    file_util::GetFileSize(app_path, &file_size);
+
+    // Success here demonstrates the URL-supplied credentials were cached.
+    EXPECT_FALSE(r.is_pending());
+    EXPECT_EQ(1, d->response_started_count());
+    EXPECT_FALSE(d->received_data_before_response());
+    EXPECT_EQ(d->bytes_received(), static_cast<int>(file_size));
+  }
+}
+
+// Flaky, see http://crbug.com/25045.
+TEST_F(URLRequestTestFTP, DISABLED_FTPCacheLoginBoxCredentials) {
+  ASSERT_TRUE(test_server_.Start());
+
+  FilePath app_path;
+  PathService::Get(base::DIR_SOURCE_ROOT, &app_path);
+  app_path = app_path.AppendASCII("LICENSE");
+
+  scoped_ptr<TestDelegate> d(new TestDelegate);
+  // Set correct login credentials. The delegate will be asked for them when
+  // the initial login with wrong credentials will fail.
+  d->set_credentials(AuthCredentials(kChrome, kChrome));
+  {
+    // Wrong password in the URL forces an auth challenge, answered via the
+    // delegate (i.e. the "login box").
+    URLRequest r(
+        test_server_.GetURLWithUserAndPassword("/LICENSE",
+                                               "chrome",
+                                               "wrong_password"),
+        d.get(),
+        &default_context_);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    int64 file_size = 0;
+    file_util::GetFileSize(app_path, &file_size);
+
+    EXPECT_FALSE(r.is_pending());
+    EXPECT_EQ(1, d->response_started_count());
+    EXPECT_FALSE(d->received_data_before_response());
+    EXPECT_EQ(d->bytes_received(), static_cast<int>(file_size));
+  }
+
+  // Use a new delegate without explicit credentials. The cached ones should be
+  // used.
+  d.reset(new TestDelegate);
+  {
+    // Don't pass wrong credentials in the URL, they would override valid cached
+    // ones.
+    URLRequest r(test_server_.GetURL("/LICENSE"), d.get(), &default_context_);
+    r.Start();
+    EXPECT_TRUE(r.is_pending());
+
+    MessageLoop::current()->Run();
+
+    int64 file_size = 0;
+    file_util::GetFileSize(app_path, &file_size);
+
+    // Success here demonstrates the login-box credentials were cached.
+    EXPECT_FALSE(r.is_pending());
+    EXPECT_EQ(1, d->response_started_count());
+    EXPECT_FALSE(d->received_data_before_response());
+    EXPECT_EQ(d->bytes_received(), static_cast<int>(file_size));
+  }
+}
+#endif  // !defined(DISABLE_FTP_SUPPORT)
+
+}  // namespace net
diff --git a/src/net/url_request/view_cache_helper.cc b/src/net/url_request/view_cache_helper.cc
new file mode 100644
index 0000000..d468075
--- /dev/null
+++ b/src/net/url_request/view_cache_helper.cc
@@ -0,0 +1,360 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/view_cache_helper.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/stringprintf.h"
+#include "net/base/escape.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_errors.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/http/http_cache.h"
+#include "net/http/http_response_headers.h"
+#include "net/http/http_response_info.h"
+#include "net/url_request/url_request_context.h"
+
+#define VIEW_CACHE_HEAD \
+  "<html><meta charset=\"utf-8\">" \
+  "<meta http-equiv=\"Content-Security-Policy\" " \
+  "  content=\"object-src 'none'; script-src 'none' 'unsafe-eval'\">" \
+  "<body><table>"
+
+#define VIEW_CACHE_TAIL \
+  "</table></body></html>"
+
+namespace net {
+
+namespace {
+
+// Renders one cache entry as an HTML table row whose link is
+// |url_prefix| + entry key.
+std::string FormatEntryInfo(disk_cache::Entry* entry,
+                            const std::string& url_prefix) {
+  const std::string key = entry->GetKey();
+  const GURL entry_url(url_prefix + key);
+  std::string row("<tr><td><a href=\"");
+  row += entry_url.spec();
+  row += "\">";
+  row += EscapeForHTML(key);
+  row += "</a></td></tr>";
+  return row;
+}
+
+}  // namespace.
+
+// Initializes all pointer/counter state; no work happens until one of the
+// Get*HTML entry points is called.
+ViewCacheHelper::ViewCacheHelper()
+    : context_(NULL),
+      disk_cache_(NULL),
+      entry_(NULL),
+      iter_(NULL),
+      buf_len_(0),
+      index_(0),
+      data_(NULL),
+      next_state_(STATE_NONE),
+      ALLOW_THIS_IN_INITIALIZER_LIST(weak_factory_(this)) {
+}
+
+ViewCacheHelper::~ViewCacheHelper() {
+  // Close any cache entry left open by an in-flight operation.
+  if (entry_)
+    entry_->Close();
+}
+
+int ViewCacheHelper::GetEntryInfoHTML(const std::string& key,
+                                      const URLRequestContext* context,
+                                      std::string* out,
+                                      const CompletionCallback& callback) {
+  // Single-entry dump: no URL prefix is needed because no links are emitted.
+  return GetInfoHTML(key, context, std::string(), out, callback);
+}
+
+int ViewCacheHelper::GetContentsHTML(const URLRequestContext* context,
+                                     const std::string& url_prefix,
+                                     std::string* out,
+                                     const CompletionCallback& callback) {
+  // An empty key tells the state machine to enumerate the whole cache.
+  return GetInfoHTML(std::string(), context, url_prefix, out, callback);
+}
+
+// static
+void ViewCacheHelper::HexDump(const char *buf, size_t buf_len,
+                              std::string* result) {
+  // Number of bytes rendered per output row.
+  const size_t kMaxRows = 16;
+  int offset = 0;
+
+  while (buf_len) {
+    const size_t row_len = std::min(kMaxRows, buf_len);
+
+    base::StringAppendF(result, "%08x:  ", offset);
+    offset += kMaxRows;
+
+    // Hex codes, padded with blanks so the glyph column always lines up.
+    const unsigned char* p = reinterpret_cast<const unsigned char*>(buf);
+    for (size_t i = 0; i < row_len; ++i)
+      base::StringAppendF(result, "%02x  ", *p++);
+    for (size_t i = row_len; i < kMaxRows; ++i)
+      result->append("    ");
+
+    // ASCII glyphs for printable bytes (escaped for HTML); '.' otherwise.
+    p = reinterpret_cast<const unsigned char*>(buf);
+    for (size_t i = 0; i < row_len; ++i, ++p) {
+      if (*p < 0x7F && *p > 0x1F) {
+        AppendEscapedCharForHTML(*p, result);
+      } else {
+        result->push_back('.');
+      }
+    }
+
+    result->push_back('\n');
+
+    buf += row_len;
+    buf_len -= row_len;
+  }
+}
+
+//-----------------------------------------------------------------------------
+
+// Shared implementation of GetEntryInfoHTML/GetContentsHTML: stashes the
+// parameters in members and kicks off the state machine.
+int ViewCacheHelper::GetInfoHTML(const std::string& key,
+                                 const URLRequestContext* context,
+                                 const std::string& url_prefix,
+                                 std::string* out,
+                                 const CompletionCallback& callback) {
+  // Only one operation may be in flight at a time.
+  DCHECK(callback_.is_null());
+  DCHECK(context);
+  key_ = key;
+  context_ = context;
+  url_prefix_ = url_prefix;
+  data_ = out;
+  next_state_ = STATE_GET_BACKEND;
+  int rv = DoLoop(OK);
+
+  // Store the callback only if the loop actually went async; a synchronous
+  // completion must not invoke it.
+  if (rv == ERR_IO_PENDING)
+    callback_ = callback;
+
+  return rv;
+}
+
+// Notifies the caller of completion and clears the stored callback.
+void ViewCacheHelper::DoCallback(int rv) {
+  DCHECK_NE(ERR_IO_PENDING, rv);
+  DCHECK(!callback_.is_null());
+
+  callback_.Run(rv);
+  callback_.Reset();
+}
+
+// Finalizes an operation: drops the context and fires the callback if one
+// was stored (i.e. the operation had gone asynchronous).
+void ViewCacheHelper::HandleResult(int rv) {
+  DCHECK_NE(ERR_IO_PENDING, rv);
+  DCHECK_NE(ERR_FAILED, rv);
+  context_ = NULL;
+  if (!callback_.is_null())
+    DoCallback(rv);
+}
+
+// Drives the state machine until it either finishes (next_state_ becomes
+// STATE_NONE) or an operation goes asynchronous (ERR_IO_PENDING), in which
+// case OnIOComplete re-enters this loop with the operation's result.
+int ViewCacheHelper::DoLoop(int result) {
+  DCHECK(next_state_ != STATE_NONE);
+
+  int rv = result;
+  do {
+    // Consume next_state_ before dispatching; each Do* method sets the
+    // follow-up state itself.
+    State state = next_state_;
+    next_state_ = STATE_NONE;
+    switch (state) {
+      case STATE_GET_BACKEND:
+        DCHECK_EQ(OK, rv);
+        rv = DoGetBackend();
+        break;
+      case STATE_GET_BACKEND_COMPLETE:
+        rv = DoGetBackendComplete(rv);
+        break;
+      case STATE_OPEN_NEXT_ENTRY:
+        DCHECK_EQ(OK, rv);
+        rv = DoOpenNextEntry();
+        break;
+      case STATE_OPEN_NEXT_ENTRY_COMPLETE:
+        rv = DoOpenNextEntryComplete(rv);
+        break;
+      case STATE_OPEN_ENTRY:
+        DCHECK_EQ(OK, rv);
+        rv = DoOpenEntry();
+        break;
+      case STATE_OPEN_ENTRY_COMPLETE:
+        rv = DoOpenEntryComplete(rv);
+        break;
+      case STATE_READ_RESPONSE:
+        DCHECK_EQ(OK, rv);
+        rv = DoReadResponse();
+        break;
+      case STATE_READ_RESPONSE_COMPLETE:
+        rv = DoReadResponseComplete(rv);
+        break;
+      case STATE_READ_DATA:
+        DCHECK_EQ(OK, rv);
+        rv = DoReadData();
+        break;
+      case STATE_READ_DATA_COMPLETE:
+        rv = DoReadDataComplete(rv);
+        break;
+
+      default:
+        NOTREACHED() << "bad state";
+        rv = ERR_FAILED;
+        break;
+    }
+  } while (rv != ERR_IO_PENDING && next_state_ != STATE_NONE);
+
+  if (rv != ERR_IO_PENDING)
+    HandleResult(rv);
+
+  return rv;
+}
+
+// Requests the disk-cache backend from the context's HTTP cache. Fails
+// (ERR_FAILED) when the context has no transaction factory or no cache.
+int ViewCacheHelper::DoGetBackend() {
+  next_state_ = STATE_GET_BACKEND_COMPLETE;
+
+  if (!context_->http_transaction_factory())
+    return ERR_FAILED;
+
+  HttpCache* http_cache = context_->http_transaction_factory()->GetCache();
+  if (!http_cache)
+    return ERR_FAILED;
+
+  return http_cache->GetBackend(
+      &disk_cache_, base::Bind(&ViewCacheHelper::OnIOComplete,
+                               base::Unretained(this)));
+}
+
+int ViewCacheHelper::DoGetBackendComplete(int result) {
+  // A missing backend terminates the operation with a friendly message
+  // rather than an error code (note the DCHECK_NE in HandleResult).
+  if (result == ERR_FAILED) {
+    data_->append("no disk cache");
+    return OK;
+  }
+
+  DCHECK_EQ(OK, result);
+  // Empty key => enumerate the whole cache; otherwise open that one entry.
+  if (key_.empty()) {
+    data_->assign(VIEW_CACHE_HEAD);
+    DCHECK(!iter_);
+    next_state_ = STATE_OPEN_NEXT_ENTRY;
+    return OK;
+  }
+
+  next_state_ = STATE_OPEN_ENTRY;
+  return OK;
+}
+
+// Opens the next entry in the enumeration; |iter_| carries the backend's
+// opaque enumeration cursor between calls.
+int ViewCacheHelper::DoOpenNextEntry() {
+  next_state_ = STATE_OPEN_NEXT_ENTRY_COMPLETE;
+  return disk_cache_->OpenNextEntry(
+      &iter_, &entry_,
+      base::Bind(&ViewCacheHelper::OnIOComplete, base::Unretained(this)));
+}
+
+int ViewCacheHelper::DoOpenNextEntryComplete(int result) {
+  // ERR_FAILED here marks the end of the enumeration: close out the HTML.
+  if (result == ERR_FAILED) {
+    data_->append(VIEW_CACHE_TAIL);
+    return OK;
+  }
+
+  DCHECK_EQ(OK, result);
+  // Emit one table row per entry, then release it and keep iterating.
+  data_->append(FormatEntryInfo(entry_, url_prefix_));
+  entry_->Close();
+  entry_ = NULL;
+
+  next_state_ = STATE_OPEN_NEXT_ENTRY;
+  return OK;
+}
+
+// Opens the single entry named by |key_| for the entry-info flavor.
+int ViewCacheHelper::DoOpenEntry() {
+  next_state_ = STATE_OPEN_ENTRY_COMPLETE;
+  return disk_cache_->OpenEntry(
+      key_, &entry_,
+      base::Bind(&ViewCacheHelper::OnIOComplete, base::Unretained(this)));
+}
+
+int ViewCacheHelper::DoOpenEntryComplete(int result) {
+  // Entry not found: report it (HTML-escaped) and finish successfully.
+  if (result == ERR_FAILED) {
+    data_->append("no matching cache entry for: " + EscapeForHTML(key_));
+    return OK;
+  }
+
+  // Start the page with the entry's key, then dump its response headers.
+  data_->assign(VIEW_CACHE_HEAD);
+  data_->append(EscapeForHTML(entry_->GetKey()));
+  next_state_ = STATE_READ_RESPONSE;
+  return OK;
+}
+
+// Reads data stream 0 of the entry, which holds the serialized response
+// info. An empty stream returns 0 (== OK) and falls straight through to
+// DoReadResponseComplete.
+int ViewCacheHelper::DoReadResponse() {
+  next_state_ = STATE_READ_RESPONSE_COMPLETE;
+  buf_len_ = entry_->GetDataSize(0);
+  if (!buf_len_)
+    return buf_len_;
+
+  buf_ = new IOBuffer(buf_len_);
+  // NOTE(review): this uses a weak pointer while the other callbacks use
+  // base::Unretained(this) — presumably equivalent given the object's
+  // lifetime, but worth confirming.
+  return entry_->ReadData(
+      0, 0, buf_, buf_len_,
+      base::Bind(&ViewCacheHelper::OnIOComplete, weak_factory_.GetWeakPtr()));
+}
+
+int ViewCacheHelper::DoReadResponseComplete(int result) {
+  // Only render headers when the full stream was read and parses cleanly;
+  // otherwise silently skip to dumping the data streams.
+  if (result && result == buf_len_) {
+    HttpResponseInfo response;
+    bool truncated;
+    if (HttpCache::ParseResponseInfo(buf_->data(), buf_len_, &response,
+                                          &truncated) &&
+        response.headers) {
+      if (truncated)
+        data_->append("<pre>RESPONSE_INFO_TRUNCATED</pre>");
+
+      data_->append("<hr><pre>");
+      data_->append(EscapeForHTML(response.headers->GetStatusLine()));
+      data_->push_back('\n');
+
+      // Emit each header line, HTML-escaped.
+      void* iter = NULL;
+      std::string name, value;
+      while (response.headers->EnumerateHeaderLines(&iter, &name, &value)) {
+        data_->append(EscapeForHTML(name));
+        data_->append(": ");
+        data_->append(EscapeForHTML(value));
+        data_->push_back('\n');
+      }
+      data_->append("</pre>");
+    }
+  }
+
+  // Move on to dumping the entry's data streams, starting at index 0.
+  index_ = 0;
+  next_state_ = STATE_READ_DATA;
+  return OK;
+}
+
+// Reads the data stream at |index_|. An empty stream returns 0 (== OK) and
+// falls straight through to DoReadDataComplete.
+int ViewCacheHelper::DoReadData() {
+  data_->append("<hr><pre>");
+
+  next_state_ = STATE_READ_DATA_COMPLETE;
+  buf_len_ = entry_->GetDataSize(index_);
+  if (!buf_len_)
+    return buf_len_;
+
+  buf_ = new IOBuffer(buf_len_);
+  return entry_->ReadData(
+      index_, 0, buf_, buf_len_,
+      base::Bind(&ViewCacheHelper::OnIOComplete, weak_factory_.GetWeakPtr()));
+}
+
+int ViewCacheHelper::DoReadDataComplete(int result) {
+  // Dump the stream only if the full read succeeded; partial reads are
+  // skipped silently.
+  if (result && result == buf_len_) {
+    HexDump(buf_->data(), buf_len_, data_);
+  }
+  data_->append("</pre>");
+  // Advance to the next data stream, or finish once all streams are dumped.
+  index_++;
+  if (index_ < HttpCache::kNumCacheEntryDataIndices) {
+    next_state_ = STATE_READ_DATA;
+  } else {
+    data_->append(VIEW_CACHE_TAIL);
+    entry_->Close();
+    entry_ = NULL;
+  }
+  return OK;
+}
+
+// Completion callback for all asynchronous cache operations; re-enters the
+// state machine with the operation's result.
+void ViewCacheHelper::OnIOComplete(int result) {
+  DoLoop(result);
+}
+
+}  // namespace net.
diff --git a/src/net/url_request/view_cache_helper.h b/src/net/url_request/view_cache_helper.h
new file mode 100644
index 0000000..d5a7c42
--- /dev/null
+++ b/src/net/url_request/view_cache_helper.h
@@ -0,0 +1,124 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_VIEW_CACHE_HELPER_H_
+#define NET_URL_REQUEST_VIEW_CACHE_HELPER_H_
+
+#include <string>
+
+#include "base/memory/weak_ptr.h"
+#include "net/base/completion_callback.h"
+#include "net/base/io_buffer.h"
+#include "net/base/net_export.h"
+
+namespace disk_cache {
+class Backend;
+class Entry;
+}  // namespace disk_cache
+
+namespace net {
+
+class URLRequestContext;
+
+// Renders the contents of an HttpCache's disk-cache backend as HTML, either
+// one entry at a time or as a listing of all entries. Operations run as an
+// asynchronous state machine; at most one may be in flight per instance.
+class NET_EXPORT ViewCacheHelper {
+ public:
+  ViewCacheHelper();
+  ~ViewCacheHelper();
+
+  // Formats the cache information for |key| as HTML. Returns a net error code.
+  // If this method returns ERR_IO_PENDING, |callback| will be notified when the
+  // operation completes. |out| must remain valid until this operation completes
+  // or the object is destroyed.
+  int GetEntryInfoHTML(const std::string& key,
+                       const URLRequestContext* context,
+                       std::string* out,
+                       const CompletionCallback& callback);
+
+  // Formats the cache contents as HTML. Returns a net error code.
+  // If this method returns ERR_IO_PENDING, |callback| will be notified when the
+  // operation completes. |out| must remain valid until this operation completes
+  // or the object is destroyed. |url_prefix| will be prepended to each entry
+  // key as a link to the entry.
+  int GetContentsHTML(const URLRequestContext* context,
+                      const std::string& url_prefix,
+                      std::string* out,
+                      const CompletionCallback& callback);
+
+  // Lower-level helper to produce a textual representation of binary data.
+  // The results are appended to |result| and can be used in HTML pages
+  // provided the dump is contained within <pre></pre> tags.
+  static void HexDump(const char *buf, size_t buf_len, std::string* result);
+
+ private:
+  // States of the DoLoop state machine; each *_COMPLETE state receives the
+  // result of the operation started by the preceding state.
+  enum State {
+    STATE_NONE,
+    STATE_GET_BACKEND,
+    STATE_GET_BACKEND_COMPLETE,
+    STATE_OPEN_NEXT_ENTRY,
+    STATE_OPEN_NEXT_ENTRY_COMPLETE,
+    STATE_OPEN_ENTRY,
+    STATE_OPEN_ENTRY_COMPLETE,
+    STATE_READ_RESPONSE,
+    STATE_READ_RESPONSE_COMPLETE,
+    STATE_READ_DATA,
+    STATE_READ_DATA_COMPLETE
+  };
+
+  // Implements GetEntryInfoHTML and GetContentsHTML.
+  int GetInfoHTML(const std::string& key,
+                  const URLRequestContext* context,
+                  const std::string& url_prefix,
+                  std::string* out,
+                  const CompletionCallback& callback);
+
+  // This is a helper function used to trigger a completion callback. It may
+  // only be called if callback_ is non-null.
+  void DoCallback(int rv);
+
+  // This will trigger the completion callback if appropriate.
+  void HandleResult(int rv);
+
+  // Runs the state transition loop.
+  int DoLoop(int result);
+
+  // Each of these methods corresponds to a State value. If there is an
+  // argument, the value corresponds to the return of the previous state or
+  // corresponding callback.
+  int DoGetBackend();
+  int DoGetBackendComplete(int result);
+  int DoOpenNextEntry();
+  int DoOpenNextEntryComplete(int result);
+  int DoOpenEntry();
+  int DoOpenEntryComplete(int result);
+  int DoReadResponse();
+  int DoReadResponseComplete(int result);
+  int DoReadData();
+  int DoReadDataComplete(int result);
+
+  // Called to signal completion of asynchronous IO.
+  void OnIOComplete(int result);
+
+  // Context for the current operation; cleared when the operation finishes.
+  const URLRequestContext* context_;
+  // Backend obtained from the context's HttpCache.
+  disk_cache::Backend* disk_cache_;
+  // Currently open cache entry, if any; closed in the destructor.
+  disk_cache::Entry* entry_;
+  // Opaque enumeration cursor passed to Backend::OpenNextEntry.
+  void* iter_;
+  // Scratch buffer (and its size) for entry reads.
+  scoped_refptr<IOBuffer> buf_;
+  int buf_len_;
+  // Index of the entry data stream currently being dumped.
+  int index_;
+
+  // Parameters of the current operation (see GetInfoHTML).
+  std::string key_;
+  std::string url_prefix_;
+  std::string* data_;
+  CompletionCallback callback_;
+
+  State next_state_;
+
+  base::WeakPtrFactory<ViewCacheHelper> weak_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(ViewCacheHelper);
+};
+
+}  // namespace net.
+
+#endif  // NET_URL_REQUEST_VIEW_CACHE_HELPER_H_
diff --git a/src/net/url_request/view_cache_helper_unittest.cc b/src/net/url_request/view_cache_helper_unittest.cc
new file mode 100644
index 0000000..9fda85b
--- /dev/null
+++ b/src/net/url_request/view_cache_helper_unittest.cc
@@ -0,0 +1,225 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/url_request/view_cache_helper.h"
+
+#include "base/pickle.h"
+#include "net/base/net_errors.h"
+#include "net/base/test_completion_callback.h"
+#include "net/disk_cache/disk_cache.h"
+#include "net/http/http_cache.h"
+#include "net/url_request/url_request_context.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace net {
+
+namespace {
+
+// The disk cache is turned off in lb_shell (see CreateBackend() in
+// http_cache.cc), so the following test cases, which rely on the cache
+// backend, must be disabled: ListContents, DumpEntry, Prefix, and
+// TruncatedFlag.
+#if defined(__LB_SHELL__) || defined(COBALT)
+#define MAYBE_ListContents DISABLED_ListContents
+#define MAYBE_DumpEntry DISABLED_DumpEntry
+#define MAYBE_Prefix DISABLED_Prefix
+#define MAYBE_TruncatedFlag DISABLED_TruncatedFlag
+#else
+#define MAYBE_ListContents ListContents
+#define MAYBE_DumpEntry DumpEntry
+#define MAYBE_Prefix Prefix
+#define MAYBE_TruncatedFlag TruncatedFlag
+#endif
+
+// URLRequestContext wired to a purely in-memory HttpCache, so the tests
+// below can populate and inspect cache entries without touching disk.
+class TestURLRequestContext : public URLRequestContext {
+ public:
+  TestURLRequestContext();
+  virtual ~TestURLRequestContext() {}
+
+  // Gets a pointer to the cache backend.
+  disk_cache::Backend* GetBackend();
+
+ private:
+  HttpCache cache_;
+};
+
+// Builds the context around an in-memory-only cache backend (max size 0 ==
+// default). NOTE(review): the NULL transaction factory is cast explicitly,
+// presumably to select the intended HttpCache constructor overload —
+// confirm against http_cache.h before changing.
+TestURLRequestContext::TestURLRequestContext()
+    : cache_(reinterpret_cast<HttpTransactionFactory*>(NULL), NULL,
+             HttpCache::DefaultBackend::InMemory(0)) {
+  set_http_transaction_factory(&cache_);
+}
+
+// Serializes |data| (tagged with |flags|) into the pickle layout used for
+// cache entry headers and writes it to stream 0 of |entry|, blocking until
+// the write completes. No-op when |data| is empty.
+// |data| is now taken by const reference to avoid copying it on every call.
+void WriteHeaders(disk_cache::Entry* entry, int flags,
+                  const std::string& data) {
+  if (data.empty())
+    return;
+
+  Pickle pickle;
+  pickle.WriteInt(flags | 1);  // Version 1.
+  pickle.WriteInt64(0);        // Request time (unused by these tests).
+  pickle.WriteInt64(0);        // Response time (unused by these tests).
+  pickle.WriteString(data);
+
+  // WrappedIOBuffer does not own the bytes; |pickle| stays alive until the
+  // blocking GetResult() below returns, so the pointer remains valid.
+  scoped_refptr<WrappedIOBuffer> buf(new WrappedIOBuffer(
+      reinterpret_cast<const char*>(pickle.data())));
+  int len = static_cast<int>(pickle.size());
+
+  net::TestCompletionCallback cb;
+  int rv = entry->WriteData(0, 0, buf, len, cb.callback(), true);
+  ASSERT_EQ(len, cb.GetResult(rv));
+}
+
+// Writes |data| verbatim to stream |index| of |entry|, blocking until the
+// write completes. No-op when |data| is empty.
+// |data| is now taken by const reference to avoid copying it on every call,
+// and the size_t -> int conversion is made explicit.
+void WriteData(disk_cache::Entry* entry, int index, const std::string& data) {
+  if (data.empty())
+    return;
+
+  int len = static_cast<int>(data.length());
+  scoped_refptr<IOBuffer> buf(new IOBuffer(len));
+  memcpy(buf->data(), data.data(), len);
+
+  net::TestCompletionCallback cb;
+  int rv = entry->WriteData(index, 0, buf, len, cb.callback(), true);
+  ASSERT_EQ(len, cb.GetResult(rv));
+}
+
+// Creates (or opens, if it already exists) the cache entry |key| and fills
+// its three streams: |data0| as the pickled header block (stream 0), and
+// |data1|/|data2| as raw payload (streams 1 and 2).
+// The strings are now taken by const reference to avoid four copies per call.
+void WriteToEntry(disk_cache::Backend* cache, const std::string& key,
+                  const std::string& data0, const std::string& data1,
+                  const std::string& data2) {
+  net::TestCompletionCallback cb;
+  disk_cache::Entry* entry;
+  int rv = cache->CreateEntry(key, &entry, cb.callback());
+  rv = cb.GetResult(rv);
+  if (rv != OK) {
+    // Creation failed (most likely the entry already exists), so fall back
+    // to opening it. ASSERT returns out of this helper on failure, before
+    // |entry| is ever dereferenced.
+    rv = cache->OpenEntry(key, &entry, cb.callback());
+    ASSERT_EQ(OK, cb.GetResult(rv));
+  }
+
+  WriteHeaders(entry, 0, data0);
+  WriteData(entry, 1, data1);
+  WriteData(entry, 2, data2);
+
+  entry->Close();
+}
+
+// Seeds |context|'s cache backend with the three entries ("first",
+// "second", "third") that the tests below inspect.
+void FillCache(URLRequestContext* context) {
+  net::TestCompletionCallback callback;
+  disk_cache::Backend* backend;
+  int rv = context->http_transaction_factory()->GetCache()->GetBackend(
+      &backend, callback.callback());
+  ASSERT_EQ(OK, callback.GetResult(rv));
+
+  const std::string kNoData;
+  WriteToEntry(backend, "first", "some", kNoData, kNoData);
+  WriteToEntry(backend, "second", "only hex_dumped", "same", "kind");
+  WriteToEntry(backend, "third", kNoData, "another", "thing");
+}
+
+}  // namespace.
+
+// GetContentsHTML on a cache with no entries should still succeed and
+// produce a non-empty HTML page.
+TEST(ViewCacheHelper, EmptyCache) {
+  TestURLRequestContext context;
+  ViewCacheHelper helper;
+
+  TestCompletionCallback cb;
+  std::string prefix;
+  std::string html;
+  int rv = helper.GetContentsHTML(&context, prefix, &html, cb.callback());
+  EXPECT_EQ(OK, cb.GetResult(rv));
+  EXPECT_FALSE(html.empty());
+}
+
+// The contents listing should name every cached key without dumping any of
+// the entries' stored payloads.
+TEST(ViewCacheHelper, MAYBE_ListContents) {
+  TestURLRequestContext context;
+  ViewCacheHelper helper;
+
+  FillCache(&context);
+
+  TestCompletionCallback cb;
+  std::string prefix;
+  std::string page;
+  int rv = helper.GetContentsHTML(&context, prefix, &page, cb.callback());
+  EXPECT_EQ(OK, cb.GetResult(rv));
+
+  // Well-formed page that lists all three keys...
+  EXPECT_EQ(0U, page.find("<html>"));
+  EXPECT_NE(std::string::npos, page.find("</html>"));
+  EXPECT_NE(std::string::npos, page.find("first"));
+  EXPECT_NE(std::string::npos, page.find("second"));
+  EXPECT_NE(std::string::npos, page.find("third"));
+
+  // ...but none of the entry contents.
+  EXPECT_EQ(std::string::npos, page.find("some"));
+  EXPECT_EQ(std::string::npos, page.find("same"));
+  EXPECT_EQ(std::string::npos, page.find("thing"));
+}
+
+// Dumping a single entry should show that entry's data and nothing from
+// the other cached entries.
+TEST(ViewCacheHelper, MAYBE_DumpEntry) {
+  TestURLRequestContext context;
+  ViewCacheHelper helper;
+
+  FillCache(&context);
+
+  TestCompletionCallback cb;
+  std::string page;
+  int rv = helper.GetEntryInfoHTML("second", &context, &page, cb.callback());
+  EXPECT_EQ(OK, cb.GetResult(rv));
+
+  // Well-formed page containing the "second" entry's streams...
+  EXPECT_EQ(0U, page.find("<html>"));
+  EXPECT_NE(std::string::npos, page.find("</html>"));
+  EXPECT_NE(std::string::npos, page.find("hex_dumped"));
+  EXPECT_NE(std::string::npos, page.find("same"));
+  EXPECT_NE(std::string::npos, page.find("kind"));
+
+  // ...and no keys or data from the other entries.
+  EXPECT_EQ(std::string::npos, page.find("first"));
+  EXPECT_EQ(std::string::npos, page.find("third"));
+  EXPECT_EQ(std::string::npos, page.find("some"));
+  EXPECT_EQ(std::string::npos, page.find("another"));
+}
+
+// Makes sure the links are correct.
+TEST(ViewCacheHelper, MAYBE_Prefix) {
+  TestURLRequestContext context;
+  ViewCacheHelper helper;
+
+  FillCache(&context);
+
+  std::string key, data;
+  std::string prefix("prefix:");
+  TestCompletionCallback cb;
+  int rv = helper.GetContentsHTML(&context, prefix, &data, cb.callback());
+  EXPECT_EQ(OK, cb.GetResult(rv));
+
+  EXPECT_EQ(0U, data.find("<html>"));
+  EXPECT_NE(std::string::npos, data.find("</html>"));
+  EXPECT_NE(std::string::npos, data.find("<a href=\"prefix:first\">"));
+  EXPECT_NE(std::string::npos, data.find("<a href=\"prefix:second\">"));
+  EXPECT_NE(std::string::npos, data.find("<a href=\"prefix:third\">"));
+}
+
+// An entry whose header pickle carries the truncated bit should be reported
+// as RESPONSE_INFO_TRUNCATED in the entry dump.
+TEST(ViewCacheHelper, MAYBE_TruncatedFlag) {
+  TestURLRequestContext context;
+  ViewCacheHelper helper;
+
+  net::TestCompletionCallback cb;
+  disk_cache::Backend* cache;
+  int rv =
+      context.http_transaction_factory()->GetCache()->GetBackend(
+          &cache, cb.callback());
+  ASSERT_EQ(OK, cb.GetResult(rv));
+
+  std::string key("the key");
+  disk_cache::Entry* entry;
+  rv = cache->CreateEntry(key, &entry, cb.callback());
+  ASSERT_EQ(OK, cb.GetResult(rv));
+
+  // Mirrors RESPONSE_INFO_TRUNCATED, defined in http_response_info.cc;
+  // keep the two values in sync if that flag ever moves.
+  const int kResponseInfoTruncated = 1 << 12;
+  WriteHeaders(entry, kResponseInfoTruncated, "something");
+  entry->Close();
+
+  std::string data;
+  TestCompletionCallback cb1;
+  rv = helper.GetEntryInfoHTML(key, &context, &data, cb1.callback());
+  EXPECT_EQ(OK, cb1.GetResult(rv));
+
+  EXPECT_NE(std::string::npos, data.find("RESPONSE_INFO_TRUNCATED"));
+}
+
+}  // namespace net