// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/spdy/spdy_session.h"
#include <algorithm>
#include <limits>
#include <map>
#include <string>
#include <utility>
#include "base/bind.h"
#include "base/feature_list.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/memory_usage_estimator.h"
#include "base/trace_event/trace_event.h"
#include "base/values.h"
#include "net/base/url_util.h"
#include "net/cert/asn1_util.h"
#include "net/cert/cert_verify_result.h"
#include "net/cert/ct_policy_status.h"
#include "net/http/http_network_session.h"
#include "net/http/http_server_properties.h"
#include "net/http/http_util.h"
#include "net/http/http_vary_data.h"
#include "net/http/transport_security_state.h"
#include "net/log/net_log.h"
#include "net/log/net_log_capture_mode.h"
#include "net/log/net_log_event_type.h"
#include "net/log/net_log_source_type.h"
#include "net/log/net_log_with_source.h"
#include "net/quic/quic_http_utils.h"
#include "net/socket/socket.h"
#include "net/socket/ssl_client_socket.h"
#include "net/spdy/header_coalescer.h"
#include "net/spdy/spdy_buffer_producer.h"
#include "net/spdy/spdy_http_utils.h"
#include "net/spdy/spdy_log_util.h"
#include "net/spdy/spdy_session_pool.h"
#include "net/spdy/spdy_stream.h"
#include "net/ssl/channel_id_service.h"
#include "net/ssl/ssl_cipher_suite_names.h"
#include "net/ssl/ssl_connection_status_flags.h"
#include "net/third_party/quic/core/http/spdy_utils.h"
#include "net/third_party/quiche/src/spdy/core/spdy_frame_builder.h"
#include "net/third_party/quiche/src/spdy/core/spdy_protocol.h"
#include "starboard/memory.h"
#include "url/url_constants.h"
namespace net {
namespace {
constexpr net::NetworkTrafficAnnotationTag
kSpdySessionCommandsTrafficAnnotation =
net::DefineNetworkTrafficAnnotation("spdy_session_control", R"(
semantics {
sender: "Spdy Session"
description:
"Sends commands to control an HTTP/2 session."
trigger:
"Required control commands like initiating stream, requesting "
"stream reset, changing priorities, etc."
data: "No user data."
destination: OTHER
destination_other:
"Any destination the HTTP/2 session is connected to."
}
policy {
cookies_allowed: NO
setting: "This feature cannot be disabled in settings."
policy_exception_justification: "Essential for network access."
}
)");
const int kReadBufferSize = 8 * 1024;
const int kDefaultConnectionAtRiskOfLossSeconds = 10;
const int kHungIntervalSeconds = 10;
// Lifetime of unclaimed pushed stream, in seconds: after this period, a pushed
// stream is cancelled if still not claimed.
const int kPushedStreamLifetimeSeconds = 300;
// Default initial value for HTTP/2 SETTINGS.
const uint32_t kDefaultInitialHeaderTableSize = 4096;
const uint32_t kDefaultInitialEnablePush = 1;
const uint32_t kDefaultInitialInitialWindowSize = 65535;
const uint32_t kDefaultInitialMaxFrameSize = 16384;
// The maximum size of header list that the server is allowed to send.
const uint32_t kSpdyMaxHeaderListSize = 256 * 1024;
// Values of the Vary response header on pushed streams. This is logged to
// Net.PushedStreamVaryResponseHeader; existing entries must not be changed.
enum PushedStreamVaryResponseHeaderValues {
// There is no Vary header.
kNoVaryHeader = 0,
// The value of Vary is empty.
kVaryIsEmpty = 1,
// The value of Vary is "*".
kVaryIsStar = 2,
// The value of Vary is "accept-encoding" (case insensitive).
kVaryIsAcceptEncoding = 3,
// The value of Vary contains "accept-encoding" (case insensitive) and some
// other field names as well.
kVaryHasAcceptEncoding = 4,
// The value of Vary does not contain "accept-encoding", is not empty, and is
// not "*".
kVaryHasNoAcceptEncoding = 5,
// The number of entries above.
kNumberOfVaryEntries = 6
};
// String literals for parsing the Vary header in a pushed response.
const char kVary[] = "vary";
const char kStar[] = "*";
const char kAcceptEncoding[] = "accept-encoding";
enum PushedStreamVaryResponseHeaderValues ParseVaryInPushedResponse(
const spdy::SpdyHeaderBlock& headers) {
spdy::SpdyHeaderBlock::iterator it = headers.find(kVary);
if (it == headers.end())
return kNoVaryHeader;
base::StringPiece value(it->second);
if (value.empty())
return kVaryIsEmpty;
if (value == kStar)
return kVaryIsStar;
std::string lowercase_value = ToLowerASCII(value);
if (lowercase_value == kAcceptEncoding)
return kVaryIsAcceptEncoding;
// Both comma and newline delimiters occur in the wild.
for (const auto& substr :
SplitString(lowercase_value, ",\n", base::TRIM_WHITESPACE,
base::SPLIT_WANT_NONEMPTY)) {
if (substr == kAcceptEncoding)
return kVaryHasAcceptEncoding;
}
return kVaryHasNoAcceptEncoding;
}
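// Illustrative classification examples for ParseVaryInPushedResponse()
// (header values are hypothetical, not taken from real traffic):
//   (no Vary header)                        -> kNoVaryHeader
//   vary:                                   -> kVaryIsEmpty
//   vary: *                                 -> kVaryIsStar
//   vary: Accept-Encoding                   -> kVaryIsAcceptEncoding
//   vary: accept-encoding, accept-language  -> kVaryHasAcceptEncoding
//   vary: cookie                            -> kVaryHasNoAcceptEncoding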
// A SpdyBufferProducer implementation that creates an HTTP/2 frame by adding
// stream ID to greased frame parameters.
class GreasedBufferProducer : public SpdyBufferProducer {
public:
GreasedBufferProducer() = delete;
GreasedBufferProducer(
base::WeakPtr<SpdyStream> stream,
const SpdySessionPool::GreasedHttp2Frame* greased_http2_frame,
BufferedSpdyFramer* buffered_spdy_framer)
: stream_(stream),
greased_http2_frame_(greased_http2_frame),
buffered_spdy_framer_(buffered_spdy_framer) {}
~GreasedBufferProducer() override = default;
std::unique_ptr<SpdyBuffer> ProduceBuffer() override {
const spdy::SpdyStreamId stream_id = stream_ ? stream_->stream_id() : 0;
spdy::SpdyUnknownIR frame(stream_id, greased_http2_frame_->type,
greased_http2_frame_->flags,
greased_http2_frame_->payload);
auto serialized_frame = std::make_unique<spdy::SpdySerializedFrame>(
buffered_spdy_framer_->SerializeFrame(frame));
return std::make_unique<SpdyBuffer>(std::move(serialized_frame));
}
size_t EstimateMemoryUsage() const override {
return base::trace_event::EstimateMemoryUsage(
greased_http2_frame_->payload);
}
private:
base::WeakPtr<SpdyStream> stream_;
const SpdySessionPool::GreasedHttp2Frame* const greased_http2_frame_;
BufferedSpdyFramer* buffered_spdy_framer_;
};
bool IsSpdySettingAtDefaultInitialValue(spdy::SpdySettingsId setting_id,
uint32_t value) {
switch (setting_id) {
case spdy::SETTINGS_HEADER_TABLE_SIZE:
return value == kDefaultInitialHeaderTableSize;
case spdy::SETTINGS_ENABLE_PUSH:
return value == kDefaultInitialEnablePush;
case spdy::SETTINGS_MAX_CONCURRENT_STREAMS:
// There is no initial limit on the number of concurrent streams.
return false;
case spdy::SETTINGS_INITIAL_WINDOW_SIZE:
return value == kDefaultInitialInitialWindowSize;
case spdy::SETTINGS_MAX_FRAME_SIZE:
return value == kDefaultInitialMaxFrameSize;
case spdy::SETTINGS_MAX_HEADER_LIST_SIZE:
// There is no initial limit on the size of the header list.
return false;
case spdy::SETTINGS_ENABLE_CONNECT_PROTOCOL:
return value == 0;
default:
// Undefined parameters have no initial value.
return false;
}
}
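// Example (illustrative): IsSpdySettingAtDefaultInitialValue(
// spdy::SETTINGS_INITIAL_WINDOW_SIZE, 65535) returns true because 65535 is
// the RFC 7540 default, while a value of 65536 returns false. Settings with
// no defined initial value, such as SETTINGS_MAX_CONCURRENT_STREAMS, are
// never considered to be at their default.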
bool IsPushEnabled(const spdy::SettingsMap& initial_settings) {
const auto it = initial_settings.find(spdy::SETTINGS_ENABLE_PUSH);
// Push is enabled by default.
if (it == initial_settings.end())
return true;
return it->second == 1;
}
std::unique_ptr<base::Value> NetLogSpdyHeadersSentCallback(
const spdy::SpdyHeaderBlock* headers,
bool fin,
spdy::SpdyStreamId stream_id,
bool has_priority,
int weight,
spdy::SpdyStreamId parent_stream_id,
bool exclusive,
NetLogSource source_dependency,
NetLogCaptureMode capture_mode) {
auto dict = std::make_unique<base::DictionaryValue>();
dict->Set("headers", ElideSpdyHeaderBlockForNetLog(*headers, capture_mode));
dict->SetBoolean("fin", fin);
dict->SetInteger("stream_id", stream_id);
dict->SetBoolean("has_priority", has_priority);
if (has_priority) {
dict->SetInteger("parent_stream_id", parent_stream_id);
dict->SetInteger("weight", weight);
dict->SetBoolean("exclusive", exclusive);
}
if (source_dependency.IsValid()) {
source_dependency.AddToEventParameters(dict.get());
}
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdyHeadersReceivedCallback(
const spdy::SpdyHeaderBlock* headers,
bool fin,
spdy::SpdyStreamId stream_id,
NetLogCaptureMode capture_mode) {
auto dict = std::make_unique<base::DictionaryValue>();
dict->Set("headers", ElideSpdyHeaderBlockForNetLog(*headers, capture_mode));
dict->SetBoolean("fin", fin);
dict->SetInteger("stream_id", stream_id);
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdySessionCloseCallback(
int net_error,
const std::string* description,
NetLogCaptureMode /* capture_mode */) {
auto dict = std::make_unique<base::DictionaryValue>();
dict->SetInteger("net_error", net_error);
dict->SetString("description", *description);
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdySessionCallback(
const HostPortProxyPair* host_pair,
NetLogCaptureMode /* capture_mode */) {
auto dict = std::make_unique<base::DictionaryValue>();
dict->SetString("host", host_pair->first.ToString());
dict->SetString("proxy", host_pair->second.ToPacString());
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdyInitializedCallback(
NetLogSource source,
NetLogCaptureMode /* capture_mode */) {
auto dict = std::make_unique<base::DictionaryValue>();
if (source.IsValid()) {
source.AddToEventParameters(dict.get());
}
dict->SetString("protocol", NextProtoToString(kProtoHTTP2));
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdySendSettingsCallback(
const spdy::SettingsMap* settings,
NetLogCaptureMode /* capture_mode */) {
auto dict = std::make_unique<base::DictionaryValue>();
auto settings_list = std::make_unique<base::ListValue>();
for (auto it = settings->begin(); it != settings->end(); ++it) {
const spdy::SpdySettingsId id = it->first;
const uint32_t value = it->second;
settings_list->AppendString(
base::StringPrintf("[id:%u (%s) value:%u]", id,
spdy::SettingsIdToString(id).c_str(), value));
}
dict->Set("settings", std::move(settings_list));
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdyRecvSettingCallback(
spdy::SpdySettingsId id,
uint32_t value,
NetLogCaptureMode /* capture_mode */) {
auto dict = std::make_unique<base::DictionaryValue>();
dict->SetString(
"id",
base::StringPrintf("%u (%s)", id, spdy::SettingsIdToString(id).c_str()));
dict->SetInteger("value", value);
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdyWindowUpdateFrameCallback(
spdy::SpdyStreamId stream_id,
uint32_t delta,
NetLogCaptureMode /* capture_mode */) {
auto dict = std::make_unique<base::DictionaryValue>();
dict->SetInteger("stream_id", static_cast<int>(stream_id));
dict->SetInteger("delta", delta);
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdySessionWindowUpdateCallback(
int32_t delta,
int32_t window_size,
NetLogCaptureMode /* capture_mode */) {
auto dict = std::make_unique<base::DictionaryValue>();
dict->SetInteger("delta", delta);
dict->SetInteger("window_size", window_size);
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdyDataCallback(
spdy::SpdyStreamId stream_id,
int size,
bool fin,
NetLogCaptureMode /* capture_mode */) {
auto dict = std::make_unique<base::DictionaryValue>();
dict->SetInteger("stream_id", static_cast<int>(stream_id));
dict->SetInteger("size", size);
dict->SetBoolean("fin", fin);
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdyRecvRstStreamCallback(
spdy::SpdyStreamId stream_id,
spdy::SpdyErrorCode error_code,
NetLogCaptureMode /* capture_mode */) {
auto dict = std::make_unique<base::DictionaryValue>();
dict->SetInteger("stream_id", static_cast<int>(stream_id));
dict->SetString(
"error_code",
base::StringPrintf("%u (%s)", error_code, ErrorCodeToString(error_code)));
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdySendRstStreamCallback(
spdy::SpdyStreamId stream_id,
spdy::SpdyErrorCode error_code,
const std::string* description,
NetLogCaptureMode /* capture_mode */) {
auto dict = std::make_unique<base::DictionaryValue>();
dict->SetInteger("stream_id", static_cast<int>(stream_id));
dict->SetString(
"error_code",
base::StringPrintf("%u (%s)", error_code, ErrorCodeToString(error_code)));
dict->SetString("description", *description);
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdyPingCallback(
spdy::SpdyPingId unique_id,
bool is_ack,
const char* type,
NetLogCaptureMode /* capture_mode */) {
auto dict = std::make_unique<base::DictionaryValue>();
dict->SetInteger("unique_id", static_cast<int>(unique_id));
dict->SetString("type", type);
dict->SetBoolean("is_ack", is_ack);
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdyRecvGoAwayCallback(
spdy::SpdyStreamId last_stream_id,
int active_streams,
int unclaimed_streams,
spdy::SpdyErrorCode error_code,
base::StringPiece debug_data,
NetLogCaptureMode capture_mode) {
auto dict = std::make_unique<base::DictionaryValue>();
dict->SetInteger("last_accepted_stream_id", static_cast<int>(last_stream_id));
dict->SetInteger("active_streams", active_streams);
dict->SetInteger("unclaimed_streams", unclaimed_streams);
dict->SetString(
"error_code",
base::StringPrintf("%u (%s)", error_code, ErrorCodeToString(error_code)));
dict->SetString("debug_data",
ElideGoAwayDebugDataForNetLog(capture_mode, debug_data));
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdyPushPromiseReceivedCallback(
const spdy::SpdyHeaderBlock* headers,
spdy::SpdyStreamId stream_id,
spdy::SpdyStreamId promised_stream_id,
NetLogCaptureMode capture_mode) {
auto dict = std::make_unique<base::DictionaryValue>();
dict->Set("headers", ElideSpdyHeaderBlockForNetLog(*headers, capture_mode));
dict->SetInteger("id", stream_id);
dict->SetInteger("promised_stream_id", promised_stream_id);
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdyAdoptedPushStreamCallback(
spdy::SpdyStreamId stream_id,
const GURL* url,
NetLogCaptureMode capture_mode) {
auto dict = std::make_unique<base::DictionaryValue>();
dict->SetInteger("stream_id", stream_id);
dict->SetString("url", url->spec());
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdySessionStalledCallback(
size_t num_active_streams,
size_t num_created_streams,
size_t num_pushed_streams,
size_t max_concurrent_streams,
const std::string& url,
NetLogCaptureMode capture_mode) {
auto dict = std::make_unique<base::DictionaryValue>();
dict->SetInteger("num_active_streams", num_active_streams);
dict->SetInteger("num_created_streams", num_created_streams);
dict->SetInteger("num_pushed_streams", num_pushed_streams);
dict->SetInteger("max_concurrent_streams", max_concurrent_streams);
dict->SetString("url", url);
return std::move(dict);
}
std::unique_ptr<base::Value> NetLogSpdyPriorityCallback(
spdy::SpdyStreamId stream_id,
spdy::SpdyStreamId parent_stream_id,
int weight,
bool exclusive,
NetLogCaptureMode capture_mode) {
auto dict = std::make_unique<base::DictionaryValue>();
dict->SetInteger("stream_id", stream_id);
dict->SetInteger("parent_stream_id", parent_stream_id);
dict->SetInteger("weight", weight);
dict->SetBoolean("exclusive", exclusive);
return std::move(dict);
}
// Helper function to return the total size of an array of objects
// with .size() member functions.
template <typename T, size_t N>
size_t GetTotalSize(const T (&arr)[N]) {
size_t total_size = 0;
for (size_t i = 0; i < N; ++i) {
total_size += arr[i].size();
}
return total_size;
}
// Helper class for std::find_if on an STL container containing
// SpdyStreamRequest weak pointers.
class RequestEquals {
public:
explicit RequestEquals(const base::WeakPtr<SpdyStreamRequest>& request)
: request_(request) {}
bool operator()(const base::WeakPtr<SpdyStreamRequest>& request) const {
return request_.get() == request.get();
}
private:
const base::WeakPtr<SpdyStreamRequest> request_;
};
// The maximum number of concurrent streams we will ever create. Even if
// the server permits more, we will never exceed this limit.
const size_t kMaxConcurrentStreamLimit = 256;
class SpdyServerPushHelper : public ServerPushDelegate::ServerPushHelper {
public:
explicit SpdyServerPushHelper(base::WeakPtr<SpdySession> session,
const GURL& url)
: session_(session), request_url_(url) {}
void Cancel() override {
if (session_)
session_->CancelPush(request_url_);
}
const GURL& GetURL() const override { return request_url_; }
private:
base::WeakPtr<SpdySession> session_;
const GURL request_url_;
};
} // namespace
SpdyProtocolErrorDetails MapFramerErrorToProtocolError(
http2::Http2DecoderAdapter::SpdyFramerError err) {
switch (err) {
case http2::Http2DecoderAdapter::SPDY_NO_ERROR:
return SPDY_ERROR_NO_ERROR;
case http2::Http2DecoderAdapter::SPDY_INVALID_STREAM_ID:
return SPDY_ERROR_INVALID_STREAM_ID;
case http2::Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME:
return SPDY_ERROR_INVALID_CONTROL_FRAME;
case http2::Http2DecoderAdapter::SPDY_CONTROL_PAYLOAD_TOO_LARGE:
return SPDY_ERROR_CONTROL_PAYLOAD_TOO_LARGE;
case http2::Http2DecoderAdapter::SPDY_ZLIB_INIT_FAILURE:
return SPDY_ERROR_ZLIB_INIT_FAILURE;
case http2::Http2DecoderAdapter::SPDY_UNSUPPORTED_VERSION:
return SPDY_ERROR_UNSUPPORTED_VERSION;
case http2::Http2DecoderAdapter::SPDY_DECOMPRESS_FAILURE:
return SPDY_ERROR_DECOMPRESS_FAILURE;
case http2::Http2DecoderAdapter::SPDY_COMPRESS_FAILURE:
return SPDY_ERROR_COMPRESS_FAILURE;
case http2::Http2DecoderAdapter::SPDY_GOAWAY_FRAME_CORRUPT:
return SPDY_ERROR_GOAWAY_FRAME_CORRUPT;
case http2::Http2DecoderAdapter::SPDY_RST_STREAM_FRAME_CORRUPT:
return SPDY_ERROR_RST_STREAM_FRAME_CORRUPT;
case http2::Http2DecoderAdapter::SPDY_INVALID_PADDING:
return SPDY_ERROR_INVALID_PADDING;
case http2::Http2DecoderAdapter::SPDY_INVALID_DATA_FRAME_FLAGS:
return SPDY_ERROR_INVALID_DATA_FRAME_FLAGS;
case http2::Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME_FLAGS:
return SPDY_ERROR_INVALID_CONTROL_FRAME_FLAGS;
case http2::Http2DecoderAdapter::SPDY_UNEXPECTED_FRAME:
return SPDY_ERROR_UNEXPECTED_FRAME;
case http2::Http2DecoderAdapter::SPDY_INTERNAL_FRAMER_ERROR:
return SPDY_ERROR_INTERNAL_FRAMER_ERROR;
case http2::Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME_SIZE:
return SPDY_ERROR_INVALID_CONTROL_FRAME_SIZE;
case http2::Http2DecoderAdapter::SPDY_OVERSIZED_PAYLOAD:
return SPDY_ERROR_OVERSIZED_PAYLOAD;
case http2::Http2DecoderAdapter::LAST_ERROR:
NOTREACHED();
}
NOTREACHED();
return static_cast<SpdyProtocolErrorDetails>(-1);
}
Error MapFramerErrorToNetError(
http2::Http2DecoderAdapter::SpdyFramerError err) {
switch (err) {
case http2::Http2DecoderAdapter::SPDY_NO_ERROR:
return OK;
case http2::Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME:
return ERR_SPDY_PROTOCOL_ERROR;
case http2::Http2DecoderAdapter::SPDY_CONTROL_PAYLOAD_TOO_LARGE:
return ERR_SPDY_FRAME_SIZE_ERROR;
case http2::Http2DecoderAdapter::SPDY_ZLIB_INIT_FAILURE:
return ERR_SPDY_COMPRESSION_ERROR;
case http2::Http2DecoderAdapter::SPDY_UNSUPPORTED_VERSION:
return ERR_SPDY_PROTOCOL_ERROR;
case http2::Http2DecoderAdapter::SPDY_DECOMPRESS_FAILURE:
return ERR_SPDY_COMPRESSION_ERROR;
case http2::Http2DecoderAdapter::SPDY_COMPRESS_FAILURE:
return ERR_SPDY_COMPRESSION_ERROR;
case http2::Http2DecoderAdapter::SPDY_GOAWAY_FRAME_CORRUPT:
return ERR_SPDY_PROTOCOL_ERROR;
case http2::Http2DecoderAdapter::SPDY_RST_STREAM_FRAME_CORRUPT:
return ERR_SPDY_PROTOCOL_ERROR;
case http2::Http2DecoderAdapter::SPDY_INVALID_PADDING:
return ERR_SPDY_PROTOCOL_ERROR;
case http2::Http2DecoderAdapter::SPDY_INVALID_DATA_FRAME_FLAGS:
return ERR_SPDY_PROTOCOL_ERROR;
case http2::Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME_FLAGS:
return ERR_SPDY_PROTOCOL_ERROR;
case http2::Http2DecoderAdapter::SPDY_UNEXPECTED_FRAME:
return ERR_SPDY_PROTOCOL_ERROR;
case http2::Http2DecoderAdapter::SPDY_INTERNAL_FRAMER_ERROR:
return ERR_SPDY_PROTOCOL_ERROR;
case http2::Http2DecoderAdapter::SPDY_INVALID_CONTROL_FRAME_SIZE:
return ERR_SPDY_FRAME_SIZE_ERROR;
case http2::Http2DecoderAdapter::SPDY_INVALID_STREAM_ID:
return ERR_SPDY_PROTOCOL_ERROR;
case http2::Http2DecoderAdapter::SPDY_OVERSIZED_PAYLOAD:
return ERR_SPDY_FRAME_SIZE_ERROR;
case http2::Http2DecoderAdapter::LAST_ERROR:
NOTREACHED();
}
NOTREACHED();
return ERR_SPDY_PROTOCOL_ERROR;
}
SpdyProtocolErrorDetails MapRstStreamStatusToProtocolError(
spdy::SpdyErrorCode error_code) {
switch (error_code) {
case spdy::ERROR_CODE_NO_ERROR:
return STATUS_CODE_NO_ERROR;
case spdy::ERROR_CODE_PROTOCOL_ERROR:
return STATUS_CODE_PROTOCOL_ERROR;
case spdy::ERROR_CODE_INTERNAL_ERROR:
return STATUS_CODE_INTERNAL_ERROR;
case spdy::ERROR_CODE_FLOW_CONTROL_ERROR:
return STATUS_CODE_FLOW_CONTROL_ERROR;
case spdy::ERROR_CODE_SETTINGS_TIMEOUT:
return STATUS_CODE_SETTINGS_TIMEOUT;
case spdy::ERROR_CODE_STREAM_CLOSED:
return STATUS_CODE_STREAM_CLOSED;
case spdy::ERROR_CODE_FRAME_SIZE_ERROR:
return STATUS_CODE_FRAME_SIZE_ERROR;
case spdy::ERROR_CODE_REFUSED_STREAM:
return STATUS_CODE_REFUSED_STREAM;
case spdy::ERROR_CODE_CANCEL:
return STATUS_CODE_CANCEL;
case spdy::ERROR_CODE_COMPRESSION_ERROR:
return STATUS_CODE_COMPRESSION_ERROR;
case spdy::ERROR_CODE_CONNECT_ERROR:
return STATUS_CODE_CONNECT_ERROR;
case spdy::ERROR_CODE_ENHANCE_YOUR_CALM:
return STATUS_CODE_ENHANCE_YOUR_CALM;
case spdy::ERROR_CODE_INADEQUATE_SECURITY:
return STATUS_CODE_INADEQUATE_SECURITY;
case spdy::ERROR_CODE_HTTP_1_1_REQUIRED:
return STATUS_CODE_HTTP_1_1_REQUIRED;
}
NOTREACHED();
return static_cast<SpdyProtocolErrorDetails>(-1);
}
spdy::SpdyErrorCode MapNetErrorToGoAwayStatus(Error err) {
switch (err) {
case OK:
return spdy::ERROR_CODE_NO_ERROR;
case ERR_SPDY_PROTOCOL_ERROR:
return spdy::ERROR_CODE_PROTOCOL_ERROR;
case ERR_SPDY_FLOW_CONTROL_ERROR:
return spdy::ERROR_CODE_FLOW_CONTROL_ERROR;
case ERR_SPDY_FRAME_SIZE_ERROR:
return spdy::ERROR_CODE_FRAME_SIZE_ERROR;
case ERR_SPDY_COMPRESSION_ERROR:
return spdy::ERROR_CODE_COMPRESSION_ERROR;
case ERR_SPDY_INADEQUATE_TRANSPORT_SECURITY:
return spdy::ERROR_CODE_INADEQUATE_SECURITY;
default:
return spdy::ERROR_CODE_PROTOCOL_ERROR;
}
}
SpdyStreamRequest::SpdyStreamRequest() : weak_ptr_factory_(this) {
Reset();
}
SpdyStreamRequest::~SpdyStreamRequest() {
CancelRequest();
}
int SpdyStreamRequest::StartRequest(
SpdyStreamType type,
const base::WeakPtr<SpdySession>& session,
const GURL& url,
RequestPriority priority,
const SocketTag& socket_tag,
const NetLogWithSource& net_log,
CompletionOnceCallback callback,
const NetworkTrafficAnnotationTag& traffic_annotation) {
DCHECK(session);
DCHECK(!session_);
DCHECK(!stream_);
DCHECK(callback_.is_null());
DCHECK(url.is_valid()) << url.possibly_invalid_spec();
type_ = type;
session_ = session;
url_ = SimplifyUrlForRequest(url);
priority_ = priority;
socket_tag_ = socket_tag;
net_log_ = net_log;
callback_ = std::move(callback);
traffic_annotation_ = MutableNetworkTrafficAnnotationTag(traffic_annotation);
base::WeakPtr<SpdyStream> stream;
int rv = session->TryCreateStream(weak_ptr_factory_.GetWeakPtr(), &stream);
if (rv == OK) {
Reset();
stream_ = stream;
}
return rv;
}
void SpdyStreamRequest::CancelRequest() {
if (session_)
session_->CancelStreamRequest(weak_ptr_factory_.GetWeakPtr());
Reset();
// Do this to cancel any pending CompleteStreamRequest() tasks.
weak_ptr_factory_.InvalidateWeakPtrs();
}
base::WeakPtr<SpdyStream> SpdyStreamRequest::ReleaseStream() {
DCHECK(!session_);
base::WeakPtr<SpdyStream> stream = stream_;
DCHECK(stream);
Reset();
return stream;
}
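// Illustrative caller sketch (names and values are hypothetical, not part of
// this file): a typical client creates a SpdyStreamRequest, calls
// StartRequest(), and claims the stream either synchronously or from the
// completion callback.
//
//   SpdyStreamRequest request;
//   int rv = request.StartRequest(SPDY_REQUEST_RESPONSE_STREAM, session, url,
//                                 MEDIUM, SocketTag(), net_log,
//                                 std::move(callback), traffic_annotation);
//   if (rv == OK) {
//     base::WeakPtr<SpdyStream> stream = request.ReleaseStream();
//   } else if (rv == ERR_IO_PENDING) {
//     // |callback| runs later; on OK, claim the stream with ReleaseStream().
//   }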
size_t SpdyStreamRequest::EstimateMemoryUsage() const {
return base::trace_event::EstimateItemMemoryUsage(url_);
}
void SpdyStreamRequest::OnRequestCompleteSuccess(
const base::WeakPtr<SpdyStream>& stream) {
DCHECK(session_);
DCHECK(!stream_);
DCHECK(!callback_.is_null());
CompletionOnceCallback callback = std::move(callback_);
Reset();
DCHECK(stream);
stream_ = stream;
std::move(callback).Run(OK);
}
void SpdyStreamRequest::OnRequestCompleteFailure(int rv) {
DCHECK(session_);
DCHECK(!stream_);
DCHECK(!callback_.is_null());
CompletionOnceCallback callback = std::move(callback_);
Reset();
DCHECK_NE(rv, OK);
std::move(callback).Run(rv);
}
void SpdyStreamRequest::Reset() {
type_ = SPDY_BIDIRECTIONAL_STREAM;
session_.reset();
stream_.reset();
url_ = GURL();
priority_ = MINIMUM_PRIORITY;
socket_tag_ = SocketTag();
net_log_ = NetLogWithSource();
callback_.Reset();
traffic_annotation_.reset();
}
// static
bool SpdySession::CanPool(TransportSecurityState* transport_security_state,
const SSLInfo& ssl_info,
const SSLConfigService& ssl_config_service,
const std::string& old_hostname,
const std::string& new_hostname) {
// Pooling is prohibited if the server cert is not valid for the new domain,
// and for connections on which client certs were sent. It is also prohibited
// when a channel ID was sent and the hosts belong to different eTLD+1s.
if (IsCertStatusError(ssl_info.cert_status))
return false;
if (ssl_info.client_cert_sent &&
!(ssl_config_service.CanShareConnectionWithClientCerts(old_hostname) &&
ssl_config_service.CanShareConnectionWithClientCerts(new_hostname))) {
return false;
}
if (ssl_info.channel_id_sent &&
ChannelIDService::GetDomainForHost(new_hostname) !=
ChannelIDService::GetDomainForHost(old_hostname)) {
return false;
}
if (!ssl_info.cert->VerifyNameMatch(new_hostname))
return false;
std::string pinning_failure_log;
// DISABLE_PIN_REPORTS is set here because this check can fail in
// normal operation without being indicative of a misconfiguration or
// attack. Port is left at 0 as it is never used.
if (transport_security_state->CheckPublicKeyPins(
HostPortPair(new_hostname, 0), ssl_info.is_issued_by_known_root,
ssl_info.public_key_hashes, ssl_info.unverified_cert.get(),
ssl_info.cert.get(), TransportSecurityState::DISABLE_PIN_REPORTS,
&pinning_failure_log) ==
TransportSecurityState::PKPStatus::VIOLATED) {
return false;
}
// As with CheckPublicKeyPins above, disable Expect-CT reports.
switch (transport_security_state->CheckCTRequirements(
HostPortPair(new_hostname, 0), ssl_info.is_issued_by_known_root,
ssl_info.public_key_hashes, ssl_info.cert.get(),
ssl_info.unverified_cert.get(), ssl_info.signed_certificate_timestamps,
TransportSecurityState::DISABLE_EXPECT_CT_REPORTS,
ssl_info.ct_policy_compliance)) {
case TransportSecurityState::CT_REQUIREMENTS_NOT_MET:
return false;
case TransportSecurityState::CT_REQUIREMENTS_MET:
case TransportSecurityState::CT_NOT_REQUIRED:
// Intentional fallthrough; this case is just here to make sure that all
// possible values of CheckCTRequirements() are handled.
break;
}
return true;
}
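// Example (illustrative hostnames): a session established to
// "www.example.com" can be pooled for a request to "mail.example.com" only if
// the server certificate is also valid for "mail.example.com", no client
// certificate or mismatched channel ID was used on the connection, and
// "mail.example.com" passes the same pinning and Certificate Transparency
// checks performed above.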
SpdySession::SpdySession(
const SpdySessionKey& spdy_session_key,
HttpServerProperties* http_server_properties,
TransportSecurityState* transport_security_state,
SSLConfigService* ssl_config_service,
const quic::QuicTransportVersionVector& quic_supported_versions,
bool enable_sending_initial_data,
bool enable_ping_based_connection_checking,
bool support_ietf_format_quic_altsvc,
bool is_trusted_proxy,
size_t session_max_recv_window_size,
const spdy::SettingsMap& initial_settings,
const base::Optional<SpdySessionPool::GreasedHttp2Frame>&
greased_http2_frame,
TimeFunc time_func,
ServerPushDelegate* push_delegate,
NetLog* net_log)
: in_io_loop_(false),
spdy_session_key_(spdy_session_key),
pool_(NULL),
http_server_properties_(http_server_properties),
transport_security_state_(transport_security_state),
ssl_config_service_(ssl_config_service),
stream_hi_water_mark_(kFirstStreamId),
last_accepted_push_stream_id_(0),
push_delegate_(push_delegate),
num_pushed_streams_(0u),
num_active_pushed_streams_(0u),
bytes_pushed_count_(0u),
bytes_pushed_and_unclaimed_count_(0u),
in_flight_write_frame_type_(spdy::SpdyFrameType::DATA),
in_flight_write_frame_size_(0),
availability_state_(STATE_AVAILABLE),
read_state_(READ_STATE_DO_READ),
write_state_(WRITE_STATE_IDLE),
error_on_close_(OK),
initial_settings_(initial_settings),
greased_http2_frame_(greased_http2_frame),
max_concurrent_streams_(kInitialMaxConcurrentStreams),
max_concurrent_pushed_streams_(
initial_settings.at(spdy::SETTINGS_MAX_CONCURRENT_STREAMS)),
streams_initiated_count_(0),
streams_pushed_count_(0),
streams_pushed_and_claimed_count_(0),
streams_abandoned_count_(0),
ping_in_flight_(false),
next_ping_id_(1),
last_read_time_(time_func()),
last_compressed_frame_len_(0),
check_ping_status_pending_(false),
session_send_window_size_(0),
session_max_recv_window_size_(session_max_recv_window_size),
session_recv_window_size_(0),
session_unacked_recv_window_bytes_(0),
stream_initial_send_window_size_(kDefaultInitialWindowSize),
max_header_table_size_(
initial_settings.at(spdy::SETTINGS_HEADER_TABLE_SIZE)),
stream_max_recv_window_size_(
initial_settings.at(spdy::SETTINGS_INITIAL_WINDOW_SIZE)),
net_log_(
NetLogWithSource::Make(net_log, NetLogSourceType::HTTP2_SESSION)),
quic_supported_versions_(quic_supported_versions),
enable_sending_initial_data_(enable_sending_initial_data),
enable_ping_based_connection_checking_(
enable_ping_based_connection_checking),
support_ietf_format_quic_altsvc_(support_ietf_format_quic_altsvc),
is_trusted_proxy_(is_trusted_proxy),
enable_push_(IsPushEnabled(initial_settings)),
support_websocket_(false),
connection_at_risk_of_loss_time_(
base::TimeDelta::FromSeconds(kDefaultConnectionAtRiskOfLossSeconds)),
hung_interval_(base::TimeDelta::FromSeconds(kHungIntervalSeconds)),
time_func_(time_func),
weak_factory_(this) {
net_log_.BeginEvent(
NetLogEventType::HTTP2_SESSION,
base::Bind(&NetLogSpdySessionCallback, &host_port_proxy_pair()));
DCHECK(
base::ContainsKey(initial_settings_, spdy::SETTINGS_HEADER_TABLE_SIZE));
DCHECK(base::ContainsKey(initial_settings_,
spdy::SETTINGS_MAX_CONCURRENT_STREAMS));
DCHECK(
base::ContainsKey(initial_settings_, spdy::SETTINGS_INITIAL_WINDOW_SIZE));
if (greased_http2_frame_) {
// See https://tools.ietf.org/html/draft-bishop-httpbis-grease-00
// for reserved frame types.
DCHECK_EQ(0x0b, greased_http2_frame_.value().type % 0x1f);
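// Per that draft, reserved greased frame types have the form
// 0x0b + 0x1f * N, e.g. 0x0b, 0x2a and 0x49, which is what the DCHECK
// above verifies.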
}
// TODO(mbelshe): consider randomization of the stream_hi_water_mark.
}
SpdySession::~SpdySession() {
CHECK(!in_io_loop_);
DcheckDraining();
// TODO(akalin): Check connection->is_initialized() instead. This
// requires re-working CreateFakeSpdySession(), though.
DCHECK(connection_->socket());
// With SPDY we can't recycle sockets.
connection_->socket()->Disconnect();
RecordHistograms();
net_log_.EndEvent(NetLogEventType::HTTP2_SESSION);
}
int SpdySession::GetPushedStream(const GURL& url,
spdy::SpdyStreamId pushed_stream_id,
RequestPriority priority,
SpdyStream** stream) {
CHECK(!in_io_loop_);
// |pushed_stream_id| must be valid.
DCHECK_NE(pushed_stream_id, kNoPushedStreamFound);
// |pushed_stream_id| must already have been claimed.
DCHECK_NE(pushed_stream_id,
pool_->push_promise_index()->FindStream(url, this));
if (availability_state_ == STATE_DRAINING) {
return ERR_CONNECTION_CLOSED;
}
auto active_it = active_streams_.find(pushed_stream_id);
if (active_it == active_streams_.end()) {
// A previously claimed pushed stream might not be available, for example,
// if the server has reset it in the meanwhile.
return ERR_SPDY_PUSHED_STREAM_NOT_AVAILABLE;
}
net_log_.AddEvent(
NetLogEventType::HTTP2_STREAM_ADOPTED_PUSH_STREAM,
base::Bind(&NetLogSpdyAdoptedPushStreamCallback, pushed_stream_id, &url));
*stream = active_it->second;
DCHECK_LT(streams_pushed_and_claimed_count_, streams_pushed_count_);
streams_pushed_and_claimed_count_++;
// If the stream is still open, update its priority to that of the request.
if (!(*stream)->IsClosed()) {
(*stream)->SetPriority(priority);
}
return OK;
}
void SpdySession::CancelPush(const GURL& url) {
const spdy::SpdyStreamId stream_id =
pool_->push_promise_index()->FindStream(url, this);
if (stream_id == kNoPushedStreamFound)
return;
DCHECK(IsStreamActive(stream_id));
RecordSpdyPushedStreamFateHistogram(SpdyPushedStreamFate::kAlreadyInCache);
ResetStream(stream_id, ERR_ABORTED, "Cancelled push stream.");
}
void SpdySession::InitializeWithSocket(
std::unique_ptr<ClientSocketHandle> connection,
SpdySessionPool* pool) {
CHECK(!in_io_loop_);
DCHECK_EQ(availability_state_, STATE_AVAILABLE);
DCHECK_EQ(read_state_, READ_STATE_DO_READ);
DCHECK_EQ(write_state_, WRITE_STATE_IDLE);
DCHECK(!connection_);
// TODO(akalin): Check connection->is_initialized() instead. This
// requires re-working CreateFakeSpdySession(), though.
DCHECK(connection->socket());
connection_ = std::move(connection);
session_send_window_size_ = kDefaultInitialWindowSize;
session_recv_window_size_ = kDefaultInitialWindowSize;
auto it = initial_settings_.find(spdy::SETTINGS_MAX_HEADER_LIST_SIZE);
uint32_t spdy_max_header_list_size =
(it == initial_settings_.end()) ? kSpdyMaxHeaderListSize : it->second;
buffered_spdy_framer_ =
std::make_unique<BufferedSpdyFramer>(spdy_max_header_list_size, net_log_);
buffered_spdy_framer_->set_visitor(this);
buffered_spdy_framer_->set_debug_visitor(this);
buffered_spdy_framer_->UpdateHeaderDecoderTableSize(max_header_table_size_);
net_log_.AddEvent(NetLogEventType::HTTP2_SESSION_INITIALIZED,
base::Bind(&NetLogSpdyInitializedCallback,
connection_->socket()->NetLog().source()));
DCHECK_EQ(availability_state_, STATE_AVAILABLE);
connection_->AddHigherLayeredPool(this);
if (enable_sending_initial_data_)
SendInitialData();
pool_ = pool;
// Bootstrap the read loop.
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&SpdySession::PumpReadLoop, weak_factory_.GetWeakPtr(),
READ_STATE_DO_READ, OK));
}
bool SpdySession::VerifyDomainAuthentication(const std::string& domain) const {
if (availability_state_ == STATE_DRAINING)
return false;
SSLInfo ssl_info;
if (!GetSSLInfo(&ssl_info))
return true; // This is not a secure session, so all domains are okay.
return CanPool(transport_security_state_, ssl_info, *ssl_config_service_,
host_port_pair().host(), domain);
}
void SpdySession::EnqueueStreamWrite(
const base::WeakPtr<SpdyStream>& stream,
spdy::SpdyFrameType frame_type,
std::unique_ptr<SpdyBufferProducer> producer) {
DCHECK(frame_type == spdy::SpdyFrameType::HEADERS ||
frame_type == spdy::SpdyFrameType::DATA);
EnqueueWrite(stream->priority(), frame_type, std::move(producer), stream,
stream->traffic_annotation());
}
std::unique_ptr<spdy::SpdySerializedFrame> SpdySession::CreateHeaders(
spdy::SpdyStreamId stream_id,
RequestPriority priority,
spdy::SpdyControlFlags flags,
spdy::SpdyHeaderBlock block,
NetLogSource source_dependency) {
ActiveStreamMap::const_iterator it = active_streams_.find(stream_id);
CHECK(it != active_streams_.end());
CHECK_EQ(it->second->stream_id(), stream_id);
MaybeSendPrefacePing();
DCHECK(buffered_spdy_framer_.get());
spdy::SpdyPriority spdy_priority =
ConvertRequestPriorityToSpdyPriority(priority);
bool has_priority = true;
int weight = 0;
spdy::SpdyStreamId parent_stream_id = 0;
bool exclusive = false;
priority_dependency_state_.OnStreamCreation(
stream_id, spdy_priority, &parent_stream_id, &weight, &exclusive);
if (net_log().IsCapturing()) {
net_log().AddEvent(
NetLogEventType::HTTP2_SESSION_SEND_HEADERS,
base::Bind(&NetLogSpdyHeadersSentCallback, &block,
(flags & spdy::CONTROL_FLAG_FIN) != 0, stream_id,
has_priority, weight, parent_stream_id, exclusive,
source_dependency));
}
spdy::SpdyHeadersIR headers(stream_id, std::move(block));
headers.set_has_priority(has_priority);
headers.set_weight(weight);
headers.set_parent_stream_id(parent_stream_id);
headers.set_exclusive(exclusive);
headers.set_fin((flags & spdy::CONTROL_FLAG_FIN) != 0);
streams_initiated_count_++;
return std::make_unique<spdy::SpdySerializedFrame>(
buffered_spdy_framer_->SerializeFrame(headers));
}
std::unique_ptr<SpdyBuffer> SpdySession::CreateDataBuffer(
spdy::SpdyStreamId stream_id,
IOBuffer* data,
int len,
spdy::SpdyDataFlags flags) {
if (availability_state_ == STATE_DRAINING) {
return std::unique_ptr<SpdyBuffer>();
}
ActiveStreamMap::const_iterator it = active_streams_.find(stream_id);
CHECK(it != active_streams_.end());
SpdyStream* stream = it->second;
CHECK_EQ(stream->stream_id(), stream_id);
if (len < 0) {
NOTREACHED();
return std::unique_ptr<SpdyBuffer>();
}
int effective_len = std::min(len, kMaxSpdyFrameChunkSize);
bool send_stalled_by_stream = (stream->send_window_size() <= 0);
bool send_stalled_by_session = IsSendStalled();
// NOTE: There's an enum of the same name in histograms.xml.
enum SpdyFrameFlowControlState {
SEND_NOT_STALLED,
SEND_STALLED_BY_STREAM,
SEND_STALLED_BY_SESSION,
SEND_STALLED_BY_STREAM_AND_SESSION,
};
SpdyFrameFlowControlState frame_flow_control_state = SEND_NOT_STALLED;
if (send_stalled_by_stream) {
if (send_stalled_by_session) {
frame_flow_control_state = SEND_STALLED_BY_STREAM_AND_SESSION;
} else {
frame_flow_control_state = SEND_STALLED_BY_STREAM;
}
} else if (send_stalled_by_session) {
frame_flow_control_state = SEND_STALLED_BY_SESSION;
}
UMA_HISTOGRAM_ENUMERATION("Net.SpdyFrameStreamAndSessionFlowControlState",
frame_flow_control_state,
SEND_STALLED_BY_STREAM_AND_SESSION + 1);
// Obey send window size of the stream.
if (send_stalled_by_stream) {
stream->set_send_stalled_by_flow_control(true);
// Even though we're currently stalled only by the stream, we
// might end up being stalled by the session also.
QueueSendStalledStream(*stream);
net_log().AddEvent(
NetLogEventType::HTTP2_SESSION_STREAM_STALLED_BY_STREAM_SEND_WINDOW,
NetLog::IntCallback("stream_id", stream_id));
return std::unique_ptr<SpdyBuffer>();
}
effective_len = std::min(effective_len, stream->send_window_size());
// Obey send window size of the session.
if (send_stalled_by_session) {
stream->set_send_stalled_by_flow_control(true);
QueueSendStalledStream(*stream);
net_log().AddEvent(
NetLogEventType::HTTP2_SESSION_STREAM_STALLED_BY_SESSION_SEND_WINDOW,
NetLog::IntCallback("stream_id", stream_id));
return std::unique_ptr<SpdyBuffer>();
}
effective_len = std::min(effective_len, session_send_window_size_);
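// Illustrative arithmetic (values hypothetical): with len = 100000 bytes, a
// stream send window of 10 and a session send window of 4, effective_len
// ends up as min(100000, kMaxSpdyFrameChunkSize, 10, 4) = 4, and the FIN
// flag below is cleared because only part of |data| fits in this frame.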
DCHECK_GE(effective_len, 0);
// Clear FIN flag if only some of the data will be in the data
// frame.
if (effective_len < len)
flags = static_cast<spdy::SpdyDataFlags>(flags & ~spdy::DATA_FLAG_FIN);
if (net_log().IsCapturing()) {
net_log().AddEvent(
NetLogEventType::HTTP2_SESSION_SEND_DATA,
base::Bind(&NetLogSpdyDataCallback, stream_id, effective_len,
(flags & spdy::DATA_FLAG_FIN) != 0));
}
// Send PrefacePing for DATA_FRAMEs with nonzero payload size.
if (effective_len > 0)
MaybeSendPrefacePing();
// TODO(mbelshe): reduce memory copies here.
DCHECK(buffered_spdy_framer_.get());
std::unique_ptr<spdy::SpdySerializedFrame> frame(
buffered_spdy_framer_->CreateDataFrame(
stream_id, data->data(), static_cast<uint32_t>(effective_len),
flags));
auto data_buffer = std::make_unique<SpdyBuffer>(std::move(frame));
// Send window size is based on payload size, so nothing to do if this is
// just a FIN with no payload.
if (effective_len != 0) {
DecreaseSendWindowSize(static_cast<int32_t>(effective_len));
data_buffer->AddConsumeCallback(base::Bind(
&SpdySession::OnWriteBufferConsumed, weak_factory_.GetWeakPtr(),
static_cast<size_t>(effective_len)));
}
return data_buffer;
}
void SpdySession::UpdateStreamPriority(SpdyStream* stream,
RequestPriority old_priority,
RequestPriority new_priority) {
// There might be write frames enqueued for |stream| regardless of whether it
// is active (stream_id != 0) or inactive (no HEADERS frame has been sent out
// yet and stream_id == 0).
write_queue_.ChangePriorityOfWritesForStream(stream, old_priority,
new_priority);
// PRIORITY frames only need to be sent if |stream| is active.
const spdy::SpdyStreamId stream_id = stream->stream_id();
if (stream_id == 0)
return;
DCHECK(IsStreamActive(stream_id));
auto updates = priority_dependency_state_.OnStreamUpdate(
stream_id, ConvertRequestPriorityToSpdyPriority(new_priority));
for (auto u : updates) {
DCHECK(IsStreamActive(u.id));
EnqueuePriorityFrame(u.id, u.parent_stream_id, u.weight, u.exclusive);
}
}
void SpdySession::CloseActiveStream(spdy::SpdyStreamId stream_id, int status) {
DCHECK_NE(stream_id, 0u);
auto it = active_streams_.find(stream_id);
if (it == active_streams_.end()) {
NOTREACHED();
return;
}
CloseActiveStreamIterator(it, status);
}
void SpdySession::CloseCreatedStream(const base::WeakPtr<SpdyStream>& stream,
int status) {
DCHECK_EQ(stream->stream_id(), 0u);
auto it = created_streams_.find(stream.get());
if (it == created_streams_.end()) {
NOTREACHED();
return;
}
CloseCreatedStreamIterator(it, status);
}
void SpdySession::ResetStream(spdy::SpdyStreamId stream_id,
int error,
const std::string& description) {
DCHECK_NE(stream_id, 0u);
auto it = active_streams_.find(stream_id);
if (it == active_streams_.end()) {
NOTREACHED();
return;
}
ResetStreamIterator(it, error, description);
}
bool SpdySession::IsStreamActive(spdy::SpdyStreamId stream_id) const {
return base::ContainsKey(active_streams_, stream_id);
}
LoadState SpdySession::GetLoadState() const {
// Just report that we're idle since the session could be doing
// many things concurrently.
return LOAD_STATE_IDLE;
}
bool SpdySession::GetRemoteEndpoint(IPEndPoint* endpoint) {
return GetPeerAddress(endpoint) == OK;
}
bool SpdySession::GetSSLInfo(SSLInfo* ssl_info) const {
return connection_->socket()->GetSSLInfo(ssl_info);
}
bool SpdySession::WasAlpnNegotiated() const {
return connection_->socket()->WasAlpnNegotiated();
}
NextProto SpdySession::GetNegotiatedProtocol() const {
return connection_->socket()->GetNegotiatedProtocol();
}
void SpdySession::SendStreamWindowUpdate(spdy::SpdyStreamId stream_id,
uint32_t delta_window_size) {
ActiveStreamMap::const_iterator it = active_streams_.find(stream_id);
CHECK(it != active_streams_.end());
CHECK_EQ(it->second->stream_id(), stream_id);
SendWindowUpdateFrame(stream_id, delta_window_size, it->second->priority());
}
void SpdySession::CloseSessionOnError(Error err,
const std::string& description) {
DCHECK_LT(err, ERR_IO_PENDING);
DoDrainSession(err, description);
}
void SpdySession::MakeUnavailable() {
if (availability_state_ == STATE_AVAILABLE) {
availability_state_ = STATE_GOING_AWAY;
pool_->MakeSessionUnavailable(GetWeakPtr());
}
}
void SpdySession::StartGoingAway(spdy::SpdyStreamId last_good_stream_id,
Error status) {
DCHECK_GE(availability_state_, STATE_GOING_AWAY);
// The loops below are carefully written to avoid reentrancy problems.
while (true) {
size_t old_size = GetTotalSize(pending_create_stream_queues_);
base::WeakPtr<SpdyStreamRequest> pending_request =
GetNextPendingStreamRequest();
if (!pending_request)
break;
// No new stream requests should be added while the session is
// going away.
DCHECK_GT(old_size, GetTotalSize(pending_create_stream_queues_));
pending_request->OnRequestCompleteFailure(ERR_ABORTED);
}
while (true) {
size_t old_size = active_streams_.size();
auto it = active_streams_.lower_bound(last_good_stream_id + 1);
if (it == active_streams_.end())
break;
LogAbandonedActiveStream(it, status);
CloseActiveStreamIterator(it, status);
// No new streams should be activated while the session is going
// away.
DCHECK_GT(old_size, active_streams_.size());
}
while (!created_streams_.empty()) {
size_t old_size = created_streams_.size();
auto it = created_streams_.begin();
LogAbandonedStream(*it, status);
CloseCreatedStreamIterator(it, status);
// No new streams should be created while the session is going
// away.
DCHECK_GT(old_size, created_streams_.size());
}
write_queue_.RemovePendingWritesForStreamsAfter(last_good_stream_id);
DcheckGoingAway();
MaybeFinishGoingAway();
}
void SpdySession::MaybeFinishGoingAway() {
if (active_streams_.empty() && created_streams_.empty() &&
availability_state_ == STATE_GOING_AWAY) {
DoDrainSession(OK, "Finished going away");
}
}
std::unique_ptr<base::Value> SpdySession::GetInfoAsValue() const {
auto dict = std::make_unique<base::DictionaryValue>();
dict->SetInteger("source_id", net_log_.source().id);
dict->SetString("host_port_pair", host_port_pair().ToString());
if (!pooled_aliases_.empty()) {
auto alias_list = std::make_unique<base::ListValue>();
for (const auto& alias : pooled_aliases_) {
alias_list->AppendString(alias.host_port_pair().ToString());
}
dict->Set("aliases", std::move(alias_list));
}
dict->SetString("proxy", host_port_proxy_pair().second.ToURI());
dict->SetInteger("active_streams", active_streams_.size());
dict->SetInteger("unclaimed_pushed_streams",
pool_->push_promise_index()->CountStreamsForSession(this));
dict->SetString(
"negotiated_protocol",
NextProtoToString(connection_->socket()->GetNegotiatedProtocol()));
dict->SetInteger("error", error_on_close_);
dict->SetInteger("max_concurrent_streams", max_concurrent_streams_);
dict->SetInteger("streams_initiated_count", streams_initiated_count_);
dict->SetInteger("streams_pushed_count", streams_pushed_count_);
dict->SetInteger("streams_pushed_and_claimed_count",
streams_pushed_and_claimed_count_);
dict->SetInteger("streams_abandoned_count", streams_abandoned_count_);
DCHECK(buffered_spdy_framer_.get());
dict->SetInteger("frames_received", buffered_spdy_framer_->frames_received());
dict->SetInteger("send_window_size", session_send_window_size_);
dict->SetInteger("recv_window_size", session_recv_window_size_);
dict->SetInteger("unacked_recv_window_bytes",
session_unacked_recv_window_bytes_);
return std::move(dict);
}
bool SpdySession::IsReused() const {
return buffered_spdy_framer_->frames_received() > 0 ||
connection_->reuse_type() == ClientSocketHandle::UNUSED_IDLE;
}
bool SpdySession::GetLoadTimingInfo(spdy::SpdyStreamId stream_id,
LoadTimingInfo* load_timing_info) const {
return connection_->GetLoadTimingInfo(stream_id != kFirstStreamId,
load_timing_info);
}
int SpdySession::GetPeerAddress(IPEndPoint* address) const {
if (connection_->socket())
return connection_->socket()->GetPeerAddress(address);
return ERR_SOCKET_NOT_CONNECTED;
}
int SpdySession::GetLocalAddress(IPEndPoint* address) const {
if (connection_->socket())
return connection_->socket()->GetLocalAddress(address);
return ERR_SOCKET_NOT_CONNECTED;
}
void SpdySession::AddPooledAlias(const SpdySessionKey& alias_key) {
pooled_aliases_.insert(alias_key);
}
void SpdySession::RemovePooledAlias(const SpdySessionKey& alias_key) {
pooled_aliases_.erase(alias_key);
}
bool SpdySession::HasAcceptableTransportSecurity() const {
SSLInfo ssl_info;
CHECK(GetSSLInfo(&ssl_info));
// HTTP/2 requires TLS 1.2+
if (SSLConnectionStatusToVersion(ssl_info.connection_status) <
SSL_CONNECTION_VERSION_TLS1_2) {
return false;
}
if (!IsTLSCipherSuiteAllowedByHTTP2(
SSLConnectionStatusToCipherSuite(ssl_info.connection_status))) {
return false;
}
return true;
}
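// Example (illustrative): a connection negotiated with TLS 1.3 and an AEAD
// cipher suite is acceptable, while TLS 1.1, or TLS 1.2 with a cipher suite
// that HTTP/2 disallows (see RFC 7540 Section 9.2.2), is rejected here.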
base::WeakPtr<SpdySession> SpdySession::GetWeakPtr() {
return weak_factory_.GetWeakPtr();
}
bool SpdySession::CloseOneIdleConnection() {
CHECK(!in_io_loop_);
DCHECK(pool_);
if (active_streams_.empty()) {
DoDrainSession(ERR_CONNECTION_CLOSED, "Closing idle connection.");
}
// Return false as the socket wasn't immediately closed.
return false;
}
bool SpdySession::ValidatePushedStream(spdy::SpdyStreamId stream_id,
const GURL& url,
const HttpRequestInfo& request_info,
const SpdySessionKey& key) const {
// Proxy server and privacy mode must match.
if (key.proxy_server() != spdy_session_key_.proxy_server() ||
key.privacy_mode() != spdy_session_key_.privacy_mode()) {
return false;
}
// Certificate must match for encrypted schemes only.
if (key != spdy_session_key_ && url.SchemeIsCryptographic() &&
!VerifyDomainAuthentication(key.host_port_pair().host())) {
return false;
}
auto stream_it = active_streams_.find(stream_id);
if (stream_it == active_streams_.end()) {
// Only active streams should be in Http2PushPromiseIndex.
NOTREACHED();
return false;
}
const spdy::SpdyHeaderBlock& request_headers =
stream_it->second->request_headers();
spdy::SpdyHeaderBlock::const_iterator method_it =
request_headers.find(spdy::kHttp2MethodHeader);
if (method_it == request_headers.end()) {
// TryCreatePushStream() would have reset the stream if it had no method.
NOTREACHED();
return false;
}
// Request method must match.
if (request_info.method != method_it->second) {
return false;
}
return true;
}
base::WeakPtr<SpdySession> SpdySession::GetWeakPtrToSession() {
return GetWeakPtr();
}
size_t SpdySession::DumpMemoryStats(StreamSocket::SocketMemoryStats* stats,
bool* is_session_active) const {
// TODO(xunjieli): Include |pending_create_stream_queues_| when WeakPtr is
// supported in memory_usage_estimator.h.
*is_session_active = is_active();
connection_->DumpMemoryStats(stats);
// |connection_| is estimated in stats->total_size. |read_buffer_| is
// estimated in |read_buffer_size|. TODO(xunjieli): Make them use EMU().
size_t read_buffer_size = read_buffer_ ? kReadBufferSize : 0;
return stats->total_size + read_buffer_size +
base::trace_event::EstimateMemoryUsage(spdy_session_key_) +
base::trace_event::EstimateMemoryUsage(pooled_aliases_) +
base::trace_event::EstimateMemoryUsage(active_streams_) +
base::trace_event::EstimateMemoryUsage(created_streams_) +
base::trace_event::EstimateMemoryUsage(write_queue_) +
base::trace_event::EstimateMemoryUsage(in_flight_write_) +
base::trace_event::EstimateMemoryUsage(buffered_spdy_framer_) +
base::trace_event::EstimateMemoryUsage(initial_settings_) +
base::trace_event::EstimateMemoryUsage(stream_send_unstall_queue_) +
base::trace_event::EstimateMemoryUsage(priority_dependency_state_);
}
bool SpdySession::ChangeSocketTag(const SocketTag& new_tag) {
if (!IsAvailable() || !connection_->socket())
return false;
// Changing the tag on the underlying socket will affect all streams,
// so only allow changing the tag when there are no active streams.
if (is_active())
return false;
connection_->socket()->ApplySocketTag(new_tag);
SpdySessionKey new_key(spdy_session_key_.host_port_pair(),
spdy_session_key_.proxy_server(),
spdy_session_key_.privacy_mode(), new_tag);
spdy_session_key_ = new_key;
return true;
}
// static
void SpdySession::RecordSpdyPushedStreamFateHistogram(
SpdyPushedStreamFate value) {
UMA_HISTOGRAM_ENUMERATION("Net.SpdyPushedStreamFate", value);
}
// {,Try}CreateStream() can be called with |in_io_loop_| set if a stream is
// being created in response to another being closed due to received data.
int SpdySession::TryCreateStream(
const base::WeakPtr<SpdyStreamRequest>& request,
base::WeakPtr<SpdyStream>* stream) {
DCHECK(request);
if (availability_state_ == STATE_GOING_AWAY)
return ERR_FAILED;
if (availability_state_ == STATE_DRAINING)
return ERR_CONNECTION_CLOSED;
// Fail if ChangeSocketTag() has been called.
if (request->socket_tag_ != spdy_session_key_.socket_tag())
return ERR_FAILED;
if ((active_streams_.size() + created_streams_.size() - num_pushed_streams_ <
max_concurrent_streams_)) {
return CreateStream(*request, stream);
}
if (net_log().IsCapturing()) {
net_log().AddEvent(
NetLogEventType::HTTP2_SESSION_STALLED_MAX_STREAMS,
base::Bind(&NetLogSpdySessionStalledCallback, active_streams_.size(),
created_streams_.size(), num_pushed_streams_,
max_concurrent_streams_, request->url().spec()));
}
RequestPriority priority = request->priority();
CHECK_GE(priority, MINIMUM_PRIORITY);
CHECK_LE(priority, MAXIMUM_PRIORITY);
pending_create_stream_queues_[priority].push_back(request);
return ERR_IO_PENDING;
}
int SpdySession::CreateStream(const SpdyStreamRequest& request,
base::WeakPtr<SpdyStream>* stream) {
DCHECK_GE(request.priority(), MINIMUM_PRIORITY);
DCHECK_LE(request.priority(), MAXIMUM_PRIORITY);
if (availability_state_ == STATE_GOING_AWAY)
return ERR_FAILED;
if (availability_state_ == STATE_DRAINING)
return ERR_CONNECTION_CLOSED;
DCHECK(connection_->socket());
UMA_HISTOGRAM_BOOLEAN("Net.SpdySession.CreateStreamWithSocketConnected",
connection_->socket()->IsConnected());
if (!connection_->socket()->IsConnected()) {
DoDrainSession(
ERR_CONNECTION_CLOSED,
"Tried to create SPDY stream for a closed socket connection.");
return ERR_CONNECTION_CLOSED;
}
auto new_stream = std::make_unique<SpdyStream>(
request.type(), GetWeakPtr(), request.url(), request.priority(),
stream_initial_send_window_size_, stream_max_recv_window_size_,
request.net_log(), request.traffic_annotation());
*stream = new_stream->GetWeakPtr();
InsertCreatedStream(std::move(new_stream));
return OK;
}
void SpdySession::CancelStreamRequest(
const base::WeakPtr<SpdyStreamRequest>& request) {
DCHECK(request);
RequestPriority priority = request->priority();
CHECK_GE(priority, MINIMUM_PRIORITY);
CHECK_LE(priority, MAXIMUM_PRIORITY);
#if DCHECK_IS_ON()
// |request| should not be in a queue not matching its priority.
for (int i = MINIMUM_PRIORITY; i <= MAXIMUM_PRIORITY; ++i) {
if (priority == i)
continue;
PendingStreamRequestQueue* queue = &pending_create_stream_queues_[i];
DCHECK(std::find_if(queue->begin(), queue->end(), RequestEquals(request)) ==
queue->end());
}
#endif
PendingStreamRequestQueue* queue = &pending_create_stream_queues_[priority];
// Remove |request| from |queue| while preserving the order of the
// other elements.
PendingStreamRequestQueue::iterator it =
std::find_if(queue->begin(), queue->end(), RequestEquals(request));
// The request may already be removed if there's a
// CompleteStreamRequest() in flight.
if (it != queue->end()) {
it = queue->erase(it);
// |request| should be in the queue at most once, and if it is
// present, should not be pending completion.
DCHECK(std::find_if(it, queue->end(), RequestEquals(request)) ==
queue->end());
}
}
base::WeakPtr<SpdyStreamRequest> SpdySession::GetNextPendingStreamRequest() {
for (int j = MAXIMUM_PRIORITY; j >= MINIMUM_PRIORITY; --j) {
if (pending_create_stream_queues_[j].empty())
continue;
base::WeakPtr<SpdyStreamRequest> pending_request =
pending_create_stream_queues_[j].front();
DCHECK(pending_request);
pending_create_stream_queues_[j].pop_front();
return pending_request;
}
return base::WeakPtr<SpdyStreamRequest>();
}
void SpdySession::ProcessPendingStreamRequests() {
size_t max_requests_to_process =
max_concurrent_streams_ -
(active_streams_.size() + created_streams_.size());
for (size_t i = 0; i < max_requests_to_process; ++i) {
base::WeakPtr<SpdyStreamRequest> pending_request =
GetNextPendingStreamRequest();
if (!pending_request)
break;
// Note that this post can race with other stream creations, and it's
// possible that the un-stalled stream will be stalled again if it loses.
// TODO(jgraettinger): Provide stronger ordering guarantees.
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::Bind(&SpdySession::CompleteStreamRequest,
weak_factory_.GetWeakPtr(), pending_request));
}
}
void SpdySession::TryCreatePushStream(spdy::SpdyStreamId stream_id,
spdy::SpdyStreamId associated_stream_id,
spdy::SpdyHeaderBlock headers) {
// Pushed streams are speculative, so they start at an IDLE priority.
// TODO(bnc): Send pushed stream cancellation with higher priority to avoid
// wasting bandwidth.
const RequestPriority request_priority = IDLE;
if (!enable_push_) {
RecordSpdyPushedStreamFateHistogram(SpdyPushedStreamFate::kPushDisabled);
EnqueueResetStreamFrame(stream_id, request_priority,
spdy::ERROR_CODE_REFUSED_STREAM,
"Push is disabled.");
return;
}
if ((stream_id & 0x1) != 0) {
std::string description = base::StringPrintf(
"Received invalid pushed stream id %d (must be even) on stream id %d.",
stream_id, associated_stream_id);
LOG(WARNING) << description;
RecordSpdyPushedStreamFateHistogram(
SpdyPushedStreamFate::kPromisedStreamIdParityError);
CloseSessionOnError(ERR_SPDY_PROTOCOL_ERROR, description);
return;
}
if ((associated_stream_id & 0x1) != 1) {
std::string description = base::StringPrintf(
"Received pushed stream id %d on invalid stream id %d (must be odd).",
stream_id, associated_stream_id);
LOG(WARNING) << description;
RecordSpdyPushedStreamFateHistogram(
SpdyPushedStreamFate::kAssociatedStreamIdParityError);
CloseSessionOnError(ERR_SPDY_PROTOCOL_ERROR, description);
return;
}
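// Example (illustrative stream ids): a PUSH_PROMISE carried on
// client-initiated stream 5 that promises stream 6 passes both parity checks
// above; promising an odd id such as 7, or arriving on an even id such as 4,
// is treated as a protocol error and drains the session.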
if (stream_id <= last_accepted_push_stream_id_) {
std::string description = base::StringPrintf(
"Received pushed stream id %d must be larger than last accepted id %d.",
stream_id, last_accepted_push_stream_id_);
LOG(WARNING) << description;
RecordSpdyPushedStreamFateHistogram(
SpdyPushedStreamFate::kStreamIdOutOfOrder);
CloseSessionOnError(ERR_SPDY_PROTOCOL_ERROR, description);
return;
}
// |last_accepted_push_stream_id_| check above guarantees that this stream has
// not been activated yet.
DCHECK(!IsStreamActive(stream_id));
last_accepted_push_stream_id_ = stream_id;
if (availability_state_ == STATE_GOING_AWAY) {
RecordSpdyPushedStreamFateHistogram(SpdyPushedStreamFate::kGoingAway);
EnqueueResetStreamFrame(stream_id, request_priority,
spdy::ERROR_CODE_REFUSED_STREAM,
"Push stream request received while going away.");
return;
}
streams_pushed_count_++;
// Verify that the response had a URL for us.
GURL gurl(quic::SpdyUtils::GetPromisedUrlFromHeaders(headers));
if (!gurl.is_valid()) {
RecordSpdyPushedStreamFateHistogram(SpdyPushedStreamFate::kInvalidUrl);
EnqueueResetStreamFrame(stream_id, request_priority,
spdy::ERROR_CODE_REFUSED_STREAM,
"Invalid pushed request headers.");
return;
}
// GetPromisedUrlFromHeaders() guarantees that the scheme is http or https.
DCHECK(gurl.SchemeIs(url::kHttpScheme) || gurl.SchemeIs(url::kHttpsScheme));
// "Promised requests MUST be cacheable and MUST be safe [...]" (RFC7540
// Section 8.2). Only cacheable safe request methods are GET and HEAD.
// GetPromisedUrlFromHeaders() guarantees that the method is GET or HEAD.
spdy::SpdyHeaderBlock::const_iterator it =
headers.find(spdy::kHttp2MethodHeader);
DCHECK(it != headers.end() && (it->second == "GET" || it->second == "HEAD"));
// Verify we have a valid stream association.
auto associated_it = active_streams_.find(associated_stream_id);
if (associated_it == active_streams_.end()) {
RecordSpdyPushedStreamFateHistogram(
SpdyPushedStreamFate::kInactiveAssociatedStream);
EnqueueResetStreamFrame(stream_id, request_priority,
spdy::ERROR_CODE_STREAM_CLOSED,
"Inactive associated stream.");
return;
}
// Cross-origin push validation.
GURL associated_url(associated_it->second->url());
if (associated_url.GetOrigin() != gurl.GetOrigin()) {
if (is_trusted_proxy_) {
if (!gurl.SchemeIs(url::kHttpScheme)) {
RecordSpdyPushedStreamFateHistogram(
SpdyPushedStreamFate::kNonHttpSchemeFromTrustedProxy);
EnqueueResetStreamFrame(
stream_id, request_priority, spdy::ERROR_CODE_REFUSED_STREAM,
"Only http scheme allowed for cross origin push by trusted proxy.");
return;
}
} else {
if (!gurl.SchemeIs(url::kHttpsScheme)) {
RecordSpdyPushedStreamFateHistogram(
SpdyPushedStreamFate::kNonHttpsPushedScheme);
EnqueueResetStreamFrame(stream_id, request_priority,
spdy::ERROR_CODE_REFUSED_STREAM,
"Pushed URL must have https scheme.");
return;
}
if (!associated_url.SchemeIs(url::kHttpsScheme)) {
RecordSpdyPushedStreamFateHistogram(
SpdyPushedStreamFate::kNonHttpsAssociatedScheme);
EnqueueResetStreamFrame(stream_id, request_priority,
spdy::ERROR_CODE_REFUSED_STREAM,
"Associated URL must have https scheme.");
return;
}
SSLInfo ssl_info;
CHECK(GetSSLInfo(&ssl_info));
if (!CanPool(transport_security_state_, ssl_info, *ssl_config_service_,
associated_url.host(), gurl.host())) {
RecordSpdyPushedStreamFateHistogram(
SpdyPushedStreamFate::kCertificateMismatch);
EnqueueResetStreamFrame(stream_id, request_priority,
spdy::ERROR_CODE_REFUSED_STREAM,
"Certificate does not match pushed URL.");
return;
}
}
}
// Insertion fails if there already is a pushed stream with the same URL.
if (!pool_->push_promise_index()->RegisterUnclaimedPushedStream(
gurl, stream_id, this)) {
RecordSpdyPushedStreamFateHistogram(SpdyPushedStreamFate::kDuplicateUrl);
EnqueueResetStreamFrame(stream_id, request_priority,
spdy::ERROR_CODE_REFUSED_STREAM,
"Duplicate pushed stream with url: " + gurl.spec());
return;
}
base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
base::Bind(&SpdySession::CancelPushedStreamIfUnclaimed, GetWeakPtr(),
stream_id),
base::TimeDelta::FromSeconds(kPushedStreamLifetimeSeconds));
net::NetworkTrafficAnnotationTag traffic_annotation =
net::DefineNetworkTrafficAnnotation("spdy_push_stream", R"(
semantics {
sender: "Spdy Session"
description:
"When a web server needs to push a response to a client, an "
"incoming stream is created to reply the client with pushed "
"message instead of a message from the network."
trigger:
"A request by a server to push a response to the client."
data: "None."
destination: OTHER
destination_other:
"This stream is not used for sending data."
}
policy {
cookies_allowed: NO
setting: "This feature cannot be disabled."
policy_exception_justification: "Essential for navigation."
}
)");
auto stream = std::make_unique<SpdyStream>(
SPDY_PUSH_STREAM, GetWeakPtr(), gurl, request_priority,
stream_initial_send_window_size_, stream_max_recv_window_size_, net_log_,
traffic_annotation);
stream->set_stream_id(stream_id);
// Convert RequestPriority to a spdy::SpdyPriority to send in a PRIORITY
// frame.
spdy::SpdyPriority spdy_priority =
ConvertRequestPriorityToSpdyPriority(request_priority);
spdy::SpdyStreamId dependency_id = 0;
int weight = 0;
bool exclusive = false;
priority_dependency_state_.OnStreamCreation(
stream_id, spdy_priority, &dependency_id, &weight, &exclusive);
EnqueuePriorityFrame(stream_id, dependency_id, weight, exclusive);
// PUSH_PROMISE arrives on associated stream.
associated_it->second->AddRawReceivedBytes(last_compressed_frame_len_);
last_compressed_frame_len_ = 0;
InsertActivatedStream(std::move(stream));
auto active_it = active_streams_.find(stream_id);
DCHECK(active_it != active_streams_.end());
// Notify the push_delegate that a push promise has been received.
if (push_delegate_) {
push_delegate_->OnPush(std::make_unique<SpdyServerPushHelper>(
weak_factory_.GetWeakPtr(), gurl),
net_log_);
}
active_it->second->OnPushPromiseHeadersReceived(std::move(headers),
std::move(gurl));
DCHECK(active_it->second->IsReservedRemote());
num_pushed_streams_++;
return;
}
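// Removes the stream from |active_streams_|, takes ownership of it, updates
// push-stream accounting if applicable, and deletes it with |status|. May
// also drain the whole session if this was the last stream and the socket
// pool is stalled.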
void SpdySession::CloseActiveStreamIterator(ActiveStreamMap::iterator it,
int status) {
// TODO(mbelshe): We should send a RST_STREAM control frame here
// so that the server can cancel a large send.
std::unique_ptr<SpdyStream> owned_stream(it->second);
active_streams_.erase(it);
priority_dependency_state_.OnStreamDestruction(owned_stream->stream_id());
// TODO(akalin): When SpdyStream was ref-counted (and
// |unclaimed_pushed_streams_| held scoped_refptr<SpdyStream>), this was only
// done when the status was not OK. That meant that pushed streams could
// still be claimed after they were closed. This is probably something that
// we still want to support, although server push is hardly used. Write tests
// for this and fix this. (See http://crbug.com/261712.)
if (owned_stream->type() == SPDY_PUSH_STREAM) {
if (pool_->push_promise_index()->UnregisterUnclaimedPushedStream(
owned_stream->url(), owned_stream->stream_id(), this)) {
bytes_pushed_and_unclaimed_count_ += owned_stream->recv_bytes();
}
bytes_pushed_count_ += owned_stream->recv_bytes();
num_pushed_streams_--;
if (!owned_stream->IsReservedRemote())
num_active_pushed_streams_--;
}
DeleteStream(std::move(owned_stream), status);
// If there are no active streams and the socket pool is stalled, close the
// session to free up a socket slot.
if (active_streams_.empty() && created_streams_.empty() &&
connection_->IsPoolStalled()) {
DoDrainSession(ERR_CONNECTION_CLOSED, "Closing idle connection.");
}
}
void SpdySession::CloseCreatedStreamIterator(CreatedStreamSet::iterator it,
int status) {
std::unique_ptr<SpdyStream> owned_stream(*it);
created_streams_.erase(it);
DeleteStream(std::move(owned_stream), status);
}
void SpdySession::ResetStreamIterator(ActiveStreamMap::iterator it,
int error,
const std::string& description) {
// Send the RST_STREAM frame first as CloseActiveStreamIterator()
// may close us.
spdy::SpdyErrorCode error_code = spdy::ERROR_CODE_PROTOCOL_ERROR;
if (error == ERR_FAILED) {
error_code = spdy::ERROR_CODE_INTERNAL_ERROR;
} else if (error == ERR_ABORTED ||
error == ERR_SPDY_PUSHED_RESPONSE_DOES_NOT_MATCH) {
error_code = spdy::ERROR_CODE_CANCEL;
} else if (error == ERR_SPDY_FLOW_CONTROL_ERROR) {
error_code = spdy::ERROR_CODE_FLOW_CONTROL_ERROR;
} else if (error == ERR_TIMED_OUT ||
error == ERR_SPDY_CLIENT_REFUSED_STREAM) {
error_code = spdy::ERROR_CODE_REFUSED_STREAM;
} else if (error == ERR_SPDY_STREAM_CLOSED) {
error_code = spdy::ERROR_CODE_STREAM_CLOSED;
}
spdy::SpdyStreamId stream_id = it->first;
RequestPriority priority = it->second->priority();
EnqueueResetStreamFrame(stream_id, priority, error_code, description);
// This removes any pending writes for the stream, except possibly for an
// in-flight one.
CloseActiveStreamIterator(it, error);
}
void SpdySession::EnqueueResetStreamFrame(spdy::SpdyStreamId stream_id,
RequestPriority priority,
spdy::SpdyErrorCode error_code,
const std::string& description) {
DCHECK_NE(stream_id, 0u);
net_log().AddEvent(NetLogEventType::HTTP2_SESSION_SEND_RST_STREAM,
base::Bind(&NetLogSpdySendRstStreamCallback, stream_id,
error_code, &description));
DCHECK(buffered_spdy_framer_.get());
std::unique_ptr<spdy::SpdySerializedFrame> rst_frame(
buffered_spdy_framer_->CreateRstStream(stream_id, error_code));
EnqueueSessionWrite(priority, spdy::SpdyFrameType::RST_STREAM,
std::move(rst_frame));
RecordProtocolErrorHistogram(MapRstStreamStatusToProtocolError(error_code));
}
void SpdySession::EnqueuePriorityFrame(spdy::SpdyStreamId stream_id,
spdy::SpdyStreamId dependency_id,
int weight,
bool exclusive) {
net_log().AddEvent(NetLogEventType::HTTP2_STREAM_SEND_PRIORITY,
base::Bind(&NetLogSpdyPriorityCallback, stream_id,
dependency_id, weight, exclusive));
DCHECK(buffered_spdy_framer_.get());
std::unique_ptr<spdy::SpdySerializedFrame> frame(
buffered_spdy_framer_->CreatePriority(stream_id, dependency_id, weight,
exclusive));
// PRIORITY frames describe sequenced updates to the tree, so they must
// be serialized. We do this by queueing all PRIORITY frames at HIGHEST
// priority.
EnqueueWrite(HIGHEST, spdy::SpdyFrameType::PRIORITY,
std::make_unique<SimpleBufferProducer>(
std::make_unique<SpdyBuffer>(std::move(frame))),
base::WeakPtr<SpdyStream>(),
kSpdySessionCommandsTrafficAnnotation);
}
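// The read path is a two-state loop: READ_STATE_DO_READ issues a socket read,
// and READ_STATE_DO_READ_COMPLETE feeds the received bytes to the framer. The
// loop voluntarily yields by posting itself as a task after
// kYieldAfterBytesRead bytes or kYieldAfterDurationMilliseconds, so that a
// fast peer cannot starve other work on this thread.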
void SpdySession::PumpReadLoop(ReadState expected_read_state, int result) {
CHECK(!in_io_loop_);
if (availability_state_ == STATE_DRAINING) {
return;
}
ignore_result(DoReadLoop(expected_read_state, result));
}
int SpdySession::DoReadLoop(ReadState expected_read_state, int result) {
CHECK(!in_io_loop_);
CHECK_EQ(read_state_, expected_read_state);
in_io_loop_ = true;
int bytes_read_without_yielding = 0;
const base::TimeTicks yield_after_time =
time_func_() +
base::TimeDelta::FromMilliseconds(kYieldAfterDurationMilliseconds);
// Loop until the session is draining, the read becomes blocked, or
// the read limit is exceeded.
while (true) {
switch (read_state_) {
case READ_STATE_DO_READ:
CHECK_EQ(result, OK);
result = DoRead();
break;
case READ_STATE_DO_READ_COMPLETE:
if (result > 0)
bytes_read_without_yielding += result;
result = DoReadComplete(result);
break;
default:
NOTREACHED() << "read_state_: " << read_state_;
break;
}
if (availability_state_ == STATE_DRAINING)
break;
if (result == ERR_IO_PENDING)
break;
if (read_state_ == READ_STATE_DO_READ &&
(bytes_read_without_yielding > kYieldAfterBytesRead ||
time_func_() > yield_after_time)) {
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&SpdySession::PumpReadLoop, weak_factory_.GetWeakPtr(),
READ_STATE_DO_READ, OK));
result = ERR_IO_PENDING;
break;
}
}
CHECK(in_io_loop_);
in_io_loop_ = false;
return result;
}
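// Issues a single socket read into |read_buffer_|. When the ReadIfReady
// experiment is enabled, ReadIfReady() is attempted first; if it would block,
// |read_buffer_| is released while waiting and re-allocated on the next pass.
// If ReadIfReady() is not implemented by the socket, this falls back to a
// regular Read() that holds the buffer until the read completes.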
int SpdySession::DoRead() {
DCHECK(!read_buffer_);
CHECK(in_io_loop_);
CHECK(connection_);
CHECK(connection_->socket());
read_state_ = READ_STATE_DO_READ_COMPLETE;
int rv = ERR_READ_IF_READY_NOT_IMPLEMENTED;
read_buffer_ = base::MakeRefCounted<IOBuffer>(kReadBufferSize);
if (base::FeatureList::IsEnabled(Socket::kReadIfReadyExperiment)) {
rv = connection_->socket()->ReadIfReady(
read_buffer_.get(), kReadBufferSize,
base::Bind(&SpdySession::PumpReadLoop, weak_factory_.GetWeakPtr(),
READ_STATE_DO_READ));
if (rv == ERR_IO_PENDING) {
read_buffer_ = nullptr;
read_state_ = READ_STATE_DO_READ;
return rv;
}
}
if (rv == ERR_READ_IF_READY_NOT_IMPLEMENTED) {
// Fall back to regular Read().
return connection_->socket()->Read(
read_buffer_.get(), kReadBufferSize,
base::Bind(&SpdySession::PumpReadLoop, weak_factory_.GetWeakPtr(),
READ_STATE_DO_READ_COMPLETE));
}
return rv;
}
int SpdySession::DoReadComplete(int result) {
DCHECK(read_buffer_);
CHECK(in_io_loop_);
// Parse a frame. For now this code requires that the frame fit into our
// buffer (kReadBufferSize).
// TODO(mbelshe): support arbitrarily large frames!
if (result == 0) {
DoDrainSession(ERR_CONNECTION_CLOSED, "Connection closed");
return ERR_CONNECTION_CLOSED;
}
if (result < 0) {
DoDrainSession(
static_cast<Error>(result),
base::StringPrintf("Error %d reading from socket.", -result));
return result;
}
CHECK_LE(result, kReadBufferSize);
last_read_time_ = time_func_();
DCHECK(buffered_spdy_framer_.get());
char* data = read_buffer_->data();
while (result > 0) {
uint32_t bytes_processed =
buffered_spdy_framer_->ProcessInput(data, result);
result -= bytes_processed;
data += bytes_processed;
if (availability_state_ == STATE_DRAINING) {
return ERR_CONNECTION_CLOSED;
}
DCHECK_EQ(buffered_spdy_framer_->spdy_framer_error(),
http2::Http2DecoderAdapter::SPDY_NO_ERROR);
}
read_buffer_ = nullptr;
read_state_ = READ_STATE_DO_READ;
return OK;
}
void SpdySession::PumpWriteLoop(WriteState expected_write_state, int result) {
CHECK(!in_io_loop_);
DCHECK_EQ(write_state_, expected_write_state);
DoWriteLoop(expected_write_state, result);
if (availability_state_ == STATE_DRAINING && !in_flight_write_ &&
write_queue_.IsEmpty()) {
pool_->RemoveUnavailableSession(GetWeakPtr()); // Destroys |this|.
return;
}
}
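// Posts a write-loop task only when the write loop is idle, so that at most
// one write loop is scheduled at a time.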
void SpdySession::MaybePostWriteLoop() {
if (write_state_ == WRITE_STATE_IDLE) {
CHECK(!in_flight_write_);
write_state_ = WRITE_STATE_DO_WRITE;
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE,
base::Bind(&SpdySession::PumpWriteLoop, weak_factory_.GetWeakPtr(),
WRITE_STATE_DO_WRITE, OK));
}
}
int SpdySession::DoWriteLoop(WriteState expected_write_state, int result) {
CHECK(!in_io_loop_);
DCHECK_NE(write_state_, WRITE_STATE_IDLE);
DCHECK_EQ(write_state_, expected_write_state);
in_io_loop_ = true;
// Loop until the session is closed or the write becomes blocked.
while (true) {
switch (write_state_) {
case WRITE_STATE_DO_WRITE:
DCHECK_EQ(result, OK);
result = DoWrite();
break;
case WRITE_STATE_DO_WRITE_COMPLETE:
result = DoWriteComplete(result);
break;
case WRITE_STATE_IDLE:
default:
NOTREACHED() << "write_state_: " << write_state_;
break;
}
if (write_state_ == WRITE_STATE_IDLE) {
DCHECK_EQ(result, ERR_IO_PENDING);
break;
}
if (result == ERR_IO_PENDING)
break;
}
CHECK(in_io_loop_);
in_io_loop_ = false;
return result;
}
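// Starts writing the next frame. If nothing is in flight, a frame is dequeued
// from |write_queue_|; a HEADERS frame additionally activates its stream here
// (assigning the stream id) so that stream ids are sent in increasing order.
// The produced buffer is stored in |in_flight_write_| so that partial socket
// writes can be resumed by DoWriteComplete().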
int SpdySession::DoWrite() {
CHECK(in_io_loop_);
DCHECK(buffered_spdy_framer_);
if (in_flight_write_) {
DCHECK_GT(in_flight_write_->GetRemainingSize(), 0u);
} else {
// Grab the next frame to send.
spdy::SpdyFrameType frame_type = spdy::SpdyFrameType::DATA;
std::unique_ptr<SpdyBufferProducer> producer;
base::WeakPtr<SpdyStream> stream;
if (!write_queue_.Dequeue(&frame_type, &producer, &stream,
&in_flight_write_traffic_annotation)) {
write_state_ = WRITE_STATE_IDLE;
return ERR_IO_PENDING;
}
if (stream.get())
CHECK(!stream->IsClosed());
// Activate the stream only when sending the HEADERS frame to
// guarantee monotonically-increasing stream IDs.
if (frame_type == spdy::SpdyFrameType::HEADERS) {
CHECK(stream.get());
CHECK_EQ(stream->stream_id(), 0u);
std::unique_ptr<SpdyStream> owned_stream =
ActivateCreatedStream(stream.get());
InsertActivatedStream(std::move(owned_stream));
if (stream_hi_water_mark_ > kLastStreamId) {
CHECK_EQ(stream->stream_id(), kLastStreamId);
// We've exhausted the stream ID space, and no new streams may be
// created after this one.
MakeUnavailable();
StartGoingAway(kLastStreamId, ERR_ABORTED);
}
}
in_flight_write_ = producer->ProduceBuffer();
if (!in_flight_write_) {
NOTREACHED();
return ERR_UNEXPECTED;
}
in_flight_write_frame_type_ = frame_type;
in_flight_write_frame_size_ = in_flight_write_->GetRemainingSize();
DCHECK_GE(in_flight_write_frame_size_, spdy::kFrameMinimumSize);
in_flight_write_stream_ = stream;
}
write_state_ = WRITE_STATE_DO_WRITE_COMPLETE;
scoped_refptr<IOBuffer> write_io_buffer =
in_flight_write_->GetIOBufferForRemainingData();
return connection_->socket()->Write(
write_io_buffer.get(), in_flight_write_->GetRemainingSize(),
base::Bind(&SpdySession::PumpWriteLoop, weak_factory_.GetWeakPtr(),
WRITE_STATE_DO_WRITE_COMPLETE),
NetworkTrafficAnnotationTag(in_flight_write_traffic_annotation));
}
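// Handles the result of a socket write. On error the session is drained. On
// success the written bytes are consumed from |in_flight_write_|; the owning
// stream is notified only once the whole frame has been written, since the
// socket may accept fewer bytes than requested.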
int SpdySession::DoWriteComplete(int result) {
CHECK(in_io_loop_);
DCHECK_NE(result, ERR_IO_PENDING);
DCHECK_GT(in_flight_write_->GetRemainingSize(), 0u);
if (result < 0) {
DCHECK_NE(result, ERR_IO_PENDING);
in_flight_write_.reset();
in_flight_write_frame_type_ = spdy::SpdyFrameType::DATA;
in_flight_write_frame_size_ = 0;
in_flight_write_stream_.reset();
in_flight_write_traffic_annotation.reset();
write_state_ = WRITE_STATE_DO_WRITE;
DoDrainSession(static_cast<Error>(result), "Write error");
return OK;
}
// It should not be possible to have written more bytes than remain in
// |in_flight_write_|.
DCHECK_LE(static_cast<size_t>(result), in_flight_write_->GetRemainingSize());
if (result > 0) {
in_flight_write_->Consume(static_cast<size_t>(result));
if (in_flight_write_stream_.get())
in_flight_write_stream_->AddRawSentBytes(static_cast<size_t>(result));
// We only notify the stream when we've fully written the pending frame.
if (in_flight_write_->GetRemainingSize() == 0) {
// It is possible that the stream was cancelled while we were
// writing to the socket.
if (in_flight_write_stream_.get()) {
DCHECK_GT(in_flight_write_frame_size_, 0u);
in_flight_write_stream_->OnFrameWriteComplete(
in_flight_write_frame_type_, in_flight_write_frame_size_);
}
// Clean up the write that just completed.
in_flight_write_.reset();
in_flight_write_frame_type_ = spdy::SpdyFrameType::DATA;
in_flight_write_frame_size_ = 0;
in_flight_write_stream_.reset();
}
}
write_state_ = WRITE_STATE_DO_WRITE;
return OK;
}
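// Sends the client connection preface and the initial frames in one write.
// Sketch of the coalesced buffer that is built below:
//
//   +---------------------------+----------------+--------------------------+
//   | HTTP/2 connection preface | SETTINGS frame | WINDOW_UPDATE (optional) |
//   +---------------------------+----------------+--------------------------+
//
// The WINDOW_UPDATE is only included when |session_max_recv_window_size_|
// exceeds the current |session_recv_window_size_|.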
void SpdySession::SendInitialData() {
DCHECK(enable_sending_initial_data_);
DCHECK(buffered_spdy_framer_.get());
// Prepare initial SETTINGS frame. Only send settings that have a value
// different from the protocol default value.
spdy::SettingsMap settings_map;
for (auto setting : initial_settings_) {
if (!IsSpdySettingAtDefaultInitialValue(setting.first, setting.second)) {
settings_map.insert(setting);
}
}
net_log_.AddEvent(NetLogEventType::HTTP2_SESSION_SEND_SETTINGS,
base::Bind(&NetLogSpdySendSettingsCallback, &settings_map));
std::unique_ptr<spdy::SpdySerializedFrame> settings_frame(
buffered_spdy_framer_->CreateSettings(settings_map));
// Prepare initial WINDOW_UPDATE frame.
// Make sure |session_max_recv_window_size_ - session_recv_window_size_|
// does not underflow.
DCHECK_GE(session_max_recv_window_size_, session_recv_window_size_);
DCHECK_GE(session_recv_window_size_, 0);
DCHECK_EQ(0, session_unacked_recv_window_bytes_);
std::unique_ptr<spdy::SpdySerializedFrame> window_update_frame;
const bool send_window_update =
session_max_recv_window_size_ > session_recv_window_size_;
if (send_window_update) {
const int32_t delta_window_size =
session_max_recv_window_size_ - session_recv_window_size_;
session_recv_window_size_ += delta_window_size;
net_log_.AddEvent(NetLogEventType::HTTP2_SESSION_UPDATE_RECV_WINDOW,
base::Bind(&NetLogSpdySessionWindowUpdateCallback,
delta_window_size, session_recv_window_size_));
session_unacked_recv_window_bytes_ += delta_window_size;
net_log_.AddEvent(NetLogEventType::HTTP2_SESSION_SEND_WINDOW_UPDATE,
base::Bind(&NetLogSpdyWindowUpdateFrameCallback,
spdy::kSessionFlowControlStreamId,
session_unacked_recv_window_bytes_));
window_update_frame = buffered_spdy_framer_->CreateWindowUpdate(
spdy::kSessionFlowControlStreamId, session_unacked_recv_window_bytes_);
session_unacked_recv_window_bytes_ = 0;
}
// Create a single frame to hold the connection preface, the initial SETTINGS
// frame, and the optional initial WINDOW_UPDATE frame, so that they are sent
// on the wire in a single write.
size_t initial_frame_size =
spdy::kHttp2ConnectionHeaderPrefixSize + settings_frame->size();
if (send_window_update)
initial_frame_size += window_update_frame->size();
#if defined(STARBOARD)
// Work-around for toolchains without C++14 library support
// (no std::make_unique for arrays).
auto initial_frame_data =
std::unique_ptr<char[]>(new char[initial_frame_size]);
#else
auto initial_frame_data = std::make_unique<char[]>(initial_frame_size);
#endif
size_t offset = 0;
SbMemoryCopy(initial_frame_data.get() + offset,
spdy::kHttp2ConnectionHeaderPrefix,
spdy::kHttp2ConnectionHeaderPrefixSize);
offset += spdy::kHttp2ConnectionHeaderPrefixSize;
SbMemoryCopy(initial_frame_data.get() + offset, settings_frame->data(),
settings_frame->size());
offset += settings_frame->size();
if (send_window_update) {
SbMemoryCopy(initial_frame_data.get() + offset, window_update_frame->data(),
window_update_frame->size());
}
auto initial_frame = std::make_unique<spdy::SpdySerializedFrame>(
initial_frame_data.release(), initial_frame_size,
/* owns_buffer = */ true);
EnqueueSessionWrite(HIGHEST, spdy::SpdyFrameType::SETTINGS,
std::move(initial_frame));
}
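// Applies a single SETTINGS parameter received from the server. Only
// MAX_CONCURRENT_STREAMS, INITIAL_WINDOW_SIZE and ENABLE_CONNECT_PROTOCOL are
// acted on here; other parameters are ignored by this method.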
void SpdySession::HandleSetting(uint32_t id, uint32_t value) {
switch (id) {
case spdy::SETTINGS_MAX_CONCURRENT_STREAMS:
max_concurrent_streams_ =
std::min(static_cast<size_t>(value), kMaxConcurrentStreamLimit);
ProcessPendingStreamRequests();
break;
case spdy::SETTINGS_INITIAL_WINDOW_SIZE: {
if (value > static_cast<uint32_t>(std::numeric_limits<int32_t>::max())) {
net_log().AddEvent(
NetLogEventType::HTTP2_SESSION_INITIAL_WINDOW_SIZE_OUT_OF_RANGE,
NetLog::IntCallback("initial_window_size", value));
return;
}
// spdy::SETTINGS_INITIAL_WINDOW_SIZE only updates
// |stream_initial_send_window_size_|.
int32_t delta_window_size =
static_cast<int32_t>(value) - stream_initial_send_window_size_;
stream_initial_send_window_size_ = static_cast<int32_t>(value);
UpdateStreamsSendWindowSize(delta_window_size);
net_log().AddEvent(
NetLogEventType::HTTP2_SESSION_UPDATE_STREAMS_SEND_WINDOW_SIZE,
NetLog::IntCallback("delta_window_size", delta_window_size));
break;
}
case spdy::SETTINGS_ENABLE_CONNECT_PROTOCOL:
if ((value != 0 && value != 1) || (support_websocket_ && value == 0)) {
DoDrainSession(
ERR_SPDY_PROTOCOL_ERROR,
"Invalid value for spdy::SETTINGS_ENABLE_CONNECT_PROTOCOL.");
return;
}
if (value == 1) {
support_websocket_ = true;
}
break;
}
}
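// Applies |delta_window_size| to the send window of every active and created
// stream. If any stream's send window would overflow, the entire session is
// drained with a flow control error, as required by the HTTP/2 flow-control
// rules.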
void SpdySession::UpdateStreamsSendWindowSize(int32_t delta_window_size) {
for (const auto& value : active_streams_) {
if (!value.second->AdjustSendWindowSize(delta_window_size)) {
DoDrainSession(
ERR_SPDY_FLOW_CONTROL_ERROR,
base::StringPrintf(
"New spdy::SETTINGS_INITIAL_WINDOW_SIZE value overflows "
"flow control window of stream %d.",
value.second->stream_id()));
return;
}
}
for (auto* const stream : created_streams_) {
if (!stream->AdjustSendWindowSize(delta_window_size)) {
DoDrainSession(
ERR_SPDY_FLOW_CONTROL_ERROR,
base::StringPrintf(
"New spdy::SETTINGS_INITIAL_WINDOW_SIZE value overflows "
"flow control window of stream %d.",
stream->stream_id()));
return;
}
}
}
void SpdySession::MaybeSendPrefacePing() {
if (ping_in_flight_ || check_ping_status_pending_ ||
!enable_ping_based_connection_checking_) {
return;
}
// If there has been no read activity in the session for some time,
// then send a preface-PING.
if (time_func_() > last_read_time_ + connection_at_risk_of_loss_time_)
WritePingFrame(next_ping_id_, false);
}
void SpdySession::SendWindowUpdateFrame(spdy::SpdyStreamId stream_id,
uint32_t delta_window_size,
RequestPriority priority) {
ActiveStreamMap::const_iterator it = active_streams_.find(stream_id);
if (it != active_streams_.end()) {
CHECK_EQ(it->second->stream_id(), stream_id);
} else {
CHECK_EQ(stream_id, spdy::kSessionFlowControlStreamId);
}
net_log_.AddEvent(NetLogEventType::HTTP2_SESSION_SEND_WINDOW_UPDATE,
base::Bind(&NetLogSpdyWindowUpdateFrameCallback, stream_id,
delta_window_size));
DCHECK(buffered_spdy_framer_.get());
std::unique_ptr<spdy::SpdySerializedFrame> window_update_frame(
buffered_spdy_framer_->CreateWindowUpdate(stream_id, delta_window_size));
EnqueueSessionWrite(priority, spdy::SpdyFrameType::WINDOW_UPDATE,
std::move(window_update_frame));
}
void SpdySession::WritePingFrame(spdy::SpdyPingId unique_id, bool is_ack) {
DCHECK(buffered_spdy_framer_.get());
std::unique_ptr<spdy::SpdySerializedFrame> ping_frame(
buffered_spdy_framer_->CreatePingFrame(unique_id, is_ack));
EnqueueSessionWrite(HIGHEST, spdy::SpdyFrameType::PING,
std::move(ping_frame));
if (net_log().IsCapturing()) {
net_log().AddEvent(
NetLogEventType::HTTP2_SESSION_PING,
base::Bind(&NetLogSpdyPingCallback, unique_id, is_ack, "sent"));
}
if (!is_ack) {
DCHECK(!ping_in_flight_);
ping_in_flight_ = true;
++next_ping_id_;
PlanToCheckPingStatus();
last_ping_sent_time_ = time_func_();
}
}
void SpdySession::PlanToCheckPingStatus() {
if (check_ping_status_pending_)
return;
check_ping_status_pending_ = true;
base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, base::Bind(&SpdySession::CheckPingStatus,
weak_factory_.GetWeakPtr(), time_func_()),
hung_interval_);
}
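// Runs |hung_interval_| after a PING was sent. If the PING has been
// acknowledged, nothing needs to be done. If it is still outstanding and
// nothing has been read since the last check (or within |hung_interval_|),
// the session is considered hung and is drained; otherwise the check is
// re-scheduled relative to the last read time.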
void SpdySession::CheckPingStatus(base::TimeTicks last_check_time) {
CHECK(!in_io_loop_);
DCHECK(check_ping_status_pending_);
if (!ping_in_flight_) {
// A response has been received for the ping we had sent.
check_ping_status_pending_ = false;
return;
}
const base::TimeTicks now = time_func_();
if (now > last_read_time_ + hung_interval_ ||
last_read_time_ < last_check_time) {
check_ping_status_pending_ = false;
DoDrainSession(ERR_SPDY_PING_FAILED, "Failed ping.");
return;
}
// Check the status of the connection again after a delay.
const base::TimeDelta delay = last_read_time_ + hung_interval_ - now;
base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE, base::Bind(&SpdySession::CheckPingStatus,
weak_factory_.GetWeakPtr(), now),
delay);
}
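// Returns the next client-initiated stream id. Client-initiated ids are odd
// and monotonically increasing (1, 3, 5, ...), so the high-water mark is
// advanced by 2 on every call.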
spdy::SpdyStreamId SpdySession::GetNewStreamId() {
CHECK_LE(stream_hi_water_mark_, kLastStreamId);
spdy::SpdyStreamId id = stream_hi_water_mark_;
stream_hi_water_mark_ += 2;
return id;
}
void SpdySession::EnqueueSessionWrite(
RequestPriority priority,
spdy::SpdyFrameType frame_type,
std::unique_ptr<spdy::SpdySerializedFrame> frame) {
DCHECK(frame_type == spdy::SpdyFrameType::RST_STREAM ||
frame_type == spdy::SpdyFrameType::SETTINGS ||
frame_type == spdy::SpdyFrameType::WINDOW_UPDATE ||
frame_type == spdy::SpdyFrameType::PING ||
frame_type == spdy::SpdyFrameType::GOAWAY);
auto buffer = std::make_unique<SpdyBuffer>(std::move(frame));
EnqueueWrite(priority, frame_type,
std::make_unique<SimpleBufferProducer>(std::move(buffer)),
base::WeakPtr<SpdyStream>(),
kSpdySessionCommandsTrafficAnnotation);
}
void SpdySession::EnqueueWrite(
RequestPriority priority,
spdy::SpdyFrameType frame_type,
std::unique_ptr<SpdyBufferProducer> producer,
const base::WeakPtr<SpdyStream>& stream,
const NetworkTrafficAnnotationTag& traffic_annotation) {
if (availability_state_ == STATE_DRAINING)
return;
write_queue_.Enqueue(priority, frame_type, std::move(producer), stream,
traffic_annotation);
if (greased_http2_frame_ && (frame_type == spdy::SpdyFrameType::SETTINGS ||
frame_type == spdy::SpdyFrameType::HEADERS)) {
write_queue_.Enqueue(
priority,
static_cast<spdy::SpdyFrameType>(greased_http2_frame_.value().type),
std::make_unique<GreasedBufferProducer>(
stream, &greased_http2_frame_.value(), buffered_spdy_framer_.get()),
stream, traffic_annotation);
}
MaybePostWriteLoop();
}
void SpdySession::InsertCreatedStream(std::unique_ptr<SpdyStream> stream) {
CHECK_EQ(stream->stream_id(), 0u);
auto it = created_streams_.lower_bound(stream.get());
CHECK(it == created_streams_.end() || *it != stream.get());
created_streams_.insert(it, stream.release());
}
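// Moves |stream| from the created set to the caller: the stream is assigned
// the next stream id and ownership is returned so that the caller can insert
// it into |active_streams_| via InsertActivatedStream().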
std::unique_ptr<SpdyStream> SpdySession::ActivateCreatedStream(
SpdyStream* stream) {
CHECK_EQ(stream->stream_id(), 0u);
auto it = created_streams_.find(stream);
CHECK(it != created_streams_.end());
stream->set_stream_id(GetNewStreamId());
std::unique_ptr<SpdyStream> owned_stream(stream);
created_streams_.erase(it);
return owned_stream;
}
void SpdySession::InsertActivatedStream(std::unique_ptr<SpdyStream> stream) {
spdy::SpdyStreamId stream_id = stream->stream_id();
CHECK_NE(stream_id, 0u);
std::pair<ActiveStreamMap::iterator, bool> result =
active_streams_.insert(std::make_pair(stream_id, stream.get()));
CHECK(result.second);
ignore_result(stream.release());
}
void SpdySession::DeleteStream(std::unique_ptr<SpdyStream> stream, int status) {
if (in_flight_write_stream_.get() == stream.get()) {
// If we're deleting the stream for the in-flight write, we still
// need to let the write complete, so we clear
// |in_flight_write_stream_| and let the write finish on its own
// without notifying |in_flight_write_stream_|.
in_flight_write_stream_.reset();
}
write_queue_.RemovePendingWritesForStream(stream.get());
stream->OnClose(status);
if (availability_state_ == STATE_AVAILABLE) {
ProcessPendingStreamRequests();
}
}
void SpdySession::RecordPingRTTHistogram(base::TimeDelta duration) {
UMA_HISTOGRAM_CUSTOM_TIMES("Net.SpdyPing.RTT", duration,
base::TimeDelta::FromMilliseconds(1),
base::TimeDelta::FromMinutes(10), 100);
}
void SpdySession::RecordHistograms() {
UMA_HISTOGRAM_CUSTOM_COUNTS("Net.SpdyStreamsPerSession",
streams_initiated_count_, 1, 300, 50);
UMA_HISTOGRAM_CUSTOM_COUNTS("Net.SpdyStreamsPushedPerSession",
streams_pushed_count_, 1, 300, 50);
UMA_HISTOGRAM_CUSTOM_COUNTS("Net.SpdyStreamsPushedAndClaimedPerSession",
streams_pushed_and_claimed_count_, 1, 300, 50);
UMA_HISTOGRAM_CUSTOM_COUNTS("Net.SpdyStreamsAbandonedPerSession",
streams_abandoned_count_, 1, 300, 50);
UMA_HISTOGRAM_COUNTS_1M("Net.SpdySession.PushedBytes", bytes_pushed_count_);
DCHECK_LE(bytes_pushed_and_unclaimed_count_, bytes_pushed_count_);
UMA_HISTOGRAM_COUNTS_1M("Net.SpdySession.PushedAndUnclaimedBytes",
bytes_pushed_and_unclaimed_count_);
UMA_HISTOGRAM_BOOLEAN("Net.SpdySession.ServerSupportsWebSocket",
support_websocket_);
}
void SpdySession::RecordProtocolErrorHistogram(
SpdyProtocolErrorDetails details) {
UMA_HISTOGRAM_ENUMERATION("Net.SpdySessionErrorDetails2", details,
NUM_SPDY_PROTOCOL_ERROR_DETAILS);
if (base::EndsWith(host_port_pair().host(), "google.com",
base::CompareCase::INSENSITIVE_ASCII)) {
UMA_HISTOGRAM_ENUMERATION("Net.SpdySessionErrorDetails_Google2", details,
NUM_SPDY_PROTOCOL_ERROR_DETAILS);
}
}
// static
void SpdySession::RecordPushedStreamVaryResponseHeaderHistogram(
const spdy::SpdyHeaderBlock& headers) {
UMA_HISTOGRAM_ENUMERATION("Net.PushedStreamVaryResponseHeader",
ParseVaryInPushedResponse(headers),
kNumberOfVaryEntries);
}
void SpdySession::DcheckGoingAway() const {
#if DCHECK_IS_ON()
DCHECK_GE(availability_state_, STATE_GOING_AWAY);
for (int i = MINIMUM_PRIORITY; i <= MAXIMUM_PRIORITY; ++i) {
DCHECK(pending_create_stream_queues_[i].empty());
}
DCHECK(created_streams_.empty());
#endif
}
void SpdySession::DcheckDraining() const {
DcheckGoingAway();
DCHECK_EQ(availability_state_, STATE_DRAINING);
DCHECK(active_streams_.empty());
DCHECK_EQ(0u, pool_->push_promise_index()->CountStreamsForSession(this));
}
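// Drains the session: marks it unavailable, optionally enqueues a GOAWAY
// explaining |err|, transitions to STATE_DRAINING, closes any remaining
// streams via StartGoingAway(), and kicks the write loop so queued frames
// (including the GOAWAY) are flushed before the session is torn down.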
void SpdySession::DoDrainSession(Error err, const std::string& description) {
if (availability_state_ == STATE_DRAINING) {
return;
}
MakeUnavailable();
// Mark |host_port_pair()| as requiring HTTP/1.1 for subsequent connections.
if (err == ERR_HTTP_1_1_REQUIRED) {
http_server_properties_->SetHTTP11Required(host_port_pair());
}
// If |err| indicates an error occurred, inform the peer that we're closing
// and why. Don't GOAWAY on a graceful or idle close, as that may
// unnecessarily wake the radio. We could technically GOAWAY on network errors
// (we'd probably fail to actually write it, but that's okay), but many
// unit tests would need to be updated.
if (err != OK &&
err != ERR_ABORTED && // Used by SpdySessionPool to close idle sessions.
err != ERR_NETWORK_CHANGED && // Used to deprecate sessions on IP change.
err != ERR_SOCKET_NOT_CONNECTED && err != ERR_HTTP_1_1_REQUIRED &&
err != ERR_CONNECTION_CLOSED && err != ERR_CONNECTION_RESET) {
// Enqueue a GOAWAY to inform the peer of why we're closing the connection.
spdy::SpdyGoAwayIR goaway_ir(last_accepted_push_stream_id_,
MapNetErrorToGoAwayStatus(err), description);
auto frame = std::make_unique<spdy::SpdySerializedFrame>(
buffered_spdy_framer_->SerializeFrame(goaway_ir));
EnqueueSessionWrite(HIGHEST, spdy::SpdyFrameType::GOAWAY, std::move(frame));
}
availability_state_ = STATE_DRAINING;
error_on_close_ = err;
net_log_.AddEvent(
NetLogEventType::HTTP2_SESSION_CLOSE,
base::Bind(&NetLogSpdySessionCloseCallback, err, &description));
base::UmaHistogramSparse("Net.SpdySession.ClosedOnError", -err);
if (err == OK) {
// We ought to be going away already, as this is a graceful close.
DcheckGoingAway();
} else {
StartGoingAway(0, err);
}
DcheckDraining();
MaybePostWriteLoop();
}
void SpdySession::LogAbandonedStream(SpdyStream* stream, Error status) {
DCHECK(stream);
stream->LogStreamError(status, "Abandoned.");
// We don't increment the streams abandoned counter here. If the
// stream isn't active (i.e., it hasn't written anything to the wire
// yet) then it's as if it never existed. If it is active, then
// LogAbandonedActiveStream() will increment the counters.
}
void SpdySession::LogAbandonedActiveStream(ActiveStreamMap::const_iterator it,
Error status) {
DCHECK_GT(it->first, 0u);
LogAbandonedStream(it->second, status);
++streams_abandoned_count_;
}
void SpdySession::CompleteStreamRequest(
const base::WeakPtr<SpdyStreamRequest>& pending_request) {
// Abort if the request has already been cancelled.
if (!pending_request)
return;
base::WeakPtr<SpdyStream> stream;
int rv = TryCreateStream(pending_request, &stream);
if (rv == OK) {
DCHECK(stream);
pending_request->OnRequestCompleteSuccess(stream);
return;
}
DCHECK(!stream);
if (rv != ERR_IO_PENDING) {
pending_request->OnRequestCompleteFailure(rv);
}
}
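// Posted by TryCreatePushStream() with a delay of
// kPushedStreamLifetimeSeconds. Resets the pushed stream |stream_id| if it is
// still unclaimed, i.e. still the stream registered for its URL in the push
// promise index.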
void SpdySession::CancelPushedStreamIfUnclaimed(spdy::SpdyStreamId stream_id) {
auto active_it = active_streams_.find(stream_id);
if (active_it == active_streams_.end())
return;
// Make sure to cancel the correct stream. It is possible that the pushed
// stream |stream_id| is already claimed, and another stream has been pushed
// for the same URL.
const GURL& url = active_it->second->url();
if (pool_->push_promise_index()->FindStream(url, this) != stream_id) {
return;
}
RecordSpdyPushedStreamFateHistogram(SpdyPushedStreamFate::kTimeout);
LogAbandonedActiveStream(active_it, ERR_TIMED_OUT);
// CloseActiveStreamIterator() will remove the stream from
// |pool_->push_promise_index()|.
ResetStreamIterator(active_it, ERR_TIMED_OUT, "Stream not claimed.");
}
void SpdySession::OnError(
http2::Http2DecoderAdapter::SpdyFramerError spdy_framer_error) {
CHECK(in_io_loop_);
RecordProtocolErrorHistogram(
MapFramerErrorToProtocolError(spdy_framer_error));
std::string description = base::StringPrintf(
"Framer error: %d (%s).", spdy_framer_error,
http2::Http2DecoderAdapter::SpdyFramerErrorToString(spdy_framer_error));
DoDrainSession(MapFramerErrorToNetError(spdy_framer_error), description);
}
void SpdySession::OnStreamError(spdy::SpdyStreamId stream_id,
const std::string& description) {
CHECK(in_io_loop_);
auto it = active_streams_.find(stream_id);
if (it == active_streams_.end()) {
// We still want to send a frame to reset the stream even if we
// don't know anything about it.
EnqueueResetStreamFrame(stream_id, IDLE, spdy::ERROR_CODE_PROTOCOL_ERROR,
description);
return;
}
ResetStreamIterator(it, ERR_SPDY_PROTOCOL_ERROR, description);
}
void SpdySession::OnPing(spdy::SpdyPingId unique_id, bool is_ack) {
CHECK(in_io_loop_);
net_log_.AddEvent(
NetLogEventType::HTTP2_SESSION_PING,
base::Bind(&NetLogSpdyPingCallback, unique_id, is_ack, "received"));
// Send a response to a PING from the server.
if (!is_ack) {
WritePingFrame(unique_id, true);
return;
}
if (!ping_in_flight_) {
RecordProtocolErrorHistogram(PROTOCOL_ERROR_UNEXPECTED_PING);
DoDrainSession(ERR_SPDY_PROTOCOL_ERROR, "Unexpected PING ACK.");
return;
}
ping_in_flight_ = false;
// Record RTT in histogram when there are no more pings in flight.
RecordPingRTTHistogram(time_func_() - last_ping_sent_time_);
}
void SpdySession::OnRstStream(spdy::SpdyStreamId stream_id,
spdy::SpdyErrorCode error_code) {
CHECK(in_io_loop_);
net_log().AddEvent(
NetLogEventType::HTTP2_SESSION_RECV_RST_STREAM,
base::Bind(&NetLogSpdyRecvRstStreamCallback, stream_id, error_code));
auto it = active_streams_.find(stream_id);
if (it == active_streams_.end()) {
// NOTE: it may just be that the stream was cancelled.
LOG(WARNING) << "Received RST for invalid stream" << stream_id;
return;
}
DCHECK(it->second);
CHECK_EQ(it->second->stream_id(), stream_id);
if (it->second->ShouldRetryRSTPushStream()) {
CloseActiveStreamIterator(it,
ERR_SPDY_CLAIMED_PUSHED_STREAM_RESET_BY_SERVER);
} else if (error_code == spdy::ERROR_CODE_NO_ERROR) {
CloseActiveStreamIterator(it, ERR_SPDY_RST_STREAM_NO_ERROR_RECEIVED);
} else if (error_code == spdy::ERROR_CODE_REFUSED_STREAM) {
CloseActiveStreamIterator(it, ERR_SPDY_SERVER_REFUSED_STREAM);
} else if (error_code == spdy::ERROR_CODE_HTTP_1_1_REQUIRED) {
// TODO(bnc): Record histogram with number of open streams capped at 50.
if (net_log().IsCapturing()) {
it->second->LogStreamError(ERR_HTTP_1_1_REQUIRED,
"Closing session because server reset stream "
"with ERR_HTTP_1_1_REQUIRED.");
}
DoDrainSession(ERR_HTTP_1_1_REQUIRED, "HTTP_1_1_REQUIRED for stream.");
} else {
RecordProtocolErrorHistogram(
PROTOCOL_ERROR_RST_STREAM_FOR_NON_ACTIVE_STREAM);
if (net_log().IsCapturing()) {
it->second->LogStreamError(ERR_SPDY_PROTOCOL_ERROR,
"Server reset stream.");
}
// TODO(mbelshe): Map SPDY-protocol errors to something more sensible.
// For now, it doesn't matter much: it is a protocol error either way.
CloseActiveStreamIterator(it, ERR_SPDY_PROTOCOL_ERROR);
}
}
void SpdySession::OnGoAway(spdy::SpdyStreamId last_accepted_stream_id,
spdy::SpdyErrorCode error_code,
base::StringPiece debug_data) {
CHECK(in_io_loop_);
// TODO(jgraettinger): UMA histogram on |error_code|.
net_log_.AddEvent(
NetLogEventType::HTTP2_SESSION_RECV_GOAWAY,
base::Bind(&NetLogSpdyRecvGoAwayCallback, last_accepted_stream_id,
active_streams_.size(),
pool_->push_promise_index()->CountStreamsForSession(this),
error_code, debug_data));
MakeUnavailable();
if (error_code == spdy::ERROR_CODE_HTTP_1_1_REQUIRED) {
// TODO(bnc): Record histogram with number of open streams capped at 50.
DoDrainSession(ERR_HTTP_1_1_REQUIRED, "HTTP_1_1_REQUIRED for stream.");
} else if (error_code == spdy::ERROR_CODE_NO_ERROR) {
StartGoingAway(last_accepted_stream_id, ERR_SPDY_SERVER_REFUSED_STREAM);
} else {
StartGoingAway(last_accepted_stream_id, ERR_ABORTED);
}
// This handles the case where there are no active streams left (i.e.,
// StartGoingAway() did nothing). Otherwise, there are still active streams,
// and the last one to be closed will finish the going-away process (see
// DeleteStream()).
MaybeFinishGoingAway();