Diffstat (limited to 'vendor/golang.org/x/net')
-rw-r--r--  vendor/golang.org/x/net/http2/client_conn_pool.go    47
-rw-r--r--  vendor/golang.org/x/net/http2/errors.go               7
-rw-r--r--  vendor/golang.org/x/net/http2/frame.go                60
-rw-r--r--  vendor/golang.org/x/net/http2/pipe.go                 11
-rw-r--r--  vendor/golang.org/x/net/http2/server.go              101
-rw-r--r--  vendor/golang.org/x/net/http2/transport.go          1157
6 files changed, 767 insertions(+), 616 deletions(-)
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
index 652bc11a..c936843e 100644
--- a/vendor/golang.org/x/net/http2/client_conn_pool.go
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -16,6 +16,12 @@ import (
// ClientConnPool manages a pool of HTTP/2 client connections.
type ClientConnPool interface {
+ // GetClientConn returns a specific HTTP/2 connection (usually
+ // a TLS-TCP connection) to an HTTP/2 server. On success, the
+ // returned ClientConn accounts for the upcoming RoundTrip
+ // call, so the caller should not omit it. If the caller needs
+ // to, ClientConn.RoundTrip can be called with a bogus
+ // new(http.Request) to release the stream reservation.
GetClientConn(req *http.Request, addr string) (*ClientConn, error)
MarkDead(*ClientConn)
}
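
Note: the reservation contract documented above can be exercised from user
code. A minimal sketch, not part of this diff, using the exported
http2.ClientConn API; the releaseReservation helper is hypothetical:

package example

import (
	"fmt"
	"net/http"

	"golang.org/x/net/http2"
)

// releaseReservation gives back a stream that GetClientConn reserved
// but that the caller decided not to use.
func releaseReservation(cc *http2.ClientConn) {
	// The bogus request fails fast, but the RoundTrip call still
	// consumes, and thereby releases, the stream reservation.
	_, err := cc.RoundTrip(new(http.Request))
	fmt.Println("reservation released; RoundTrip error:", err)
}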
@@ -42,7 +48,7 @@ type clientConnPool struct {
conns map[string][]*ClientConn // key is host:port
dialing map[string]*dialCall // currently in-flight dials
keys map[*ClientConn][]string
- addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls
+ addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
}
func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
@@ -54,28 +60,8 @@ const (
noDialOnMiss = false
)
-// shouldTraceGetConn reports whether getClientConn should call any
-// ClientTrace.GetConn hook associated with the http.Request.
-//
-// This complexity is needed to avoid double calls of the GetConn hook
-// during the back-and-forth between net/http and x/net/http2 (when the
-// net/http.Transport is upgraded to also speak http2), as well as support
-// the case where x/net/http2 is being used directly.
-func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool {
- // If our Transport wasn't made via ConfigureTransport, always
- // trace the GetConn hook if provided, because that means the
- // http2 package is being used directly and it's the one
- // dialing, as opposed to net/http.
- if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok {
- return true
- }
- // Otherwise, only use the GetConn hook if this connection has
- // been used previously for other requests. For fresh
- // connections, the net/http package does the dialing.
- return !st.freshConn
-}
-
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
+ // TODO(dneil): Dial a new connection when t.DisableKeepAlives is set?
if isConnectionCloseRequest(req) && dialOnMiss {
// It gets its own connection.
traceGetConn(req, addr)
@@ -89,10 +75,14 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
for {
p.mu.Lock()
for _, cc := range p.conns[addr] {
- if st := cc.idleState(); st.canTakeNewRequest {
- if p.shouldTraceGetConn(st) {
+ if cc.ReserveNewRequest() {
+ // When a connection is presented to us by the net/http package,
+ // the GetConn hook has already been called.
+ // Don't call it a second time here.
+ if !cc.getConnCalled {
traceGetConn(req, addr)
}
+ cc.getConnCalled = false
p.mu.Unlock()
return cc, nil
}
@@ -108,7 +98,13 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
if shouldRetryDial(call, req) {
continue
}
- return call.res, call.err
+ cc, err := call.res, call.err
+ if err != nil {
+ return nil, err
+ }
+ if cc.ReserveNewRequest() {
+ return cc, nil
+ }
}
}
@@ -205,6 +201,7 @@ func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
if err != nil {
c.err = err
} else {
+ cc.getConnCalled = true // already called by the net/http package
p.addConnLocked(key, cc)
}
delete(p.addConnCalls, key)
diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go
index c789fa33..2663e5d2 100644
--- a/vendor/golang.org/x/net/http2/errors.go
+++ b/vendor/golang.org/x/net/http2/errors.go
@@ -53,6 +53,13 @@ func (e ErrCode) String() string {
return fmt.Sprintf("unknown error code 0x%x", uint32(e))
}
+func (e ErrCode) stringToken() string {
+ if s, ok := errCodeName[e]; ok {
+ return s
+ }
+ return fmt.Sprintf("ERR_UNKNOWN_%d", uint32(e))
+}
+
// ConnectionError is an error that results in the termination of the
// entire connection.
type ConnectionError ErrCode
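
Note: stringToken feeds the metric-name tokens used by the CountError hooks
added below. A standalone sketch of the mapping, with errCodeName mirrored
as a subset of the RFC 7540 code-name table:

package main

import "fmt"

// errCodeName mirrors a subset of the package's internal table.
var errCodeName = map[uint32]string{
	0x0: "NO_ERROR",
	0x1: "PROTOCOL_ERROR",
	0x3: "FLOW_CONTROL_ERROR",
}

// stringToken reproduces the new helper's behavior for illustration.
func stringToken(e uint32) string {
	if s, ok := errCodeName[e]; ok {
		return s
	}
	return fmt.Sprintf("ERR_UNKNOWN_%d", e)
}

func main() {
	fmt.Println(stringToken(0x1))  // PROTOCOL_ERROR
	fmt.Println(stringToken(0xfa)) // ERR_UNKNOWN_250
}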
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index b95d6f2d..96a74790 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{
// a frameParser parses a frame given its FrameHeader and payload
// bytes. The length of payload will always equal fh.Length (which
// might be 0).
-type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error)
+type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error)
var frameParsers = map[FrameType]frameParser{
FrameData: parseDataFrame,
@@ -267,6 +267,11 @@ type Framer struct {
lastFrame Frame
errDetail error
+ // countError is a non-nil func that's called on a frame parse
+ // error with some unique error path token. It's initialized
+ // from Transport.CountError or Server.CountError.
+ countError func(errToken string)
+
// lastHeaderStream is non-zero if the last frame was an
// unfinished HEADERS/CONTINUATION.
lastHeaderStream uint32
@@ -426,6 +431,7 @@ func NewFramer(w io.Writer, r io.Reader) *Framer {
fr := &Framer{
w: w,
r: r,
+ countError: func(string) {},
logReads: logFrameReads,
logWrites: logFrameWrites,
debugReadLoggerf: log.Printf,
@@ -500,7 +506,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
if _, err := io.ReadFull(fr.r, payload); err != nil {
return nil, err
}
- f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
+ f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload)
if err != nil {
if ce, ok := err.(connError); ok {
return nil, fr.connError(ce.Code, ce.Reason)
@@ -588,13 +594,14 @@ func (f *DataFrame) Data() []byte {
return f.data
}
-func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
+func parseDataFrame(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
if fh.StreamID == 0 {
// DATA frames MUST be associated with a stream. If a
// DATA frame is received whose stream identifier
// field is 0x0, the recipient MUST respond with a
// connection error (Section 5.4.1) of type
// PROTOCOL_ERROR.
+ countError("frame_data_stream_0")
return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
}
f := fc.getDataFrame()
@@ -605,6 +612,7 @@ func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, erro
var err error
payload, padSize, err = readByte(payload)
if err != nil {
+ countError("frame_data_pad_byte_short")
return nil, err
}
}
@@ -613,6 +621,7 @@ func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, erro
// length of the frame payload, the recipient MUST
// treat this as a connection error.
// Filed: https://github.com/http2/http2-spec/issues/610
+ countError("frame_data_pad_too_big")
return nil, connError{ErrCodeProtocol, "pad size larger than data payload"}
}
f.data = payload[:len(payload)-int(padSize)]
@@ -695,7 +704,7 @@ type SettingsFrame struct {
p []byte
}
-func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseSettingsFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
// When this (ACK 0x1) bit is set, the payload of the
// SETTINGS frame MUST be empty. Receipt of a
@@ -703,6 +712,7 @@ func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error)
// field value other than 0 MUST be treated as a
// connection error (Section 5.4.1) of type
// FRAME_SIZE_ERROR.
+ countError("frame_settings_ack_with_length")
return nil, ConnectionError(ErrCodeFrameSize)
}
if fh.StreamID != 0 {
@@ -713,14 +723,17 @@ func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error)
// field is anything other than 0x0, the endpoint MUST
// respond with a connection error (Section 5.4.1) of
// type PROTOCOL_ERROR.
+ countError("frame_settings_has_stream")
return nil, ConnectionError(ErrCodeProtocol)
}
if len(p)%6 != 0 {
+ countError("frame_settings_mod_6")
// Expecting even number of 6 byte settings.
return nil, ConnectionError(ErrCodeFrameSize)
}
f := &SettingsFrame{FrameHeader: fh, p: p}
if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {
+ countError("frame_settings_window_size_too_big")
// Values above the maximum flow control window size of 2^31 - 1 MUST
// be treated as a connection error (Section 5.4.1) of type
// FLOW_CONTROL_ERROR.
@@ -832,11 +845,13 @@ type PingFrame struct {
func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
-func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
+func parsePingFrame(_ *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
if len(payload) != 8 {
+ countError("frame_ping_length")
return nil, ConnectionError(ErrCodeFrameSize)
}
if fh.StreamID != 0 {
+ countError("frame_ping_has_stream")
return nil, ConnectionError(ErrCodeProtocol)
}
f := &PingFrame{FrameHeader: fh}
@@ -872,11 +887,13 @@ func (f *GoAwayFrame) DebugData() []byte {
return f.debugData
}
-func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseGoAwayFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
if fh.StreamID != 0 {
+ countError("frame_goaway_has_stream")
return nil, ConnectionError(ErrCodeProtocol)
}
if len(p) < 8 {
+ countError("frame_goaway_short")
return nil, ConnectionError(ErrCodeFrameSize)
}
return &GoAwayFrame{
@@ -912,7 +929,7 @@ func (f *UnknownFrame) Payload() []byte {
return f.p
}
-func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseUnknownFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
return &UnknownFrame{fh, p}, nil
}
@@ -923,8 +940,9 @@ type WindowUpdateFrame struct {
Increment uint32 // never read with high bit set
}
-func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
if len(p) != 4 {
+ countError("frame_windowupdate_bad_len")
return nil, ConnectionError(ErrCodeFrameSize)
}
inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
@@ -936,8 +954,10 @@ func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, err
// control window MUST be treated as a connection
// error (Section 5.4.1).
if fh.StreamID == 0 {
+ countError("frame_windowupdate_zero_inc_conn")
return nil, ConnectionError(ErrCodeProtocol)
}
+ countError("frame_windowupdate_zero_inc_stream")
return nil, streamError(fh.StreamID, ErrCodeProtocol)
}
return &WindowUpdateFrame{
@@ -988,7 +1008,7 @@ func (f *HeadersFrame) HasPriority() bool {
return f.FrameHeader.Flags.Has(FlagHeadersPriority)
}
-func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
+func parseHeadersFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) {
hf := &HeadersFrame{
FrameHeader: fh,
}
@@ -997,11 +1017,13 @@ func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err er
// is received whose stream identifier field is 0x0, the recipient MUST
// respond with a connection error (Section 5.4.1) of type
// PROTOCOL_ERROR.
+ countError("frame_headers_zero_stream")
return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
}
var padLength uint8
if fh.Flags.Has(FlagHeadersPadded) {
if p, padLength, err = readByte(p); err != nil {
+ countError("frame_headers_pad_short")
return
}
}
@@ -1009,16 +1031,19 @@ func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err er
var v uint32
p, v, err = readUint32(p)
if err != nil {
+ countError("frame_headers_prio_short")
return nil, err
}
hf.Priority.StreamDep = v & 0x7fffffff
hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
p, hf.Priority.Weight, err = readByte(p)
if err != nil {
+ countError("frame_headers_prio_weight_short")
return nil, err
}
}
if len(p)-int(padLength) < 0 {
+ countError("frame_headers_pad_too_big")
return nil, streamError(fh.StreamID, ErrCodeProtocol)
}
hf.headerFragBuf = p[:len(p)-int(padLength)]
@@ -1125,11 +1150,13 @@ func (p PriorityParam) IsZero() bool {
return p == PriorityParam{}
}
-func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
+func parsePriorityFrame(_ *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
if fh.StreamID == 0 {
+ countError("frame_priority_zero_stream")
return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
}
if len(payload) != 5 {
+ countError("frame_priority_bad_length")
return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
}
v := binary.BigEndian.Uint32(payload[:4])
@@ -1172,11 +1199,13 @@ type RSTStreamFrame struct {
ErrCode ErrCode
}
-func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
if len(p) != 4 {
+ countError("frame_rststream_bad_len")
return nil, ConnectionError(ErrCodeFrameSize)
}
if fh.StreamID == 0 {
+ countError("frame_rststream_zero_stream")
return nil, ConnectionError(ErrCodeProtocol)
}
return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
@@ -1202,8 +1231,9 @@ type ContinuationFrame struct {
headerFragBuf []byte
}
-func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseContinuationFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
if fh.StreamID == 0 {
+ countError("frame_continuation_zero_stream")
return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
}
return &ContinuationFrame{fh, p}, nil
@@ -1252,7 +1282,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool {
return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
}
-func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
+func parsePushPromise(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) {
pp := &PushPromiseFrame{
FrameHeader: fh,
}
@@ -1263,6 +1293,7 @@ func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err err
// with. If the stream identifier field specifies the value
// 0x0, a recipient MUST respond with a connection error
// (Section 5.4.1) of type PROTOCOL_ERROR.
+ countError("frame_pushpromise_zero_stream")
return nil, ConnectionError(ErrCodeProtocol)
}
// The PUSH_PROMISE frame includes optional padding.
@@ -1270,18 +1301,21 @@ func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err err
var padLength uint8
if fh.Flags.Has(FlagPushPromisePadded) {
if p, padLength, err = readByte(p); err != nil {
+ countError("frame_pushpromise_pad_short")
return
}
}
p, pp.PromiseID, err = readUint32(p)
if err != nil {
+ countError("frame_pushpromise_promiseid_short")
return
}
pp.PromiseID = pp.PromiseID & (1<<31 - 1)
if int(padLength) > len(p) {
// like the DATA frame, error out if padding is longer than the body.
+ countError("frame_pushpromise_pad_too_big")
return nil, ConnectionError(ErrCodeProtocol)
}
pp.headerFragBuf = p[:len(p)-int(padLength)]
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go
index 2a5399ec..c15b8a77 100644
--- a/vendor/golang.org/x/net/http2/pipe.go
+++ b/vendor/golang.org/x/net/http2/pipe.go
@@ -30,6 +30,17 @@ type pipeBuffer interface {
io.Reader
}
+// setBuffer initializes the pipe buffer.
+// It has no effect if the pipe is already closed.
+func (p *pipe) setBuffer(b pipeBuffer) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.err != nil || p.breakErr != nil {
+ return
+ }
+ p.b = b
+}
+
func (p *pipe) Len() int {
p.mu.Lock()
defer p.mu.Unlock()
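
Note: setBuffer lets a stream defer allocating its response buffer until
data actually arrives. A standalone sketch of the same pattern; the lazyBuf
type is hypothetical:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// lazyBuf mimics pipe.setBuffer: the buffer is installed on first use,
// so streams that never carry a body never allocate one, and a closed
// pipe silently ignores the late installation.
type lazyBuf struct {
	mu     sync.Mutex
	closed bool
	b      *bytes.Buffer
}

func (p *lazyBuf) setBuffer(b *bytes.Buffer) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.closed {
		return // no effect once closed, as in the diff
	}
	p.b = b
}

func main() {
	p := &lazyBuf{}
	p.setBuffer(bytes.NewBuffer(nil))
	fmt.Println(p.b != nil) // true
}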
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 19e449cf..c67e9b7f 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -130,6 +130,12 @@ type Server struct {
// If nil, a default scheduler is chosen.
NewWriteScheduler func() WriteScheduler
+ // CountError, if non-nil, is called on HTTP/2 server errors.
+ // It's intended to increment a metric for monitoring, such
+ // as an expvar or Prometheus metric.
+ // The errType consists of only ASCII word characters.
+ CountError func(errType string)
+
// Internal state. This is a pointer (rather than embedded directly)
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
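
Note: a sketch of wiring the new Server.CountError hook to an expvar map;
cert.pem and key.pem are placeholders:

package main

import (
	"expvar"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	counts := expvar.NewMap("http2_server_errors")
	srv := &http.Server{Addr: ":8443"}
	// Each error path token increments a counter keyed by errType.
	err := http2.ConfigureServer(srv, &http2.Server{
		CountError: func(errType string) { counts.Add(errType, 1) },
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}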
@@ -405,6 +411,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
fr := NewFramer(sc.bw, c)
+ if s.CountError != nil {
+ fr.countError = s.CountError
+ }
fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
fr.MaxHeaderListSize = sc.maxHeaderListSize()
fr.SetMaxReadFrameSize(s.maxReadFrameSize())
@@ -1399,7 +1408,7 @@ func (sc *serverConn) processFrame(f Frame) error {
// First frame received must be SETTINGS.
if !sc.sawFirstSettings {
if _, ok := f.(*SettingsFrame); !ok {
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("first_settings", ConnectionError(ErrCodeProtocol))
}
sc.sawFirstSettings = true
}
@@ -1424,7 +1433,7 @@ func (sc *serverConn) processFrame(f Frame) error {
case *PushPromiseFrame:
// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("push_promise", ConnectionError(ErrCodeProtocol))
default:
sc.vlogf("http2: server ignoring frame: %v", f.Header())
return nil
@@ -1444,7 +1453,7 @@ func (sc *serverConn) processPing(f *PingFrame) error {
// identifier field value other than 0x0, the recipient MUST
// respond with a connection error (Section 5.4.1) of type
// PROTOCOL_ERROR."
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol))
}
if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
return nil
@@ -1463,7 +1472,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
// or PRIORITY on a stream in this state MUST be
// treated as a connection error (Section 5.4.1) of
// type PROTOCOL_ERROR."
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("stream_idle", ConnectionError(ErrCodeProtocol))
}
if st == nil {
// "WINDOW_UPDATE can be sent by a peer that has sent a
@@ -1474,7 +1483,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
return nil
}
if !st.flow.add(int32(f.Increment)) {
- return streamError(f.StreamID, ErrCodeFlowControl)
+ return sc.countError("bad_flow", streamError(f.StreamID, ErrCodeFlowControl))
}
default: // connection-level flow control
if !sc.flow.add(int32(f.Increment)) {
@@ -1495,7 +1504,7 @@ func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
// identifying an idle stream is received, the
// recipient MUST treat this as a connection error
// (Section 5.4.1) of type PROTOCOL_ERROR.
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("reset_idle_stream", ConnectionError(ErrCodeProtocol))
}
if st != nil {
st.cancelCtx()
@@ -1547,7 +1556,7 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
// Why is the peer ACKing settings we never sent?
// The spec doesn't mention this case, but
// hang up on them anyway.
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("ack_mystery", ConnectionError(ErrCodeProtocol))
}
return nil
}
@@ -1555,7 +1564,7 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
// This isn't actually in the spec, but hang up on
// suspiciously large settings frames or those with
// duplicate entries.
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("settings_big_or_dups", ConnectionError(ErrCodeProtocol))
}
if err := f.ForeachSetting(sc.processSetting); err != nil {
return err
@@ -1622,7 +1631,7 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
// control window to exceed the maximum size as a
// connection error (Section 5.4.1) of type
// FLOW_CONTROL_ERROR."
- return ConnectionError(ErrCodeFlowControl)
+ return sc.countError("setting_win_size", ConnectionError(ErrCodeFlowControl))
}
}
return nil
@@ -1655,7 +1664,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
// or PRIORITY on a stream in this state MUST be
// treated as a connection error (Section 5.4.1) of
// type PROTOCOL_ERROR."
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("data_on_idle", ConnectionError(ErrCodeProtocol))
}
// "If a DATA frame is received whose stream is not in "open"
@@ -1672,7 +1681,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
// and return any flow control bytes since we're not going
// to consume them.
if sc.inflow.available() < int32(f.Length) {
- return streamError(id, ErrCodeFlowControl)
+ return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
}
// Deduct the flow control from inflow, since we're
// going to immediately add it back in
@@ -1685,7 +1694,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
// Already have a stream error in flight. Don't send another.
return nil
}
- return streamError(id, ErrCodeStreamClosed)
+ return sc.countError("closed", streamError(id, ErrCodeStreamClosed))
}
if st.body == nil {
panic("internal error: should have a body in this state")
@@ -1697,12 +1706,12 @@ func (sc *serverConn) processData(f *DataFrame) error {
// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
// value of a content-length header field does not equal the sum of the
// DATA frame payload lengths that form the body.
- return streamError(id, ErrCodeProtocol)
+ return sc.countError("send_too_much", streamError(id, ErrCodeProtocol))
}
if f.Length > 0 {
// Check whether the client has flow control quota.
if st.inflow.available() < int32(f.Length) {
- return streamError(id, ErrCodeFlowControl)
+ return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
}
st.inflow.take(int32(f.Length))
@@ -1710,7 +1719,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
wrote, err := st.body.Write(data)
if err != nil {
sc.sendWindowUpdate(nil, int(f.Length)-wrote)
- return streamError(id, ErrCodeStreamClosed)
+ return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
}
if wrote != len(data) {
panic("internal error: bad Writer")
@@ -1796,7 +1805,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// stream identifier MUST respond with a connection error
// (Section 5.4.1) of type PROTOCOL_ERROR.
if id%2 != 1 {
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("headers_even", ConnectionError(ErrCodeProtocol))
}
// A HEADERS frame can be used to create a new stream or
// send a trailer for an open one. If we already have a stream
@@ -1813,7 +1822,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// this state, it MUST respond with a stream error (Section 5.4.2) of
// type STREAM_CLOSED.
if st.state == stateHalfClosedRemote {
- return streamError(id, ErrCodeStreamClosed)
+ return sc.countError("headers_half_closed", streamError(id, ErrCodeStreamClosed))
}
return st.processTrailerHeaders(f)
}
@@ -1824,7 +1833,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// receives an unexpected stream identifier MUST respond with
// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
if id <= sc.maxClientStreamID {
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("stream_went_down", ConnectionError(ErrCodeProtocol))
}
sc.maxClientStreamID = id
@@ -1841,14 +1850,14 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
if sc.curClientStreams+1 > sc.advMaxStreams {
if sc.unackedSettings == 0 {
// They should know better.
- return streamError(id, ErrCodeProtocol)
+ return sc.countError("over_max_streams", streamError(id, ErrCodeProtocol))
}
// Assume it's a network race, where they just haven't
// received our last SETTINGS update. But actually
// this can't happen yet, because we don't yet provide
// a way for users to adjust server parameters at
// runtime.
- return streamError(id, ErrCodeRefusedStream)
+ return sc.countError("over_max_streams_race", streamError(id, ErrCodeRefusedStream))
}
initialState := stateOpen
@@ -1858,7 +1867,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
st := sc.newStream(id, 0, initialState)
if f.HasPriority() {
- if err := checkPriority(f.StreamID, f.Priority); err != nil {
+ if err := sc.checkPriority(f.StreamID, f.Priority); err != nil {
return err
}
sc.writeSched.AdjustStream(st.id, f.Priority)
@@ -1902,15 +1911,15 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
sc := st.sc
sc.serveG.check()
if st.gotTrailerHeader {
- return ConnectionError(ErrCodeProtocol)
+ return sc.countError("dup_trailers", ConnectionError(ErrCodeProtocol))
}
st.gotTrailerHeader = true
if !f.StreamEnded() {
- return streamError(st.id, ErrCodeProtocol)
+ return sc.countError("trailers_not_ended", streamError(st.id, ErrCodeProtocol))
}
if len(f.PseudoFields()) > 0 {
- return streamError(st.id, ErrCodeProtocol)
+ return sc.countError("trailers_pseudo", streamError(st.id, ErrCodeProtocol))
}
if st.trailer != nil {
for _, hf := range f.RegularFields() {
@@ -1919,7 +1928,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
// TODO: send more details to the peer somehow. But http2 has
// no way to send debug data at a stream level. Discuss with
// HTTP folk.
- return streamError(st.id, ErrCodeProtocol)
+ return sc.countError("trailers_bogus", streamError(st.id, ErrCodeProtocol))
}
st.trailer[key] = append(st.trailer[key], hf.Value)
}
@@ -1928,13 +1937,13 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
return nil
}
-func checkPriority(streamID uint32, p PriorityParam) error {
+func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error {
if streamID == p.StreamDep {
// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
// Section 5.3.3 says that a stream can depend on one of its dependencies,
// so it's only self-dependencies that are forbidden.
- return streamError(streamID, ErrCodeProtocol)
+ return sc.countError("priority", streamError(streamID, ErrCodeProtocol))
}
return nil
}
@@ -1943,7 +1952,7 @@ func (sc *serverConn) processPriority(f *PriorityFrame) error {
if sc.inGoAway {
return nil
}
- if err := checkPriority(f.StreamID, f.PriorityParam); err != nil {
+ if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil {
return err
}
sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
@@ -2000,7 +2009,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
isConnect := rp.method == "CONNECT"
if isConnect {
if rp.path != "" || rp.scheme != "" || rp.authority == "" {
- return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+ return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
}
} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
// See 8.1.2.6 Malformed Requests and Responses:
@@ -2013,13 +2022,13 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
// "All HTTP/2 requests MUST include exactly one valid
// value for the :method, :scheme, and :path
// pseudo-header fields"
- return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+ return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
}
bodyOpen := !f.StreamEnded()
if rp.method == "HEAD" && bodyOpen {
// HEAD requests can't have bodies
- return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+ return nil, nil, sc.countError("head_body", streamError(f.StreamID, ErrCodeProtocol))
}
rp.header = make(http.Header)
@@ -2102,7 +2111,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
var err error
url_, err = url.ParseRequestURI(rp.path)
if err != nil {
- return nil, nil, streamError(st.id, ErrCodeProtocol)
+ return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
}
requestURI = rp.path
}
@@ -2985,3 +2994,31 @@ func h1ServerKeepAlivesDisabled(hs *http.Server) bool {
}
return false
}
+
+func (sc *serverConn) countError(name string, err error) error {
+ if sc == nil || sc.srv == nil {
+ return err
+ }
+ f := sc.srv.CountError
+ if f == nil {
+ return err
+ }
+ var typ string
+ var code ErrCode
+ switch e := err.(type) {
+ case ConnectionError:
+ typ = "conn"
+ code = ErrCode(e)
+ case StreamError:
+ typ = "stream"
+ code = ErrCode(e.Code)
+ default:
+ return err
+ }
+ codeStr := errCodeName[code]
+ if codeStr == "" {
+ codeStr = strconv.Itoa(int(code))
+ }
+ f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name))
+ return err
+}
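
Note: countError therefore emits tokens of the shape
<conn|stream>_<CODE_NAME>_<site>. For example, the PUSH_PROMISE rejection
above yields "conn_PROTOCOL_ERROR_push_promise", and the WINDOW_UPDATE
overflow yields "stream_FLOW_CONTROL_ERROR_bad_flow", assuming errCodeName
carries the RFC 7540 code names.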
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 74c76da5..653a1a0b 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -130,6 +130,12 @@ type Transport struct {
// Defaults to 15s.
PingTimeout time.Duration
+ // CountError, if non-nil, is called on HTTP/2 transport errors.
+ // It's intended to increment a metric for monitoring, such
+ // as an expvar or Prometheus metric.
+ // The errType consists of only ASCII word characters.
+ CountError func(errType string)
+
// t1, if non-nil, is the standard library Transport using
// this transport. Its settings are used (but not its
// RoundTrip method, etc).
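
Note: the client-side hook mirrors the server's. A sketch of counting
transport errors with expvar; the target URL is a placeholder:

package main

import (
	"expvar"
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	counts := expvar.NewMap("http2_transport_errors")
	t := &http2.Transport{
		CountError: func(errType string) { counts.Add(errType, 1) },
	}
	client := &http.Client{Transport: t}
	res, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	res.Body.Close()
	fmt.Println(counts.String()) // JSON map of errType -> count
}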
@@ -236,11 +242,12 @@ func (t *Transport) initConnPool() {
// ClientConn is the state of a single HTTP/2 client connection to an
// HTTP/2 server.
type ClientConn struct {
- t *Transport
- tconn net.Conn // usually *tls.Conn, except specialized impls
- tlsState *tls.ConnectionState // nil only for specialized impls
- reused uint32 // whether conn is being reused; atomic
- singleUse bool // whether being used for a single http.Request
+ t *Transport
+ tconn net.Conn // usually *tls.Conn, except specialized impls
+ tlsState *tls.ConnectionState // nil only for specialized impls
+ reused uint32 // whether conn is being reused; atomic
+ singleUse bool // whether being used for a single http.Request
+ getConnCalled bool // used by clientConnPool
// readLoop goroutine fields:
readerDone chan struct{} // closed on error
@@ -261,25 +268,33 @@ type ClientConn struct {
goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
goAwayDebug string // goAway frame's debug data, retained as a string
streams map[uint32]*clientStream // client-initiated
+ streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip
nextStreamID uint32
pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
pings map[[8]byte]chan struct{} // in flight ping data to notification channel
- bw *bufio.Writer
br *bufio.Reader
- fr *Framer
lastActive time.Time
lastIdle time.Time // time last idle
- // Settings from peer: (also guarded by mu)
+ // Settings from peer: (also guarded by wmu)
maxFrameSize uint32
maxConcurrentStreams uint32
peerMaxHeaderListSize uint64
initialWindowSize uint32
+ // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
+ // Write to reqHeaderMu to lock it, read from it to unlock.
+ // Lock reqHeaderMu BEFORE mu or wmu.
+ reqHeaderMu chan struct{}
+
+ // wmu is held while writing.
+ // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes.
+ // Only acquire both at the same time when changing peer settings.
+ wmu sync.Mutex
+ bw *bufio.Writer
+ fr *Framer
+ werr error // first write error that has occurred
hbuf bytes.Buffer // HPACK encoder writes into this
henc *hpack.Encoder
-
- wmu sync.Mutex // held while writing; acquire AFTER mu if holding both
- werr error // first write error that has occurred
}
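
Note: reqHeaderMu is a channel rather than a sync.Mutex so a blocked locker
can simultaneously watch for cancellation. A standalone sketch of the
1-element semaphore pattern; the semaphore type is hypothetical:

package main

import "fmt"

// semaphore is a 1-element channel used as a cancellable mutex:
// sending locks, receiving unlocks, and a waiter can select against
// a cancellation channel, which sync.Mutex does not allow.
type semaphore chan struct{}

func (s semaphore) lock(cancel <-chan struct{}) bool {
	select {
	case s <- struct{}{}:
		return true
	case <-cancel:
		return false
	}
}

func (s semaphore) unlock() { <-s }

func main() {
	mu := make(semaphore, 1)
	if mu.lock(nil) { // nil cancel channel: wait indefinitely
		fmt.Println("critical section")
		mu.unlock()
	}
}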
// clientStream is the state for a single HTTP/2 stream. One of these
@@ -289,52 +304,42 @@ type clientStream struct {
req *http.Request
trace *httptrace.ClientTrace // or nil
ID uint32
- resc chan resAndError
bufPipe pipe // buffered pipe with the flow-controlled response payload
- startedWrite bool // started request body write; guarded by cc.mu
requestedGzip bool
- on100 func() // optional code to run if get a 100 continue response
+
+ abortOnce sync.Once
+ abort chan struct{} // closed to signal stream should end immediately
+ abortErr error // set if abort is closed
+
+ peerClosed chan struct{} // closed when the peer sends an END_STREAM flag
+ donec chan struct{} // closed after the stream is in the closed state
+ on100 chan struct{} // buffered; written to if a 100 is received
+
+ respHeaderRecv chan struct{} // closed when headers are received
+ res *http.Response // set if respHeaderRecv is closed
flow flow // guarded by cc.mu
inflow flow // guarded by cc.mu
bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
readErr error // sticky read error; owned by transportResponseBody.Read
stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
- didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu
-
- peerReset chan struct{} // closed on peer reset
- resetErr error // populated before peerReset is closed
- done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu
+ // owned by writeRequest:
+ sentEndStream bool // sent an END_STREAM flag to the peer
+ sentHeaders bool
// owned by clientConnReadLoop:
firstByte bool // got the first response byte
pastHeaders bool // got first MetaHeadersFrame (actual headers)
pastTrailers bool // got optional second MetaHeadersFrame (trailers)
num1xx uint8 // number of 1xx responses seen
+ readClosed bool // peer sent an END_STREAM flag
+ readAborted bool // read loop reset the stream
trailer http.Header // accumulated trailers
resTrailer *http.Header // client's Response.Trailer
}
-// awaitRequestCancel waits for the user to cancel a request or for the done
-// channel to be signaled. A non-nil error is returned only if the request was
-// canceled.
-func awaitRequestCancel(req *http.Request, done <-chan struct{}) error {
- ctx := req.Context()
- if req.Cancel == nil && ctx.Done() == nil {
- return nil
- }
- select {
- case <-req.Cancel:
- return errRequestCanceled
- case <-ctx.Done():
- return ctx.Err()
- case <-done:
- return nil
- }
-}
-
var got1xxFuncForTests func(int, textproto.MIMEHeader) error
// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func,
@@ -346,50 +351,24 @@ func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error
return traceGot1xxResponseFunc(cs.trace)
}
-// awaitRequestCancel waits for the user to cancel a request, its context to
-// expire, or for the request to be done (any way it might be removed from the
-// cc.streams map: peer reset, successful completion, TCP connection breakage,
-// etc). If the request is canceled, then cs will be canceled and closed.
-func (cs *clientStream) awaitRequestCancel(req *http.Request) {
- if err := awaitRequestCancel(req, cs.done); err != nil {
- cs.cancelStream()
- cs.bufPipe.CloseWithError(err)
- }
-}
-
-func (cs *clientStream) cancelStream() {
- cc := cs.cc
- cc.mu.Lock()
- didReset := cs.didReset
- cs.didReset = true
- cc.mu.Unlock()
-
- if !didReset {
- cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- cc.forgetStreamID(cs.ID)
- }
+func (cs *clientStream) abortStream(err error) {
+ cs.cc.mu.Lock()
+ defer cs.cc.mu.Unlock()
+ cs.abortStreamLocked(err)
}
-// checkResetOrDone reports any error sent in a RST_STREAM frame by the
-// server, or errStreamClosed if the stream is complete.
-func (cs *clientStream) checkResetOrDone() error {
- select {
- case <-cs.peerReset:
- return cs.resetErr
- case <-cs.done:
- return errStreamClosed
- default:
- return nil
+func (cs *clientStream) abortStreamLocked(err error) {
+ cs.abortOnce.Do(func() {
+ cs.abortErr = err
+ close(cs.abort)
+ })
+ // TODO(dneil): Clean up tests where cs.cc.cond is nil.
+ if cs.cc.cond != nil {
+ // Wake up writeRequestBody if it is waiting on flow control.
+ cs.cc.cond.Broadcast()
}
}
-func (cs *clientStream) getStartedWrite() bool {
- cc := cs.cc
- cc.mu.Lock()
- defer cc.mu.Unlock()
- return cs.startedWrite
-}
-
func (cs *clientStream) abortRequestBodyWrite(err error) {
if err == nil {
panic("nil error")
@@ -398,9 +377,6 @@ func (cs *clientStream) abortRequestBodyWrite(err error) {
cc.mu.Lock()
if cs.stopReqBody == nil {
cs.stopReqBody = err
- if cs.req.Body != nil {
- cs.req.Body.Close()
- }
cc.cond.Broadcast()
}
cc.mu.Unlock()
@@ -490,10 +466,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
}
reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1)
traceGotConn(req, cc, reused)
- body := req.Body
- res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req)
+ res, err := cc.RoundTrip(req)
if err != nil && retry <= 6 {
- if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil {
+ if req, err = shouldRetryRequest(req, err); err == nil {
// After the first retry, do exponential backoff with 10% jitter.
if retry == 0 {
continue
@@ -510,11 +485,6 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
}
if err != nil {
t.vlogf("RoundTrip failure: %v", err)
- // If the error occurred after the body write started,
- // the body writer will close the body. Otherwise, do so here.
- if body != nil && !gotErrAfterReqBodyWrite {
- body.Close()
- }
return nil, err
}
return res, nil
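
Note: the exponential backoff with 10% jitter mentioned above is computed
in a part of RoundTripOpt not shown in this hunk. One common formulation,
as a sketch that assumes retry counts from 1:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryBackoff doubles the delay per retry (1s, 2s, 4s, ...) and adds
// up to 10% random jitter to avoid synchronized retries.
func retryBackoff(retry uint) time.Duration {
	backoff := float64(uint(1) << (retry - 1))
	backoff += backoff * (0.1 * rand.Float64())
	return time.Duration(backoff * float64(time.Second))
}

func main() {
	for retry := uint(1); retry <= 3; retry++ {
		fmt.Println(retryBackoff(retry))
	}
}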
@@ -540,7 +510,7 @@ var (
// response headers. It is always called with a non-nil error.
// It returns either a request to retry (either the same request, or a
// modified clone), or an error if the request can't be replayed.
-func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) {
+func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) {
if !canRetryError(err) {
return nil, err
}
@@ -553,7 +523,6 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt
// If the request body can be reset back to its original
// state via the optional req.GetBody, do that.
if req.GetBody != nil {
- req.Body.Close()
body, err := req.GetBody()
if err != nil {
return nil, err
@@ -565,10 +534,8 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt
// The Request.Body can't reset back to the beginning, but we
// don't seem to have started to read from it yet, so reuse
- // the request directly. The "afterBodyWrite" means the
- // bodyWrite process has started, which becomes true before
- // the first Read.
- if !afterBodyWrite {
+ // the request directly.
+ if err == errClientConnUnusable {
return req, nil
}
@@ -666,6 +633,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
singleUse: singleUse,
wantSettingsAck: true,
pings: make(map[[8]byte]chan struct{}),
+ reqHeaderMu: make(chan struct{}, 1),
}
if d := t.idleConnTimeout(); d != 0 {
cc.idleTimeout = d
@@ -683,6 +651,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
cc.br = bufio.NewReader(c)
cc.fr = NewFramer(cc.bw, cc.br)
+ if t.CountError != nil {
+ cc.fr.countError = t.CountError
+ }
cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
@@ -759,27 +730,39 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
last := f.LastStreamID
for streamID, cs := range cc.streams {
if streamID > last {
- select {
- case cs.resc <- resAndError{err: errClientConnGotGoAway}:
- default:
- }
+ cs.abortStreamLocked(errClientConnGotGoAway)
}
}
}
// CanTakeNewRequest reports whether the connection can take a new request,
// meaning it has not been closed or received or sent a GOAWAY.
+//
+// If the caller is going to immediately make a new request on this
+// connection, use ReserveNewRequest instead.
func (cc *ClientConn) CanTakeNewRequest() bool {
cc.mu.Lock()
defer cc.mu.Unlock()
return cc.canTakeNewRequestLocked()
}
+// ReserveNewRequest is like CanTakeNewRequest but also reserves a
+// concurrent stream in cc. The reservation is decremented on the
+// next call to RoundTrip.
+func (cc *ClientConn) ReserveNewRequest() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if st := cc.idleStateLocked(); !st.canTakeNewRequest {
+ return false
+ }
+ cc.streamsReserved++
+ return true
+}
+
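A hypothetical caller of the reservation API; reserving before committing
to cc keeps a concurrent request from racing past the server's concurrency
limit:

package example

import (
	"errors"
	"net/http"

	"golang.org/x/net/http2"
)

// tryConn reserves a stream on cc, then lets RoundTrip consume the
// reservation; callers fall back to another connection on false.
func tryConn(cc *http2.ClientConn, req *http.Request) (*http.Response, error) {
	if !cc.ReserveNewRequest() {
		return nil, errors.New("http2: connection cannot take a new request")
	}
	return cc.RoundTrip(req)
}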
// clientConnIdleState describes the suitability of a client
// connection to initiate a new RoundTrip request.
type clientConnIdleState struct {
canTakeNewRequest bool
- freshConn bool // whether it's unused by any previous request
}
func (cc *ClientConn) idleState() clientConnIdleState {
@@ -800,14 +783,13 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
// writing it.
maxConcurrentOkay = true
} else {
- maxConcurrentOkay = int64(len(cc.streams)+1) <= int64(cc.maxConcurrentStreams)
+ maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams)
}
st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
!cc.doNotReuse &&
int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
!cc.tooIdleLocked()
- st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
return
}
@@ -838,7 +820,7 @@ func (cc *ClientConn) onIdleTimeout() {
func (cc *ClientConn) closeIfIdle() {
cc.mu.Lock()
- if len(cc.streams) > 0 {
+ if len(cc.streams) > 0 || cc.streamsReserved > 0 {
cc.mu.Unlock()
return
}
@@ -853,9 +835,15 @@ func (cc *ClientConn) closeIfIdle() {
cc.tconn.Close()
}
+func (cc *ClientConn) isDoNotReuseAndIdle() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.doNotReuse && len(cc.streams) == 0
+}
+
var shutdownEnterWaitStateHook = func() {}
-// Shutdown gracefully close the client connection, waiting for running streams to complete.
+// Shutdown gracefully closes the client connection, waiting for running streams to complete.
func (cc *ClientConn) Shutdown(ctx context.Context) error {
if err := cc.sendGoAway(); err != nil {
return err
@@ -894,15 +882,18 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
func (cc *ClientConn) sendGoAway() error {
cc.mu.Lock()
- defer cc.mu.Unlock()
- cc.wmu.Lock()
- defer cc.wmu.Unlock()
- if cc.closing {
+ closing := cc.closing
+ cc.closing = true
+ maxStreamID := cc.nextStreamID
+ cc.mu.Unlock()
+ if closing {
// GOAWAY sent already
return nil
}
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
// Send a graceful shutdown frame to server
- maxStreamID := cc.nextStreamID
if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil {
return err
}
@@ -910,7 +901,6 @@ func (cc *ClientConn) sendGoAway() error {
return err
}
// Prevent new requests
- cc.closing = true
return nil
}
@@ -918,17 +908,12 @@ func (cc *ClientConn) sendGoAway() error {
// err is sent to streams.
func (cc *ClientConn) closeForError(err error) error {
cc.mu.Lock()
+ cc.closed = true
+ for _, cs := range cc.streams {
+ cs.abortStreamLocked(err)
+ }
defer cc.cond.Broadcast()
defer cc.mu.Unlock()
- for id, cs := range cc.streams {
- select {
- case cs.resc <- resAndError{err: err}:
- default:
- }
- cs.bufPipe.CloseWithError(err)
- delete(cc.streams, id)
- }
- cc.closed = true
return cc.tconn.Close()
}
@@ -943,6 +928,9 @@ func (cc *ClientConn) Close() error {
// closes the client connection immediately. In-flight requests are interrupted.
func (cc *ClientConn) closeForLostPing() error {
err := errors.New("http2: client connection lost")
+ if f := cc.t.CountError; f != nil {
+ f("conn_close_lost_ping")
+ }
return cc.closeForError(err)
}
@@ -1007,37 +995,132 @@ func actualContentLength(req *http.Request) int64 {
return -1
}
-func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
- resp, _, err := cc.roundTrip(req)
- return resp, err
+func (cc *ClientConn) decrStreamReservations() {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.decrStreamReservationsLocked()
}
-func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) {
- if err := checkConnHeaders(req); err != nil {
- return nil, false, err
+func (cc *ClientConn) decrStreamReservationsLocked() {
+ if cc.streamsReserved > 0 {
+ cc.streamsReserved--
}
- if cc.idleTimer != nil {
- cc.idleTimer.Stop()
+}
+
+func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
+ ctx := req.Context()
+ cs := &clientStream{
+ cc: cc,
+ req: req,
+ trace: httptrace.ContextClientTrace(req.Context()),
+ peerClosed: make(chan struct{}),
+ abort: make(chan struct{}),
+ respHeaderRecv: make(chan struct{}),
+ donec: make(chan struct{}),
}
+ go cs.doRequest()
- trailers, err := commaSeparatedTrailers(req)
- if err != nil {
- return nil, false, err
+ waitDone := func() error {
+ select {
+ case <-cs.donec:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-req.Cancel:
+ return errRequestCanceled
+ }
+ }
+
+ for {
+ select {
+ case <-cs.respHeaderRecv:
+ res := cs.res
+ if res.StatusCode > 299 {
+ // On error or status code 3xx, 4xx, 5xx, etc abort any
+ // ongoing write, assuming that the server doesn't care
+ // about our request body. If the server replied with 1xx or
+ // 2xx, however, then assume the server DOES potentially
+ // want our body (e.g. full-duplex streaming:
+ // golang.org/issue/13444). If it turns out the server
+ // doesn't, they'll RST_STREAM us soon enough. This is a
+ // heuristic to avoid adding knobs to Transport. Hopefully
+ // we can keep it.
+ cs.abortRequestBodyWrite(errStopReqBodyWrite)
+ }
+ res.Request = req
+ res.TLS = cc.tlsState
+ if res.Body == noBody && actualContentLength(req) == 0 {
+ // If there isn't a request or response body still being
+ // written, then wait for the stream to be closed before
+ // RoundTrip returns.
+ if err := waitDone(); err != nil {
+ return nil, err
+ }
+ }
+ return res, nil
+ case <-cs.abort:
+ waitDone()
+ return nil, cs.abortErr
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-req.Cancel:
+ return nil, errRequestCanceled
+ }
+ }
+}
+
+// doRequest runs for the duration of the request lifetime.
+//
+// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
+func (cs *clientStream) doRequest() {
+ err := cs.writeRequest()
+ cs.cleanupWriteRequest(err)
+}
+
+// writeRequest sends a request.
+//
+// It returns nil after the request is written, the response read,
+// and the request stream is half-closed by the peer.
+//
+// It returns non-nil if the request ends otherwise.
+// If the returned error is StreamError, the error Code may be used in resetting the stream.
+func (cs *clientStream) writeRequest() (err error) {
+ cc := cs.cc
+ req := cs.req
+ ctx := req.Context()
+
+ if err := checkConnHeaders(cs.req); err != nil {
+ return err
+ }
+
+ // Acquire the new-request lock by writing to reqHeaderMu.
+ // This lock guards the critical section covering allocating a new stream ID
+ // (requires mu) and creating the stream (requires wmu).
+ if cc.reqHeaderMu == nil {
+ panic("RoundTrip on uninitialized ClientConn") // for tests
+ }
+ select {
+ case cc.reqHeaderMu <- struct{}{}:
+ case <-req.Cancel:
+ return errRequestCanceled
+ case <-ctx.Done():
+ return ctx.Err()
}
- hasTrailers := trailers != ""
cc.mu.Lock()
- if err := cc.awaitOpenSlotForRequest(req); err != nil {
+ if cc.idleTimer != nil {
+ cc.idleTimer.Stop()
+ }
+ cc.decrStreamReservationsLocked()
+ if err := cc.awaitOpenSlotForStreamLocked(cs); err != nil {
cc.mu.Unlock()
- return nil, false, err
+ <-cc.reqHeaderMu
+ return err
}
-
- body := req.Body
- contentLen := actualContentLength(req)
- hasBody := contentLen != 0
+ cc.addStreamLocked(cs) // assigns stream ID
+ cc.mu.Unlock()
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
- var requestedGzip bool
if !cc.t.disableCompression() &&
req.Header.Get("Accept-Encoding") == "" &&
req.Header.Get("Range") == "" &&
@@ -1054,184 +1137,218 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
// We don't request gzip if the request is for a range, since
// auto-decoding a portion of a gzipped document will just fail
// anyway. See https://golang.org/issue/8923
- requestedGzip = true
+ cs.requestedGzip = true
}
- // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
- // sent by writeRequestBody below, along with any Trailers,
- // again in form HEADERS{1}, CONTINUATION{0,})
- hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
- if err != nil {
- cc.mu.Unlock()
- return nil, false, err
+ continueTimeout := cc.t.expectContinueTimeout()
+ if continueTimeout != 0 {
+ if !httpguts.HeaderValuesContainsToken(req.Header["Expect"], "100-continue") {
+ continueTimeout = 0
+ } else {
+ // Only allocate on100 when we will actually wait for a 100.
+ cs.on100 = make(chan struct{}, 1)
+ }
+ }
- cs := cc.newStream()
- cs.req = req
- cs.trace = httptrace.ContextClientTrace(req.Context())
- cs.requestedGzip = requestedGzip
- bodyWriter := cc.t.getBodyWriterState(cs, body)
- cs.on100 = bodyWriter.on100
+ err = cs.encodeAndWriteHeaders()
+ <-cc.reqHeaderMu
+ if err != nil {
+ return err
+ }
- defer func() {
- cc.wmu.Lock()
- werr := cc.werr
- cc.wmu.Unlock()
- if werr != nil {
- cc.Close()
+ hasBody := actualContentLength(cs.req) != 0
+ if !hasBody {
+ cs.sentEndStream = true
+ } else {
+ if continueTimeout != 0 {
+ traceWait100Continue(cs.trace)
+ timer := time.NewTimer(continueTimeout)
+ select {
+ case <-timer.C:
+ err = nil
+ case <-cs.on100:
+ err = nil
+ case <-cs.abort:
+ err = cs.abortErr
+ case <-ctx.Done():
+ err = ctx.Err()
+ case <-req.Cancel:
+ err = errRequestCanceled
+ }
+ timer.Stop()
+ if err != nil {
+ traceWroteRequest(cs.trace, err)
+ return err
+ }
}
- }()
- cc.wmu.Lock()
- endStream := !hasBody && !hasTrailers
- werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
- cc.wmu.Unlock()
- traceWroteHeaders(cs.trace)
- cc.mu.Unlock()
-
- if werr != nil {
- if hasBody {
- bodyWriter.cancel()
+ if err = cs.writeRequestBody(req.Body); err != nil {
+ if err != errStopReqBodyWrite {
+ traceWroteRequest(cs.trace, err)
+ return err
+ }
+ } else {
+ cs.sentEndStream = true
}
- cc.forgetStreamID(cs.ID)
- // Don't bother sending a RST_STREAM (our write already failed;
- // no need to keep writing)
- traceWroteRequest(cs.trace, werr)
- // TODO(dneil): An error occurred while writing the headers.
- // Should we return an error indicating that this request can be retried?
- return nil, false, werr
}
+ traceWroteRequest(cs.trace, err)
+
var respHeaderTimer <-chan time.Time
- if hasBody {
- bodyWriter.scheduleBodyWrite()
- } else {
- traceWroteRequest(cs.trace, nil)
- if d := cc.responseHeaderTimeout(); d != 0 {
- timer := time.NewTimer(d)
- defer timer.Stop()
- respHeaderTimer = timer.C
+ var respHeaderRecv chan struct{}
+ if d := cc.responseHeaderTimeout(); d != 0 {
+ timer := time.NewTimer(d)
+ defer timer.Stop()
+ respHeaderTimer = timer.C
+ respHeaderRecv = cs.respHeaderRecv
+ }
+ // Wait until the peer half-closes its end of the stream,
+ // or until the request is aborted (via context, error, or otherwise),
+ // whichever comes first.
+ for {
+ select {
+ case <-cs.peerClosed:
+ return nil
+ case <-respHeaderTimer:
+ return errTimeout
+ case <-respHeaderRecv:
+ respHeaderTimer = nil // keep waiting for END_STREAM
+ case <-cs.abort:
+ return cs.abortErr
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-req.Cancel:
+ return errRequestCanceled
}
}
+}
- readLoopResCh := cs.resc
- bodyWritten := false
+func (cs *clientStream) encodeAndWriteHeaders() error {
+ cc := cs.cc
+ req := cs.req
ctx := req.Context()
- handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) {
- res := re.res
- if re.err != nil || res.StatusCode > 299 {
- // On error or status code 3xx, 4xx, 5xx, etc abort any
- // ongoing write, assuming that the server doesn't care
- // about our request body. If the server replied with 1xx or
- // 2xx, however, then assume the server DOES potentially
- // want our body (e.g. full-duplex streaming:
- // golang.org/issue/13444). If it turns out the server
- // doesn't, they'll RST_STREAM us soon enough. This is a
- // heuristic to avoid adding knobs to Transport. Hopefully
- // we can keep it.
- bodyWriter.cancel()
- cs.abortRequestBodyWrite(errStopReqBodyWrite)
- if hasBody && !bodyWritten {
- <-bodyWriter.resc
- }
- }
- if re.err != nil {
- cc.forgetStreamID(cs.ID)
- return nil, cs.getStartedWrite(), re.err
- }
- res.Request = req
- res.TLS = cc.tlsState
- return res, false, nil
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+
+ // If the request was canceled while waiting for cc.wmu, just quit.
+ select {
+ case <-cs.abort:
+ return cs.abortErr
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-req.Cancel:
+ return errRequestCanceled
+ default:
}
- handleError := func(err error) (*http.Response, bool, error) {
- if !hasBody || bodyWritten {
- cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- } else {
- bodyWriter.cancel()
- cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
- <-bodyWriter.resc
+ // Encode headers.
+ //
+ // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
+ // sent by writeRequestBody below, along with any Trailers,
+ // again in form HEADERS{1}, CONTINUATION{0,})
+ trailers, err := commaSeparatedTrailers(cs.req)
+ if err != nil {
+ return err
+ }
+ hasTrailers := trailers != ""
+ contentLen := actualContentLength(cs.req)
+ hasBody := contentLen != 0
+ hdrs, err := cc.encodeHeaders(cs.req, cs.requestedGzip, trailers, contentLen)
+ if err != nil {
+ return err
+ }
+
+ // Write the request.
+ endStream := !hasBody && !hasTrailers
+ cs.sentHeaders = true
+ err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
+ traceWroteHeaders(cs.trace)
+ return err
+}
+
+// cleanupWriteRequest performs post-request tasks.
+//
+// If err (the result of writeRequest) is non-nil and the stream is not closed,
+// cleanupWriteRequest will send a reset to the peer.
+func (cs *clientStream) cleanupWriteRequest(err error) {
+ cc := cs.cc
+ req := cs.req
+
+ if cs.ID == 0 {
+ // We were canceled before creating the stream, so return our reservation.
+ cc.decrStreamReservations()
+ }
+
+ // TODO: write h12Compare test showing whether
+ // Request.Body is closed by the Transport,
+ // and in multiple cases: server replies <=299 and >299
+ // while still writing request body
+ if req.Body != nil {
+ if e := req.Body.Close(); err == nil {
+ err = e
}
- cc.forgetStreamID(cs.ID)
- return nil, cs.getStartedWrite(), err
}
- for {
+ if err != nil && cs.sentEndStream {
+ // If the connection is closed immediately after the response is read,
+ // we may be aborted before finishing up here. If the stream was closed
+ // cleanly on both sides, there is no error.
select {
- case re := <-readLoopResCh:
- return handleReadLoopResponse(re)
- case <-respHeaderTimer:
- return handleError(errTimeout)
- case <-ctx.Done():
- return handleError(ctx.Err())
- case <-req.Cancel:
- return handleError(errRequestCanceled)
- case <-cs.peerReset:
- // processResetStream already removed the
- // stream from the streams map; no need for
- // forgetStreamID.
- return nil, cs.getStartedWrite(), cs.resetErr
- case err := <-bodyWriter.resc:
- bodyWritten = true
- // Prefer the read loop's response, if available. Issue 16102.
- select {
- case re := <-readLoopResCh:
- return handleReadLoopResponse(re)
- default:
- }
- if err != nil {
- cc.forgetStreamID(cs.ID)
- return nil, cs.getStartedWrite(), err
- }
- if d := cc.responseHeaderTimeout(); d != 0 {
- timer := time.NewTimer(d)
- defer timer.Stop()
- respHeaderTimer = timer.C
+ case <-cs.peerClosed:
+ err = nil
+ default:
+ }
+ }
+ if err != nil {
+ cs.abortStream(err) // possibly redundant, but harmless
+ if cs.sentHeaders {
+ if se, ok := err.(StreamError); ok {
+ if se.Cause != errFromPeer {
+ cc.writeStreamReset(cs.ID, se.Code, err)
+ }
+ } else {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
}
}
+ cs.bufPipe.CloseWithError(err) // no-op if already closed
+ } else {
+ if cs.sentHeaders && !cs.sentEndStream {
+ cc.writeStreamReset(cs.ID, ErrCodeNo, nil)
+ }
+ cs.bufPipe.CloseWithError(errRequestCanceled)
+ }
+ if cs.ID != 0 {
+ cc.forgetStreamID(cs.ID)
+ }
+ close(cs.donec)
+
+ cc.wmu.Lock()
+ werr := cc.werr
+ cc.wmu.Unlock()
+ if werr != nil {
+ cc.Close()
}
}
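cleanupWriteRequest gathers what used to be scattered reset logic into one place. A sketch of just the reset-code decision, under simplified assumptions: resetToSend is illustrative, treats sentHeaders/sentEndStream as plain booleans, and omits the peerClosed recheck and the errFromPeer suppression seen above.

package main

import "fmt"

type ErrCode uint32

const (
	ErrCodeNo     ErrCode = 0x0 // NO_ERROR
	ErrCodeCancel ErrCode = 0x8 // CANCEL
)

// resetToSend mirrors the cleanup above: a failed write cancels the
// stream; a successful write that never sent END_STREAM is reset with
// NO_ERROR so the server stops replying. It reports (code, true) when
// a RST_STREAM should be written.
func resetToSend(err error, sentHeaders, sentEndStream bool) (ErrCode, bool) {
	if !sentHeaders {
		return 0, false // the stream never reached the peer
	}
	if err != nil {
		return ErrCodeCancel, true
	}
	if !sentEndStream {
		return ErrCodeNo, true
	}
	return 0, false
}

func main() {
	fmt.Println(resetToSend(nil, true, false))               // 0 true
	fmt.Println(resetToSend(fmt.Errorf("boom"), true, true)) // 8 true
}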
-// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams.
+// awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams.
// Must hold cc.mu.
-func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
- var waitingForConn chan struct{}
- var waitingForConnErr error // guarded by cc.mu
+func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
for {
cc.lastActive = time.Now()
if cc.closed || !cc.canTakeNewRequestLocked() {
- if waitingForConn != nil {
- close(waitingForConn)
- }
return errClientConnUnusable
}
cc.lastIdle = time.Time{}
if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) {
- if waitingForConn != nil {
- close(waitingForConn)
- }
return nil
}
- // Unfortunately, we cannot wait on a condition variable and channel at
- // the same time, so instead, we spin up a goroutine to check if the
- // request is canceled while we wait for a slot to open in the connection.
- if waitingForConn == nil {
- waitingForConn = make(chan struct{})
- go func() {
- if err := awaitRequestCancel(req, waitingForConn); err != nil {
- cc.mu.Lock()
- waitingForConnErr = err
- cc.cond.Broadcast()
- cc.mu.Unlock()
- }
- }()
- }
cc.pendingRequests++
cc.cond.Wait()
cc.pendingRequests--
- if waitingForConnErr != nil {
- return waitingForConnErr
+ select {
+ case <-cs.abort:
+ return cs.abortErr
+ default:
}
}
}
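The helper goroutine from the old awaitOpenSlotForRequest is gone because an abort channel can simply be re-polled after every cond.Wait wakeup, provided whoever closes the channel also broadcasts. A generic sketch of that wait-or-abort pattern, with illustrative names:

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// waitForSlot blocks on cond until slots() reports room or abort is
// closed. cond must be associated with mu, and whoever closes abort
// must also call cond.Broadcast() so waiters wake up and notice.
func waitForSlot(mu *sync.Mutex, cond *sync.Cond, slots func() int, abort <-chan struct{}) error {
	mu.Lock()
	defer mu.Unlock()
	for {
		if slots() > 0 {
			return nil
		}
		cond.Wait()
		select {
		case <-abort:
			return errors.New("aborted while waiting for a slot")
		default:
		}
	}
}

func main() {
	var mu sync.Mutex
	cond := sync.NewCond(&mu)
	abort := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(abort)
		cond.Broadcast()
	}()
	fmt.Println(waitForSlot(&mu, cond, func() int { return 0 }, abort))
}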
@@ -1258,10 +1375,6 @@ func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize
cc.fr.WriteContinuation(streamID, endHeaders, chunk)
}
}
- // TODO(bradfitz): this Flush could potentially block (as
- // could the WriteHeaders call(s) above), which means they
- // wouldn't respond to Request.Cancel being readable. That's
- // rare, but this should probably be in a goroutine.
cc.bw.Flush()
return cc.werr
}
@@ -1303,28 +1416,10 @@ func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
var bufPool sync.Pool // of *[]byte
-func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
+func (cs *clientStream) writeRequestBody(body io.Reader) (err error) {
cc := cs.cc
sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
- defer func() {
- traceWroteRequest(cs.trace, err)
- // TODO: write h12Compare test showing whether
- // Request.Body is closed by the Transport,
- // and in multiple cases: server replies <=299 and >299
- // while still writing request body
- var cerr error
- cc.mu.Lock()
- if cs.stopReqBody == nil {
- cs.stopReqBody = errStopReqBodyWrite
- cerr = bodyCloser.Close()
- }
- cc.mu.Unlock()
- if err == nil {
- err = cerr
- }
- }()
-
req := cs.req
hasTrailers := req.Trailer != nil
remainLen := actualContentLength(req)
@@ -1365,7 +1460,6 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
}
if remainLen < 0 {
err = errReqBodyTooLong
- cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
return err
}
}
@@ -1373,7 +1467,6 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
sawEOF = true
err = nil
} else if err != nil {
- cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
return err
}
@@ -1385,7 +1478,6 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
case err == errStopReqBodyWrite:
return err
case err == errStopReqBodyWriteAndCancel:
- cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
return err
case err != nil:
return err
@@ -1418,19 +1510,15 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
return nil
}
+ cc.wmu.Lock()
var trls []byte
if hasTrailers {
- cc.mu.Lock()
trls, err = cc.encodeTrailers(req)
- cc.mu.Unlock()
if err != nil {
- cc.writeStreamReset(cs.ID, ErrCodeInternal, err)
- cc.forgetStreamID(cs.ID)
+ cc.wmu.Unlock()
return err
}
}
-
- cc.wmu.Lock()
defer cc.wmu.Unlock()
// Two ways to send END_STREAM: either with trailers, or
@@ -1452,6 +1540,8 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
// if the stream is dead.
func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
cc := cs.cc
+ req := cs.req
+ ctx := req.Context()
cc.mu.Lock()
defer cc.mu.Unlock()
for {
@@ -1461,8 +1551,14 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
if cs.stopReqBody != nil {
return 0, cs.stopReqBody
}
- if err := cs.checkResetOrDone(); err != nil {
- return 0, err
+ select {
+ case <-cs.abort:
+ return 0, cs.abortErr
+ case <-ctx.Done():
+ return 0, ctx.Err()
+ case <-req.Cancel:
+ return 0, errRequestCanceled
+ default:
}
if a := cs.flow.available(); a > 0 {
take := a
@@ -1480,9 +1576,14 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
}
}
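The lines after this select take the smallest of the stream's available window, the bytes the caller wants to write, and the peer's SETTINGS_MAX_FRAME_SIZE. A sketch of that min-of-three arithmetic (takeFlow is illustrative; the real method also deducts the taken amount from cs.flow before returning):

package main

import "fmt"

// takeFlow returns how much the caller may send right now: capped by
// the available stream window, the requested byte count, and the
// peer's maximum frame size.
func takeFlow(available, maxBytes, maxFrameSize int32) int32 {
	take := available
	if take > maxBytes {
		take = maxBytes
	}
	if take > maxFrameSize {
		take = maxFrameSize
	}
	return take
}

func main() {
	fmt.Println(takeFlow(65535, 4096, 16384)) // 4096
	fmt.Println(takeFlow(100, 4096, 16384))   // 100
}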
-// requires cc.mu be held.
+var errNilRequestURL = errors.New("http2: Request.URL is nil")
+
+// requires cc.wmu be held.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
cc.hbuf.Reset()
+ if req.URL == nil {
+ return nil, errNilRequestURL
+ }
host := req.Host
if host == "" {
@@ -1668,7 +1769,7 @@ func shouldSendReqContentLength(method string, contentLength int64) bool {
}
}
-// requires cc.mu be held.
+// requires cc.wmu be held.
func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) {
cc.hbuf.Reset()
@@ -1713,51 +1814,51 @@ type resAndError struct {
}
// requires cc.mu be held.
-func (cc *ClientConn) newStream() *clientStream {
- cs := &clientStream{
- cc: cc,
- ID: cc.nextStreamID,
- resc: make(chan resAndError, 1),
- peerReset: make(chan struct{}),
- done: make(chan struct{}),
- }
+func (cc *ClientConn) addStreamLocked(cs *clientStream) {
cs.flow.add(int32(cc.initialWindowSize))
cs.flow.setConnFlow(&cc.flow)
cs.inflow.add(transportDefaultStreamFlow)
cs.inflow.setConnFlow(&cc.inflow)
+ cs.ID = cc.nextStreamID
cc.nextStreamID += 2
cc.streams[cs.ID] = cs
- return cs
+ if cs.ID == 0 {
+ panic("assigned stream ID 0")
+ }
}
func (cc *ClientConn) forgetStreamID(id uint32) {
- cc.streamByID(id, true)
-}
-
-func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
cc.mu.Lock()
- defer cc.mu.Unlock()
- cs := cc.streams[id]
- if andRemove && cs != nil && !cc.closed {
- cc.lastActive = time.Now()
- delete(cc.streams, id)
- if len(cc.streams) == 0 && cc.idleTimer != nil {
- cc.idleTimer.Reset(cc.idleTimeout)
- cc.lastIdle = time.Now()
- }
- close(cs.done)
- // Wake up checkResetOrDone via clientStream.awaitFlowControl and
- // wake up RoundTrip if there is a pending request.
- cc.cond.Broadcast()
+ slen := len(cc.streams)
+ delete(cc.streams, id)
+ if len(cc.streams) != slen-1 {
+ panic("forgetting unknown stream id")
+ }
+ cc.lastActive = time.Now()
+ if len(cc.streams) == 0 && cc.idleTimer != nil {
+ cc.idleTimer.Reset(cc.idleTimeout)
+ cc.lastIdle = time.Now()
+ }
+ // Wake up writeRequestBody via clientStream.awaitFlowControl and
+ // wake up RoundTrip if there is a pending request.
+ cc.cond.Broadcast()
+
+ closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives()
+ if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
+ if VerboseLogs {
+ cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2)
+ }
+ cc.closed = true
+ defer cc.tconn.Close()
}
- return cs
+
+ cc.mu.Unlock()
}
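The close-on-idle check folded into forgetStreamID replaces the read loop's old closeWhenIdle tracking. A sketch of the predicate under simplified assumptions (plain booleans and counters stand in for the ClientConn fields):

package main

import "fmt"

// shouldCloseOnIdle mirrors the check above: a connection that is
// single-use, marked do-not-reuse, or owned by a transport with
// keep-alives disabled is closed once no streams remain and no
// reservations are outstanding.
func shouldCloseOnIdle(singleUse, doNotReuse, disableKeepAlives bool, streams, reserved int) bool {
	closeOnIdle := singleUse || doNotReuse || disableKeepAlives
	return closeOnIdle && reserved == 0 && streams == 0
}

func main() {
	fmt.Println(shouldCloseOnIdle(true, false, false, 0, 0)) // true
	fmt.Println(shouldCloseOnIdle(true, false, false, 0, 1)) // false: reservation pending
}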
// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
type clientConnReadLoop struct {
- _ incomparable
- cc *ClientConn
- closeWhenIdle bool
+ _ incomparable
+ cc *ClientConn
}
// readLoop runs in its own goroutine and reads and dispatches frames.
@@ -1817,23 +1918,43 @@ func (rl *clientConnReadLoop) cleanup() {
} else if err == io.EOF {
err = io.ErrUnexpectedEOF
}
+ cc.closed = true
for _, cs := range cc.streams {
- cs.bufPipe.CloseWithError(err) // no-op if already closed
- select {
- case cs.resc <- resAndError{err: err}:
- default:
- }
- close(cs.done)
+ cs.abortStreamLocked(err)
}
- cc.closed = true
cc.cond.Broadcast()
cc.mu.Unlock()
}
+// countReadFrameError calls Transport.CountError with a string
+// representing err.
+func (cc *ClientConn) countReadFrameError(err error) {
+ f := cc.t.CountError
+ if f == nil || err == nil {
+ return
+ }
+ if ce, ok := err.(ConnectionError); ok {
+ errCode := ErrCode(ce)
+ f(fmt.Sprintf("read_frame_conn_error_%s", errCode.stringToken()))
+ return
+ }
+ if errors.Is(err, io.EOF) {
+ f("read_frame_eof")
+ return
+ }
+ if errors.Is(err, io.ErrUnexpectedEOF) {
+ f("read_frame_unexpected_eof")
+ return
+ }
+ if errors.Is(err, ErrFrameTooLarge) {
+ f("read_frame_too_large")
+ return
+ }
+ f("read_frame_other")
+}
+
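Transport.CountError is the hook these tokens feed. A plausible consumer, shown as a usage sketch: only the CountError field itself is the real API here, the expvar wiring is an assumption.

package main

import (
	"expvar"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	// Bucket error tokens emitted by countReadFrameError and the
	// recv_goaway_/recv_rststream_ paths into an expvar.Map.
	counts := expvar.NewMap("http2_transport_errors")
	t := &http2.Transport{
		CountError: func(errType string) {
			counts.Add(errType, 1)
		},
	}
	_ = t
	fmt.Println("tokens look like read_frame_eof or recv_goaway_PROTOCOL_ERROR")
}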
func (rl *clientConnReadLoop) run() error {
cc := rl.cc
- rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
- gotReply := false // ever saw a HEADERS reply
gotSettings := false
readIdleTimeout := cc.t.ReadIdleTimeout
var t *time.Timer
@@ -1850,9 +1971,7 @@ func (rl *clientConnReadLoop) run() error {
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
}
if se, ok := err.(StreamError); ok {
- if cs := cc.streamByID(se.StreamID, false); cs != nil {
- cs.cc.writeStreamReset(cs.ID, se.Code, err)
- cs.cc.forgetStreamID(cs.ID)
+ if cs := rl.streamByID(se.StreamID); cs != nil {
if se.Cause == nil {
se.Cause = cc.fr.errDetail
}
@@ -1860,6 +1979,7 @@ func (rl *clientConnReadLoop) run() error {
}
continue
} else if err != nil {
+ cc.countReadFrameError(err)
return err
}
if VerboseLogs {
@@ -1872,22 +1992,16 @@ func (rl *clientConnReadLoop) run() error {
}
gotSettings = true
}
- maybeIdle := false // whether frame might transition us to idle
switch f := f.(type) {
case *MetaHeadersFrame:
err = rl.processHeaders(f)
- maybeIdle = true
- gotReply = true
case *DataFrame:
err = rl.processData(f)
- maybeIdle = true
case *GoAwayFrame:
err = rl.processGoAway(f)
- maybeIdle = true
case *RSTStreamFrame:
err = rl.processResetStream(f)
- maybeIdle = true
case *SettingsFrame:
err = rl.processSettings(f)
case *PushPromiseFrame:
@@ -1905,38 +2019,24 @@ func (rl *clientConnReadLoop) run() error {
}
return err
}
- if rl.closeWhenIdle && gotReply && maybeIdle {
- cc.closeIfIdle()
- }
}
}
func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
- cc := rl.cc
- cs := cc.streamByID(f.StreamID, false)
+ cs := rl.streamByID(f.StreamID)
if cs == nil {
// We'd get here if we canceled a request while the
// server had its response still in flight. So if this
// was just something we canceled, ignore it.
return nil
}
- if f.StreamEnded() {
- // Issue 20521: If the stream has ended, streamByID() causes
- // clientStream.done to be closed, which causes the request's bodyWriter
- // to be closed with an errStreamClosed, which may be received by
- // clientConn.RoundTrip before the result of processing these headers.
- // Deferring stream closure allows the header processing to occur first.
- // clientConn.RoundTrip may still receive the bodyWriter error first, but
- // the fix for issue 16102 prioritises any response.
- //
- // Issue 22413: If there is no request body, we should close the
- // stream before writing to cs.resc so that the stream is closed
- // immediately once RoundTrip returns.
- if cs.req.Body != nil {
- defer cc.forgetStreamID(f.StreamID)
- } else {
- cc.forgetStreamID(f.StreamID)
- }
+ if cs.readClosed {
+ rl.endStreamError(cs, StreamError{
+ StreamID: f.StreamID,
+ Code: ErrCodeProtocol,
+ Cause: errors.New("protocol error: headers after END_STREAM"),
+ })
+ return nil
}
if !cs.firstByte {
if cs.trace != nil {
@@ -1960,9 +2060,11 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
return err
}
// Any other error type is a stream error.
- cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)
- cc.forgetStreamID(cs.ID)
- cs.resc <- resAndError{err: err}
+ rl.endStreamError(cs, StreamError{
+ StreamID: f.StreamID,
+ Code: ErrCodeProtocol,
+ Cause: err,
+ })
return nil // return nil from process* funcs to keep conn alive
}
if res == nil {
@@ -1970,7 +2072,11 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
return nil
}
cs.resTrailer = &res.Trailer
- cs.resc <- resAndError{res: res}
+ cs.res = res
+ close(cs.respHeaderRecv)
+ if f.StreamEnded() {
+ rl.endStream(cs)
+ }
return nil
}
@@ -2032,6 +2138,9 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
}
if statusCode >= 100 && statusCode <= 199 {
+ if f.StreamEnded() {
+ return nil, errors.New("1xx informational response with END_STREAM flag")
+ }
cs.num1xx++
const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
if cs.num1xx > max1xxResponses {
@@ -2044,8 +2153,9 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
}
if statusCode == 100 {
traceGot100Continue(cs.trace)
- if cs.on100 != nil {
- cs.on100() // forces any write delay timer to fire
+ select {
+ case cs.on100 <- struct{}{}:
+ default:
}
}
cs.pastHeaders = false // do it all again
@@ -2074,10 +2184,9 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
return res, nil
}
- cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}}
+ cs.bufPipe.setBuffer(&dataBuffer{expected: res.ContentLength})
cs.bytesRemain = res.ContentLength
res.Body = transportResponseBody{cs}
- go cs.awaitRequestCancel(cs.req)
if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
res.Header.Del("Content-Encoding")
@@ -2137,7 +2246,7 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) {
n = int(cs.bytesRemain)
if err == nil {
err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
- cc.writeStreamReset(cs.ID, ErrCodeProtocol, err)
+ cs.abortStream(err)
}
cs.readErr = err
return int(cs.bytesRemain), err
@@ -2155,8 +2264,6 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) {
}
cc.mu.Lock()
- defer cc.mu.Unlock()
-
var connAdd, streamAdd int32
// Check the conn-level first, before the stream-level.
if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
@@ -2173,6 +2280,8 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) {
cs.inflow.add(streamAdd)
}
}
+ cc.mu.Unlock()
+
if connAdd != 0 || streamAdd != 0 {
cc.wmu.Lock()
defer cc.wmu.Unlock()
@@ -2193,34 +2302,40 @@ func (b transportResponseBody) Close() error {
cs := b.cs
cc := cs.cc
- serverSentStreamEnd := cs.bufPipe.Err() == io.EOF
unread := cs.bufPipe.Len()
-
- if unread > 0 || !serverSentStreamEnd {
+ if unread > 0 {
cc.mu.Lock()
- cc.wmu.Lock()
- if !serverSentStreamEnd {
- cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
- cs.didReset = true
- }
// Return connection-level flow control.
if unread > 0 {
cc.inflow.add(int32(unread))
+ }
+ cc.mu.Unlock()
+
+ cc.wmu.Lock()
+ // Send a WINDOW_UPDATE to return the connection-level flow control.
+ if unread > 0 {
cc.fr.WriteWindowUpdate(0, uint32(unread))
}
cc.bw.Flush()
cc.wmu.Unlock()
- cc.mu.Unlock()
}
cs.bufPipe.BreakWithError(errClosedResponseBody)
- cc.forgetStreamID(cs.ID)
+ cs.abortStream(errClosedResponseBody)
+
+ select {
+ case <-cs.donec:
+ case <-cs.req.Context().Done():
+ return cs.req.Context().Err()
+ case <-cs.req.Cancel:
+ return errRequestCanceled
+ }
return nil
}
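From a caller's perspective the contract is unchanged: always close the response body. With the rewrite above, Close also returns any unread bytes to the connection-level window and resets the stream, so an abandoned body no longer starves other streams on the connection. A routine usage sketch (the URL and read limit are arbitrary):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func fetchStatus(url string) (int, error) {
	resp, err := http.Get(url)
	if err != nil {
		return 0, err
	}
	// Close handles whatever we do not read.
	defer resp.Body.Close()
	_, _ = io.Copy(io.Discard, io.LimitReader(resp.Body, 1<<20))
	return resp.StatusCode, nil
}

func main() {
	fmt.Println(fetchStatus("https://example.com/"))
}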
func (rl *clientConnReadLoop) processData(f *DataFrame) error {
cc := rl.cc
- cs := cc.streamByID(f.StreamID, f.StreamEnded())
+ cs := rl.streamByID(f.StreamID)
data := f.Data()
if cs == nil {
cc.mu.Lock()
@@ -2249,6 +2364,14 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
}
return nil
}
+ if cs.readClosed {
+ cc.logf("protocol error: received DATA after END_STREAM")
+ rl.endStreamError(cs, StreamError{
+ StreamID: f.StreamID,
+ Code: ErrCodeProtocol,
+ })
+ return nil
+ }
if !cs.firstByte {
cc.logf("protocol error: received DATA before a HEADERS frame")
rl.endStreamError(cs, StreamError{
@@ -2280,30 +2403,39 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
if pad := int(f.Length) - len(data); pad > 0 {
refund += pad
}
- // Return len(data) now if the stream is already closed,
- // since data will never be read.
- didReset := cs.didReset
- if didReset {
- refund += len(data)
+
+ didReset := false
+ var err error
+ if len(data) > 0 {
+ if _, err = cs.bufPipe.Write(data); err != nil {
+ // Return len(data) now if the stream is already closed,
+ // since data will never be read.
+ didReset = true
+ refund += len(data)
+ }
}
+
if refund > 0 {
cc.inflow.add(int32(refund))
+ if !didReset {
+ cs.inflow.add(int32(refund))
+ }
+ }
+ cc.mu.Unlock()
+
+ if refund > 0 {
cc.wmu.Lock()
cc.fr.WriteWindowUpdate(0, uint32(refund))
if !didReset {
- cs.inflow.add(int32(refund))
cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
}
cc.bw.Flush()
cc.wmu.Unlock()
}
- cc.mu.Unlock()
- if len(data) > 0 && !didReset {
- if _, err := cs.bufPipe.Write(data); err != nil {
- rl.endStreamError(cs, err)
- return err
- }
+ if err != nil {
+ rl.endStreamError(cs, err)
+ return nil
}
}
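The refund bookkeeping above is hard to follow in diff form: flow control was consumed by the full frame length, so padding is always refunded, and a payload that can no longer be written (stream already reset) is refunded at the connection level only. A sketch of that arithmetic with illustrative names:

package main

import "fmt"

// dataRefund reports how many bytes of flow control to return and
// whether the stream-level window should be refunded too. frameLen is
// the full frame payload (data plus padding); writeFailed means the
// stream's pipe rejected the data, so it will never be read.
func dataRefund(frameLen, dataLen int, writeFailed bool) (refund int, refundStream bool) {
	if pad := frameLen - dataLen; pad > 0 {
		refund += pad
	}
	if writeFailed {
		refund += dataLen
		return refund, false // connection window only
	}
	return refund, true
}

func main() {
	fmt.Println(dataRefund(1024, 1000, false)) // 24 true
	fmt.Println(dataRefund(1024, 1000, true))  // 1024 false
}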
@@ -2316,24 +2448,26 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
func (rl *clientConnReadLoop) endStream(cs *clientStream) {
// TODO: check that any declared content-length matches, like
// server.go's (*stream).endStream method.
- rl.endStreamError(cs, nil)
+ if !cs.readClosed {
+ cs.readClosed = true
+ cs.bufPipe.closeWithErrorAndCode(io.EOF, cs.copyTrailers)
+ close(cs.peerClosed)
+ }
}
func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
- var code func()
- if err == nil {
- err = io.EOF
- code = cs.copyTrailers
- }
- if isConnectionCloseRequest(cs.req) {
- rl.closeWhenIdle = true
- }
- cs.bufPipe.closeWithErrorAndCode(err, code)
+ cs.readAborted = true
+ cs.abortStream(err)
+}
- select {
- case cs.resc <- resAndError{err: err}:
- default:
+func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream {
+ rl.cc.mu.Lock()
+ defer rl.cc.mu.Unlock()
+ cs := rl.cc.streams[id]
+ if cs != nil && !cs.readAborted {
+ return cs
}
+ return nil
}
func (cs *clientStream) copyTrailers() {
@@ -2352,6 +2486,10 @@ func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
if f.ErrCode != 0 {
// TODO: deal with GOAWAY more. particularly the error code
cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
+ if fn := cc.t.CountError; fn != nil {
+ fn("recv_goaway_" + f.ErrCode.stringToken())
+ }
}
cc.setGoAway(f)
return nil
@@ -2359,6 +2497,23 @@ func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
cc := rl.cc
+ // Locking both mu and wmu here allows frame encoding to read settings with only wmu held.
+ // Acquiring wmu when f.IsAck() is true is unnecessary, but convenient and mostly harmless.
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+
+ if err := rl.processSettingsNoWrite(f); err != nil {
+ return err
+ }
+ if !f.IsAck() {
+ cc.fr.WriteSettingsAck()
+ cc.bw.Flush()
+ }
+ return nil
+}
+
+func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
+ cc := rl.cc
cc.mu.Lock()
defer cc.mu.Unlock()
@@ -2420,17 +2575,12 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
cc.seenSettings = true
}
- cc.wmu.Lock()
- defer cc.wmu.Unlock()
-
- cc.fr.WriteSettingsAck()
- cc.bw.Flush()
- return cc.werr
+ return nil
}
func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
cc := rl.cc
- cs := cc.streamByID(f.StreamID, false)
+ cs := rl.streamByID(f.StreamID)
if f.StreamID != 0 && cs == nil {
return nil
}
@@ -2450,31 +2600,22 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
}
func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
- cs := rl.cc.streamByID(f.StreamID, true)
+ cs := rl.streamByID(f.StreamID)
if cs == nil {
- // TODO: return error if server tries to RST_STEAM an idle stream
+ // TODO: return error if server tries to RST_STREAM an idle stream
return nil
}
- select {
- case <-cs.peerReset:
- // Already reset.
- // This is the only goroutine
- // which closes this, so there
- // isn't a race.
- default:
- serr := streamError(cs.ID, f.ErrCode)
- if f.ErrCode == ErrCodeProtocol {
- rl.cc.SetDoNotReuse()
- serr.Cause = errFromPeer
- // TODO(bradfitz): increment a varz here, once Transport
- // takes an optional interface-typed field that expvar.Map.Add
- // implements.
- }
- cs.resetErr = serr
- close(cs.peerReset)
- cs.bufPipe.CloseWithError(serr)
- cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+ serr := streamError(cs.ID, f.ErrCode)
+ serr.Cause = errFromPeer
+ if f.ErrCode == ErrCodeProtocol {
+ rl.cc.SetDoNotReuse()
+ }
+ if fn := cs.cc.t.CountError; fn != nil {
+ fn("recv_rststream_" + f.ErrCode.stringToken())
}
+ cs.abortStream(serr)
+
+ cs.bufPipe.CloseWithError(serr)
return nil
}
@@ -2496,19 +2637,24 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
}
cc.mu.Unlock()
}
- cc.wmu.Lock()
- if err := cc.fr.WritePing(false, p); err != nil {
- cc.wmu.Unlock()
- return err
- }
- if err := cc.bw.Flush(); err != nil {
- cc.wmu.Unlock()
- return err
- }
- cc.wmu.Unlock()
+ errc := make(chan error, 1)
+ go func() {
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if err := cc.fr.WritePing(false, p); err != nil {
+ errc <- err
+ return
+ }
+ if err := cc.bw.Flush(); err != nil {
+ errc <- err
+ return
+ }
+ }()
select {
case <-c:
return nil
+ case err := <-errc:
+ return err
case <-ctx.Done():
return ctx.Err()
case <-cc.readerDone:
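Moving the PING write into a goroutine means a wedged write path no longer pins Ping past its context deadline: the select now returns via ctx.Done() even if cc.wmu is held indefinitely. A usage sketch of the resulting behavior (pinger and pingWithTimeout are illustrative; only ClientConn.Ping itself is the real API):

package main

import (
	"context"
	"fmt"
	"time"
)

// pinger matches the relevant method of *http2.ClientConn.
type pinger interface {
	Ping(ctx context.Context) error
}

// pingWithTimeout bounds a health-check PING with a deadline.
func pingWithTimeout(cc pinger, d time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), d)
	defer cancel()
	return cc.Ping(ctx)
}

func main() {
	fmt.Println("pingWithTimeout(cc, 5*time.Second) bounds ClientConn.Ping")
}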
@@ -2630,87 +2776,6 @@ type errorReader struct{ err error }
func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }
-// bodyWriterState encapsulates various state around the Transport's writing
-// of the request body, particularly regarding doing delayed writes of the body
-// when the request contains "Expect: 100-continue".
-type bodyWriterState struct {
- cs *clientStream
- timer *time.Timer // if non-nil, we're doing a delayed write
- fnonce *sync.Once // to call fn with
- fn func() // the code to run in the goroutine, writing the body
- resc chan error // result of fn's execution
- delay time.Duration // how long we should delay a delayed write for
-}
-
-func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) {
- s.cs = cs
- if body == nil {
- return
- }
- resc := make(chan error, 1)
- s.resc = resc
- s.fn = func() {
- cs.cc.mu.Lock()
- cs.startedWrite = true
- cs.cc.mu.Unlock()
- resc <- cs.writeRequestBody(body, cs.req.Body)
- }
- s.delay = t.expectContinueTimeout()
- if s.delay == 0 ||
- !httpguts.HeaderValuesContainsToken(
- cs.req.Header["Expect"],
- "100-continue") {
- return
- }
- s.fnonce = new(sync.Once)
-
- // Arm the timer with a very large duration, which we'll
- // intentionally lower later. It has to be large now because
- // we need a handle to it before writing the headers, but the
- // s.delay value is defined to not start until after the
- // request headers were written.
- const hugeDuration = 365 * 24 * time.Hour
- s.timer = time.AfterFunc(hugeDuration, func() {
- s.fnonce.Do(s.fn)
- })
- return
-}
-
-func (s bodyWriterState) cancel() {
- if s.timer != nil {
- if s.timer.Stop() {
- s.resc <- nil
- }
- }
-}
-
-func (s bodyWriterState) on100() {
- if s.timer == nil {
- // If we didn't do a delayed write, ignore the server's
- // bogus 100 continue response.
- return
- }
- s.timer.Stop()
- go func() { s.fnonce.Do(s.fn) }()
-}
-
-// scheduleBodyWrite starts writing the body, either immediately (in
-// the common case) or after the delay timeout. It should not be
-// called until after the headers have been written.
-func (s bodyWriterState) scheduleBodyWrite() {
- if s.timer == nil {
- // We're not doing a delayed write (see
- // getBodyWriterState), so just start the writing
- // goroutine immediately.
- go s.fn()
- return
- }
- traceWait100Continue(s.cs.trace)
- if s.timer.Stop() {
- s.timer.Reset(s.delay)
- }
-}
-
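The deleted bodyWriterState implemented the client half of Expect: 100-continue with a long-armed timer; in the reworked transport the delay is driven inside writeRequest via the cs.on100 channel seen earlier. Callers are unaffected either way, as in this routine sketch (the URL and payload are arbitrary):

package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"
)

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			// How long to wait for a 100 Continue before sending
			// the body anyway; propagated to HTTP/2 when the
			// transport is upgraded via ConfigureTransport.
			ExpectContinueTimeout: 1 * time.Second,
		},
	}
	req, err := http.NewRequest("PUT", "https://example.com/upload",
		strings.NewReader("payload"))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Expect", "100-continue")
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}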
// isConnectionCloseRequest reports whether req should use its own
// connection for a single request and then close the connection.
func isConnectionCloseRequest(req *http.Request) bool {