Diffstat (limited to 'vendor/golang.org/x/net/http2')
-rw-r--r--  vendor/golang.org/x/net/http2/h2c/h2c.go                   2
-rw-r--r--  vendor/golang.org/x/net/http2/pipe.go                      6
-rw-r--r--  vendor/golang.org/x/net/http2/server.go                   16
-rw-r--r--  vendor/golang.org/x/net/http2/transport.go                78
-rw-r--r--  vendor/golang.org/x/net/http2/writesched.go                3
-rw-r--r--  vendor/golang.org/x/net/http2/writesched_roundrobin.go   119
6 files changed, 198 insertions(+), 26 deletions(-)
diff --git a/vendor/golang.org/x/net/http2/h2c/h2c.go b/vendor/golang.org/x/net/http2/h2c/h2c.go
index a72bbed1..2d6bf861 100644
--- a/vendor/golang.org/x/net/http2/h2c/h2c.go
+++ b/vendor/golang.org/x/net/http2/h2c/h2c.go
@@ -44,7 +44,7 @@ func init() {
// HTTP/1, but unlikely to occur in practice and (2) Upgrading from HTTP/1 to
// h2c - this works by using the HTTP/1 Upgrade header to request an upgrade to
// h2c. When either of those situations occur we hijack the HTTP/1 connection,
-// convert it to a HTTP/2 connection and pass the net.Conn to http2.ServeConn.
+// convert it to an HTTP/2 connection and pass the net.Conn to http2.ServeConn.
type h2cHandler struct {
Handler http.Handler
s *http2.Server
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go
index c15b8a77..684d984f 100644
--- a/vendor/golang.org/x/net/http2/pipe.go
+++ b/vendor/golang.org/x/net/http2/pipe.go
@@ -88,13 +88,9 @@ func (p *pipe) Write(d []byte) (n int, err error) {
p.c.L = &p.mu
}
defer p.c.Signal()
- if p.err != nil {
+ if p.err != nil || p.breakErr != nil {
return 0, errClosedPipeWrite
}
- if p.breakErr != nil {
- p.unread += len(d)
- return len(d), nil // discard when there is no reader
- }
return p.b.Write(d)
}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 8cb14f3c..033b6e6d 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -441,7 +441,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
if s.NewWriteScheduler != nil {
sc.writeSched = s.NewWriteScheduler()
} else {
- sc.writeSched = NewPriorityWriteScheduler(nil)
+ sc.writeSched = newRoundRobinWriteScheduler()
}
// These start at the RFC-specified defaults. If there is a higher
@@ -1822,15 +1822,18 @@ func (sc *serverConn) processData(f *DataFrame) error {
}
if len(data) > 0 {
+ st.bodyBytes += int64(len(data))
wrote, err := st.body.Write(data)
if err != nil {
+ // The handler has closed the request body.
+ // Return the connection-level flow control for the discarded data,
+ // but not the stream-level flow control.
sc.sendWindowUpdate(nil, int(f.Length)-wrote)
- return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
+ return nil
}
if wrote != len(data) {
panic("internal error: bad Writer")
}
- st.bodyBytes += int64(len(data))
}
// Return any padded flow control now, since we won't
@@ -2426,7 +2429,7 @@ type requestBody struct {
conn *serverConn
closeOnce sync.Once // for use by Close only
sawEOF bool // for use by Read only
- pipe *pipe // non-nil if we have a HTTP entity message body
+ pipe *pipe // non-nil if we have an HTTP entity message body
needsContinue bool // need to send a 100-continue
}
@@ -2566,7 +2569,8 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
clen = ""
}
}
- if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
+ _, hasContentLength := rws.snapHeader["Content-Length"]
+ if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
clen = strconv.Itoa(len(p))
}
_, hasContentType := rws.snapHeader["Content-Type"]
@@ -2771,7 +2775,7 @@ func (w *responseWriter) FlushError() error {
err = rws.bw.Flush()
} else {
// The bufio.Writer won't call chunkWriter.Write
- // (writeChunk with zero bytes, so we have to do it
+ // (writeChunk with zero bytes), so we have to do it
// ourselves to force the HTTP response header and/or
// final DATA frame (with END_STREAM) to be sent.
_, err = chunkWriter{rws}.Write(nil)
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 05ba23d3..b0d482f9 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -19,6 +19,7 @@ import (
"io/fs"
"log"
"math"
+ "math/bits"
mathrand "math/rand"
"net"
"net/http"
@@ -518,11 +519,14 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
func authorityAddr(scheme string, authority string) (addr string) {
host, port, err := net.SplitHostPort(authority)
if err != nil { // authority didn't have a port
+ host = authority
+ port = ""
+ }
+ if port == "" { // authority's port was empty
port = "443"
if scheme == "http" {
port = "80"
}
- host = authority
}
if a, err := idna.ToASCII(host); err == nil {
host = a
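
The patched authorityAddr now treats an authority with an empty port (e.g. "example.com:") the same as one with no port at all, falling back to the scheme default. A minimal, self-contained sketch of the new behavior; authorityAddrSketch is a hypothetical stand-in that omits the real function's IDNA and IPv6-zone handling:

    package main

    import (
        "fmt"
        "net"
    )

    // authorityAddrSketch mirrors the patched logic above (a simplified sketch,
    // not the http2 package's own function).
    func authorityAddrSketch(scheme, authority string) string {
        host, port, err := net.SplitHostPort(authority)
        if err != nil { // authority didn't have a port
            host = authority
            port = ""
        }
        if port == "" { // authority's port was empty, e.g. "example.com:"
            port = "443"
            if scheme == "http" {
                port = "80"
            }
        }
        return net.JoinHostPort(host, port)
    }

    func main() {
        fmt.Println(authorityAddrSketch("https", "example.com"))      // example.com:443
        fmt.Println(authorityAddrSketch("http", "example.com"))       // example.com:80
        fmt.Println(authorityAddrSketch("https", "example.com:"))     // example.com:443
        fmt.Println(authorityAddrSketch("https", "example.com:8443")) // example.com:8443
    }
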
@@ -560,10 +564,11 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
traceGotConn(req, cc, reused)
res, err := cc.RoundTrip(req)
if err != nil && retry <= 6 {
+ roundTripErr := err
if req, err = shouldRetryRequest(req, err); err == nil {
// After the first retry, do exponential backoff with 10% jitter.
if retry == 0 {
- t.vlogf("RoundTrip retrying after failure: %v", err)
+ t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
}
backoff := float64(uint(1) << (uint(retry) - 1))
@@ -572,7 +577,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
timer := backoffNewTimer(d)
select {
case <-timer.C:
- t.vlogf("RoundTrip retrying after failure: %v", err)
+ t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
case <-req.Context().Done():
timer.Stop()
@@ -1265,6 +1270,29 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
return res, nil
}
+ cancelRequest := func(cs *clientStream, err error) error {
+ cs.cc.mu.Lock()
+ bodyClosed := cs.reqBodyClosed
+ cs.cc.mu.Unlock()
+ // Wait for the request body to be closed.
+ //
+ // If nothing closed the body before now, abortStreamLocked
+ // will have started a goroutine to close it.
+ //
+ // Closing the body before returning avoids a race condition
+ // with net/http checking its readTrackingBody to see if the
+ // body was read from or closed. See golang/go#60041.
+ //
+ // The body is closed in a separate goroutine without the
+ // connection mutex held, but dropping the mutex before waiting
+ // will keep us from holding it indefinitely if the body
+ // close is slow for some reason.
+ if bodyClosed != nil {
+ <-bodyClosed
+ }
+ return err
+ }
+
for {
select {
case <-cs.respHeaderRecv:
@@ -1284,10 +1312,10 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
case <-ctx.Done():
err := ctx.Err()
cs.abortStream(err)
- return nil, err
+ return nil, cancelRequest(cs, err)
case <-cs.reqCancel:
cs.abortStream(errRequestCanceled)
- return nil, errRequestCanceled
+ return nil, cancelRequest(cs, errRequestCanceled)
}
}
}
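
The cancelRequest helper above waits on the stream's reqBodyClosed channel so the request body is guaranteed to be closed before RoundTrip returns its error, avoiding the net/http readTrackingBody race described in the comment. A stripped-down sketch of that close-in-a-goroutine, wait-on-a-channel pattern; the names here are hypothetical, not the http2 package's internals:

    package main

    import (
        "fmt"
        "io"
        "strings"
        "time"
    )

    // closeBodyAsync is a hypothetical stand-in for the pattern used above: the
    // body is closed in its own goroutine (so no mutex is held while it runs),
    // and the returned channel is closed once that finishes.
    func closeBodyAsync(body io.Closer) <-chan struct{} {
        done := make(chan struct{})
        go func() {
            body.Close()
            close(done)
        }()
        return done
    }

    func main() {
        body := io.NopCloser(strings.NewReader("request body"))
        done := closeBodyAsync(body)
        // Before returning an error to the caller, wait until the body is
        // really closed, as cancelRequest does with cs.reqBodyClosed.
        select {
        case <-done:
            fmt.Println("body closed before returning")
        case <-time.After(time.Second):
            fmt.Println("gave up waiting for the body to close")
        }
    }
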
@@ -1653,7 +1681,27 @@ func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
return int(n) // doesn't truncate; max is 512K
}
-var bufPool sync.Pool // of *[]byte
+// Seven bufPools manage different frame sizes. This helps to avoid scenarios where long-running
+// streaming requests using small frame sizes occupy large buffers initially allocated for prior
+// requests needing big buffers. The size ranges are as follows:
+// {0 KB, 16 KB], {16 KB, 32 KB], {32 KB, 64 KB], {64 KB, 128 KB], {128 KB, 256 KB],
+// {256 KB, 512 KB], {512 KB, infinity}
+// In practice, the maximum scratch buffer size should not exceed 512 KB due to
+// frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used.
+// It exists mainly as a safety measure, for potential future increases in max buffer size.
+var bufPools [7]sync.Pool // of *[]byte
+func bufPoolIndex(size int) int {
+ if size <= 16384 {
+ return 0
+ }
+ size -= 1
+ bits := bits.Len(uint(size))
+ index := bits - 14
+ if index >= len(bufPools) {
+ return len(bufPools) - 1
+ }
+ return index
+}
func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
cc := cs.cc
@@ -1671,12 +1719,13 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
// Scratch buffer for reading into & writing from.
scratchLen := cs.frameScratchBufferLen(maxFrameSize)
var buf []byte
- if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen {
- defer bufPool.Put(bp)
+ index := bufPoolIndex(scratchLen)
+ if bp, ok := bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen {
+ defer bufPools[index].Put(bp)
buf = *bp
} else {
buf = make([]byte, scratchLen)
- defer bufPool.Put(&buf)
+ defer bufPools[index].Put(&buf)
}
var sawEOF bool
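
To see how scratch buffer sizes map onto the seven pools, here is a small standalone sketch that reproduces the bufPoolIndex arithmetic; bufPoolIndexSketch is a hypothetical copy of the unexported function, with len(bufPools) written out as 7:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // bufPoolIndexSketch mirrors bufPoolIndex above so the pool boundaries can
    // be checked by hand.
    func bufPoolIndexSketch(size int) int {
        if size <= 16384 {
            return 0
        }
        size -= 1
        index := bits.Len(uint(size)) - 14
        if index >= 7 {
            return 7 - 1
        }
        return index
    }

    func main() {
        for _, size := range []int{512, 16384, 16385, 32768, 65536, 524288, 1 << 20} {
            fmt.Printf("size %7d -> pool %d\n", size, bufPoolIndexSketch(size))
        }
        // size     512 -> pool 0   {0 KB, 16 KB]
        // size   16384 -> pool 0
        // size   16385 -> pool 1   {16 KB, 32 KB]
        // size   32768 -> pool 1
        // size   65536 -> pool 2   {32 KB, 64 KB]
        // size  524288 -> pool 5   {256 KB, 512 KB]
        // size 1048576 -> pool 6   the "infinity" pool, unused in practice
    }
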
@@ -1844,6 +1893,9 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
if err != nil {
return nil, err
}
+ if !httpguts.ValidHostHeader(host) {
+ return nil, errors.New("http2: invalid Host header")
+ }
var path string
if req.Method != "CONNECT" {
@@ -1880,7 +1932,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
// 8.1.2.3 Request Pseudo-Header Fields
// The :path pseudo-header field includes the path and query parts of the
// target URI (the path-absolute production and optionally a '?' character
- // followed by the query production (see Sections 3.3 and 3.4 of
+ // followed by the query production, see Sections 3.3 and 3.4 of
// [RFC3986]).
f(":authority", host)
m := req.Method
@@ -2555,6 +2607,9 @@ func (b transportResponseBody) Close() error {
cs := b.cs
cc := cs.cc
+ cs.bufPipe.BreakWithError(errClosedResponseBody)
+ cs.abortStream(errClosedResponseBody)
+
unread := cs.bufPipe.Len()
if unread > 0 {
cc.mu.Lock()
@@ -2573,9 +2628,6 @@ func (b transportResponseBody) Close() error {
cc.wmu.Unlock()
}
- cs.bufPipe.BreakWithError(errClosedResponseBody)
- cs.abortStream(errClosedResponseBody)
-
select {
case <-cs.donec:
case <-cs.ctx.Done():
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
index c7cd0017..cc893adc 100644
--- a/vendor/golang.org/x/net/http2/writesched.go
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -184,7 +184,8 @@ func (wr *FrameWriteRequest) replyToWriter(err error) {
// writeQueue is used by implementations of WriteScheduler.
type writeQueue struct {
- s []FrameWriteRequest
+ s []FrameWriteRequest
+ prev, next *writeQueue
}
func (q *writeQueue) empty() bool { return len(q.s) == 0 }
diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
new file mode 100644
index 00000000..54fe8632
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
@@ -0,0 +1,119 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "fmt"
+ "math"
+)
+
+type roundRobinWriteScheduler struct {
+ // control contains control frames (SETTINGS, PING, etc.).
+ control writeQueue
+
+ // streams maps stream ID to a queue.
+ streams map[uint32]*writeQueue
+
+ // stream queues are stored in a circular linked list.
+ // head is the next stream to write, or nil if there are no streams open.
+ head *writeQueue
+
+ // pool of empty queues for reuse.
+ queuePool writeQueuePool
+}
+
+// newRoundRobinWriteScheduler constructs a new write scheduler.
+// The round robin scheduler prioritizes control frames
+// like SETTINGS and PING over DATA frames.
+// When there are no control frames to send, it performs a round-robin
+// selection from the ready streams.
+func newRoundRobinWriteScheduler() WriteScheduler {
+ ws := &roundRobinWriteScheduler{
+ streams: make(map[uint32]*writeQueue),
+ }
+ return ws
+}
+
+func (ws *roundRobinWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+ if ws.streams[streamID] != nil {
+ panic(fmt.Errorf("stream %d already opened", streamID))
+ }
+ q := ws.queuePool.get()
+ ws.streams[streamID] = q
+ if ws.head == nil {
+ ws.head = q
+ q.next = q
+ q.prev = q
+ } else {
+ // Queues are stored in a ring.
+ // Insert the new stream before ws.head, putting it at the end of the list.
+ q.prev = ws.head.prev
+ q.next = ws.head
+ q.prev.next = q
+ q.next.prev = q
+ }
+}
+
+func (ws *roundRobinWriteScheduler) CloseStream(streamID uint32) {
+ q := ws.streams[streamID]
+ if q == nil {
+ return
+ }
+ if q.next == q {
+ // This was the only open stream.
+ ws.head = nil
+ } else {
+ q.prev.next = q.next
+ q.next.prev = q.prev
+ if ws.head == q {
+ ws.head = q.next
+ }
+ }
+ delete(ws.streams, streamID)
+ ws.queuePool.put(q)
+}
+
+func (ws *roundRobinWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {}
+
+func (ws *roundRobinWriteScheduler) Push(wr FrameWriteRequest) {
+ if wr.isControl() {
+ ws.control.push(wr)
+ return
+ }
+ q := ws.streams[wr.StreamID()]
+ if q == nil {
+ // This is a closed stream.
+ // wr should not be a HEADERS or DATA frame.
+ // We push the request onto the control queue.
+ if wr.DataSize() > 0 {
+ panic("add DATA on non-open stream")
+ }
+ ws.control.push(wr)
+ return
+ }
+ q.push(wr)
+}
+
+func (ws *roundRobinWriteScheduler) Pop() (FrameWriteRequest, bool) {
+ // Control and RST_STREAM frames first.
+ if !ws.control.empty() {
+ return ws.control.shift(), true
+ }
+ if ws.head == nil {
+ return FrameWriteRequest{}, false
+ }
+ q := ws.head
+ for {
+ if wr, ok := q.consume(math.MaxInt32); ok {
+ ws.head = q.next
+ return wr, true
+ }
+ q = q.next
+ if q == ws.head {
+ break
+ }
+ }
+ return FrameWriteRequest{}, false
+}
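
The scheduler keeps the per-stream write queues in a circular doubly linked list: OpenStream inserts the new queue just before head (the back of the rotation), CloseStream unlinks it, and Pop serves the stream at head and then advances head so every ready stream gets a turn (the real Pop also prefers control frames and skips streams with nothing queued). A self-contained sketch of that ring, using hypothetical names rather than the http2 package's own types:

    package main

    import "fmt"

    // ringQueue and ring are hypothetical stand-ins for the writeQueue ring used
    // by roundRobinWriteScheduler.
    type ringQueue struct {
        id         uint32
        prev, next *ringQueue
    }

    type ring struct{ head *ringQueue }

    func (r *ring) open(id uint32) {
        q := &ringQueue{id: id}
        if r.head == nil {
            r.head = q
            q.next, q.prev = q, q
            return
        }
        // Insert before head, i.e. at the end of the rotation.
        q.prev = r.head.prev
        q.next = r.head
        q.prev.next = q
        q.next.prev = q
    }

    func (r *ring) pop() (uint32, bool) {
        if r.head == nil {
            return 0, false
        }
        q := r.head
        r.head = q.next // advance so the next stream gets the next turn
        return q.id, true
    }

    func main() {
        var r ring
        r.open(1)
        r.open(3)
        r.open(5)
        for i := 0; i < 6; i++ {
            id, _ := r.pop()
            fmt.Print(id, " ")
        }
        fmt.Println() // prints: 1 3 5 1 3 5
    }
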