Diffstat (limited to 'llgo/third_party/gofrontend/libgo/go/net/http/server.go')
-rw-r--r--   llgo/third_party/gofrontend/libgo/go/net/http/server.go   279
1 file changed, 225 insertions, 54 deletions
diff --git a/llgo/third_party/gofrontend/libgo/go/net/http/server.go b/llgo/third_party/gofrontend/libgo/go/net/http/server.go
index 008d5aa7a74..a3e43555bb3 100644
--- a/llgo/third_party/gofrontend/libgo/go/net/http/server.go
+++ b/llgo/third_party/gofrontend/libgo/go/net/http/server.go
@@ -15,6 +15,7 @@ import (
 	"io/ioutil"
 	"log"
 	"net"
+	"net/textproto"
 	"net/url"
 	"os"
 	"path"
@@ -55,9 +56,12 @@ type Handler interface {
 // A ResponseWriter interface is used by an HTTP handler to
 // construct an HTTP response.
 type ResponseWriter interface {
-	// Header returns the header map that will be sent by WriteHeader.
-	// Changing the header after a call to WriteHeader (or Write) has
-	// no effect.
+	// Header returns the header map that will be sent by
+	// WriteHeader. Changing the header after a call to
+	// WriteHeader (or Write) has no effect unless the modified
+	// headers were declared as trailers by setting the
+	// "Trailer" header before the call to WriteHeader (see example).
+	// To suppress implicit response headers, set their value to nil.
 	Header() Header
 
 	// Write writes the data to the connection as part of an HTTP reply.
@@ -93,8 +97,14 @@ type Hijacker interface {
 	// Hijack lets the caller take over the connection.
 	// After a call to Hijack(), the HTTP server library
 	// will not do anything else with the connection.
+	//
 	// It becomes the caller's responsibility to manage
 	// and close the connection.
+	//
+	// The returned net.Conn may have read or write deadlines
+	// already set, depending on the configuration of the
+	// Server. It is the caller's responsibility to set
+	// or clear those deadlines as needed.
 	Hijack() (net.Conn, *bufio.ReadWriter, error)
 }
 
@@ -120,6 +130,7 @@ type conn struct {
 	lr         *io.LimitedReader    // io.LimitReader(sr)
 	buf        *bufio.ReadWriter    // buffered(lr,rwc), reading from bufio->limitReader->sr->rwc
 	tlsState   *tls.ConnectionState // or nil when not using TLS
+	lastMethod string               // method of previous request, or ""
 
 	mu           sync.Mutex // guards the following
 	clientGone   bool       // if client has disconnected mid-request
@@ -188,20 +199,14 @@ func (c *conn) noteClientGone() {
 	c.clientGone = true
 }
 
-// A switchReader can have its Reader changed at runtime.
-// It's not safe for concurrent Reads and switches.
-type switchReader struct {
-	io.Reader
-}
-
 // A switchWriter can have its Writer changed at runtime.
 // It's not safe for concurrent Writes and switches.
 type switchWriter struct {
 	io.Writer
 }
 
-// A liveSwitchReader is a switchReader that's safe for concurrent
-// reads and switches, if its mutex is held.
+// A liveSwitchReader can have its Reader changed at runtime. It's
+// safe for concurrent reads and switches, if its mutex is held.
 type liveSwitchReader struct {
 	sync.Mutex
 	r io.Reader
@@ -288,10 +293,21 @@ func (cw *chunkWriter) close() {
 		cw.writeHeader(nil)
 	}
 	if cw.chunking {
-		// zero EOF chunk, trailer key/value pairs (currently
-		// unsupported in Go's server), followed by a blank
-		// line.
-		cw.res.conn.buf.WriteString("0\r\n\r\n")
+		bw := cw.res.conn.buf // conn's bufio writer
+		// zero chunk to mark EOF
+		bw.WriteString("0\r\n")
+		if len(cw.res.trailers) > 0 {
+			trailers := make(Header)
+			for _, h := range cw.res.trailers {
+				if vv := cw.res.handlerHeader[h]; len(vv) > 0 {
+					trailers[h] = vv
+				}
+			}
+			trailers.Write(bw) // the writer handles noting errors
+		}
+		// final blank line after the trailers (whether
+		// present or not)
+		bw.WriteString("\r\n")
 	}
 }
 
@@ -332,6 +348,12 @@ type response struct {
 	// input from it.
 	requestBodyLimitHit bool
 
+	// trailers are the headers to be sent after the handler
+	// finishes writing the body. This field is initialized from
+	// the Trailer response header when the response header is
+	// written.
+	trailers []string
+
 	handlerDone bool // set true when the handler exits
 
 	// Buffers for Date and Content-Length
@@ -339,6 +361,19 @@ type response struct {
 	clenBuf [10]byte
 }
 
+// declareTrailer is called for each Trailer header when the
+// response header is written. It notes that a header will need to be
+// written in the trailers at the end of the response.
+func (w *response) declareTrailer(k string) {
+	k = CanonicalHeaderKey(k)
+	switch k {
+	case "Transfer-Encoding", "Content-Length", "Trailer":
+		// Forbidden by RFC 2616 14.40.
+		return
+	}
+	w.trailers = append(w.trailers, k)
+}
+
 // requestTooLarge is called by maxBytesReader when too much input has
 // been read from the client.
 func (w *response) requestTooLarge() {
@@ -438,7 +473,7 @@ func (srv *Server) newConn(rwc net.Conn) (c *conn, err error) {
 	if debugServerConnections {
 		c.rwc = newLoggingConn("server", c.rwc)
 	}
-	c.sr = liveSwitchReader{r: c.rwc}
+	c.sr.r = c.rwc
 	c.lr = io.LimitReader(&c.sr, noLimit).(*io.LimitedReader)
 	br := newBufioReader(c.lr)
 	bw := newBufioWriterSize(checkConnErrorWriter{c}, 4<<10)
@@ -468,6 +503,8 @@ func newBufioReader(r io.Reader) *bufio.Reader {
 		br.Reset(r)
 		return br
 	}
+	// Note: if this reader size is every changed, update
+	// TestHandlerBodyClose's assumptions.
 	return bufio.NewReader(r)
 }
 
@@ -517,6 +554,7 @@ type expectContinueReader struct {
 	resp       *response
 	readCloser io.ReadCloser
 	closed     bool
+	sawEOF     bool
 }
 
 func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
@@ -528,7 +566,11 @@ func (ecr *expectContinueReader) Read(p []byte) (n int, err error) {
 		ecr.resp.conn.buf.WriteString("HTTP/1.1 100 Continue\r\n\r\n")
 		ecr.resp.conn.buf.Flush()
 	}
-	return ecr.readCloser.Read(p)
+	n, err = ecr.readCloser.Read(p)
+	if err == io.EOF {
+		ecr.sawEOF = true
+	}
+	return
 }
 
 func (ecr *expectContinueReader) Close() error {
@@ -582,6 +624,11 @@ func (c *conn) readRequest() (w *response, err error) {
 	}
 
 	c.lr.N = c.server.initialLimitedReaderSize()
+	if c.lastMethod == "POST" {
+		// RFC 2616 section 4.1 tolerance for old buggy clients.
+		peek, _ := c.buf.Reader.Peek(4) // ReadRequest will get err below
+		c.buf.Reader.Discard(numLeadingCRorLF(peek))
+	}
 	var req *Request
 	if req, err = ReadRequest(c.buf.Reader); err != nil {
 		if c.lr.N == 0 {
@@ -590,9 +637,13 @@ func (c *conn) readRequest() (w *response, err error) {
 		return nil, err
 	}
 	c.lr.N = noLimit
+	c.lastMethod = req.Method
 
 	req.RemoteAddr = c.remoteAddr
 	req.TLS = c.tlsState
+	if body, ok := req.Body.(*body); ok {
+		body.doEarlyClose = true
+	}
 
 	w = &response{
 		conn:          c,
@@ -747,6 +798,15 @@ func (cw *chunkWriter) writeHeader(p []byte) {
 	}
 	var setHeader extraHeader
 
+	trailers := false
+	for _, v := range cw.header["Trailer"] {
+		trailers = true
+		foreachHeaderElement(v, cw.res.declareTrailer)
+	}
+
+	te := header.get("Transfer-Encoding")
+	hasTE := te != ""
+
 	// If the handler is done but never sent a Content-Length
 	// response header and this is our first (and last) write, set
 	// it, even to zero. This helps HTTP/1.0 clients keep their
@@ -759,7 +819,9 @@ func (cw *chunkWriter) writeHeader(p []byte) {
 	// write non-zero bytes. If it's actually 0 bytes and the
 	// handler never looked at the Request.Method, we just don't
 	// send a Content-Length header.
-	if w.handlerDone && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
+	// Further, we don't send an automatic Content-Length if they
+	// set a Transfer-Encoding, because they're generally incompatible.
+	if w.handlerDone && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) {
 		w.contentLength = int64(len(p))
 		setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10)
 	}
@@ -789,21 +851,78 @@ func (cw *chunkWriter) writeHeader(p []byte) {
 		w.closeAfterReply = true
 	}
 
+	// If the client wanted a 100-continue but we never sent it to
+	// them (or, more strictly: we never finished reading their
+	// request body), don't reuse this connection because it's now
+	// in an unknown state: we might be sending this response at
+	// the same time the client is now sending its request body
+	// after a timeout. (Some HTTP clients send Expect:
+	// 100-continue but knowing that some servers don't support
+	// it, the clients set a timer and send the body later anyway)
+	// If we haven't seen EOF, we can't skip over the unread body
+	// because we don't know if the next bytes on the wire will be
+	// the body-following-the-timer or the subsequent request.
+	// See Issue 11549.
+	if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF {
+		w.closeAfterReply = true
+	}
+
 	// Per RFC 2616, we should consume the request body before
 	// replying, if the handler hasn't already done so. But we
 	// don't want to do an unbounded amount of reading here for
 	// DoS reasons, so we only try up to a threshold.
 	if w.req.ContentLength != 0 && !w.closeAfterReply {
-		ecr, isExpecter := w.req.Body.(*expectContinueReader)
-		if !isExpecter || ecr.resp.wroteContinue {
-			n, _ := io.CopyN(ioutil.Discard, w.req.Body, maxPostHandlerReadBytes+1)
-			if n >= maxPostHandlerReadBytes {
-				w.requestTooLarge()
-				delHeader("Connection")
-				setHeader.connection = "close"
-			} else {
-				w.req.Body.Close()
+		var discard, tooBig bool
+
+		switch bdy := w.req.Body.(type) {
+		case *expectContinueReader:
+			if bdy.resp.wroteContinue {
+				discard = true
+			}
+		case *body:
+			bdy.mu.Lock()
+			switch {
+			case bdy.closed:
+				if !bdy.sawEOF {
+					// Body was closed in handler with non-EOF error.
+					w.closeAfterReply = true
+				}
+			case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes:
+				tooBig = true
+			default:
+				discard = true
 			}
+			bdy.mu.Unlock()
+		default:
+			discard = true
+		}
+
+		if discard {
+			_, err := io.CopyN(ioutil.Discard, w.req.Body, maxPostHandlerReadBytes+1)
+			switch err {
+			case nil:
+				// There must be even more data left over.
+				tooBig = true
+			case ErrBodyReadAfterClose:
+				// Body was already consumed and closed.
+			case io.EOF:
+				// The remaining body was just consumed, close it.
+				err = w.req.Body.Close()
+				if err != nil {
+					w.closeAfterReply = true
+				}
+			default:
+				// Some other kind of error occured, like a read timeout, or
+				// corrupt chunked encoding. In any case, whatever remains
+				// on the wire must not be parsed as another HTTP request.
+				w.closeAfterReply = true
+			}
+		}
+
+		if tooBig {
+			w.requestTooLarge()
+			delHeader("Connection")
+			setHeader.connection = "close"
 		}
 	}
 
@@ -811,7 +930,7 @@ func (cw *chunkWriter) writeHeader(p []byte) {
 	if bodyAllowedForStatus(code) {
 		// If no content type, apply sniffing algorithm to body.
 		_, haveType := header["Content-Type"]
-		if !haveType {
+		if !haveType && !hasTE {
 			setHeader.contentType = DetectContentType(p)
 		}
 	} else {
@@ -824,8 +943,6 @@ func (cw *chunkWriter) writeHeader(p []byte) {
 		setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now())
 	}
 
-	te := header.get("Transfer-Encoding")
-	hasTE := te != ""
 	if hasCL && hasTE && te != "identity" {
 		// TODO: return an error if WriteHeader gets a return parameter
 		// For now just ignore the Content-Length.
@@ -885,6 +1002,24 @@ func (cw *chunkWriter) writeHeader(p []byte) {
 	w.conn.buf.Write(crlf)
 }
 
+// foreachHeaderElement splits v according to the "#rule" construction
+// in RFC 2616 section 2.1 and calls fn for each non-empty element.
+func foreachHeaderElement(v string, fn func(string)) {
+	v = textproto.TrimString(v)
+	if v == "" {
+		return
+	}
+	if !strings.Contains(v, ",") {
+		fn(v)
+		return
+	}
+	for _, f := range strings.Split(v, ",") {
+		if f = textproto.TrimString(f); f != "" {
+			fn(f)
+		}
+	}
+}
+
 // statusLines is a cache of Status-Line strings, keyed by code (for
 // HTTP/1.1) or negative code (for HTTP/1.0). This is faster than a
 // map keyed by struct of two fields. This map's max size is bounded
@@ -930,7 +1065,7 @@ func statusLine(req *Request, code int) string {
 	return line
 }
 
-// bodyAllowed returns true if a Write is allowed for this response type.
+// bodyAllowed reports whether a Write is allowed for this response type.
 // It's illegal to call this before the header has been flushed.
 func (w *response) bodyAllowed() bool {
 	if !w.wroteHeader {
@@ -1027,17 +1162,39 @@ func (w *response) finishRequest() {
 	if w.req.MultipartForm != nil {
 		w.req.MultipartForm.RemoveAll()
 	}
+}
+
+// shouldReuseConnection reports whether the underlying TCP connection can be reused.
+// It must only be called after the handler is done executing.
+func (w *response) shouldReuseConnection() bool {
+	if w.closeAfterReply {
+		// The request or something set while executing the
+		// handler indicated we shouldn't reuse this
+		// connection.
+		return false
+	}
 
 	if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
 		// Did not write enough. Avoid getting out of sync.
-		w.closeAfterReply = true
+		return false
 	}
 
 	// There was some error writing to the underlying connection
 	// during the request, so don't re-use this conn.
 	if w.conn.werr != nil {
-		w.closeAfterReply = true
+		return false
 	}
+
+	if w.closedRequestBodyEarly() {
+		return false
+	}
+
+	return true
+}
+
+func (w *response) closedRequestBodyEarly() bool {
+	body, ok := w.req.Body.(*body)
+	return ok && body.didEarlyClose()
 }
 
 func (w *response) Flush() {
@@ -1093,7 +1250,7 @@ var _ closeWriter = (*net.TCPConn)(nil)
 // pause for a bit, hoping the client processes it before any
 // subsequent RST.
 //
-// See http://golang.org/issue/3595
+// See https://golang.org/issue/3595
 func (c *conn) closeWriteAndWait() {
 	c.finalFlush()
 	if tcp, ok := c.rwc.(closeWriter); ok {
@@ -1206,8 +1363,8 @@ func (c *conn) serve() {
 			return
 		}
 		w.finishRequest()
-		if w.closeAfterReply {
-			if w.requestBodyLimitHit {
+		if !w.shouldReuseConnection() {
+			if w.requestBodyLimitHit || w.closedRequestBodyEarly() {
 				c.closeWriteAndWait()
 			}
 			break
@@ -1271,6 +1428,7 @@ func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) {
 // The error message should be plain text.
 func Error(w ResponseWriter, error string, code int) {
 	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+	w.Header().Set("X-Content-Type-Options", "nosniff")
 	w.WriteHeader(code)
 	fmt.Fprintln(w, error)
 }
@@ -1576,7 +1734,8 @@ func (mux *ServeMux) Handle(pattern string, handler Handler) {
 			// strings.Index can't be -1.
 			path = pattern[strings.Index(pattern, "/"):]
 		}
-		mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(path, StatusMovedPermanently), pattern: pattern}
+		url := &url.URL{Path: path}
+		mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(url.String(), StatusMovedPermanently), pattern: pattern}
 	}
 }
 
@@ -1760,11 +1919,11 @@ func (s *Server) doKeepAlives() bool {
 // By default, keep-alives are always enabled. Only very
 // resource-constrained environments or servers in the process of
 // shutting down should disable them.
-func (s *Server) SetKeepAlivesEnabled(v bool) {
+func (srv *Server) SetKeepAlivesEnabled(v bool) {
 	if v {
-		atomic.StoreInt32(&s.disableKeepAlives, 0)
+		atomic.StoreInt32(&srv.disableKeepAlives, 0)
 	} else {
-		atomic.StoreInt32(&s.disableKeepAlives, 1)
+		atomic.StoreInt32(&srv.disableKeepAlives, 1)
 	}
 }
 
@@ -1812,7 +1971,7 @@ func ListenAndServe(addr string, handler Handler) error {
 // expects HTTPS connections. Additionally, files containing a certificate and
 // matching private key for the server must be provided. If the certificate
 // is signed by a certificate authority, the certFile should be the concatenation
-// of the server's certificate followed by the CA's certificate.
+// of the server's certificate, any intermediates, and the CA's certificate.
 //
 // A trivial example server is:
 //
@@ -1844,10 +2003,11 @@ func ListenAndServeTLS(addr string, certFile string, keyFile string, handler Han
 // ListenAndServeTLS listens on the TCP network address srv.Addr and
 // then calls Serve to handle requests on incoming TLS connections.
 //
-// Filenames containing a certificate and matching private key for
-// the server must be provided. If the certificate is signed by a
-// certificate authority, the certFile should be the concatenation
-// of the server's certificate followed by the CA's certificate.
+// Filenames containing a certificate and matching private key for the
+// server must be provided if the Server's TLSConfig.Certificates is
+// not populated. If the certificate is signed by a certificate
+// authority, the certFile should be the concatenation of the server's
+// certificate, any intermediates, and the CA's certificate.
 //
 // If srv.Addr is blank, ":https" is used.
 func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
@@ -1855,19 +2015,18 @@ func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
 	if addr == "" {
 		addr = ":https"
 	}
-	config := &tls.Config{}
-	if srv.TLSConfig != nil {
-		*config = *srv.TLSConfig
-	}
+	config := cloneTLSConfig(srv.TLSConfig)
 	if config.NextProtos == nil {
 		config.NextProtos = []string{"http/1.1"}
 	}
 
-	var err error
-	config.Certificates = make([]tls.Certificate, 1)
-	config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
-	if err != nil {
-		return err
+	if len(config.Certificates) == 0 || certFile != "" || keyFile != "" {
+		var err error
+		config.Certificates = make([]tls.Certificate, 1)
+		config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
+		if err != nil {
+			return err
+		}
 	}
 
 	ln, err := net.Listen("tcp", addr)
@@ -2094,3 +2253,15 @@ func (w checkConnErrorWriter) Write(p []byte) (n int, err error) {
 	}
 	return
 }
+
+func numLeadingCRorLF(v []byte) (n int) {
+	for _, b := range v {
+		if b == '\r' || b == '\n' {
+			n++
+			continue
+		}
+		break
+	}
+	return
+
+}
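
The trailer support added above can be exercised from an ordinary handler: trailer names are declared in the "Trailer" header before WriteHeader, and their values are filled in after the body has been written, at which point chunkWriter.close emits them after the terminating zero-length chunk. The following is a minimal sketch, not part of the diff; the handler name, route, and the X-Checksum key are illustrative.

package main

import (
	"fmt"
	"log"
	"net/http"
)

// trailerDemo declares a trailer before the header is written and
// assigns its value once the body has been sent. Because a trailer is
// declared, the server skips the automatic Content-Length and uses
// chunked encoding, which is what allows the trailer to be emitted.
func trailerDemo(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Trailer", "X-Checksum") // declare before WriteHeader
	w.WriteHeader(http.StatusOK)

	fmt.Fprintln(w, "hello") // body bytes, sent with chunked encoding

	// Set after the body: flushed as a trailer following the final
	// zero-length chunk. "abc123" is a placeholder value.
	w.Header().Set("X-Checksum", "abc123")
}

func main() {
	http.HandleFunc("/", trailerDemo)
	log.Fatal(http.ListenAndServe(":8080", nil))
}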
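The ServeMux.Handle change routes the implicit trailing-slash redirect target through url.URL so that characters which are unsafe in a Location header are percent-encoded instead of copied through verbatim. A small illustration of that behavior (the path is made up):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Building the redirect target via url.URL, as the diff now does,
	// percent-encodes characters such as spaces in the path.
	u := &url.URL{Path: "/images gallery/"}
	fmt.Println(u.String()) // /images%20gallery/
}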
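The ListenAndServeTLS change only loads certFile/keyFile when the Server's TLSConfig.Certificates is empty or explicit file names are passed, so a server whose certificate is already held in memory can now pass empty strings. A sketch under that assumption; the PEM file names are placeholders:

package main

import (
	"crypto/tls"
	"log"
	"net/http"
)

func main() {
	// Placeholder file names; any certificate already loaded into
	// memory works just as well here.
	cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
	if err != nil {
		log.Fatal(err)
	}

	srv := &http.Server{
		Addr:      ":8443",
		TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
	}

	// TLSConfig.Certificates is populated, so empty certFile/keyFile
	// arguments are accepted after this change.
	log.Fatal(srv.ListenAndServeTLS("", ""))
}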

