175 @ 0x44fc10 0x44ab2c 0x44a2ff 0x4e1cee 0x4e1d4a 0x4e3323 0x4f3493 0x5d3fc7 0x5d4484 0x5d786b 0x54948c 0x6aaca1 0x6ab86b 0x6ab9da 0x6ee432 0x6ee251 0x544395 0x54a68d 0x54e177 0x47c391
#	0x44a2fe	net.runtime_pollWait+0x4e	/root/src/go/src/runtime/netpoll.go:160
#	0x4e1ced	net.(*pollDesc).wait+0x2d	/root/src/go/src/net/fd_poll_runtime.go:73
#	0x4e1d49	net.(*pollDesc).waitRead+0x29	/root/src/go/src/net/fd_poll_runtime.go:78
#	0x4e3322	net.(*netFD).Read+0x182	/root/src/go/src/net/fd_unix.go:212
#	0x4f3492	net.(*conn).Read+0x62	/root/src/go/src/net/net.go:173
#	0x5d3fc6	crypto/tls.(*block).readFromUntil+0x86	/root/src/go/src/crypto/tls/conn.go:457
#	0x5d4483	crypto/tls.(*Conn).readRecord+0xb3	/root/src/go/src/crypto/tls/conn.go:559
#	0x5d786a	crypto/tls.(*Conn).Read+0xfa	/root/src/go/src/crypto/tls/conn.go:1006
#	0x54948b	net/http.(*connReader).Read+0x12b	/root/src/go/src/net/http/server.go:550
#	0x6aaca0	bufio.(*Reader).fill+0x100	/root/src/go/src/bufio/bufio.go:97
#	0x6ab86a	bufio.(*Reader).ReadSlice+0xaa	/root/src/go/src/bufio/bufio.go:330
#	0x6ab9d9	bufio.(*Reader).ReadLine+0x29	/root/src/go/src/bufio/bufio.go:359
#	0x6ee431	net/textproto.(*Reader).readLineSlice+0x51	/root/src/go/src/net/textproto/reader.go:55
#	0x6ee250	net/textproto.(*Reader).ReadLine+0x20	/root/src/go/src/net/textproto/reader.go:36
#	0x544394	net/http.readRequest+0x94	/root/src/go/src/net/http/request.go:772
#	0x54a68c	net/http.(*conn).readRequest+0xfc	/root/src/go/src/net/http/server.go:729
#	0x54e176	net/http.(*conn).serve+0x3a6	/root/src/go/src/net/http/server.go:1469
#	0x0
This happens when there is no activity over HTTP for a long period. I'm not sure whether it's Cloudflare keeping connections open or a bug in Go's HTTP/2. In this instance, netstat also shows 182 established TCP connections to the server's socket.
2 @ 0x44fc10 0x44ab2c 0x44a2ff 0x4e1cee 0x4e1d4a 0x4e3323 0x4f3493 0x5d3fc7 0x5d4484 0x5d786b 0x6aaca1 0x6ab282 0x4c8df8 0x4c8f4e 0x51e26e 0x51ea31 0x539b5c 0x53969c 0x47c391
#	0x44a2fe	net.runtime_pollWait+0x4e	/root/src/go/src/runtime/netpoll.go:160
#	0x4e1ced	net.(*pollDesc).wait+0x2d	/root/src/go/src/net/fd_poll_runtime.go:73
#	0x4e1d49	net.(*pollDesc).waitRead+0x29	/root/src/go/src/net/fd_poll_runtime.go:78
#	0x4e3322	net.(*netFD).Read+0x182	/root/src/go/src/net/fd_unix.go:212
#	0x4f3492	net.(*conn).Read+0x62	/root/src/go/src/net/net.go:173
#	0x5d3fc6	crypto/tls.(*block).readFromUntil+0x86	/root/src/go/src/crypto/tls/conn.go:457
#	0x5d4483	crypto/tls.(*Conn).readRecord+0xb3	/root/src/go/src/crypto/tls/conn.go:559
#	0x5d786a	crypto/tls.(*Conn).Read+0xfa	/root/src/go/src/crypto/tls/conn.go:1006
#	0x6aaca0	bufio.(*Reader).fill+0x100	/root/src/go/src/bufio/bufio.go:97
#	0x6ab281	bufio.(*Reader).Read+0x1a1	/root/src/go/src/bufio/bufio.go:209
#	0x4c8df7	io.ReadAtLeast+0x97	/root/src/go/src/io/io.go:315
#	0x4c8f4d	io.ReadFull+0x4d	/root/src/go/src/io/io.go:333
#	0x51e26d	net/http.http2readFrameHeader+0x6d	/root/src/go/src/net/http/h2_bundle.go:744
#	0x51ea30	net/http.(*http2Framer).ReadFrame+0x90	/root/src/go/src/net/http/h2_bundle.go:966
#	0x539b5b	net/http.(*http2clientConnReadLoop).run+0x6b	/root/src/go/src/net/http/h2_bundle.go:5734
#	0x53969b	net/http.(*http2ClientConn).readLoop+0x9b	/root/src/go/src/net/http/h2_bundle.go:5695
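For anyone trying to reproduce this, the dumps above look like the output of the net/http/pprof goroutine profile. A minimal sketch of exposing that endpoint, assuming a separate debug port (the listen address is just an example):

package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers the /debug/pprof/* handlers on http.DefaultServeMux
)

func main() {
	// The goroutine dump in the "N @ 0x..." format is then available at
	// http://localhost:6060/debug/pprof/goroutine?debug=1
	log.Fatal(http.ListenAndServe("localhost:6060", nil))
}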
I found one resp.Body that wasn't closed, but it was completely consumed, so I don't believe it causes a leak. I'll continue investigating.
The client must close the response body when finished with it:
Clients must call resp.Body.Close when finished reading resp.Body.
Callers should close resp.Body when done reading from it. If resp.Body is not closed, the Client's underlying RoundTripper (typically Transport) may not be able to re-use a persistent TCP connection to the server for a subsequent "keep-alive" request.
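For reference, a minimal sketch of the pattern those docs describe: close resp.Body in every case, and drain it before closing so the Transport can reuse the underlying keep-alive connection (the URL is just a placeholder):

package main

import (
	"io"
	"io/ioutil"
	"log"
	"net/http"
)

// fetch issues a GET and fully consumes and closes the response body so the
// underlying connection can be returned to the Transport's idle pool.
func fetch(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close() // always close, even if the body was fully read

	// Drain whatever remains of the body before it is closed.
	if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
		return err
	}
	return nil
}

func main() {
	if err := fetch("https://example.com/"); err != nil {
		log.Fatal(err)
	}
}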
Both Keep-Alives and HTTP2 not working. High latency for every connection.
https://groups.google.com/d/topic/golang-nuts/K5sJ3gX1YO8/discussion
By default, Transport caches connections for future re-use. This may leave many open connections when accessing many hosts. This behavior can be managed using Transport's CloseIdleConnections method and the MaxIdleConnsPerHost and DisableKeepAlives fields.
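A minimal sketch of tuning those Transport knobs on the client side; the specific values here are assumptions, not recommendations:

package main

import (
	"net/http"
	"time"
)

func main() {
	// Placeholder values; tune for the actual workload.
	tr := &http.Transport{
		MaxIdleConnsPerHost: 4,     // cap idle connections cached per host
		DisableKeepAlives:   false, // leave keep-alives on
	}
	client := &http.Client{Transport: tr, Timeout: 30 * time.Second}

	// ... issue requests with client ...
	_ = client

	// When done talking to many hosts, drop the cached idle connections.
	tr.CloseIdleConnections()
}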
> o I'm not setting ReadTimeout or WriteTimeout at all. Does that matter?
Yes, because you will potentially leak FDs when you don't time out.
goroutine 273959793 [IO wait, 1607 minutes]:
net.runtime_pollWait(0x7f2f44618248, 0x72, 0xc829ab1000)
	/root/src/go/src/runtime/netpoll.go:160 +0x60
net.(*pollDesc).Wait(0xc829bee290, 0x72, 0x0, 0x0)
	/root/src/go/src/net/fd_poll_runtime.go:73 +0x3a
net.(*pollDesc).WaitRead(0xc829bee290, 0x0, 0x0)
	/root/src/go/src/net/fd_poll_runtime.go:78 +0x36
net.(*netFD).Read(0xc829bee230, 0xc829ab1000, 0x1000, 0x1000, 0x0, 0x7f2f4ef91050, 0xc820016180)
	/root/src/go/src/net/fd_unix.go:250 +0x23a
net.(*conn).Read(0xc82a7bf230, 0xc829ab1000, 0x1000, 0x1000, 0x0, 0x0, 0x0)
	/root/src/go/src/net/net.go:172 +0xe4
net/http.(*connReader).Read(0xc82c9244c0, 0xc829ab1000, 0x1000, 0x1000, 0x0, 0x0, 0x0)
	/root/src/go/src/net/http/server.go:526 +0x196
bufio.(*Reader).fill(0xc821d50420)
	/root/src/go/src/bufio/bufio.go:97 +0x1e9
bufio.(*Reader).ReadSlice(0xc821d50420, 0xa, 0x0, 0x0, 0x0, 0x0, 0x0)
	/root/src/go/src/bufio/bufio.go:328 +0x21a
bufio.(*Reader).ReadLine(0xc821d50420, 0x0, 0x0, 0x0, 0xc23400, 0x0, 0x0)
	/root/src/go/src/bufio/bufio.go:357 +0x53
net/textproto.(*Reader).readLineSlice(0xc835c44150, 0x0, 0x0, 0x0, 0x0, 0x0)
	/root/src/go/src/net/textproto/reader.go:55 +0x81
net/textproto.(*Reader).ReadLine(0xc835c44150, 0x0, 0x0, 0x0, 0x0)
	/root/src/go/src/net/textproto/reader.go:36 +0x40
net/http.readRequest(0xc821d50420, 0x0, 0xc8289e3260, 0x0, 0x0)
	/root/src/go/src/net/http/request.go:721 +0xb6
net/http.(*conn).readRequest(0xc823c64f00, 0x0, 0x0, 0x0)
	/root/src/go/src/net/http/server.go:705 +0x359
net/http.(*conn).serve(0xc823c64f00)
	/root/src/go/src/net/http/server.go:1425 +0x947
created by net/http.(*Server).Serve
	/root/src/go/src/net/http/server.go:2137 +0x44e
It does not. If you are seeing connections held open for more than 90s (CloudFlare's default read timeout) or more than 100s (the write timeout), per our nginx config today, then the remote end isn't holding the connection open; the local machine just thinks that is the case.
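To keep goroutines from blocking in readRequest indefinitely (as in the trace above), timeouts can be set on http.Server. A minimal sketch, with placeholder addresses, timeout values, and certificate paths:

package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr:         ":443",
		Handler:      http.DefaultServeMux,
		ReadTimeout:  30 * time.Second, // bounds reading a request, so stuck readRequest goroutines eventually exit
		WriteTimeout: 60 * time.Second, // bounds writing the response
	}
	// Placeholder certificate paths.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}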