why makeSlice so expensive on memory usage?

1,225 views
Skip to first unread message

davy zhang

unread,
Feb 8, 2013, 10:32:40 AM2/8/13
to golan...@googlegroups.com
I have a simple echo server test program, which I tested against a C epoll program and a Python 2.7 epoll program. The good news is that golang's goroutines worked as fast as the other 2 programs.

but sadly the memory usage is the biggest of the three. The C program takes less than 1MB, the Python 2.7 version eats up 20MB, but the golang program used 3MB at first, and 54MB after all the packets were processed.

and never freed up back to the first 3MB

I track down the memory report using go tool pprof and get this
Welcome to pprof!  For help, type 'help'.
(pprof) top10
Total: 44.0 MB
    35.0  79.5%  79.5%     35.0  79.5% bytes.makeSlice
     7.5  17.0%  96.6%     43.5  98.9% main.client
     0.5   1.1%  97.7%      0.5   1.1% net.(*pollster).AddFD
     0.5   1.1%  98.9%      0.5   1.1% net.newFD
     0.5   1.1% 100.0%      0.5   1.1% runtime.deferproc
     0.0   0.0% 100.0%     35.0  79.5% bytes.(*Buffer).Write
     0.0   0.0% 100.0%     35.0  79.5% bytes.(*Buffer).grow
     0.0   0.0% 100.0%      0.5   1.1% main.svrListen
     0.0   0.0% 100.0%      1.0   2.3% net.(*TCPConn).Read
     0.0   0.0% 100.0%      0.5   1.1% net.(*TCPListener).Accept
I used 3 buffer.Buffer for msg, send and recv buffer. 

I list the main.client which is my main go routine

(pprof) list main.client
Total: 44.0 MB
ROUTINE ====================== main.client in /tmp/server.go
   7.5   43.5 Total MB (flat / cumulative)
     .      .    8:     "os"
     .      .    9:     "runtime"
     .      .   10:     "runtime/pprof"
     .      .   11: )
     .      .   12: 
---
     .      .   13: func client(conn net.Conn, c chan int) {
   6.5    6.5   14:     data := make([]byte, 4096)
   0.5    0.5   15:     recvBuf := new(bytes.Buffer)
   0.5    0.5   16:     sndBuf := new(bytes.Buffer)
     .      .   17:     msgBuf := new(bytes.Buffer)
     .      .   18:     defer conn.Close()
     .      .   19: 
     .      .   20:     for {
     .    1.0   21:             rn, err := conn.Read(data)
     .      .   22:             //              fmt.Println("read size,err ", rn, ",", err)
     .      .   23:             switch err {
     .      .   24:             case io.EOF:
     .      .   25:                     //                      fmt.Println("end of buff, quit... ", err)
     .      .   26:                     //                      fmt.Println("routine count ",runtime.NumGoroutine())
     .      .   27:                     runtime.GC()
     .      .   28:                     return
     .      .   29:             case nil:
     .    6.5   30:                     recvBuf.Write(data[:rn])
     .      .   31:                     for {
     .      .   32:                             line, err := recvBuf.ReadBytes('\n')
     .      .   33:                             //                                      fmt.Println("bytes recved:", string(line), err)
     .   12.5   34:                             msgBuf.Write(line)
     .      .   35:                             if err == io.EOF {
     .      .   36:                                     //                                              fmt.Println("finally left ", string(line))
     .      .   37:                                     break
     .      .   38:                             }
     .      .   39: 
     .      .   40:                             //                                      fmt.Println("msg recved:",string(msgBuf.Bytes()))
     .      .   41: 
     .   16.0   42:                             sndBuf.Write(msgBuf.Bytes())
     .      .   43:                             //                              wn, werr := sndBuf.WriteTo(conn)
     .      .   44:                             sndBuf.WriteTo(conn)
     .      .   45:                             //                              fmt.Println("written ", wn, werr)
     .      .   46:                             msgBuf.Reset()
     .      .   47:                             recvBuf.Reset()
     .      .   48:                             sndBuf.Reset()
     .      .   49:                             //                                      fmt.Println("left ", len(recvBuf.Bytes()))
     .      .   50:                     }
     .      .   51:             default:
     .      .   52:                     fmt.Println("err:", err)
     .      .   53:                     break
     .      .   54:             }
     .      .   55:     }
     .      .   56: }

Is there anything wrong with the code?

here is my server code and test client code

server code


package main


import (

"bytes"

"fmt"

"io"

"net"

"os"

"runtime"

"runtime/pprof"

)


func client(conn net.Conn, c chan int) {

data := make([]byte, 4096)

recvBuf := new(bytes.Buffer)

sndBuf := new(bytes.Buffer)

msgBuf := new(bytes.Buffer)

defer conn.Close()


for {

rn, err := conn.Read(data)

// fmt.Println("read size,err ", rn, ",", err)

switch err {

case io.EOF:

// fmt.Println("end of buff, quit... ", err)

// fmt.Println("routine count ",runtime.NumGoroutine())

runtime.GC()

return

case nil:

recvBuf.Write(data[:rn])

for {

line, err := recvBuf.ReadBytes('\n')

// fmt.Println("bytes recved:", string(line), err)

msgBuf.Write(line)

if err == io.EOF {

// fmt.Println("finally left ", string(line))

break

}


// fmt.Println("msg recved:",string(msgBuf.Bytes()))


sndBuf.Write(msgBuf.Bytes())

// wn, werr := sndBuf.WriteTo(conn)

sndBuf.WriteTo(conn)

// fmt.Println("written ", wn, werr)

msgBuf.Reset()

recvBuf.Reset()

sndBuf.Reset()

// fmt.Println("left ", len(recvBuf.Bytes()))

}

default:

fmt.Println("err:", err)

break

}

}

}


// svrListen accepts connections on lis and serves each one in a dedicated
// goroutine running client. c is passed through to the per-connection handler.
func svrListen(lis net.Listener, c chan int) {
	for {
		conn, err := lis.Accept()
		if err != nil {
			// The original discarded fmt.Errorf's result (nothing
			// was printed) and then handed a nil conn to client,
			// which would panic. Report and stop accepting.
			fmt.Println("accept error:", err)
			return
		}
		go client(conn, c)
	}
}


// debugListen writes a heap profile to /tmp/memprof each time something
// connects to the debug port, so a snapshot can be triggered with e.g.
// `nc host 6666`. It also stops the CPU profile started in cpuProfileStart.
func debugListen(lis net.Listener, c chan int) {
	for {
		conn, err := lis.Accept()
		if err != nil {
			// Originally fmt.Errorf's result was discarded and a
			// nil conn was used below; report and stop instead.
			fmt.Println("accept error:", err)
			return
		}
		fmt.Println("debug accepted ", conn)
		// Close the profile file each iteration; the original used
		// defer inside this infinite loop, which never runs and
		// leaks one file descriptor per debug connection.
		f, err := os.Create("/tmp/memprof")
		if err != nil {
			fmt.Println("create profile:", err)
			conn.Close()
			continue
		}
		pprof.WriteHeapProfile(f)
		f.Close()
		pprof.StopCPUProfile()
		conn.Close()
	}
}


// cpuProfileStart begins CPU profiling into /tmp/server_cpu. The profile is
// finalized when pprof.StopCPUProfile runs (triggered via debugListen).
func cpuProfileStart() {
	f, err := os.Create("/tmp/server_cpu")
	if err != nil {
		// The original ignored this error and profiled into a nil file.
		fmt.Println("create cpu profile:", err)
		return
	}
	if err := pprof.StartCPUProfile(f); err != nil {
		fmt.Println("start cpu profile:", err)
	}
}


func main() {

var (

host  = ""

port1 = "9000"

port2 = "8000"

c     = make(chan int, 1)

)


lis1, _ := net.Listen("tcp", host+":"+port1)

lis2, _ := net.Listen("tcp", host+":"+port2)

lis3, _ := net.Listen("tcp", host+":6666")

go svrListen(lis1, c)

go svrListen(lis2, c)

go debugListen(lis3, c)

cpuProfileStart()

code := <-c

for {

if code == -1 {

break

}

}

}





test client code

package main


import (

"bufio"

"fmt"

"net"

"time"

"strings"

)


var addr string = "10.23.1.247:8000"

var debugAddr string = "10.23.1.247:6666"

//var addr string = "10.23.1.246:8000"


//var addr string = "localhost:8000"


// newConn dials the echo server, sends packet count times, waits for each
// echoed line, and finally reports completion on c.
func newConn(c chan int, packet string, count int) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		// Check the error before touching conn: the original deferred
		// conn.Close() first, which panics on a nil conn when Dial fails.
		panic(err)
	}
	defer conn.Close()

	rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
	for i := 0; i < count; i++ {
		rw.WriteString(packet)
		rw.Flush()
		rw.ReadString('\n')
	}
	c <- 1
}


// debugConn pokes the server's debug port to trigger a heap-profile dump,
// then immediately closes the connection.
func debugConn() {
	conn, err := net.Dial("tcp", debugAddr)
	if err != nil {
		// The original ignored the error and deferred Close on a
		// possibly nil conn, which would panic.
		fmt.Println("debug dial:", err)
		return
	}
	conn.Close()
}


// main fires clientCount concurrent connections at the echo server, each
// sending repeatCount 10KB newline-terminated packets, then reports how many
// clients finished and the total elapsed time.
func main() {
	c := make(chan int, 1)
	clientCount := 3000
	repeatCount := 10
	packet := strings.Repeat("0", 1024*10) + "\n"

	for i := 0; i < clientCount; i++ {
		go newConn(c, packet, repeatCount)
	}

	counter := 0
	t1 := time.Now()
	// Idiomatic condition-only for loop (was `for ; counter < clientCount; {`).
	for counter < clientCount {
		counter += <-c
	}
	fmt.Println(counter)
	fmt.Println(time.Since(t1))
	// debugConn()
}


here is the memory profile file I took in the middle of the runtime


heap profile: 88: 862800 [170: 1516240] @ heap/1048576

25: 366848 [75: 819456] @ 0x415907 0x41298d 0x412838 0x419315 0x418e05 0x418ee3 0x400e2d 0x40ee31

32: 335872 [47: 493312] @ 0x415907 0x41298d 0x412838 0x419315 0x418e05 0x418ee3 0x400efa 0x40ee31

13: 53248 [18: 73728] @ 0x415907 0x41298d 0x412838 0x400c3e 0x40ee31

1: 53248 [1: 53248] @ 0x415907 0x4164db 0x405250 0x405b58 0x40637b 0x406738 0x4072f9 0x40756a 0x4356f0 0x431e08 0x4327f5 0x43358a 0x440dcc 0x400cfe 0x40ee31

13: 53248 [17: 75776] @ 0x415907 0x41298d 0x412838 0x419315 0x418e05 0x418ee3 0x400dd4 0x40ee31

1: 96 [1: 96] @ 0x415907 0x416533 0x400c65 0x40ee31

1: 96 [1: 96] @ 0x415907 0x4164db 0x4034ab 0x403584 0x432ad7 0x434d2a 0x442246 0x442340 0x40106d 0x40ee31

1: 96 [1: 96] @ 0x415907 0x416533 0x400c7c 0x40ee31

1: 48 [9: 432] @ 0x415907 0x415abf 0x410970 0x433473 0x440dcc 0x400cfe 0x40ee31







Thanks for any hints


Russ Cox

unread,
Feb 9, 2013, 11:17:45 AM2/9/13
to davy zhang, golang-nuts
You are sending 3,000 10kB lines simultaneously. The Go server will process them simultaneously. That requires 30 MB of memory. The buffers don't know how big they need to be so they grow multiplicatively, explaining how you got to 44 MB of data.

It sounds like your C program is processing fewer connections simultaneously. Perhaps it is single threaded, or perhaps there is a limit to its parallelism?

Russ

davy zhang

unread,
Feb 9, 2013, 10:28:16 PM2/9/13
to golan...@googlegroups.com, davy zhang
Hi,
Thanks so much for looking into my code. Yes, you are right the 4kb buffer is the main reason 4kb * 3000. The C version is single threaded and didn't allocate buffer for every connection. And I changed the buffer to 1kb per client, the memory usage move down to 20MB just like python. and the cpu efficiency remains the same
Reply all
Reply to author
Forward
0 new messages