all: replace if/else statements with calls to min or max

Simplify conditional assignments by using the built-in min and max
functions, introduced in Go 1.21.

Generated by

	go fix -minmax ./...

using

	go version go1.26-devel_ad85395442 Wed Dec 17 15:56:48 2025 -0800 linux/amd64
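For illustration only (this example is not part of the generated change),
a minimal sketch of the pattern the fixer applies: a conditional clamp
becomes a single call to the built-in min or max added in Go 1.21.

	package main

	import "fmt"

	func main() {
		n, limit := 42, 10

		// Before: clamp with an explicit if statement.
		c := n
		if c > limit {
			c = limit
		}

		// After: the equivalent one-liner produced by go fix -minmax.
		c2 := min(n, limit)

		fmt.Println(c, c2) // 10 10
	}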
diff --git a/src/archive/zip/zip_test.go b/src/archive/zip/zip_test.go
index 82f6ac4..f08a126 100644
--- a/src/archive/zip/zip_test.go
+++ b/src/archive/zip/zip_test.go
@@ -423,10 +423,7 @@
ss.size += int64(len(p))
if len(ss.buf) < ss.keep {
space := ss.keep - len(ss.buf)
- add := len(p)
- if add > space {
- add = space
- }
+ add := min(len(p), space)
ss.buf = append(ss.buf, p[:add]...)
p = p[add:]
}
diff --git a/src/bufio/bufio_test.go b/src/bufio/bufio_test.go
index a61361a..a11b2dd 100644
--- a/src/bufio/bufio_test.go
+++ b/src/bufio/bufio_test.go
@@ -923,13 +923,7 @@
}
func (t *testReader) Read(buf []byte) (n int, err error) {
- n = t.stride
- if n > len(t.data) {
- n = len(t.data)
- }
- if n > len(buf) {
- n = len(buf)
- }
+ n = min(min(t.stride, len(t.data)), len(buf))
copy(buf, t.data)
t.data = t.data[n:]
if len(t.data) == 0 {
diff --git a/src/bufio/scan_test.go b/src/bufio/scan_test.go
index 9be02df..56b0e3e 100644
--- a/src/bufio/scan_test.go
+++ b/src/bufio/scan_test.go
@@ -544,10 +544,7 @@
func (r *negativeEOFReader) Read(p []byte) (int, error) {
if *r > 0 {
- c := int(*r)
- if c > len(p) {
- c = len(p)
- }
+ c := min(int(*r), len(p))
for i := 0; i < c; i++ {
p[i] = 'a'
}
diff --git a/src/bytes/buffer.go b/src/bytes/buffer.go
index 6cb4d6a..2b7b78d 100644
--- a/src/bytes/buffer.go
+++ b/src/bytes/buffer.go
@@ -258,12 +258,12 @@
//
// Instead use the append-make pattern with a nil slice to ensure that
// we allocate buffers rounded up to the closest size class.
- c := len(b) + n // ensure enough space for n elements
- if c < 2*cap(b) {
+ c := max(
+ // ensure enough space for n elements
+ len(b)+n,
// The growth rate has historically always been 2x. In the future,
// we could rely purely on append to determine the growth rate.
- c = 2 * cap(b)
- }
+ 2*cap(b))
b2 := append([]byte(nil), make([]byte, c)...)
i := copy(b2, b)
return b2[:i]
diff --git a/src/bytes/buffer_test.go b/src/bytes/buffer_test.go
index 6820f1b..6ce616d 100644
--- a/src/bytes/buffer_test.go
+++ b/src/bytes/buffer_test.go
@@ -459,10 +459,7 @@
t.Fatalf("Read %d returned %d", i, n)
}
bb := buf.Next(k)
- want := k
- if want > j-i {
- want = j - i
- }
+ want := min(k, j-i)
if len(bb) != want {
t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb))
}
diff --git a/src/compress/flate/deflate.go b/src/compress/flate/deflate.go
index f6cda02..050c5a0 100644
--- a/src/compress/flate/deflate.go
+++ b/src/compress/flate/deflate.go
@@ -200,10 +200,7 @@
loops := (n + 256 - minMatchLength) / 256
for j := range loops {
index := j * 256
- end := index + 256 + minMatchLength - 1
- if end > n {
- end = n
- }
+ end := min(index+256+minMatchLength-1, n)
toCheck := d.window[index:end]
dstSize := len(toCheck) - minMatchLength + 1
@@ -231,18 +228,12 @@
// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
- minMatchLook := maxMatchLength
- if lookahead < minMatchLook {
- minMatchLook = lookahead
- }
+ minMatchLook := min(lookahead, maxMatchLength)
win := d.window[0 : pos+minMatchLook]
// We quit when we get a match that's at least nice long
- nice := len(win) - pos
- if d.nice < nice {
- nice = d.nice
- }
+ nice := min(d.nice, len(win)-pos)
// If we've got a match that's good enough, only look in 1/4 the chain.
tries := d.chain
@@ -428,10 +419,7 @@
prevOffset := d.offset
d.length = minMatchLength - 1
d.offset = 0
- minIndex := d.index - windowSize
- if minIndex < 0 {
- minIndex = 0
- }
+ minIndex := max(d.index-windowSize, 0)
if d.chainHead-d.hashOffset >= minIndex &&
(d.fastSkipHashing != skipNever && lookahead > minMatchLength-1 ||
diff --git a/src/compress/flate/deflatefast.go b/src/compress/flate/deflatefast.go
index e5554d6..c4d6f83 100644
--- a/src/compress/flate/deflatefast.go
+++ b/src/compress/flate/deflatefast.go
@@ -209,10 +209,7 @@
// t can be negative to indicate the match is starting in e.prev.
// We assume that src[s-4:s] and src[t-4:t] already match.
func (e *deflateFast) matchLen(s, t int32, src []byte) int32 {
- s1 := int(s) + maxMatchLength - 4
- if s1 > len(src) {
- s1 = len(src)
- }
+ s1 := min(int(s)+maxMatchLength-4, len(src))
// If we are inside the current block
if t >= 0 {
@@ -293,14 +290,12 @@
// Shift down everything in the table that isn't already too far away.
for i := range e.table[:] {
- v := e.table[i].offset - e.cur + maxMatchOffset + 1
- if v < 0 {
+ v := max(e.table[i].offset-e.cur+maxMatchOffset+1,
// We want to reset e.cur to maxMatchOffset + 1, so we need to shift
// all table entries down by (e.cur - (maxMatchOffset + 1)).
// Because we ignore matches > maxMatchOffset, we can cap
// any negative offsets at 0.
- v = 0
- }
+ 0)
e.table[i].offset = v
}
e.cur = maxMatchOffset + 1
diff --git a/src/compress/flate/dict_decoder.go b/src/compress/flate/dict_decoder.go
index d2c1904..ae6bf09 100644
--- a/src/compress/flate/dict_decoder.go
+++ b/src/compress/flate/dict_decoder.go
@@ -104,10 +104,7 @@
dstBase := dd.wrPos
dstPos := dstBase
srcPos := dstPos - dist
- endPos := dstPos + length
- if endPos > len(dd.hist) {
- endPos = len(dd.hist)
- }
+ endPos := min(dstPos+length, len(dd.hist))
// Copy non-overlapping section after destination position.
//
diff --git a/src/compress/flate/huffman_bit_writer.go b/src/compress/flate/huffman_bit_writer.go
index 27df489e..07777db 100644
--- a/src/compress/flate/huffman_bit_writer.go
+++ b/src/compress/flate/huffman_bit_writer.go
@@ -234,10 +234,7 @@
w.codegenFreq[size]++
count--
for count >= 3 {
- n := 6
- if n > count {
- n = count
- }
+ n := min(6, count)
codegen[outIndex] = 16
outIndex++
codegen[outIndex] = uint8(n - 3)
@@ -247,10 +244,7 @@
}
} else {
for count >= 11 {
- n := 138
- if n > count {
- n = count
- }
+ n := min(138, count)
codegen[outIndex] = 18
outIndex++
codegen[outIndex] = uint8(n - 11)
diff --git a/src/crypto/cipher/ctr.go b/src/crypto/cipher/ctr.go
index 8e63ed7..7c0d69f 100644
--- a/src/crypto/cipher/ctr.go
+++ b/src/crypto/cipher/ctr.go
@@ -51,10 +51,7 @@
if len(iv) != block.BlockSize() {
panic("cipher.NewCTR: IV length must equal block size")
}
- bufSize := streamBufferSize
- if bufSize < block.BlockSize() {
- bufSize = block.BlockSize()
- }
+ bufSize := max(streamBufferSize, block.BlockSize())
return &ctr{
b: block,
ctr: bytes.Clone(iv),
diff --git a/src/crypto/cipher/ofb.go b/src/crypto/cipher/ofb.go
index ee5dfaf..d8c1ce6 100644
--- a/src/crypto/cipher/ofb.go
+++ b/src/crypto/cipher/ofb.go
@@ -37,10 +37,7 @@
if len(iv) != blockSize {
panic("cipher.NewOFB: IV length must equal block size")
}
- bufSize := streamBufferSize
- if bufSize < blockSize {
- bufSize = blockSize
- }
+ bufSize := max(streamBufferSize, blockSize)
x := &ofb{
b: b,
cipher: make([]byte, blockSize),
diff --git a/src/crypto/internal/fips140/bigmod/nat.go b/src/crypto/internal/fips140/bigmod/nat.go
index c0b07fd..130ceda 100644
--- a/src/crypto/internal/fips140/bigmod/nat.go
+++ b/src/crypto/internal/fips140/bigmod/nat.go
@@ -655,10 +655,7 @@
i := len(x.limbs) - 1
// For the first N - 1 limbs we can skip the actual shifting and position
// them at the shifted position, which starts at min(N - 2, i).
- start := len(m.nat.limbs) - 2
- if i < start {
- start = i
- }
+ start := min(i, len(m.nat.limbs)-2)
for j := start; j >= 0; j-- {
out.limbs[j] = x.limbs[i]
i--
diff --git a/src/crypto/internal/sysrand/rand_getrandom.go b/src/crypto/internal/sysrand/rand_getrandom.go
index 11e9683..31ea1889 100644
--- a/src/crypto/internal/sysrand/rand_getrandom.go
+++ b/src/crypto/internal/sysrand/rand_getrandom.go
@@ -39,10 +39,7 @@
}
for len(b) > 0 {
- size := len(b)
- if size > maxSize {
- size = maxSize
- }
+ size := min(len(b), maxSize)
n, err := unix.GetRandom(b[:size], 0)
if errors.Is(err, syscall.ENOSYS) {
// If getrandom(2) is not available, presumably on Linux versions
diff --git a/src/crypto/rc4/rc4_test.go b/src/crypto/rc4/rc4_test.go
index f092f4f..f18dbec 100644
--- a/src/crypto/rc4/rc4_test.go
+++ b/src/crypto/rc4/rc4_test.go
@@ -106,10 +106,7 @@
off := 0
for off < len(g.keystream) {
- n := len(g.keystream) - off
- if n > size {
- n = size
- }
+ n := min(len(g.keystream)-off, size)
desc := fmt.Sprintf("#%d@[%d:%d]", gi, off, off+n)
testEncrypt(t, desc, c, data[off:off+n], expect[off:off+n])
off += n
diff --git a/src/crypto/tls/conn.go b/src/crypto/tls/conn.go
index a840125..04a5d90 100644
--- a/src/crypto/tls/conn.go
+++ b/src/crypto/tls/conn.go
@@ -288,11 +288,9 @@
good = byte(int32(^t) >> 31)
// The maximum possible padding length plus the actual length field
- toCheck := 256
- // The length of the padded data is public, so we can use an if here
- if toCheck > len(payload) {
- toCheck = len(payload)
- }
+ toCheck := min(
+ // The length of the padded data is public, so we can use an if here
+ 256, len(payload))
for i := 0; i < toCheck; i++ {
t := uint(paddingLen) - uint(i)
@@ -938,10 +936,7 @@
return maxPlaintext // avoid overflow in multiply below
}
- n := payloadBytes * int(pkt+1)
- if n > maxPlaintext {
- n = maxPlaintext
- }
+ n := min(payloadBytes*int(pkt+1), maxPlaintext)
return n
}
diff --git a/src/crypto/tls/tls_test.go b/src/crypto/tls/tls_test.go
index 2a0380d..c59ad73 100644
--- a/src/crypto/tls/tls_test.go
+++ b/src/crypto/tls/tls_test.go
@@ -1061,10 +1061,7 @@
wrote := 0
for wrote < len(p) {
time.Sleep(100 * time.Microsecond)
- allowed := int(time.Since(t0).Seconds()*float64(c.bps)) / 8
- if allowed > len(p) {
- allowed = len(p)
- }
+ allowed := min(int(time.Since(t0).Seconds()*float64(c.bps))/8, len(p))
if wrote < allowed {
n, err := c.Conn.Write(p[wrote:allowed])
wrote += n
diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go
index a59cf38..21de0ba 100644
--- a/src/database/sql/sql.go
+++ b/src/database/sql/sql.go
@@ -1028,10 +1028,7 @@
// The default is 0 (unlimited).
func (db *DB) SetMaxOpenConns(n int) {
db.mu.Lock()
- db.maxOpen = n
- if n < 0 {
- db.maxOpen = 0
- }
+ db.maxOpen = max(n, 0)
syncMaxIdle := db.maxOpen > 0 && db.maxIdleConnsLocked() > db.maxOpen
db.mu.Unlock()
if syncMaxIdle {
@@ -2689,10 +2686,7 @@
// To avoid lock contention on DB.mu, we do it only when
// s.db.numClosed - s.lastNum is large enough.
func (s *Stmt) removeClosedStmtLocked() {
- t := len(s.css)/2 + 1
- if t > 10 {
- t = 10
- }
+ t := min(len(s.css)/2+1, 10)
dbClosed := s.db.numClosed.Load()
if dbClosed-s.lastNumClosed < uint64(t) {
return
diff --git a/src/debug/buildinfo/buildinfo.go b/src/debug/buildinfo/buildinfo.go
index d202d50..6442545 100644
--- a/src/debug/buildinfo/buildinfo.go
+++ b/src/debug/buildinfo/buildinfo.go
@@ -357,10 +357,7 @@
// chunk boundary, but since it must be 16-byte aligned we know it will
// fall within a single chunk.
remaining := end - start
- chunkSize := uint64(searchChunkSize)
- if chunkSize > remaining {
- chunkSize = remaining
- }
+ chunkSize := min(uint64(searchChunkSize), remaining)
if buf == nil {
buf = make([]byte, chunkSize)
diff --git a/src/debug/elf/file_test.go b/src/debug/elf/file_test.go
index 596ad12..0e69e98 100644
--- a/src/debug/elf/file_test.go
+++ b/src/debug/elf/file_test.go
@@ -1174,10 +1174,7 @@
}
// Read data from the new position.
- end := pos + 16
- if end > int64(len(buf)) {
- end = int64(len(buf))
- }
+ end := min(pos+16, int64(len(buf)))
n, err := io.ReadFull(sf, buf[pos:end])
if err != nil {
t.Fatal(err)
diff --git a/src/debug/gosym/symtab.go b/src/debug/gosym/symtab.go
index 08d4668..78a2966 100644
--- a/src/debug/gosym/symtab.go
+++ b/src/debug/gosym/symtab.go
@@ -70,10 +70,7 @@
return ""
}
- pathend := strings.LastIndex(name, "/")
- if pathend < 0 {
- pathend = 0
- }
+ pathend := max(strings.LastIndex(name, "/"), 0)
if i := strings.Index(name[pathend:], "."); i != -1 {
return name[:pathend+i]
@@ -88,10 +85,7 @@
name := s.nameWithoutInst()
// If we find a slash in name, it should precede any bracketed expression
// that was removed, so pathend will apply correctly to name and s.Name.
- pathend := strings.LastIndex(name, "/")
- if pathend < 0 {
- pathend = 0
- }
+ pathend := max(strings.LastIndex(name, "/"), 0)
// Find the first dot after pathend (or from the beginning, if there was
// no slash in name).
l := strings.Index(name[pathend:], ".")
diff --git a/src/encoding/ascii85/ascii85.go b/src/encoding/ascii85/ascii85.go
index 18bf9f0..3323f27 100644
--- a/src/encoding/ascii85/ascii85.go
+++ b/src/encoding/ascii85/ascii85.go
@@ -126,10 +126,7 @@
// Large interior chunks.
for len(p) >= 4 {
- nn := len(e.out) / 5 * 4
- if nn > len(p) {
- nn = len(p)
- }
+ nn := min(len(e.out)/5*4, len(p))
nn -= nn % 4
if nn > 0 {
nout := Encode(e.out[0:], p[0:nn])
diff --git a/src/encoding/ascii85/ascii85_test.go b/src/encoding/ascii85/ascii85_test.go
index 06fde07..0450c1d 100644
--- a/src/encoding/ascii85/ascii85_test.go
+++ b/src/encoding/ascii85/ascii85_test.go
@@ -89,10 +89,7 @@
bb := &strings.Builder{}
encoder := NewEncoder(bb)
for pos := 0; pos < len(input); pos += bs {
- end := pos + bs
- if end > len(input) {
- end = len(input)
- }
+ end := min(pos+bs, len(input))
n, err := encoder.Write(input[pos:end])
testEqual(t, "Write(%q) gave error %v, want %v", input[pos:end], err, error(nil))
testEqual(t, "Write(%q) gave length %v, want %v", input[pos:end], n, end-pos)
diff --git a/src/encoding/base32/base32.go b/src/encoding/base32/base32.go
index 8bda6c6..1f1d47e 100644
--- a/src/encoding/base32/base32.go
+++ b/src/encoding/base32/base32.go
@@ -471,13 +471,7 @@
}
// Read a chunk.
- nn := (len(p) + 4) / 5 * 8
- if nn < 8 {
- nn = 8
- }
- if nn > len(d.buf) {
- nn = len(d.buf)
- }
+ nn := min(max((len(p)+4)/5*8, 8), len(d.buf))
// Minimum amount of bytes that needs to be read each cycle
var min int
diff --git a/src/encoding/base32/base32_test.go b/src/encoding/base32/base32_test.go
index 4b10948..7c92044 100644
--- a/src/encoding/base32/base32_test.go
+++ b/src/encoding/base32/base32_test.go
@@ -78,10 +78,7 @@
bb := &strings.Builder{}
encoder := NewEncoder(StdEncoding, bb)
for pos := 0; pos < len(input); pos += bs {
- end := pos + bs
- if end > len(input) {
- end = len(input)
- }
+ end := min(pos+bs, len(input))
n, err := encoder.Write(input[pos:end])
testEqual(t, "Write(%q) gave error %v, want %v", input[pos:end], err, error(nil))
testEqual(t, "Write(%q) gave length %v, want %v", input[pos:end], n, end-pos)
diff --git a/src/encoding/base64/base64.go b/src/encoding/base64/base64.go
index 57aa1a6..4e4c491 100644
--- a/src/encoding/base64/base64.go
+++ b/src/encoding/base64/base64.go
@@ -459,13 +459,7 @@
// Refill buffer.
for d.nbuf < 4 && d.readErr == nil {
- nn := len(p) / 3 * 4
- if nn < 4 {
- nn = 4
- }
- if nn > len(d.buf) {
- nn = len(d.buf)
- }
+ nn := min(max(len(p)/3*4, 4), len(d.buf))
nn, d.readErr = d.r.Read(d.buf[d.nbuf:nn])
d.nbuf += nn
}
diff --git a/src/encoding/base64/base64_test.go b/src/encoding/base64/base64_test.go
index 0a15ee3..f42c9ea 100644
--- a/src/encoding/base64/base64_test.go
+++ b/src/encoding/base64/base64_test.go
@@ -138,10 +138,7 @@
bb := &strings.Builder{}
encoder := NewEncoder(StdEncoding, bb)
for pos := 0; pos < len(input); pos += bs {
- end := pos + bs
- if end > len(input) {
- end = len(input)
- }
+ end := min(pos+bs, len(input))
n, err := encoder.Write(input[pos:end])
testEqual(t, "Write(%q) gave error %v, want %v", input[pos:end], err, error(nil))
testEqual(t, "Write(%q) gave length %v, want %v", input[pos:end], n, end-pos)
diff --git a/src/encoding/gob/decode.go b/src/encoding/gob/decode.go
index 75f6c57..a38175a 100644
--- a/src/encoding/gob/decode.go
+++ b/src/encoding/gob/decode.go
@@ -388,10 +388,7 @@
}
// Copy into s up to the capacity or n,
// whichever is less.
- ln = value.Cap()
- if ln > n {
- ln = n
- }
+ ln = min(value.Cap(), n)
value.SetLen(ln)
sub := value.Slice(i, ln)
if _, err := state.b.Read(sub.Bytes()); err != nil {
@@ -551,10 +548,7 @@
// This is a slice that we only partially allocated.
// Grow it up to length.
value.Grow(1)
- cp := value.Cap()
- if cp > length {
- cp = length
- }
+ cp := min(value.Cap(), length)
value.SetLen(cp)
ln = cp
}
diff --git a/src/encoding/hex/hex.go b/src/encoding/hex/hex.go
index ba9cc0f..8110ccf 100644
--- a/src/encoding/hex/hex.go
+++ b/src/encoding/hex/hex.go
@@ -176,10 +176,7 @@
func (e *encoder) Write(p []byte) (n int, err error) {
for len(p) > 0 && e.err == nil {
- chunkSize := bufferSize / 2
- if len(p) < chunkSize {
- chunkSize = len(p)
- }
+ chunkSize := min(len(p), bufferSize/2)
var written int
encoded := Encode(e.out[:], p[:chunkSize])
diff --git a/src/encoding/hex/hex_test.go b/src/encoding/hex/hex_test.go
index f90dec5..590097a 100644
--- a/src/encoding/hex/hex_test.go
+++ b/src/encoding/hex/hex_test.go
@@ -184,10 +184,7 @@
dumper := Dumper(&out)
done := 0
for done < len(in) {
- todo := done + stride
- if todo > len(in) {
- todo = len(in)
- }
+ todo := min(done+stride, len(in))
dumper.Write(in[done:todo])
done = todo
}
diff --git a/src/encoding/json/scanner_test.go b/src/encoding/json/scanner_test.go
index a062e91..ecaf599 100644
--- a/src/encoding/json/scanner_test.go
+++ b/src/encoding/json/scanner_test.go
@@ -213,10 +213,7 @@
t.Helper()
for i := 0; ; i++ {
if i >= len(a) || i >= len(b) || a[i] != b[i] {
- j := i - 10
- if j < 0 {
- j = 0
- }
+ j := max(i-10, 0)
t.Errorf("diverge at %d: «%s» vs «%s»", i, trim(a[j:]), trim(b[j:]))
return
}
@@ -277,13 +274,7 @@
}
func genArray(n int) []any {
- f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2)))
- if f > n {
- f = n
- }
- if f < 1 {
- f = 1
- }
+ f := max(min(int(math.Abs(rand.NormFloat64())*math.Min(10, float64(n/2))), n), 1)
x := make([]any, f)
for i := range x {
x[i] = genValue(((i+1)*n)/f - (i*n)/f)
@@ -292,10 +283,7 @@
}
func genMap(n int) map[string]any {
- f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2)))
- if f > n {
- f = n
- }
+ f := min(int(math.Abs(rand.NormFloat64())*math.Min(10, float64(n/2))), n)
if n > 0 && f == 0 {
f = 1
}
diff --git a/src/go/printer/printer.go b/src/go/printer/printer.go
index 763d1e7..6487781 100644
--- a/src/go/printer/printer.go
+++ b/src/go/printer/printer.go
@@ -440,10 +440,9 @@
// determine number of linebreaks before the comment
n := 0
if pos.IsValid() && p.last.IsValid() {
- n = pos.Line - p.last.Line
- if n < 0 { // should never happen
- n = 0
- }
+ n = max(pos.Line-p.last.Line,
+ // should never happen
+ 0)
}
// at the package scope level only (p.indent == 0),
diff --git a/src/hash/crc32/crc32_test.go b/src/hash/crc32/crc32_test.go
index eb5e73c..1a50c41 100644
--- a/src/hash/crc32/crc32_test.go
+++ b/src/hash/crc32/crc32_test.go
@@ -287,10 +287,7 @@
for delta := 1; delta <= 7; delta++ {
testGoldenIEEE(t, func(b []byte) uint32 {
ieee := NewIEEE()
- d := delta
- if d >= len(b) {
- d = len(b)
- }
+ d := min(delta, len(b))
ieee.Write(b[:d])
ieee.Write(b[d:])
return ieee.Sum32()
@@ -313,10 +310,7 @@
for delta := 1; delta <= 7; delta++ {
testGoldenCastagnoli(t, func(b []byte) uint32 {
castagnoli := New(castagnoliTab)
- d := delta
- if d >= len(b) {
- d = len(b)
- }
+ d := min(delta, len(b))
castagnoli.Write(b[:d])
castagnoli.Write(b[d:])
return castagnoli.Sum32()
diff --git a/src/html/escape.go b/src/html/escape.go
index d66a3e4..76d8a89 100644
--- a/src/html/escape.go
+++ b/src/html/escape.go
@@ -147,10 +147,7 @@
dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
} else if !attribute {
- maxLen := len(entityName) - 1
- if maxLen > longestEntityWithoutSemicolon {
- maxLen = longestEntityWithoutSemicolon
- }
+ maxLen := min(len(entityName)-1, longestEntityWithoutSemicolon)
for j := maxLen; j > 1; j-- {
if x := entity[string(entityName[:j])]; x != 0 {
return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
diff --git a/src/image/color/ycbcr.go b/src/image/color/ycbcr.go
index a6d17ab..b87440f 100644
--- a/src/image/color/ycbcr.go
+++ b/src/image/color/ycbcr.go
@@ -316,13 +316,7 @@
rr := uint32(r)
gg := uint32(g)
bb := uint32(b)
- w := rr
- if w < gg {
- w = gg
- }
- if w < bb {
- w = bb
- }
+ w := max(max(rr, gg), bb)
if w == 0 {
return 0, 0, 0, 0xff
}
diff --git a/src/image/gif/writer.go b/src/image/gif/writer.go
index 2a3e33c..fd3b618 100644
--- a/src/image/gif/writer.go
+++ b/src/image/gif/writer.go
@@ -313,10 +313,7 @@
}
}
- litWidth := paddedSize + 1
- if litWidth < 2 {
- litWidth = 2
- }
+ litWidth := max(paddedSize+1, 2)
e.writeByte(uint8(litWidth)) // LZW Minimum Code Size.
bw := blockWriter{e: e}
diff --git a/src/image/gif/writer_test.go b/src/image/gif/writer_test.go
index a16dbaa..b7e63a3 100644
--- a/src/image/gif/writer_test.go
+++ b/src/image/gif/writer_test.go
@@ -141,10 +141,7 @@
// palettesEqual reports whether two color.Palette values are equal, ignoring
// any trailing opaque-black palette entries.
func palettesEqual(p, q color.Palette) bool {
- n := len(p)
- if n > len(q) {
- n = len(q)
- }
+ n := min(len(p), len(q))
for i := 0; i < n; i++ {
if p[i] != q[i] {
return false
diff --git a/src/image/jpeg/reader.go b/src/image/jpeg/reader.go
index 5aa51ad..0cd27f7 100644
--- a/src/image/jpeg/reader.go
+++ b/src/image/jpeg/reader.go
@@ -281,10 +281,7 @@
}
for {
- m := d.bytes.j - d.bytes.i
- if m > n {
- m = n
- }
+ m := min(d.bytes.j-d.bytes.i, n)
d.bytes.i += m
n -= m
if n == 0 {
diff --git a/src/image/jpeg/reader_test.go b/src/image/jpeg/reader_test.go
index 79702d6..afa7f24 100644
--- a/src/image/jpeg/reader_test.go
+++ b/src/image/jpeg/reader_test.go
@@ -199,10 +199,7 @@
t.Fatal("SOS marker not found")
}
i += len(sosMarker)
- j := i + 10
- if j > len(b) {
- j = len(b)
- }
+ j := min(i+10, len(b))
for ; i < j; i++ {
Decode(bytes.NewReader(b[:i]))
}
diff --git a/src/image/jpeg/writer.go b/src/image/jpeg/writer.go
index a5befc7..e157db3 100644
--- a/src/image/jpeg/writer.go
+++ b/src/image/jpeg/writer.go
@@ -422,16 +422,10 @@
xmax := b.Max.X - 1
ymax := b.Max.Y - 1
for j := range 8 {
- sj := p.Y + j
- if sj > ymax {
- sj = ymax
- }
+ sj := min(p.Y+j, ymax)
offset := (sj-b.Min.Y)*m.Stride - b.Min.X*4
for i := range 8 {
- sx := p.X + i
- if sx > xmax {
- sx = xmax
- }
+ sx := min(p.X+i, xmax)
pix := m.Pix[offset+sx*4:]
yy, cb, cr := color.RGBToYCbCr(pix[0], pix[1], pix[2])
yBlock[8*j+i] = int32(yy)
@@ -447,15 +441,9 @@
xmax := b.Max.X - 1
ymax := b.Max.Y - 1
for j := range 8 {
- sy := p.Y + j
- if sy > ymax {
- sy = ymax
- }
+ sy := min(p.Y+j, ymax)
for i := range 8 {
- sx := p.X + i
- if sx > xmax {
- sx = xmax
- }
+ sx := min(p.X+i, xmax)
yi := m.YOffset(sx, sy)
ci := m.COffset(sx, sy)
yBlock[8*j+i] = int32(m.Y[yi])
diff --git a/src/index/suffixarray/sais.go b/src/index/suffixarray/sais.go
index bede889..8f4b802 100644
--- a/src/index/suffixarray/sais.go
+++ b/src/index/suffixarray/sais.go
@@ -698,10 +698,7 @@
}
if len(tmp) < numLMS {
// TestSAIS/forcealloc reaches this code.
- n := maxID
- if n < numLMS/2 {
- n = numLMS / 2
- }
+ n := max(maxID, numLMS/2)
tmp = make([]int32, n)
}
diff --git a/src/internal/coverage/calloc/batchcounteralloc.go b/src/internal/coverage/calloc/batchcounteralloc.go
index 2b6495d..b41967e 100644
--- a/src/internal/coverage/calloc/batchcounteralloc.go
+++ b/src/internal/coverage/calloc/batchcounteralloc.go
@@ -17,10 +17,7 @@
func (ca *BatchCounterAlloc) AllocateCounters(n int) []uint32 {
const chunk = 8192
if n > cap(ca.pool) {
- siz := chunk
- if n > chunk {
- siz = n
- }
+ siz := max(n, chunk)
ca.pool = make([]uint32, siz)
}
rv := ca.pool[:n]
diff --git a/src/internal/diff/diff.go b/src/internal/diff/diff.go
index 28aae49..e3a8e2d 100644
--- a/src/internal/diff/diff.go
+++ b/src/internal/diff/diff.go
@@ -116,10 +116,7 @@
// End chunk with common lines for context.
if len(ctext) > 0 {
- n := end.x - start.x
- if n > C {
- n = C
- }
+ n := min(end.x-start.x, C)
for _, s := range x[start.x : start.x+n] {
ctext = append(ctext, " "+s)
count.x++
diff --git a/src/internal/saferio/io.go b/src/internal/saferio/io.go
index 5c428e6..848a93b 100644
--- a/src/internal/saferio/io.go
+++ b/src/internal/saferio/io.go
@@ -44,10 +44,7 @@
var buf []byte
buf1 := make([]byte, chunk)
for n > 0 {
- next := n
- if next > chunk {
- next = chunk
- }
+ next := min(n, chunk)
_, err := io.ReadFull(r, buf1[:next])
if err != nil {
if len(buf) > 0 && err == io.EOF {
@@ -87,10 +84,7 @@
var buf []byte
buf1 := make([]byte, chunk)
for n > 0 {
- next := n
- if next > chunk {
- next = chunk
- }
+ next := min(n, chunk)
_, err := r.ReadAt(buf1[:next], off)
if err != nil {
return nil, err
diff --git a/src/internal/trace/gc.go b/src/internal/trace/gc.go
index 46890e7..6ceacdf 100644
--- a/src/internal/trace/gc.go
+++ b/src/internal/trace/gc.go
@@ -384,17 +384,12 @@
// these bands.
//
// Compute the duration of each band.
- numBands := bandsPerSeries
- if numBands > len(util) {
+ numBands := min(bandsPerSeries,
// There's no point in having lots of bands if there
// aren't many events.
- numBands = len(util)
- }
+ len(util))
dur := util[len(util)-1].Time - util[0].Time
- bandDur := (dur + int64(numBands) - 1) / int64(numBands)
- if bandDur < 1 {
- bandDur = 1
- }
+ bandDur := max((dur+int64(numBands)-1)/int64(numBands), 1)
// Compute the bands. There are numBands+1 bands in order to
// record the final cumulative sum.
bands := make([]mmuBand, numBands+1)
@@ -746,10 +741,7 @@
panic("maxBands < 2")
}
tailDur := int64(window) % c.bandDur
- nUtil := len(c.bands) - maxBands + 1
- if nUtil < 0 {
- nUtil = 0
- }
+ nUtil := max(len(c.bands)-maxBands+1, 0)
bandU := make([]bandUtil, nUtil)
for i := range bandU {
// To compute the worst-case MU, we assume the minimum
diff --git a/src/internal/zstd/block.go b/src/internal/zstd/block.go
index 11a99cd..2cbbed0 100644
--- a/src/internal/zstd/block.go
+++ b/src/internal/zstd/block.go
@@ -414,10 +414,7 @@
// We are being asked to copy data that we are adding to the
// buffer in the same copy.
for match > 0 {
- copy := uint32(len(r.buffer)) - bufferOffset
- if copy > match {
- copy = match
- }
+ copy := min(uint32(len(r.buffer))-bufferOffset, match)
r.buffer = append(r.buffer, r.buffer[bufferOffset:bufferOffset+copy]...)
match -= copy
}
diff --git a/src/internal/zstd/fuzz_test.go b/src/internal/zstd/fuzz_test.go
index c576681..79e2e6a 100644
--- a/src/internal/zstd/fuzz_test.go
+++ b/src/internal/zstd/fuzz_test.go
@@ -123,10 +123,7 @@
// to determine.
// So we just check the prefix.
- c := len(goExp)
- if c > len(zstdExp) {
- c = len(zstdExp)
- }
+ c := min(len(goExp), len(zstdExp))
goExp = goExp[:c]
zstdExp = zstdExp[:c]
if !bytes.Equal(goExp, zstdExp) {
diff --git a/src/math/big/decimal.go b/src/math/big/decimal.go
index 9e391ad..5f95f05 100644
--- a/src/math/big/decimal.go
+++ b/src/math/big/decimal.go
@@ -65,10 +65,9 @@
// decimal format (since that is likely slower).
if shift < 0 {
ntz := m.trailingZeroBits()
- s := uint(-shift)
- if s >= ntz {
- s = ntz // shift at most ntz bits
- }
+ s := min(uint(-shift),
+ // shift at most ntz bits
+ ntz)
m = nat(nil).rsh(m, s)
shift += int(s)
}
diff --git a/src/math/big/float_test.go b/src/math/big/float_test.go
index 497e2f9..11d708e 100644
--- a/src/math/big/float_test.go
+++ b/src/math/big/float_test.go
@@ -128,10 +128,7 @@
{"-123", 1e6, "-123", Exact},
} {
x := makeFloat(test.x).SetPrec(test.prec)
- prec := test.prec
- if prec > MaxPrec {
- prec = MaxPrec
- }
+ prec := min(test.prec, MaxPrec)
if got := x.Prec(); got != prec {
t.Errorf("%s.SetPrec(%d).Prec() == %d; want %d", test.x, test.prec, got, prec)
}
diff --git a/src/math/big/floatmarsh.go b/src/math/big/floatmarsh.go
index e220cbc..57357f4 100644
--- a/src/math/big/floatmarsh.go
+++ b/src/math/big/floatmarsh.go
@@ -28,15 +28,14 @@
n := 0 // number of mantissa words
if x.form == finite {
// add space for mantissa and exponent
- n = int((x.prec + (_W - 1)) / _W) // required mantissa length in words for given precision
- // actual mantissa slice could be shorter (trailing 0's) or longer (unused bits):
- // - if shorter, only encode the words present
- // - if longer, cut off unused words when encoding in bytes
- // (in practice, this should never happen since rounding
- // takes care of it, but be safe and do it always)
- if len(x.mant) < n {
- n = len(x.mant)
- }
+ n = min(
+ // required mantissa length in words for given precision
+ // actual mantissa slice could be shorter (trailing 0's) or longer (unused bits):
+ // - if shorter, only encode the words present
+ // - if longer, cut off unused words when encoding in bytes
+ // (in practice, this should never happen since rounding
+ // takes care of it, but be safe and do it always)
+ len(x.mant), int((x.prec+(_W-1))/_W))
// len(x.mant) >= n
sz += 4 + n*_S // exp + mant
}
diff --git a/src/math/big/nat.go b/src/math/big/nat.go
index 1835fdc..98c1f37 100644
--- a/src/math/big/nat.go
+++ b/src/math/big/nat.go
@@ -530,10 +530,7 @@
func (z nat) andNot(x, y nat) nat {
m := len(x)
- n := len(y)
- if n > m {
- n = m
- }
+ n := min(len(y), m)
// m >= n
z = z.make(m)
diff --git a/src/math/rand/default_test.go b/src/math/rand/default_test.go
index 78d8039..967bae6 100644
--- a/src/math/rand/default_test.go
+++ b/src/math/rand/default_test.go
@@ -63,10 +63,7 @@
t.Fatalf("internal error: unrecognized code %q", v)
}
- goroutines := runtime.GOMAXPROCS(0)
- if goroutines < 4 {
- goroutines = 4
- }
+ goroutines := max(runtime.GOMAXPROCS(0), 4)
ch := make(chan uint64, goroutines*3)
var wg sync.WaitGroup
diff --git a/src/math/rand/rand_test.go b/src/math/rand/rand_test.go
index 7ed9443..7d335b9 100644
--- a/src/math/rand/rand_test.go
+++ b/src/math/rand/rand_test.go
@@ -520,10 +520,7 @@
// the expected normal distribution given n!-1 degrees of freedom.
// See https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test and
// https://www.johndcook.com/Beautiful_Testing_ch10.pdf.
- nsamples := 10 * nfact
- if nsamples < 200 {
- nsamples = 200
- }
+ nsamples := max(10*nfact, 200)
samples := make([]float64, nsamples)
for i := range samples {
// Generate some uniformly distributed values and count their occurrences.
diff --git a/src/math/rand/v2/rand_test.go b/src/math/rand/v2/rand_test.go
index ddcb102..a99823b 100644
--- a/src/math/rand/v2/rand_test.go
+++ b/src/math/rand/v2/rand_test.go
@@ -428,10 +428,7 @@
// the expected normal distribution given n!-1 degrees of freedom.
// See https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test and
// https://www.johndcook.com/Beautiful_Testing_ch10.pdf.
- nsamples := 10 * nfact
- if nsamples < 1000 {
- nsamples = 1000
- }
+ nsamples := max(10*nfact, 1000)
samples := make([]float64, nsamples)
for i := range samples {
// Generate some uniformly distributed values and count their occurrences.
diff --git a/src/mime/multipart/multipart.go b/src/mime/multipart/multipart.go
index 17088bc..5643875 100644
--- a/src/mime/multipart/multipart.go
+++ b/src/mime/multipart/multipart.go
@@ -213,10 +213,7 @@
if p.n == 0 {
return 0, p.err
}
- n := len(d)
- if n > p.n {
- n = p.n
- }
+ n := min(len(d), p.n)
n, _ = br.Read(d[:n])
p.total += int64(n)
p.n -= n
diff --git a/src/net/dial.go b/src/net/dial.go
index a87c576..8d132d6 100644
--- a/src/net/dial.go
+++ b/src/net/dial.go
@@ -280,11 +280,7 @@
// If the time per address is too short, steal from the end of the list.
const saneMinimum = 2 * time.Second
if timeout < saneMinimum {
- if timeRemaining < saneMinimum {
- timeout = timeRemaining
- } else {
- timeout = saneMinimum
- }
+ timeout = min(timeRemaining, saneMinimum)
}
return now.Add(timeout), nil
}
diff --git a/src/net/http/fcgi/fcgi.go b/src/net/http/fcgi/fcgi.go
index 56f7d40..19a9203 100644
--- a/src/net/http/fcgi/fcgi.go
+++ b/src/net/http/fcgi/fcgi.go
@@ -258,10 +258,7 @@
func (w *streamWriter) Write(p []byte) (int, error) {
nn := 0
for len(p) > 0 {
- n := len(p)
- if n > maxWrite {
- n = maxWrite
- }
+ n := min(len(p), maxWrite)
if err := w.c.writeRecord(w.recType, w.reqId, p[:n]); err != nil {
return nn, err
}
diff --git a/src/regexp/regexp.go b/src/regexp/regexp.go
index 66c7369..83c7cb1 100644
--- a/src/regexp/regexp.go
+++ b/src/regexp/regexp.go
@@ -177,10 +177,7 @@
if err != nil {
return nil, err
}
- matchcap := prog.NumCap
- if matchcap < 2 {
- matchcap = 2
- }
+ matchcap := max(prog.NumCap, 2)
regexp := &Regexp{
expr: expr,
prog: prog,
diff --git a/src/runtime/arena.go b/src/runtime/arena.go
index ea49b0c..747739c 100644
--- a/src/runtime/arena.go
+++ b/src/runtime/arena.go
@@ -558,10 +558,7 @@
nb := typ.PtrBytes / goarch.PtrSize
for i := uintptr(0); i < nb; i += ptrBits {
- k := nb - i
- if k > ptrBits {
- k = ptrBits
- }
+ k := min(nb-i, ptrBits)
// N.B. On big endian platforms we byte swap the data that we
// read from GCData, which is always stored in little-endian order
// by the compiler. writeUserArenaHeapBits handles data in
@@ -672,10 +669,7 @@
// Add zero bits up to the bitmap word boundary
if zeros > 0 {
- z := ptrBits - h.valid
- if z > zeros {
- z = zeros
- }
+ z := min(ptrBits-h.valid, zeros)
h.valid += z
zeros -= z
}
diff --git a/src/runtime/debug/garbage_test.go b/src/runtime/debug/garbage_test.go
index 506f698..2cfac23 100644
--- a/src/runtime/debug/garbage_test.go
+++ b/src/runtime/debug/garbage_test.go
@@ -39,10 +39,7 @@
if stats.LastGC.UnixNano() != int64(mstats.LastGC) {
t.Errorf("stats.LastGC.UnixNano = %d, but mstats.LastGC = %d", stats.LastGC.UnixNano(), mstats.LastGC)
}
- n := int(mstats.NumGC)
- if n > len(mstats.PauseNs) {
- n = len(mstats.PauseNs)
- }
+ n := min(int(mstats.NumGC), len(mstats.PauseNs))
if len(stats.Pause) != n {
t.Errorf("len(stats.Pause) = %d, want %d", len(stats.Pause), n)
} else {
diff --git a/src/runtime/debuglog.go b/src/runtime/debuglog.go
index 2015cdb..65fcbc6 100644
--- a/src/runtime/debuglog.go
+++ b/src/runtime/debuglog.go
@@ -867,11 +867,9 @@
print("[")
var tmpbuf [21]byte
- pnano := int64(nano) - runtimeInitTime
- if pnano < 0 {
+ pnano := max(int64(nano)-runtimeInitTime,
// Logged before runtimeInitTime was set.
- pnano = 0
- }
+ 0)
pnanoBytes := itoaDiv(tmpbuf[:], uint64(pnano), 9)
print(slicebytetostringtmp((*byte)(noescape(unsafe.Pointer(&pnanoBytes[0]))), len(pnanoBytes)))
print(" P ", p, "] ")
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index fc9fb84..72225e4 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -683,10 +683,7 @@
// buggy, as usual: it won't adjust the pointer
// upward. So adjust it upward a little bit ourselves:
// 1/4 MB to get away from the running binary image.
- p := firstmoduledata.end
- if p < procBrk {
- p = procBrk
- }
+ p := max(firstmoduledata.end, procBrk)
if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
p = mheap_.heapArenaAlloc.end
}
@@ -2194,10 +2191,7 @@
goschedguarded()
}
// clear min(avail, lump) bytes
- n := vsize - voff
- if n > chunkBytes {
- n = chunkBytes
- }
+ n := min(vsize-voff, chunkBytes)
memclrNoHeapPointers(unsafe.Pointer(voff), n)
}
}
diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go
index a582a20..e120152 100644
--- a/src/runtime/mbarrier.go
+++ b/src/runtime/mbarrier.go
@@ -274,10 +274,7 @@
//go:linkname typedslicecopy
//go:nosplit
func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int {
- n := dstLen
- if n > srcLen {
- n = srcLen
- }
+ n := min(dstLen, srcLen)
if n == 0 {
return 0
}
diff --git a/src/runtime/metrics.go b/src/runtime/metrics.go
index f8f0da2..8b6be7f 100644
--- a/src/runtime/metrics.go
+++ b/src/runtime/metrics.go
@@ -862,10 +862,7 @@
// include system goroutines in this count because we included
// them above.
a.gTotal = uint64(gcount(true))
- a.gWaiting = a.gTotal - (a.gRunning + a.gRunnable + a.gNonGo)
- if a.gWaiting < 0 {
- a.gWaiting = 0
- }
+ a.gWaiting = max(a.gTotal-(a.gRunning+a.gRunnable+a.gNonGo), 0)
unlock(&sched.lock)
}
diff --git a/src/runtime/mgcmark_greenteagc.go b/src/runtime/mgcmark_greenteagc.go
index ce4e2fa..998744b 100644
--- a/src/runtime/mgcmark_greenteagc.go
+++ b/src/runtime/mgcmark_greenteagc.go
@@ -493,10 +493,7 @@
// avoid a situation where a single worker ends up queuing O(heap)
// work and then forever retains a queue of that size.
const maxCap = 1 << 20 / goarch.PtrSize
- newCap := q.chain.head.cap * 2
- if newCap > maxCap {
- newCap = maxCap
- }
+ newCap := min(q.chain.head.cap*2, maxCap)
newHead := newSpanSPMC(newCap)
if !q.tryDrain(newHead, n) {
throw("failed to put span on newly-allocated spanSPMC")
diff --git a/src/runtime/mgcpacer.go b/src/runtime/mgcpacer.go
index 922d9b7..bdd4fbb 100644
--- a/src/runtime/mgcpacer.go
+++ b/src/runtime/mgcpacer.go
@@ -559,8 +559,7 @@
// (scanWork), so allocation will change this difference
// slowly in the soft regime and not at all in the hard
// regime.
- scanWorkRemaining := scanWorkExpected - work
- if scanWorkRemaining < 1000 {
+ scanWorkRemaining := max(scanWorkExpected-work,
// We set a somewhat arbitrary lower bound on
// remaining scan work since if we aim a little high,
// we can miss by a little.
@@ -569,8 +568,7 @@
// since marking is racy and double-scanning objects
// may legitimately make the remaining scan work
// negative, even in the hard goal regime.
- scanWorkRemaining = 1000
- }
+ 1000)
// Compute the heap distance remaining.
heapRemaining := heapGoal - int64(live)
@@ -1141,12 +1139,10 @@
// Apply some headroom to the goal to account for pacing inaccuracies and to reduce
// the impact of scavenging at allocation time in response to a high allocation rate
// when GOGC=off. See issue #57069. Also, be careful about small limits.
- headroom := goal / 100 * memoryLimitHeapGoalHeadroomPercent
- if headroom < memoryLimitMinHeapGoalHeadroom {
+ headroom := max(goal/100*memoryLimitHeapGoalHeadroomPercent,
// Set a fixed minimum to deal with the particularly large effect pacing inaccuracies
// have for smaller heaps.
- headroom = memoryLimitMinHeapGoalHeadroom
- }
+ memoryLimitMinHeapGoalHeadroom)
if goal < headroom || goal-headroom < headroom {
goal = headroom
} else {
diff --git a/src/runtime/mgcpacer_test.go b/src/runtime/mgcpacer_test.go
index 9167912..4602621 100644
--- a/src/runtime/mgcpacer_test.go
+++ b/src/runtime/mgcpacer_test.go
@@ -1019,10 +1019,7 @@
}
func applyMemoryLimitHeapGoalHeadroom(goal uint64) uint64 {
- headroom := goal / 100 * MemoryLimitHeapGoalHeadroomPercent
- if headroom < MemoryLimitMinHeapGoalHeadroom {
- headroom = MemoryLimitMinHeapGoalHeadroom
- }
+ headroom := max(goal/100*MemoryLimitHeapGoalHeadroomPercent, MemoryLimitMinHeapGoalHeadroom)
if goal < headroom || goal-headroom < headroom {
goal = headroom
} else {
diff --git a/src/runtime/mgcscavenge_test.go b/src/runtime/mgcscavenge_test.go
index aa7231d..e25a7a7 100644
--- a/src/runtime/mgcscavenge_test.go
+++ b/src/runtime/mgcscavenge_test.go
@@ -304,10 +304,7 @@
type test struct {
request, expect uintptr
}
- minPages := PhysPageSize / PageSize
- if minPages < 1 {
- minPages = 1
- }
+ minPages := max(PhysPageSize/PageSize, 1)
type setup struct {
beforeAlloc map[ChunkIdx][]BitRange
beforeScav map[ChunkIdx][]BitRange
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 4eecb1c..e814b02 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -881,10 +881,7 @@
}
print("\n")
if zombie {
- length := s.elemsize
- if length > 1024 {
- length = 1024
- }
+ length := min(s.elemsize, 1024)
hexdumpWords(addr, length, nil)
}
mbits.advance()
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 63e6996..7d7d641 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -554,10 +554,7 @@
assertLockHeld(&h.lock)
if len(h.allspans) >= cap(h.allspans) {
- n := 64 * 1024 / goarch.PtrSize
- if n < cap(h.allspans)*3/2 {
- n = cap(h.allspans) * 3 / 2
- }
+ n := max(64*1024/goarch.PtrSize, cap(h.allspans)*3/2)
var new []*mspan
sp := (*slice)(unsafe.Pointer(&new))
sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys, "allspans array")
@@ -862,11 +859,9 @@
for npage > 0 {
// Pull from accumulated credit first.
if credit := h.reclaimCredit.Load(); credit > 0 {
- take := credit
- if take > npage {
+ take := min(credit,
// Take only what we need.
- take = npage
- }
+ npage)
if h.reclaimCredit.CompareAndSwap(credit, credit-take) {
npage -= take
}
@@ -1100,10 +1095,7 @@
// Compute how far into the arena we extend into, capped
// at heapArenaBytes.
- arenaLimit := arenaBase + npage*pageSize
- if arenaLimit > heapArenaBytes {
- arenaLimit = heapArenaBytes
- }
+ arenaLimit := min(arenaBase+npage*pageSize, heapArenaBytes)
// Increase ha.zeroedBase so it's >= arenaLimit.
// We may be racing with other updates.
for arenaLimit > zeroedBase {
diff --git a/src/runtime/mpagealloc_64bit.go b/src/runtime/mpagealloc_64bit.go
index 2e36430..8a89c34 100644
--- a/src/runtime/mpagealloc_64bit.go
+++ b/src/runtime/mpagealloc_64bit.go
@@ -205,12 +205,9 @@
haveMin := s.min.Load()
haveMax := s.max.Load()
needMin := alignDown(uintptr(chunkIndex(base)), physPageSize/scSize)
- needMax := alignUp(uintptr(chunkIndex(limit)), physPageSize/scSize)
-
- // We need a contiguous range, so extend the range if there's no overlap.
- if needMax < haveMin {
- needMax = haveMin
- }
+ needMax := max(
+ // We need a contiguous range, so extend the range if there's no overlap.
+ alignUp(uintptr(chunkIndex(limit)), physPageSize/scSize), haveMin)
if haveMax != 0 && needMin > haveMax {
needMin = haveMax
}
diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go
index febfb69..2354376 100644
--- a/src/runtime/mstats.go
+++ b/src/runtime/mstats.go
@@ -584,10 +584,7 @@
// Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
lock(&mheap_.lock)
- n := memstats.numgc
- if n > uint32(len(memstats.pause_ns)) {
- n = uint32(len(memstats.pause_ns))
- }
+ n := min(memstats.numgc, uint32(len(memstats.pause_ns)))
// The pause buffer is circular. The most recent pause is at
// pause_ns[(numgc-1)%len(pause_ns)], and then backward
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 48567df..1619583 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -3734,10 +3734,7 @@
if now == 0 {
now = nanotime()
}
- delay = pollUntil - now
- if delay < 0 {
- delay = 0
- }
+ delay = max(pollUntil-now, 0)
}
if faketime != 0 {
// When using fake time, just poll.
@@ -5430,10 +5427,7 @@
if callergp.ancestors != nil {
callerAncestors = *callergp.ancestors
}
- n := int32(len(callerAncestors)) + 1
- if n > debug.tracebackancestors {
- n = debug.tracebackancestors
- }
+ n := min(int32(len(callerAncestors))+1, debug.tracebackancestors)
ancestors := make([]ancestorInfo, n)
copy(ancestors[1:], callerAncestors)
@@ -6523,10 +6517,7 @@
unlock(&sched.lock)
// Make wake-up period small enough
// for the sampling to be correct.
- sleep := forcegcperiod / 2
- if next-now < sleep {
- sleep = next - now
- }
+ sleep := min(next-now, forcegcperiod/2)
shouldRelax := sleep >= osRelaxMinNS
if shouldRelax {
osRelax(true)
diff --git a/src/runtime/rand.go b/src/runtime/rand.go
index 1739e9f..466763b 100644
--- a/src/runtime/rand.go
+++ b/src/runtime/rand.go
@@ -97,10 +97,7 @@
for len(r) > 0 {
v ^= 0xa0761d6478bd642f
v *= 0xe7037ed1a0b428db
- size := 8
- if len(r) < 8 {
- size = len(r)
- }
+ size := min(len(r), 8)
for i := 0; i < size; i++ {
r[i] ^= byte(v >> (8 * i))
}
diff --git a/src/runtime/slice.go b/src/runtime/slice.go
index 2a44297..1c932cd 100644
--- a/src/runtime/slice.go
+++ b/src/runtime/slice.go
@@ -394,10 +394,7 @@
return 0
}
- n := fromLen
- if toLen < n {
- n = toLen
- }
+ n := min(toLen, fromLen)
if width == 0 {
return n
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 7571fe6..7591ed9 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -1416,15 +1416,12 @@
startingStackSize = fixedStack
return
}
- avg := scannedStackSize/scannedStacks + stackGuard
- // Note: we add stackGuard to ensure that a goroutine that
- // uses the average space will not trigger a growth.
- if avg > uint64(maxstacksize) {
- avg = uint64(maxstacksize)
- }
- if avg < fixedStack {
- avg = fixedStack
- }
+ avg := max(
+ // Note: we add stackGuard to ensure that a goroutine that
+ // uses the average space will not trigger a growth.
+ min(
+
+ scannedStackSize/scannedStacks+stackGuard, uint64(maxstacksize)), fixedStack)
// Note: maxstacksize fits in 30 bits, so avg also does.
startingStackSize = uint32(round2(int32(avg)))
}
diff --git a/src/runtime/stack_test.go b/src/runtime/stack_test.go
index fc9a6db..597409a 100644
--- a/src/runtime/stack_test.go
+++ b/src/runtime/stack_test.go
@@ -832,10 +832,7 @@
numGoroutines := 3
numFrames := 2
- ancestorsExpected := numGoroutines
- if numGoroutines > tracebackDepth {
- ancestorsExpected = tracebackDepth
- }
+ ancestorsExpected := min(numGoroutines, tracebackDepth)
matches := goroutineRegex.FindAllStringSubmatch(output, -1)
if len(matches) != 2 {
diff --git a/src/strings/replace_test.go b/src/strings/replace_test.go
index 56f0c1d..ce55e3a 100644
--- a/src/strings/replace_test.go
+++ b/src/strings/replace_test.go
@@ -76,10 +76,7 @@
// repeat maps "a"->"a", "b"->"bb", "c"->"ccc", ...
s = nil
for i := range 256 {
- n := i + 1 - 'a'
- if n < 1 {
- n = 1
- }
+ n := max(i+1-'a', 1)
s = append(s, str(byte(i)), Repeat(str(byte(i)), n))
}
repeat := NewReplacer(s...)
diff --git a/src/sync/poolqueue.go b/src/sync/poolqueue.go
index e9593f8..85fa0a5 100644
--- a/src/sync/poolqueue.go
+++ b/src/sync/poolqueue.go
@@ -234,11 +234,9 @@
// The current dequeue is full. Allocate a new one of twice
// the size.
- newSize := len(d.vals) * 2
- if newSize >= dequeueLimit {
+ newSize := min(len(d.vals)*2,
// Can't make it any bigger.
- newSize = dequeueLimit
- }
+ dequeueLimit)
d2 := &poolChainElt{}
d2.prev.Store(d)
diff --git a/src/testing/iotest/reader.go b/src/testing/iotest/reader.go
index 8529e1c..e9c6ecc 100644
--- a/src/testing/iotest/reader.go
+++ b/src/testing/iotest/reader.go
@@ -114,10 +114,7 @@
return 0, nil
}
r.n = r.n%3 + 1
- n := r.n
- if n > len(p) {
- n = len(p)
- }
+ n := min(r.n, len(p))
n, err := r.r.Read(p[0:n])
if err != nil && err != io.EOF {
err = fmt.Errorf("Read(%d bytes at offset %d): %v", n, r.off, err)
diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go
index 5f439e2..638d8e4 100644
--- a/src/time/sleep_test.go
+++ b/src/time/sleep_test.go
@@ -939,10 +939,7 @@
for j := range timerCount {
expectedWakeup := Now().Add(delay)
AfterFunc(delay, func() {
- late := Since(expectedWakeup)
- if late < 0 {
- late = 0
- }
+ late := max(Since(expectedWakeup), 0)
stats[j].count++
stats[j].sum += float64(late.Nanoseconds())
if late > stats[j].max {
@@ -1018,10 +1015,7 @@
for ; c > 0; c-- {
<-ticker.C
- late := Since(expectedWakeup)
- if late < 0 {
- late = 0
- }
+ late := max(Since(expectedWakeup), 0)
stats[j].count++
stats[j].sum += float64(late.Nanoseconds())
if late > stats[j].max {
Commit-Queue: +1
Robert Griesemer commented on src/runtime/stack.go, at the line

	scannedStackSize/scannedStacks+stackGuard, uint64(maxstacksize)), fixedStack)

Sorry for the quick drive-by comment, but to me, this change here does not seem to make things more readable.
> Robert Griesemer: Sorry for the quick drive-by comment, but to me, this change here does not seem to make things more readable.

Agreed!
Changes to use min/max should be applied judiciously, not automatically.
> Robert Griesemer: Sorry for the quick drive-by comment, but to me, this change here does not seem to make things more readable.
> Agreed! Changes to use min/max should be applied judiciously, not automatically.

In general, I perceive `go fix` as a tool to adapt the coding style to the latest Go (and thus apply all its suggestions). @adon...@google.com is there a way (annotation etc.) to prevent minmax (or any other "fixer") from suggesting/implementing a particular change? Or, perhaps, some "fixers" should not be enabled by default.
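For reference, a self-contained reconstruction of the rewrite under
discussion, as a sketch: the variable names are borrowed from
src/runtime/stack.go above, while the types and values here are invented
for illustration.

	package main

	import "fmt"

	func main() {
		var (
			scannedStackSize uint64 = 1 << 20
			scannedStacks    uint64 = 128
			stackGuard       uint64 = 928
			maxstacksize     uint64 = 1 << 30
			fixedStack       uint64 = 2048
		)

		// Before: two explicit clamps, each readable on its own.
		avg := scannedStackSize/scannedStacks + stackGuard
		if avg > maxstacksize {
			avg = maxstacksize
		}
		if avg < fixedStack {
			avg = fixedStack
		}

		// After: the automated rewrite nests min inside max, which is
		// what the reviewers found harder to read.
		avg2 := max(min(scannedStackSize/scannedStacks+stackGuard, maxstacksize), fixedStack)

		fmt.Println(avg, avg2) // 9120 9120
	}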