Mirror of https://github.com/QuilibriumNetwork/ceremonyclient.git, synced 2026-02-21 10:27:26 +08:00
Memory management adjustments (#397)
* Do not vendor go-buffer-pool
* Do not change GOGC
* Use local buffer
parent 4be1888496
commit 1e441e280d
@@ -14,8 +14,6 @@ replace github.com/multiformats/go-multiaddr => ../go-multiaddr

 replace github.com/multiformats/go-multiaddr-dns => ../go-multiaddr-dns

-replace github.com/libp2p/go-buffer-pool => ../go-buffer-pool
-
 replace github.com/libp2p/go-libp2p => ../go-libp2p

 replace github.com/libp2p/go-libp2p-kad-dht => ../go-libp2p-kad-dht

@@ -263,6 +263,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
 github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
+github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
+github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
 github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c=
 github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic=
 github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
18 go-buffer-pool/.github/workflows/go-check.yml vendored
@ -1,18 +0,0 @@
|
||||
name: Go Checks
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches: ["master"]
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
go-check:
|
||||
uses: ipdxco/unified-github-workflows/.github/workflows/go-check.yml@v1.0
|
||||
20 go-buffer-pool/.github/workflows/go-test.yml vendored
@ -1,20 +0,0 @@
|
||||
name: Go Test
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches: ["master"]
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
go-test:
|
||||
uses: ipdxco/unified-github-workflows/.github/workflows/go-test.yml@v1.0
|
||||
secrets:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
@ -1,19 +0,0 @@
|
||||
name: Release Checker
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
paths: [ 'version.json' ]
|
||||
types: [ opened, synchronize, reopened, labeled, unlabeled ]
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
release-check:
|
||||
uses: ipdxco/unified-github-workflows/.github/workflows/release-check.yml@v1.0
|
||||
17 go-buffer-pool/.github/workflows/releaser.yml vendored
@ -1,17 +0,0 @@
|
||||
name: Releaser
|
||||
|
||||
on:
|
||||
push:
|
||||
paths: [ 'version.json' ]
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.sha }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
releaser:
|
||||
uses: ipdxco/unified-github-workflows/.github/workflows/releaser.yml@v1.0
|
||||
13 go-buffer-pool/.github/workflows/stale.yml vendored
@ -1,13 +0,0 @@
|
||||
name: Close and mark stale issue
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 0 * * *'
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
uses: pl-strflt/.github/.github/workflows/reusable-stale-issue.yml@v0.3
|
||||
18 go-buffer-pool/.github/workflows/tagpush.yml vendored
@ -1,18 +0,0 @@
|
||||
name: Tag Push Checker
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- v*
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
releaser:
|
||||
uses: ipdxco/unified-github-workflows/.github/workflows/tagpush.yml@v1.0
|
||||
@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Juan Batiz-Benet
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
@ -1,29 +0,0 @@
|
||||
### Applies to buffer.go and buffer_test.go ###
|
||||
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
@@ -1,56 +0,0 @@
go-buffer-pool
==================

[](https://protocol.ai)
[](https://libp2p.io/)
[](https://webchat.freenode.net/?channels=%23libp2p)
[](https://codecov.io/gh/libp2p/go-buffer-pool)
[](https://travis-ci.org/libp2p/go-buffer-pool)
[](https://discuss.libp2p.io)

> A variable size buffer pool for go.

## Table of Contents

- [About](#about)
  - [Advantages over GC](#advantages-over-gc)
  - [Disadvantages over GC:](#disadvantages-over-gc)
- [Contribute](#contribute)
- [License](#license)

## About

This library provides:

1. `BufferPool`: A pool for re-using byte slices of varied sizes. This pool will always return a slice with at least the size requested and a capacity up to the next power of two. Each size class is pooled independently which makes the `BufferPool` more space efficient than a plain `sync.Pool` when used in situations where data size may vary over an arbitrary range.
2. `Buffer`: a buffer compatible with `bytes.Buffer` but backed by a `BufferPool`. Unlike `bytes.Buffer`, `Buffer` will automatically "shrink" on read, using the buffer pool to avoid causing too much work for the allocator. This is primarily useful for long lived buffers that usually sit empty.

### Advantages over GC

* Reduces Memory Usage:
  * We don't have to wait for a GC to run before we can reuse memory. This is essential if you're repeatedly allocating large short-lived buffers.

* Reduces CPU usage:
  * It takes some load off of the GC (due to buffer reuse).
  * We don't have to zero buffers (fewer wasteful memory writes).

### Disadvantages over GC:

* Can leak memory contents. Unlike the go GC, we *don't* zero memory.
* All buffers have a capacity of a power of 2. This is fine if you either expect these buffers to be temporary or you need buffers of this size.
* Requires that buffers be explicitly put back into the pool. This can lead to race conditions and memory corruption if the buffer is released while it's still in use.

## Contribute

PRs are welcome!

Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification.

## License

MIT © Protocol Labs
BSD © The Go Authors

---

The last gx published version of this module was: 0.1.3: QmQDvJoB6aJWN3sjr3xsgXqKCXf4jU5zdMXpDMsBkYVNqa
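To make the size-class behaviour described in About concrete, here is a minimal usage sketch (not part of the original README). It assumes the upstream module is on the module path and that the pools start empty, as in a fresh process:

```go
package main

import (
	"fmt"

	pool "github.com/libp2p/go-buffer-pool"
)

func main() {
	// Get returns a slice with the requested length and a capacity rounded
	// up to the next power of two (1000 -> 1024).
	buf := pool.Get(1000)
	fmt.Println(len(buf), cap(buf)) // 1000 1024

	// Put files the slice into the size class for the power of two at or
	// below its capacity so a later Get of a similar size can reuse it.
	pool.Put(buf)

	// In a fresh, single-threaded process this will usually hand back the
	// same backing array, although sync.Pool makes no hard guarantee.
	again := pool.Get(1024)
	fmt.Println(len(again), cap(again)) // 1024 1024
}
```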
@ -1,302 +0,0 @@
|
||||
// This is a derivative work of Go's bytes.Buffer implementation.
|
||||
//
|
||||
// Originally copyright 2009 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Modifications copyright 2018 Steven Allen. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by both a BSD-style and an MIT-style
|
||||
// license that can be found in the LICENSE_BSD and LICENSE files.
|
||||
|
||||
package pool
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// Buffer is a buffer like bytes.Buffer that:
|
||||
//
|
||||
// 1. Uses a buffer pool.
|
||||
// 2. Frees memory on read.
|
||||
//
|
||||
// If you only have a few buffers and read/write at a steady rate, *don't* use
|
||||
// this package, it'll be slower.
|
||||
//
|
||||
// However:
|
||||
//
|
||||
// 1. If you frequently create/destroy buffers, this implementation will be
|
||||
// significantly nicer to the allocator.
|
||||
// 2. If you have many buffers with bursty traffic, this implementation will use
|
||||
// significantly less memory.
|
||||
type Buffer struct {
|
||||
// Pool is the buffer pool to use. If nil, this Buffer will use the
|
||||
// global buffer pool.
|
||||
Pool *BufferPool
|
||||
|
||||
buf []byte
|
||||
rOff int
|
||||
|
||||
// Preallocated slice for small reads/writes.
|
||||
// This is *really* important for performance and only costs 8 words.
|
||||
bootstrap [64]byte
|
||||
}
|
||||
|
||||
// NewBuffer constructs a new buffer initialized to `buf`.
|
||||
// Unlike `bytes.Buffer`, we *copy* the buffer but don't reuse it (to ensure
|
||||
// that we *only* use buffers from the pool).
|
||||
func NewBuffer(buf []byte) *Buffer {
|
||||
b := new(Buffer)
|
||||
if len(buf) > 0 {
|
||||
b.buf = b.getBuf(len(buf))
|
||||
copy(b.buf, buf)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// NewBufferString is identical to NewBuffer *except* that it allows one to
|
||||
// initialize the buffer from a string (without having to allocate an
|
||||
// intermediate bytes slice).
|
||||
func NewBufferString(buf string) *Buffer {
|
||||
b := new(Buffer)
|
||||
if len(buf) > 0 {
|
||||
b.buf = b.getBuf(len(buf))
|
||||
copy(b.buf, buf)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *Buffer) grow(n int) int {
|
||||
wOff := len(b.buf)
|
||||
bCap := cap(b.buf)
|
||||
|
||||
if bCap >= wOff+n {
|
||||
b.buf = b.buf[:wOff+n]
|
||||
return wOff
|
||||
}
|
||||
|
||||
bSize := b.Len()
|
||||
|
||||
minCap := 2*bSize + n
|
||||
|
||||
// Slide if cap >= minCap.
|
||||
// Reallocate otherwise.
|
||||
if bCap >= minCap {
|
||||
copy(b.buf, b.buf[b.rOff:])
|
||||
} else {
|
||||
// Needs new buffer.
|
||||
newBuf := b.getBuf(minCap)
|
||||
copy(newBuf, b.buf[b.rOff:])
|
||||
b.returnBuf()
|
||||
b.buf = newBuf
|
||||
}
|
||||
|
||||
b.rOff = 0
|
||||
b.buf = b.buf[:bSize+n]
|
||||
return bSize
|
||||
}
|
||||
|
||||
func (b *Buffer) getPool() *BufferPool {
|
||||
if b.Pool == nil {
|
||||
return GlobalPool
|
||||
}
|
||||
return b.Pool
|
||||
}
|
||||
|
||||
func (b *Buffer) returnBuf() {
|
||||
if cap(b.buf) > len(b.bootstrap) {
|
||||
b.getPool().Put(b.buf)
|
||||
}
|
||||
b.buf = nil
|
||||
}
|
||||
|
||||
func (b *Buffer) getBuf(n int) []byte {
|
||||
if n <= len(b.bootstrap) {
|
||||
return b.bootstrap[:n]
|
||||
}
|
||||
return b.getPool().Get(n)
|
||||
}
|
||||
|
||||
// Len returns the number of bytes that can be read from this buffer.
|
||||
func (b *Buffer) Len() int {
|
||||
return len(b.buf) - b.rOff
|
||||
}
|
||||
|
||||
// Cap returns the current capacity of the buffer.
|
||||
//
|
||||
// Note: Buffer *may* re-allocate when writing (or growing by) `n` bytes even if
|
||||
// `Cap() < Len() + n` to avoid excessive copying.
|
||||
func (b *Buffer) Cap() int {
|
||||
return cap(b.buf)
|
||||
}
|
||||
|
||||
// Bytes returns the slice of bytes currently buffered in the Buffer.
|
||||
//
|
||||
// The buffer returned by Bytes is valid until the next call grow, truncate,
|
||||
// read, or write. Really, just don't touch the Buffer until you're done with
|
||||
// the return value of this function.
|
||||
func (b *Buffer) Bytes() []byte {
|
||||
return b.buf[b.rOff:]
|
||||
}
|
||||
|
||||
// String returns the string representation of the buffer.
|
||||
//
|
||||
// It returns `<nil>` if the buffer is a nil pointer.
|
||||
func (b *Buffer) String() string {
|
||||
if b == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return string(b.buf[b.rOff:])
|
||||
}
|
||||
|
||||
// WriteString writes a string to the buffer.
|
||||
//
|
||||
// This function is identical to Write except that it allows one to write a
|
||||
// string directly without allocating an intermediate byte slice.
|
||||
func (b *Buffer) WriteString(buf string) (int, error) {
|
||||
wOff := b.grow(len(buf))
|
||||
return copy(b.buf[wOff:], buf), nil
|
||||
}
|
||||
|
||||
// Truncate truncates the Buffer.
|
||||
//
|
||||
// Panics if `n > b.Len()`.
|
||||
//
|
||||
// This function may free memory by shrinking the internal buffer.
|
||||
func (b *Buffer) Truncate(n int) {
|
||||
if n < 0 || n > b.Len() {
|
||||
panic("truncation out of range")
|
||||
}
|
||||
b.buf = b.buf[:b.rOff+n]
|
||||
b.shrink()
|
||||
}
|
||||
|
||||
// Reset is equivalent to Truncate(0).
|
||||
func (b *Buffer) Reset() {
|
||||
b.returnBuf()
|
||||
b.rOff = 0
|
||||
}
|
||||
|
||||
// ReadByte reads a single byte from the Buffer.
|
||||
func (b *Buffer) ReadByte() (byte, error) {
|
||||
if b.rOff >= len(b.buf) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
c := b.buf[b.rOff]
|
||||
b.rOff++
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// WriteByte writes a single byte to the Buffer.
|
||||
func (b *Buffer) WriteByte(c byte) error {
|
||||
wOff := b.grow(1)
|
||||
b.buf[wOff] = c
|
||||
return nil
|
||||
}
|
||||
|
||||
// Grow grows the internal buffer such that `n` bytes can be written without
|
||||
// reallocating.
|
||||
func (b *Buffer) Grow(n int) {
|
||||
wOff := b.grow(n)
|
||||
b.buf = b.buf[:wOff]
|
||||
}
|
||||
|
||||
// Next is an alternative to `Read` that returns a byte slice instead of taking
|
||||
// one.
|
||||
//
|
||||
// The returned byte slice is valid until the next read, write, grow, or
|
||||
// truncate.
|
||||
func (b *Buffer) Next(n int) []byte {
|
||||
m := b.Len()
|
||||
if m < n {
|
||||
n = m
|
||||
}
|
||||
data := b.buf[b.rOff : b.rOff+n]
|
||||
b.rOff += n
|
||||
return data
|
||||
}
|
||||
|
||||
// Write writes the byte slice to the buffer.
|
||||
func (b *Buffer) Write(buf []byte) (int, error) {
|
||||
wOff := b.grow(len(buf))
|
||||
return copy(b.buf[wOff:], buf), nil
|
||||
}
|
||||
|
||||
// WriteTo copies from the buffer into the given writer until the buffer is
|
||||
// empty.
|
||||
func (b *Buffer) WriteTo(w io.Writer) (int64, error) {
|
||||
if b.rOff < len(b.buf) {
|
||||
n, err := w.Write(b.buf[b.rOff:])
|
||||
b.rOff += n
|
||||
if b.rOff > len(b.buf) {
|
||||
panic("invalid write count")
|
||||
}
|
||||
b.shrink()
|
||||
return int64(n), err
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// MinRead is the minimum slice size passed to a Read call by
|
||||
// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
|
||||
// what is required to hold the contents of r, ReadFrom will not grow the
|
||||
// underlying buffer.
|
||||
const MinRead = 512
|
||||
|
||||
// ReadFrom reads from the given reader into the buffer.
|
||||
func (b *Buffer) ReadFrom(r io.Reader) (int64, error) {
|
||||
n := int64(0)
|
||||
for {
|
||||
wOff := b.grow(MinRead)
|
||||
// Use *entire* buffer.
|
||||
b.buf = b.buf[:cap(b.buf)]
|
||||
|
||||
read, err := r.Read(b.buf[wOff:])
|
||||
b.buf = b.buf[:wOff+read]
|
||||
n += int64(read)
|
||||
switch err {
|
||||
case nil:
|
||||
case io.EOF:
|
||||
err = nil
|
||||
fallthrough
|
||||
default:
|
||||
b.shrink()
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Read reads at most `len(buf)` bytes from the internal buffer into the given
|
||||
// buffer.
|
||||
func (b *Buffer) Read(buf []byte) (int, error) {
|
||||
if len(buf) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
if b.rOff >= len(b.buf) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n := copy(buf, b.buf[b.rOff:])
|
||||
b.rOff += n
|
||||
b.shrink()
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (b *Buffer) shrink() {
|
||||
c := b.Cap()
|
||||
// Either nil or bootstrap.
|
||||
if c <= len(b.bootstrap) {
|
||||
return
|
||||
}
|
||||
|
||||
l := b.Len()
|
||||
if l == 0 {
|
||||
// Shortcut if empty.
|
||||
b.returnBuf()
|
||||
b.rOff = 0
|
||||
} else if l*8 < c {
|
||||
// Only shrink when capacity > 8x length. Avoids shrinking too aggressively.
|
||||
newBuf := b.getBuf(l)
|
||||
copy(newBuf, b.buf[b.rOff:])
|
||||
b.returnBuf()
|
||||
b.rOff = 0
|
||||
b.buf = newBuf[:l]
|
||||
}
|
||||
}
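The doc comment at the top of this file says the Buffer "frees memory on read". A minimal sketch of that shrink-on-read behaviour, assuming the upstream github.com/libp2p/go-buffer-pool module rather than this (now removed) vendored copy:

```go
package main

import (
	"fmt"
	"io"

	pool "github.com/libp2p/go-buffer-pool"
)

func main() {
	var b pool.Buffer // the zero value is usable and draws from GlobalPool

	// A burst of writes grows the buffer through the pool...
	payload := make([]byte, 64*1024)
	b.Write(payload)
	fmt.Println(b.Len(), b.Cap()) // 65536 65536

	// ...and draining it returns the backing array to the pool immediately,
	// instead of leaving a large empty buffer for the GC to reclaim later.
	b.WriteTo(io.Discard)
	fmt.Println(b.Len(), b.Cap()) // 0 0
}
```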
|
||||
@ -1,400 +0,0 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Modified by stebalien, 2018
|
||||
|
||||
package pool
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const N = 10000 // make this bigger for a larger (and slower) test
|
||||
var data string // test data for write tests
|
||||
var testBytes []byte // test data; same as data but as a slice.
|
||||
|
||||
func init() {
|
||||
testBytes = make([]byte, N)
|
||||
for i := 0; i < N; i++ {
|
||||
testBytes[i] = 'a' + byte(i%26)
|
||||
}
|
||||
data = string(testBytes)
|
||||
}
|
||||
|
||||
// Verify that contents of buf match the string s.
|
||||
func check(t *testing.T, testname string, buf *Buffer, s string) {
|
||||
bytes := buf.Bytes()
|
||||
str := buf.String()
|
||||
if buf.Len() != len(bytes) {
|
||||
t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes))
|
||||
}
|
||||
|
||||
if buf.Len() != len(str) {
|
||||
t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str))
|
||||
}
|
||||
|
||||
if buf.Len() != len(s) {
|
||||
t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s))
|
||||
}
|
||||
|
||||
if string(bytes) != s {
|
||||
t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s)
|
||||
}
|
||||
}
|
||||
|
||||
// Fill buf through n writes of string fus.
|
||||
// The initial contents of buf corresponds to the string s;
|
||||
// the result is the final contents of buf returned as a string.
|
||||
func fillString(t *testing.T, testname string, buf *Buffer, s string, n int, fus string) string {
|
||||
check(t, testname+" (fill 1)", buf, s)
|
||||
for ; n > 0; n-- {
|
||||
m, err := buf.WriteString(fus)
|
||||
if m != len(fus) {
|
||||
t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fus))
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
|
||||
}
|
||||
s += fus
|
||||
check(t, testname+" (fill 4)", buf, s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Fill buf through n writes of byte slice fub.
|
||||
// The initial contents of buf corresponds to the string s;
|
||||
// the result is the final contents of buf returned as a string.
|
||||
func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string {
|
||||
check(t, testname+" (fill 1)", buf, s)
|
||||
for ; n > 0; n-- {
|
||||
m, err := buf.Write(fub)
|
||||
if m != len(fub) {
|
||||
t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub))
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
|
||||
}
|
||||
s += string(fub)
|
||||
check(t, testname+" (fill 4)", buf, s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func TestNewBuffer(t *testing.T) {
|
||||
buf := NewBuffer(testBytes)
|
||||
check(t, "NewBuffer", buf, data)
|
||||
}
|
||||
|
||||
func TestNewBufferString(t *testing.T) {
|
||||
buf := NewBufferString(data)
|
||||
check(t, "NewBufferString", buf, data)
|
||||
}
|
||||
|
||||
// Empty buf through repeated reads into fub.
|
||||
// The initial contents of buf corresponds to the string s.
|
||||
func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) {
|
||||
check(t, testname+" (empty 1)", buf, s)
|
||||
|
||||
for {
|
||||
n, err := buf.Read(fub)
|
||||
if n == 0 {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err)
|
||||
}
|
||||
s = s[n:]
|
||||
check(t, testname+" (empty 3)", buf, s)
|
||||
}
|
||||
|
||||
check(t, testname+" (empty 4)", buf, "")
|
||||
}
|
||||
|
||||
func TestBasicOperations(t *testing.T) {
|
||||
var buf Buffer
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
check(t, "TestBasicOperations (1)", &buf, "")
|
||||
|
||||
buf.Reset()
|
||||
check(t, "TestBasicOperations (2)", &buf, "")
|
||||
|
||||
buf.Truncate(0)
|
||||
check(t, "TestBasicOperations (3)", &buf, "")
|
||||
|
||||
n, err := buf.Write([]byte(data[0:1]))
|
||||
if n != 1 {
|
||||
t.Errorf("wrote 1 byte, but n == %d", n)
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("err should always be nil, but err == %s", err)
|
||||
}
|
||||
check(t, "TestBasicOperations (4)", &buf, "a")
|
||||
|
||||
buf.WriteByte(data[1])
|
||||
check(t, "TestBasicOperations (5)", &buf, "ab")
|
||||
|
||||
n, err = buf.Write([]byte(data[2:26]))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 24 {
|
||||
t.Errorf("wrote 25 bytes, but n == %d", n)
|
||||
}
|
||||
check(t, "TestBasicOperations (6)", &buf, string(data[0:26]))
|
||||
|
||||
buf.Truncate(26)
|
||||
check(t, "TestBasicOperations (7)", &buf, string(data[0:26]))
|
||||
|
||||
buf.Truncate(20)
|
||||
check(t, "TestBasicOperations (8)", &buf, string(data[0:20]))
|
||||
|
||||
empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5))
|
||||
empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100))
|
||||
|
||||
buf.WriteByte(data[1])
|
||||
c, err := buf.ReadByte()
|
||||
if err != nil {
|
||||
t.Error("ReadByte unexpected eof")
|
||||
}
|
||||
if c != data[1] {
|
||||
t.Errorf("ReadByte wrong value c=%v", c)
|
||||
}
|
||||
if _, err = buf.ReadByte(); err == nil {
|
||||
t.Error("ReadByte unexpected not eof")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLargeStringWrites(t *testing.T) {
|
||||
var buf Buffer
|
||||
limit := 30
|
||||
if testing.Short() {
|
||||
limit = 9
|
||||
}
|
||||
for i := 3; i < limit; i += 3 {
|
||||
s := fillString(t, "TestLargeWrites (1)", &buf, "", 5, data)
|
||||
empty(t, "TestLargeStringWrites (2)", &buf, s, make([]byte, len(data)/i))
|
||||
}
|
||||
check(t, "TestLargeStringWrites (3)", &buf, "")
|
||||
}
|
||||
|
||||
func TestLargeByteWrites(t *testing.T) {
|
||||
var buf Buffer
|
||||
limit := 30
|
||||
if testing.Short() {
|
||||
limit = 9
|
||||
}
|
||||
for i := 3; i < limit; i += 3 {
|
||||
s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes)
|
||||
empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i))
|
||||
}
|
||||
check(t, "TestLargeByteWrites (3)", &buf, "")
|
||||
}
|
||||
|
||||
func TestLargeStringReads(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillString(t, "TestLargeReads (1)", &buf, "", 5, data[0:len(data)/i])
|
||||
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
|
||||
}
|
||||
check(t, "TestLargeStringReads (3)", &buf, "")
|
||||
}
|
||||
|
||||
func TestLargeByteReads(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
|
||||
}
|
||||
check(t, "TestLargeByteReads (3)", &buf, "")
|
||||
}
|
||||
|
||||
func TestMixedReadsAndWrites(t *testing.T) {
|
||||
var buf Buffer
|
||||
s := ""
|
||||
for i := 0; i < 50; i++ {
|
||||
wlen := rand.Intn(len(data))
|
||||
if i%2 == 0 {
|
||||
s = fillString(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, data[0:wlen])
|
||||
} else {
|
||||
s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen])
|
||||
}
|
||||
|
||||
rlen := rand.Intn(len(data))
|
||||
fub := make([]byte, rlen)
|
||||
n, _ := buf.Read(fub)
|
||||
s = s[n:]
|
||||
}
|
||||
empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len()))
|
||||
}
|
||||
|
||||
func TestNil(t *testing.T) {
|
||||
var b *Buffer
|
||||
if b.String() != "<nil>" {
|
||||
t.Errorf("expected <nil>; got %q", b.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadFrom(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||
var b Buffer
|
||||
b.ReadFrom(&buf)
|
||||
empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteTo(t *testing.T) {
|
||||
var buf Buffer
|
||||
for i := 3; i < 30; i += 3 {
|
||||
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
|
||||
var b Buffer
|
||||
buf.WriteTo(&b)
|
||||
empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestNext(t *testing.T) {
|
||||
b := []byte{0, 1, 2, 3, 4}
|
||||
tmp := make([]byte, 5)
|
||||
for i := 0; i <= 5; i++ {
|
||||
for j := i; j <= 5; j++ {
|
||||
for k := 0; k <= 6; k++ {
|
||||
// 0 <= i <= j <= 5; 0 <= k <= 6
|
||||
// Check that if we start with a buffer
|
||||
// of length j at offset i and ask for
|
||||
// Next(k), we get the right bytes.
|
||||
buf := NewBuffer(b[0:j])
|
||||
n, _ := buf.Read(tmp[0:i])
|
||||
if n != i {
|
||||
t.Fatalf("Read %d returned %d", i, n)
|
||||
}
|
||||
bb := buf.Next(k)
|
||||
want := k
|
||||
if want > j-i {
|
||||
want = j - i
|
||||
}
|
||||
if len(bb) != want {
|
||||
t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb))
|
||||
}
|
||||
for l, v := range bb {
|
||||
if v != byte(l+i) {
|
||||
t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGrow(t *testing.T) {
|
||||
x := []byte{'x'}
|
||||
y := []byte{'y'}
|
||||
tmp := make([]byte, 72)
|
||||
for _, startLen := range []int{0, 100, 1000, 10000, 100000} {
|
||||
xBytes := bytes.Repeat(x, startLen)
|
||||
for _, growLen := range []int{0, 100, 1000, 10000, 100000} {
|
||||
buf := NewBuffer(xBytes)
|
||||
// If we read, this affects buf.off, which is good to test.
|
||||
readBytes, _ := buf.Read(tmp)
|
||||
buf.Grow(growLen)
|
||||
yBytes := bytes.Repeat(y, growLen)
|
||||
// Check no allocation occurs in write, as long as we're single-threaded.
|
||||
var m1, m2 runtime.MemStats
|
||||
runtime.ReadMemStats(&m1)
|
||||
buf.Write(yBytes)
|
||||
runtime.ReadMemStats(&m2)
|
||||
if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs {
|
||||
t.Errorf("allocation occurred during write")
|
||||
}
|
||||
// Check that buffer has correct data.
|
||||
if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) {
|
||||
t.Errorf("bad initial data at %d %d", startLen, growLen)
|
||||
}
|
||||
if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) {
|
||||
t.Errorf("bad written data at %d %d", startLen, growLen)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Was a bug: used to give EOF reading empty slice at EOF.
|
||||
func TestReadEmptyAtEOF(t *testing.T) {
|
||||
b := new(Buffer)
|
||||
slice := make([]byte, 0)
|
||||
n, err := b.Read(slice)
|
||||
if err != nil {
|
||||
t.Errorf("read error: %v", err)
|
||||
}
|
||||
if n != 0 {
|
||||
t.Errorf("wrong count; got %d want 0", n)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that we occasionally compact. Issue 5154.
|
||||
func TestBufferGrowth(t *testing.T) {
|
||||
var b Buffer
|
||||
buf := make([]byte, 1024)
|
||||
b.Write(buf[0:1])
|
||||
var cap0 int
|
||||
for i := 0; i < 5<<10; i++ {
|
||||
b.Write(buf)
|
||||
b.Read(buf)
|
||||
if i == 0 {
|
||||
cap0 = b.Cap()
|
||||
}
|
||||
}
|
||||
cap1 := b.Cap()
|
||||
// (*Buffer).grow allows for 2x capacity slop before sliding,
|
||||
// so set our error threshold at 3x.
|
||||
if cap1 > cap0*3 {
|
||||
t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWriteByte(b *testing.B) {
|
||||
const n = 4 << 10
|
||||
b.SetBytes(n)
|
||||
buf := NewBuffer(make([]byte, n))
|
||||
for i := 0; i < b.N; i++ {
|
||||
buf.Reset()
|
||||
for i := 0; i < n; i++ {
|
||||
buf.WriteByte('x')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// From Issue 5154.
|
||||
func BenchmarkBufferNotEmptyWriteRead(b *testing.B) {
|
||||
buf := make([]byte, 1024)
|
||||
for i := 0; i < b.N; i++ {
|
||||
var b Buffer
|
||||
b.Write(buf[0:1])
|
||||
for i := 0; i < 5<<10; i++ {
|
||||
b.Write(buf)
|
||||
b.Read(buf)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check that we don't compact too often. From Issue 5154.
|
||||
func BenchmarkBufferFullSmallReads(b *testing.B) {
|
||||
buf := make([]byte, 1024)
|
||||
for i := 0; i < b.N; i++ {
|
||||
var b Buffer
|
||||
b.Write(buf)
|
||||
for b.Len()+20 < b.Cap() {
|
||||
b.Write(buf[:10])
|
||||
}
|
||||
for i := 0; i < 5<<10; i++ {
|
||||
b.Read(buf[:1])
|
||||
b.Write(buf[:1])
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1,3 +0,0 @@
|
||||
coverage:
|
||||
range: "50...100"
|
||||
comment: off
|
||||
@ -1,3 +0,0 @@
|
||||
module github.com/libp2p/go-buffer-pool
|
||||
|
||||
go 1.22
|
||||
@ -1,103 +0,0 @@
|
||||
// Package pool provides a sync.Pool equivalent that buckets incoming
|
||||
// requests to one of 32 sub-pools, one for each power of 2, 0-32.
|
||||
//
|
||||
// import (pool "github.com/libp2p/go-buffer-pool")
|
||||
// var p pool.BufferPool
|
||||
//
|
||||
// small := make([]byte, 1024)
|
||||
// large := make([]byte, 4194304)
|
||||
// p.Put(small)
|
||||
// p.Put(large)
|
||||
//
|
||||
// small2 := p.Get(1024)
|
||||
// large2 := p.Get(4194304)
|
||||
// fmt.Println("small2 len:", len(small2))
|
||||
// fmt.Println("large2 len:", len(large2))
|
||||
//
|
||||
// // Output:
|
||||
// // small2 len: 1024
|
||||
// // large2 len: 4194304
|
||||
package pool
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/bits"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// GlobalPool is a static Pool for reusing byteslices of various sizes.
|
||||
var GlobalPool = new(BufferPool)
|
||||
|
||||
// MaxLength is the maximum length of an element that can be added to the Pool.
|
||||
const MaxLength = math.MaxInt32
|
||||
|
||||
// BufferPool is a pool to handle cases of reusing elements of varying sizes. It
|
||||
// maintains 32 internal pools, for each power of 2 in 0-32.
|
||||
//
|
||||
// You should generally just call the package level Get and Put methods or use
|
||||
// the GlobalPool BufferPool instead of constructing your own.
|
||||
//
|
||||
// You MUST NOT copy Pool after using.
|
||||
type BufferPool struct {
|
||||
pools [32]sync.Pool // a list of singlePools
|
||||
}
|
||||
|
||||
// Get retrieves a buffer of the appropriate length from the buffer pool or
|
||||
// allocates a new one. Get may choose to ignore the pool and treat it as empty.
|
||||
// Callers should not assume any relation between values passed to Put and the
|
||||
// values returned by Get.
|
||||
//
|
||||
// If no suitable buffer exists in the pool, Get creates one.
|
||||
func (p *BufferPool) Get(length int) []byte {
|
||||
if length == 0 {
|
||||
return nil
|
||||
}
|
||||
// Calling this function with a negative length is invalid.
|
||||
// make will panic if length is negative, so we don't have to.
|
||||
if length > MaxLength || length < 0 {
|
||||
return make([]byte, length)
|
||||
}
|
||||
idx := nextLogBase2(uint32(length))
|
||||
if ptr := p.pools[idx].Get(); ptr != nil {
|
||||
buf := ptr.([]byte)
|
||||
buf = buf[:uint32(length)]
|
||||
return buf
|
||||
}
|
||||
return make([]byte, 1<<idx)[:uint32(length)]
|
||||
}
|
||||
|
||||
// Put adds x to the pool.
|
||||
func (p *BufferPool) Put(buf []byte) {
|
||||
capacity := cap(buf)
|
||||
if capacity == 0 || capacity > MaxLength {
|
||||
return // drop it
|
||||
}
|
||||
idx := prevLogBase2(uint32(capacity))
|
||||
// nolint: staticcheck
|
||||
p.pools[idx].Put(buf)
|
||||
}
|
||||
|
||||
// Get retrieves a buffer of the appropriate length from the global buffer pool
|
||||
// (or allocates a new one).
|
||||
func Get(length int) []byte {
|
||||
return GlobalPool.Get(length)
|
||||
}
|
||||
|
||||
// Put returns a buffer to the global buffer pool.
|
||||
func Put(slice []byte) {
|
||||
GlobalPool.Put(slice)
|
||||
}
|
||||
|
||||
// Log of base two, round up (for v > 0).
|
||||
func nextLogBase2(v uint32) uint32 {
|
||||
return uint32(bits.Len32(v - 1))
|
||||
}
|
||||
|
||||
// Log of base two, round down (for v > 0)
|
||||
func prevLogBase2(num uint32) uint32 {
|
||||
next := nextLogBase2(num)
|
||||
if num == (1 << uint32(next)) {
|
||||
return next
|
||||
}
|
||||
return next - 1
|
||||
}
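As a worked example of the two rounding helpers above (reimplemented locally so the snippet stands alone): Get rounds the requested length up to pick a bucket, while Put rounds the capacity down, which is what keeps reslicing safe.

```go
package main

import (
	"fmt"
	"math/bits"
)

// Same index math as nextLogBase2/prevLogBase2 above.
func next(v uint32) uint32 { return uint32(bits.Len32(v - 1)) }
func prev(v uint32) uint32 {
	n := next(v)
	if v == 1<<n {
		return n
	}
	return n - 1
}

func main() {
	// Get(1000) looks in bucket 10 (slices with at least 1024 bytes of
	// capacity); Put of a 1000-capacity slice files it under bucket 9.
	// Rounding down on Put guarantees every slice stored in bucket k has
	// capacity >= 1<<k, so Get can reslice it without going out of range.
	fmt.Println(next(1000), prev(1000)) // 10 9
	fmt.Println(next(1024), prev(1024)) // 10 10
}
```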
|
||||
@ -1,185 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Pool is no-op under race detector, so all these tests do not work.
|
||||
//go:build !race
|
||||
|
||||
package pool
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRange(t *testing.T) {
|
||||
min := nextLogBase2(1)
|
||||
max := nextLogBase2(uint32(MaxLength))
|
||||
if int(max) != len(GlobalPool.pools)-1 {
|
||||
t.Errorf("expected %d pools, found %d", max, len(GlobalPool.pools))
|
||||
}
|
||||
if min != 0 {
|
||||
t.Errorf("unused min pool")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPool(t *testing.T) {
|
||||
// disable GC so we can control when it happens.
|
||||
defer debug.SetGCPercent(debug.SetGCPercent(-1))
|
||||
var p BufferPool
|
||||
|
||||
a := make([]byte, 21)
|
||||
a[0] = 1
|
||||
b := make([]byte, 2050)
|
||||
b[0] = 2
|
||||
p.Put(a)
|
||||
p.Put(b)
|
||||
if g := p.Get(16); &g[0] != &a[0] {
|
||||
t.Fatalf("got [%d,...]; want [1,...]", g[0])
|
||||
}
|
||||
if g := p.Get(2048); &g[0] != &b[0] {
|
||||
t.Fatalf("got [%d,...]; want [2,...]", g[0])
|
||||
}
|
||||
if g := p.Get(16); cap(g) != 16 || !bytes.Equal(g[:16], make([]byte, 16)) {
|
||||
t.Fatalf("got existing slice; want new slice")
|
||||
}
|
||||
if g := p.Get(2048); cap(g) != 2048 || !bytes.Equal(g[:2048], make([]byte, 2048)) {
|
||||
t.Fatalf("got existing slice; want new slice")
|
||||
}
|
||||
if g := p.Get(1); cap(g) != 1 || !bytes.Equal(g[:1], make([]byte, 1)) {
|
||||
t.Fatalf("got existing slice; want new slice")
|
||||
}
|
||||
d := make([]byte, 1023)
|
||||
d[0] = 3
|
||||
p.Put(d)
|
||||
if g := p.Get(1024); cap(g) != 1024 || !bytes.Equal(g, make([]byte, 1024)) {
|
||||
t.Fatalf("got existing slice; want new slice")
|
||||
}
|
||||
if g := p.Get(512); cap(g) != 1023 || g[0] != 3 {
|
||||
t.Fatalf("got [%d,...]; want [3,...]", g[0])
|
||||
}
|
||||
p.Put(a)
|
||||
|
||||
debug.SetGCPercent(100) // to allow following GC to actually run
|
||||
runtime.GC()
|
||||
// For some reason, you need to run GC twice on go 1.16 if you want it to reliably work.
|
||||
runtime.GC()
|
||||
if g := p.Get(10); &g[0] == &a[0] {
|
||||
t.Fatalf("got a; want new slice after GC")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPoolStressByteSlicePool(t *testing.T) {
|
||||
var p BufferPool
|
||||
|
||||
const P = 10
|
||||
chs := 10
|
||||
maxSize := 1 << 16
|
||||
N := int(1e4)
|
||||
if testing.Short() {
|
||||
N /= 100
|
||||
}
|
||||
done := make(chan bool)
|
||||
errs := make(chan error)
|
||||
for i := 0; i < P; i++ {
|
||||
go func() {
|
||||
ch := make(chan []byte, chs+1)
|
||||
|
||||
for i := 0; i < chs; i++ {
|
||||
j := rand.Int() % maxSize
|
||||
ch <- p.Get(j)
|
||||
}
|
||||
|
||||
for j := 0; j < N; j++ {
|
||||
r := 0
|
||||
for i := 0; i < chs; i++ {
|
||||
v := <-ch
|
||||
p.Put(v)
|
||||
r = rand.Int() % maxSize
|
||||
v = p.Get(r)
|
||||
if len(v) < r {
|
||||
errs <- fmt.Errorf("expect len(v) >= %d, got %d", j, len(v))
|
||||
}
|
||||
ch <- v
|
||||
}
|
||||
|
||||
if r%1000 == 0 {
|
||||
runtime.GC()
|
||||
}
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
}
|
||||
|
||||
for i := 0; i < P; {
|
||||
select {
|
||||
case <-done:
|
||||
i++
|
||||
case err := <-errs:
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPool(b *testing.B) {
|
||||
var p BufferPool
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
i := 7
|
||||
for pb.Next() {
|
||||
if i > 1<<20 {
|
||||
i = 7
|
||||
} else {
|
||||
i = i << 1
|
||||
}
|
||||
b := p.Get(i)
|
||||
b[0] = byte(i)
|
||||
p.Put(b)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkAlloc(b *testing.B) {
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
i := 7
|
||||
for pb.Next() {
|
||||
if i > 1<<20 {
|
||||
i = 7
|
||||
} else {
|
||||
i = i << 1
|
||||
}
|
||||
b := make([]byte, i)
|
||||
b[1] = byte(i)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkPoolOverlflow(b *testing.B) {
|
||||
var p BufferPool
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
bufs := make([][]byte, 2100)
|
||||
for pow := uint32(0); pow < 21; pow++ {
|
||||
for i := 0; i < 100; i++ {
|
||||
bufs = append(bufs, p.Get(1<<pow))
|
||||
}
|
||||
}
|
||||
for _, b := range bufs {
|
||||
p.Put(b)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func ExampleGet() {
|
||||
buf := Get(100)
|
||||
fmt.Println("length", len(buf))
|
||||
fmt.Println("capacity", cap(buf))
|
||||
Put(buf)
|
||||
// Output:
|
||||
// length 100
|
||||
// capacity 128
|
||||
}
|
||||
@ -1,3 +0,0 @@
|
||||
{
|
||||
"version": "v0.1.0"
|
||||
}
|
||||
@ -1,119 +0,0 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const WriterBufferSize = 4096
|
||||
|
||||
var bufioWriterPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return bufio.NewWriterSize(nil, WriterBufferSize)
|
||||
},
|
||||
}
|
||||
|
||||
// Writer is a buffered writer that returns its internal buffer in a pool when
|
||||
// not in use.
|
||||
type Writer struct {
|
||||
W io.Writer
|
||||
bufw *bufio.Writer
|
||||
}
|
||||
|
||||
func (w *Writer) ensureBuffer() {
|
||||
if w.bufw == nil {
|
||||
w.bufw = bufioWriterPool.Get().(*bufio.Writer)
|
||||
w.bufw.Reset(w.W)
|
||||
}
|
||||
}
|
||||
|
||||
// Write writes the given byte slice to the underlying connection.
|
||||
//
|
||||
// Note: Write won't return the write buffer to the pool even if it ends up
|
||||
// being empty after the write. You must call Flush() to do that.
|
||||
func (w *Writer) Write(b []byte) (int, error) {
|
||||
if w.bufw == nil {
|
||||
if len(b) >= WriterBufferSize {
|
||||
return w.W.Write(b)
|
||||
}
|
||||
w.bufw = bufioWriterPool.Get().(*bufio.Writer)
|
||||
w.bufw.Reset(w.W)
|
||||
}
|
||||
return w.bufw.Write(b)
|
||||
}
|
||||
|
||||
// Size returns the size of the underlying buffer.
|
||||
func (w *Writer) Size() int {
|
||||
return WriterBufferSize
|
||||
}
|
||||
|
||||
// Available returns the amount of buffer space available.
|
||||
func (w *Writer) Available() int {
|
||||
if w.bufw != nil {
|
||||
return w.bufw.Available()
|
||||
}
|
||||
return WriterBufferSize
|
||||
}
|
||||
|
||||
// Buffered returns the amount of data buffered.
|
||||
func (w *Writer) Buffered() int {
|
||||
if w.bufw != nil {
|
||||
return w.bufw.Buffered()
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// WriteByte writes a single byte.
|
||||
func (w *Writer) WriteByte(b byte) error {
|
||||
w.ensureBuffer()
|
||||
return w.bufw.WriteByte(b)
|
||||
}
|
||||
|
||||
// WriteRune writes a single rune, returning the number of bytes written.
|
||||
func (w *Writer) WriteRune(r rune) (int, error) {
|
||||
w.ensureBuffer()
|
||||
return w.bufw.WriteRune(r)
|
||||
}
|
||||
|
||||
// WriteString writes a string, returning the number of bytes written.
|
||||
func (w *Writer) WriteString(s string) (int, error) {
|
||||
w.ensureBuffer()
|
||||
return w.bufw.WriteString(s)
|
||||
}
|
||||
|
||||
// Flush flushes the write buffer, if any, and returns it to the pool.
|
||||
func (w *Writer) Flush() error {
|
||||
if w.bufw == nil {
|
||||
return nil
|
||||
}
|
||||
if err := w.bufw.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
w.bufw.Reset(nil)
|
||||
bufioWriterPool.Put(w.bufw)
|
||||
w.bufw = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close flushes the underlying writer and closes it if it implements the
|
||||
// io.Closer interface.
|
||||
//
|
||||
// Note: Close() closes the writer even if Flush() fails to avoid leaking system
|
||||
// resources. If you want to make sure Flush() succeeds, call it first.
|
||||
func (w *Writer) Close() error {
|
||||
var (
|
||||
ferr, cerr error
|
||||
)
|
||||
ferr = w.Flush()
|
||||
|
||||
// always close even if flush fails.
|
||||
if closer, ok := w.W.(io.Closer); ok {
|
||||
cerr = closer.Close()
|
||||
}
|
||||
|
||||
if ferr != nil {
|
||||
return ferr
|
||||
}
|
||||
return cerr
|
||||
}
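A short usage sketch of the pooled Writer above, again assuming the upstream module: writes below the 4096-byte threshold stay in the pooled bufio.Writer until Flush or Close hands it back.

```go
package main

import (
	"bytes"
	"fmt"

	pool "github.com/libp2p/go-buffer-pool"
)

func main() {
	var sink bytes.Buffer

	// Writer borrows a 4096-byte bufio.Writer from a shared pool on first
	// use and returns it on Flush/Close, so idle writers hold no buffer.
	w := pool.Writer{W: &sink}
	w.WriteString("hello ")
	w.WriteString("world")
	if err := w.Flush(); err != nil { // returns the bufio.Writer to the pool
		panic(err)
	}
	fmt.Println(sink.String()) // hello world
}
```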
|
||||
@ -1,91 +0,0 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func checkSize(t *testing.T, w *Writer) {
|
||||
if w.Size()-w.Buffered() != w.Available() {
|
||||
t.Fatalf("size (%d), buffered (%d), available (%d) mismatch", w.Size(), w.Buffered(), w.Available())
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriter(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
w := Writer{W: &b}
|
||||
n, err := w.Write([]byte("foobar"))
|
||||
checkSize(t, &w)
|
||||
|
||||
if err != nil || n != 6 {
|
||||
t.Fatalf("write failed: %d, %s", n, err)
|
||||
}
|
||||
if b.Len() != 0 {
|
||||
t.Fatal("expected the buffer to be empty")
|
||||
}
|
||||
if w.Buffered() != 6 {
|
||||
t.Fatalf("expected 6 bytes to be buffered, got %d", w.Buffered())
|
||||
}
|
||||
checkSize(t, &w)
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
checkSize(t, &w)
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
checkSize(t, &w)
|
||||
if b.String() != "foobar" {
|
||||
t.Fatal("expected to have written foobar")
|
||||
}
|
||||
b.Reset()
|
||||
|
||||
buf := make([]byte, WriterBufferSize)
|
||||
n, err = w.Write(buf)
|
||||
if n != WriterBufferSize || err != nil {
|
||||
t.Fatalf("write failed: %d, %s", n, err)
|
||||
}
|
||||
checkSize(t, &w)
|
||||
if b.Len() != WriterBufferSize {
|
||||
t.Fatal("large write should have gone through directly")
|
||||
}
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
checkSize(t, &w)
|
||||
|
||||
b.Reset()
|
||||
if err := w.WriteByte(1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if w.Buffered() != 1 {
|
||||
t.Fatalf("expected 1 byte to be buffered, got %d", w.Buffered())
|
||||
}
|
||||
if n, err := w.WriteRune('1'); err != nil || n != 1 {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if w.Buffered() != 2 {
|
||||
t.Fatalf("expected 2 bytes to be buffered, got %d", w.Buffered())
|
||||
}
|
||||
checkSize(t, &w)
|
||||
if n, err := w.WriteString("foobar"); err != nil || n != 6 {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if w.Buffered() != 8 {
|
||||
t.Fatalf("expected 8 bytes to be buffered, got %d", w.Buffered())
|
||||
}
|
||||
checkSize(t, &w)
|
||||
if b.Len() != 0 {
|
||||
t.Fatal("write should have been buffered")
|
||||
}
|
||||
n, err = w.Write(buf)
|
||||
if n != WriterBufferSize || err != nil {
|
||||
t.Fatalf("write failed: %d, %s", n, err)
|
||||
}
|
||||
if b.Len() != WriterBufferSize || b.Bytes()[0] != 1 || b.String()[1:8] != "1foobar" {
|
||||
t.Fatalf("failed to flush properly: len:%d, prefix:%#v", b.Len(), b.Bytes()[:10])
|
||||
}
|
||||
if err := w.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -169,10 +169,11 @@ func (p *PubSub) handlePeerDead(s network.Stream) {
 }

 func (p *PubSub) handleSendingMessages(ctx context.Context, s network.Stream, q *rpcQueue) {
+    getBuffer, returnLastBuffer := makeBufferSource()
+    defer returnLastBuffer()
     writeRPC := func(rpc *RPC) error {
         size := uint64(rpc.Size())
-        buf := pool.Get(varint.UvarintSize(size) + int(size))
-        defer pool.Put(buf)
+        buf := getBuffer(varint.UvarintSize(size) + int(size))
         n := binary.PutUvarint(buf, size)
         _, err := rpc.MarshalTo(buf[n:])
         if err != nil {
@@ -234,3 +235,28 @@ func copyRPC(rpc *RPC) *RPC {
     res.RPC = (proto.Clone(rpc.RPC)).(*pb.RPC)
     return res
 }
+
+// makeBufferSource returns a function that can be used to allocate buffers of
+// a given size, and a function that can be used to return the last buffer
+// allocated.
+// The returned function will attempt to reuse the last buffer allocated if
+// the requested size is less than or equal to the capacity of the last buffer.
+// If the requested size is greater than the capacity of the last buffer, the
+// last buffer is returned to the pool and a new buffer is allocated.
+// If the requested size is less than or equal to half the capacity of the last
+// buffer, the last buffer is returned to the pool and a new buffer is allocated.
+func makeBufferSource() (func(int) []byte, func()) {
+    b := pool.Get(0)
+    mk := func(n int) []byte {
+        if c := cap(b); c/2 < n && n <= c {
+            return b[:n]
+        }
+        pool.Put(b)
+        b = pool.Get(n)
+        return b
+    }
+    rt := func() {
+        pool.Put(b)
+    }
+    return mk, rt
+}
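This is the "use local buffer" part of the commit. A hedged, self-contained sketch of how the new helper behaves; the closure below copies makeBufferSource so the example compiles outside the pubsub package, and the printed capacities assume a fresh process with empty pools:

```go
package main

import (
	"fmt"

	pool "github.com/libp2p/go-buffer-pool"
)

// bufferSource mirrors makeBufferSource above: reuse the previous buffer when
// the new request still fits and uses more than half of its capacity,
// otherwise trade it in at the pool for a fresh one.
func bufferSource() (get func(int) []byte, done func()) {
	b := pool.Get(0)
	get = func(n int) []byte {
		if c := cap(b); c/2 < n && n <= c {
			return b[:n]
		}
		pool.Put(b)
		b = pool.Get(n)
		return b
	}
	done = func() { pool.Put(b) }
	return get, done
}

func main() {
	get, done := bufferSource()
	defer done()

	a := get(1000) // fresh 1024-capacity buffer from the pool
	b := get(900)  // 512 < 900 <= 1024, so the same backing array is reused
	fmt.Println(cap(a), cap(b)) // 1024 1024

	// A much smaller request gives the old buffer back and fetches a tighter
	// one; a and b must not be touched after this point, since their backing
	// array is back in the pool.
	c := get(100)
	fmt.Println(cap(c)) // 128
}
```

The effect is that the send loop keeps one buffer alive across similarly sized RPCs instead of doing a pool Get/Put per message.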
@ -8,8 +8,6 @@ replace github.com/multiformats/go-multiaddr => ../go-multiaddr
|
||||
|
||||
replace github.com/multiformats/go-multiaddr-dns => ../go-multiaddr-dns
|
||||
|
||||
replace github.com/libp2p/go-buffer-pool => ../go-buffer-pool
|
||||
|
||||
replace github.com/libp2p/go-libp2p => ../go-libp2p
|
||||
|
||||
replace github.com/libp2p/go-libp2p-gostream => ../go-libp2p-gostream
|
||||
|
||||
@ -140,6 +140,8 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
|
||||
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
|
||||
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
|
||||
|
||||
@ -10,8 +10,6 @@ replace github.com/multiformats/go-multiaddr => ../go-multiaddr
|
||||
|
||||
replace github.com/multiformats/go-multiaddr-dns => ../go-multiaddr-dns
|
||||
|
||||
replace github.com/libp2p/go-buffer-pool => ../go-buffer-pool
|
||||
|
||||
replace github.com/libp2p/go-libp2p => ../go-libp2p
|
||||
|
||||
require (
|
||||
|
||||
@ -246,6 +246,10 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ=
|
||||
github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
|
||||
github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c=
|
||||
github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic=
|
||||
github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8=
|
||||
|
||||
@ -6,8 +6,6 @@ toolchain go1.22.5
|
||||
|
||||
retract v0.26.1 // Tag was applied incorrectly due to a bug in the release workflow.
|
||||
|
||||
replace github.com/libp2p/go-buffer-pool => ../go-buffer-pool
|
||||
|
||||
replace github.com/multiformats/go-multiaddr => ../go-multiaddr
|
||||
|
||||
replace github.com/multiformats/go-multiaddr-dns => ../go-multiaddr-dns
|
||||
|
||||
@ -186,6 +186,8 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
|
||||
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
|
||||
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
|
||||
|
||||
@ -15,8 +15,6 @@ replace github.com/multiformats/go-multiaddr => ../go-multiaddr
|
||||
|
||||
replace github.com/multiformats/go-multiaddr-dns => ../go-multiaddr-dns
|
||||
|
||||
replace github.com/libp2p/go-buffer-pool => ../go-buffer-pool
|
||||
|
||||
replace github.com/libp2p/go-libp2p => ../go-libp2p
|
||||
|
||||
replace github.com/libp2p/go-libp2p-kad-dht => ../go-libp2p-kad-dht
|
||||
|
||||
@ -275,6 +275,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
|
||||
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
|
||||
github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c=
|
||||
github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic=
|
||||
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
|
||||
|
||||
@@ -406,7 +406,6 @@ func main() {
     }

     if *core != 0 {
-        rdebug.SetGCPercent(9999)
         rdebug.SetMemoryLimit(nodeConfig.Engine.DataWorkerMemoryLimit)

         if *parentProcess == 0 && len(nodeConfig.Engine.DataWorkerMultiaddrs) == 0 {
@@ -458,9 +457,6 @@ func main() {
         fmt.Println("The memory available to the node, unallocated to the data workers, is less than 8GiB.")
         fmt.Println("You are at risk of running out of memory during runtime.")
     default:
-        if _, explicitGOGC := os.LookupEnv("GOGC"); !explicitGOGC {
-            rdebug.SetGCPercent(9999)
-        }
         if _, explicitGOMEMLIMIT := os.LookupEnv("GOMEMLIMIT"); !explicitGOMEMLIMIT {
             rdebug.SetMemoryLimit(availableOverhead * 8 / 10)
         }
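After this change the default branch leaves GOGC alone (the "Do not change GOGC" bullet) and only sets a soft memory limit, and only when the operator has not set GOMEMLIMIT explicitly. A standalone sketch of the remaining pattern, with illustrative names and nothing beyond the standard library:

```go
package main

import (
	"os"
	rdebug "runtime/debug"
)

// limitMemory sets a soft memory limit with ~20% headroom, but only when the
// operator has not already chosen one via GOMEMLIMIT. GOGC is deliberately
// left untouched so the collector keeps its default pacing.
func limitMemory(availableBytes int64) {
	if _, explicit := os.LookupEnv("GOMEMLIMIT"); !explicit {
		rdebug.SetMemoryLimit(availableBytes * 8 / 10)
	}
}

func main() {
	limitMemory(8 << 30) // e.g. 8 GiB of measured headroom, as an illustration
}
```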