Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix function comments based on best practices from Effective Go #18

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion blocksources/fixed_size_block_resolver.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ func (r *FixedSizeBlockResolver) GetBlockEndOffset(blockID uint) int64 {
}
}

// Split blocks into chunks of the desired size, or less. This implementation assumes a fixed block size at the source.
// SplitBlockRangeToDesiredSize splits blocks into chunks of the desired size, or less. This implementation assumes a fixed block size at the source.
func (r *FixedSizeBlockResolver) SplitBlockRangeToDesiredSize(startBlockID, endBlockID uint) []QueuedRequest {

if r.MaxDesiredRequestSize == 0 {
Expand Down
2 changes: 1 addition & 1 deletion chunks/chunks.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ type ChunkChecksum struct {
StrongChecksum []byte
}

// compares a checksum to another based on the checksums, not the offset
// Match compares a checksum to another based on the checksums, not the offset
func (chunk ChunkChecksum) Match(other ChunkChecksum) bool {
weakEqual := bytes.Compare(chunk.WeakChecksum, other.WeakChecksum) == 0
strongEqual := false
Expand Down
4 changes: 2 additions & 2 deletions circularbuffer/noalloc.go
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ func (c *C2) Truncate(byteCount int) (evicted []byte) {
return bufferToRead.buffer[start : start+byteCount]
}

// get the current buffer contents of block
// GetBlock gets the current buffer contents of block
func (c *C2) GetBlock() []byte {
// figure out which buffer has it stored contiguously
bufferToRead := c.getBlockBuffer()
Expand All @@ -116,7 +116,7 @@ func (c *C2) GetBlock() []byte {
return bufferToRead.buffer[start:bufferToRead.head]
}

// get the data that was evicted by the last write
// Evicted gets the data that was evicted by the last write
func (c *C2) Evicted() []byte {
if c.totalWritten <= c.blocksize {
return nil
Expand Down
4 changes: 2 additions & 2 deletions filechecksum/filechecksum.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,13 +84,13 @@ func (check *FileChecksumGenerator) GetChecksumSizes() (int, int) {
return check.WeakRollingHash.Size(), check.GetStrongHash().Size()
}

// Gets the Hash function for the overall file used on each block
// GetFileHash gets the Hash function for the overall file used on each block.
// Defaults to md5.
func (check *FileChecksumGenerator) GetFileHash() hash.Hash {
	return check.FileChecksumHash
}

// Gets the Hash function for the strong hash used on each block
// GetStrongHash gets the Hash function for the strong hash used on each block.
// Defaults to md5, but can be overridden by the generator.
func (check *FileChecksumGenerator) GetStrongHash() hash.Hash {
return check.StrongHash
Expand Down
3 changes: 1 addition & 2 deletions patcher/sequential/sequential.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,7 @@ const (
ABSOLUTE_POSITION = 0
)

/*
This simple example currently doesn't do any pipelining of needed blocks, nor does it deal with
/*
SequentialPatcher currently doesn't do any pipelining of needed blocks, nor does it deal with
blocks being delivered out of order.
*/
func SequentialPatcher(
Expand Down
2 changes: 1 addition & 1 deletion rollsum/rollsum_32.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ func (r *Rollsum32) BlockSize() int {
return int(r.blockSize)
}

// the number of bytes
// Size returns the number of bytes of the checksum (4 for a 32-bit rollsum).
func (r *Rollsum32) Size() int {
	return 4
}
Expand Down
6 changes: 3 additions & 3 deletions rollsum/rollsum_32_base.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import (

const FULL_BYTES_16 = (1 << 16) - 1

// Rollsum32Base decouples the rollsum algorithm from the implementation of
// NewRollsum32Base decouples the rollsum algorithm from the implementation of
// hash.Hash and the storage of the rolling checksum window.
// This allows us to write different versions of the storage for the distinctly
// different use-cases and optimize the storage with the usage pattern.
Expand All @@ -21,7 +21,7 @@ type Rollsum32Base struct {
a, b uint32
}

// Add a single byte into the rollsum
// AddByte adds a single byte into the rollsum
func (r *Rollsum32Base) AddByte(b byte) {
r.a += uint32(b)
r.b += r.a
Expand All @@ -34,7 +34,7 @@ func (r *Rollsum32Base) AddBytes(bs []byte) {
}
}

// Remove a byte from the end of the rollsum
// RemoveByte removes a byte from the end of the rollsum
// Use the previous length (before removal)
func (r *Rollsum32Base) RemoveByte(b byte, length int) {
r.a -= uint32(b)
Expand Down
2 changes: 1 addition & 1 deletion util/readers/sequencelimit.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ import (
"io"
)

// read from 'readers' in sequence up to a limit of 'size'
// SequenceLimit reads from 'readers' in sequence up to a limit of 'size'
func SequenceLimit(size int64, readers ...io.Reader) io.Reader {
return io.LimitReader(
io.MultiReader(readers...),
Expand Down