updated things

This commit is contained in:
Graham Steffaniak 2023-08-12 11:30:41 -05:00
parent 6f83300f92
commit 4e01929dc8
228 changed files with 2444 additions and 1627 deletions

2
.gitignore vendored
View File

@ -7,7 +7,7 @@ rice-box.go
/filebrowser.exe
/frontend/dist
/backend/vendor
/backend/*.cov
.DS_Store
node_modules

View File

@ -2,6 +2,16 @@
All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.
# v0.1.4
- various UI fixes
- Added download button back to toolbar
- Added upload button to side menu
- breadcrumb spacing fix
- Added "compact" view option
- various backend fixes
- search no longer searches by word with spaces, includes space in searches
- prepared for full json configuration
## v0.1.3
- improved styling, colors, transparency, blur

View File

@ -5,7 +5,7 @@ RUN npm i
COPY ./frontend/ ./
RUN npm run build
FROM golang:alpine as base
FROM golang:1.21-alpine as base
WORKDIR /app
COPY ./backend ./
RUN go build -ldflags="-w -s" -o filebrowser .
@ -15,7 +15,7 @@ RUN apk --no-cache add \
ca-certificates \
mailcap
VOLUME /srv
EXPOSE 80
EXPOSE 8080
WORKDIR /
COPY --from=base /app/.filebrowser.json /.filebrowser.json
COPY --from=base /app/filebrowser /filebrowser

View File

@ -1,25 +1,25 @@
package http
import (
"net/http"
"github.com/gtsteffaniak/filebrowser/search"
"net/http"
)
var searchHandler = withUser(func(w http.ResponseWriter, r *http.Request, d *data) (int, error) {
response := []map[string]interface{}{}
query := r.URL.Query().Get("query")
indexInfo, fileTypes := search.SearchAllIndexes(query, r.URL.Path)
for _,path := range(indexInfo){
for _, path := range indexInfo {
f := fileTypes[path]
responseObj := map[string]interface{}{
"path" : path,
"path": path,
}
for filterType,_ := range(f) {
for filterType, _ := range f {
if f[filterType] {
responseObj[filterType] = f[filterType]
}
}
response = append(response,responseObj)
response = append(response, responseObj)
}
return renderJSON(w, r, response)
})

0
backend/render.yml Normal file
View File

18
backend/run_benchmark.sh Executable file
View File

@ -0,0 +1,18 @@
#!/bin/sh
## TEST file used by docker testing containers
# NOTE(review): render.yml is touched so tooling that expects the file exists — confirm.
touch render.yml

# checkExit: exit with status 1 if the previous command failed.
checkExit() {
	if [ "$?" -ne 0 ]; then
		exit 1
	fi
}

# `&>` is a bash-ism and is not valid under #!/bin/sh; use the POSIX
# redirection form instead so the go-availability check works in any shell.
if command -v go > /dev/null 2>&1
then
	printf "\n == Running benchmark (sends to results.txt) == \n"
	go test -bench=. -benchmem ./...
	checkExit
else
	echo "ERROR: unable to perform tests"
	exit 1
fi

3
backend/run_check_coverage.sh Executable file
View File

@ -0,0 +1,3 @@
#!/bin/bash
# Run all tests with the race detector and collect a cross-package
# coverage profile into coverage.cov.
go test -race -v -coverpkg=./... -coverprofile=coverage.cov ./...
# Render the collected profile as an HTML report (opens a browser locally).
go tool cover -html=coverage.cov

2
backend/run_fmt.sh Executable file
View File

@ -0,0 +1,2 @@
#!/bin/bash
# Format every Go file under the current directory.
# `gofmt -w .` recurses on its own and is safe with paths containing
# whitespace, unlike iterating over unquoted $(find ...) output.
gofmt -w .

21
backend/run_tests.sh Executable file
View File

@ -0,0 +1,21 @@
#!/bin/sh
## TEST file used by docker testing containers
# NOTE(review): render.yml is touched so tooling that expects the file exists — confirm.
touch render.yml

# checkExit: exit with status 1 if the previous command failed.
checkExit() {
	if [ "$?" -ne 0 ]; then
		exit 1
	fi
}

# `&>` is a bash-ism and is not valid under #!/bin/sh; use the POSIX
# redirection form instead so the go-availability check works in any shell.
if command -v go > /dev/null 2>&1
then
	printf "\n == Running tests == \n"
	go test -race -v ./...
	checkExit
	printf "\n == Running benchmark (sends to results.txt) == \n"
	go test -bench=. -benchtime=100x -benchmem ./...
	checkExit
else
	echo "ERROR: unable to perform tests"
	exit 1
fi

View File

@ -25,14 +25,14 @@ var compressedFile = []string{
".tar.xz",
}
type searchOptions struct {
type SearchOptions struct {
Conditions map[string]bool
Size int
Terms []string
}
func ParseSearch(value string) *searchOptions {
opts := &searchOptions{
func ParseSearch(value string) *SearchOptions {
opts := &SearchOptions{
Conditions: map[string]bool{
"exact": strings.Contains(value, "case:exact"),
},
@ -79,8 +79,8 @@ func ParseSearch(value string) *searchOptions {
}
if len(types) > 0 {
// Remove the fields from the search value, including added space
value = typeRegexp.ReplaceAllString(value+" ", "")
// Remove the fields from the search value
value = typeRegexp.ReplaceAllString(value, "")
}
if value == "" {

View File

@ -2,14 +2,14 @@ package search
import (
"log"
"math/rand"
"mime"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
"mime"
"math/rand"
)
var (
@ -23,8 +23,6 @@ var (
func InitializeIndex(intervalMinutes uint32) {
// Initialize the indexes map
indexes = make(map[string][]string)
indexes["dirs"] = []string{}
indexes["files"] = []string{}
var numFiles, numDirs int
log.Println("Indexing files...")
lastIndexedStart := time.Now()
@ -97,15 +95,14 @@ func addToIndex(path string, fileName string, isDir bool) {
mutex.Lock()
defer mutex.Unlock()
path = strings.TrimPrefix(path, rootPath+"/")
path = strings.TrimSuffix(path, "/")
adjustedPath := path + "/" + fileName
if path == rootPath {
adjustedPath = fileName
path = strings.TrimSuffix(path, "/") + "/"
if path == "" {
path = "/"
}
if isDir {
indexes["dirs"] = append(indexes["dirs"], adjustedPath)
}else{
indexes["files"] = append(indexes["files"], adjustedPath)
indexes[path] = []string{}
} else {
indexes[path] = append(indexes[path], fileName)
}
}
@ -119,37 +116,52 @@ func SearchAllIndexes(search string, scope string) ([]string, map[string]map[str
defer mutex.RUnlock()
fileListTypes := make(map[string]map[string]bool)
var matching []string
maximum := 100
// 250 items total seems like a reasonable limit
maximum := 250
for _, searchTerm := range searchOptions.Terms {
if searchTerm == "" {
continue
}
// Create a reused fileType map
reusedFileType := map[string]bool{}
// Iterate over the indexes
for _,i := range([]string{"dirs","files"}) {
isdir := i == "dirs"
count := 0
for _, path := range indexes[i] {
for pathName, files := range indexes {
if count > maximum {
break
}
// this is here to terminate a search if a new one has started
// currently limited to one search per container, should be session based
value, found := sessionInProgress.Load(sourceSession)
if !found || value != runningHash {
return []string{}, map[string]map[string]bool{}
}
if count > maximum {
break
}
pathName := scopedPathNameFilter(path, scope)
pathName = scopedPathNameFilter(pathName, scope)
if pathName == "" {
continue
}
matches, fileType := containsSearchTerm(path, searchTerm, *searchOptions, isdir)
// check if dir matches
matches, fileType := containsSearchTerm(pathName, searchTerm, *searchOptions, false)
if matches {
matching = append(matching, pathName)
fileListTypes[pathName] = fileType
count++
}
for _, fileName := range files {
// check if file matches
matches, fileType := containsSearchTerm(pathName+fileName, searchTerm, *searchOptions, false)
if !matches {
continue
}
if isdir {
pathName = pathName+"/"
matching = append(matching, pathName+fileName)
// Clear and reuse the fileType map
for key := range reusedFileType {
delete(reusedFileType, key)
}
matching = append(matching, pathName)
fileListTypes[pathName] = fileType
for key, value := range fileType {
reusedFileType[key] = value
}
fileListTypes[pathName] = copyFileTypeMap(reusedFileType)
count++
}
}
@ -163,29 +175,50 @@ func SearchAllIndexes(search string, scope string) ([]string, map[string]map[str
return matching, fileListTypes
}
func scopedPathNameFilter(pathName string, scope string) string {
scope = strings.TrimPrefix(scope, "/")
if strings.HasPrefix(pathName, scope) {
pathName = strings.TrimPrefix(pathName, scope)
} else {
pathName = ""
func copyFileTypeMap(src map[string]bool) map[string]bool {
dest := make(map[string]bool, len(src))
for key, value := range src {
dest[key] = value
}
return pathName
return dest
}
func containsSearchTerm(pathName string, searchTerm string, options searchOptions, isDir bool) (bool, map[string]bool) {
func scopedPathNameFilter(pathName string, scope string) string {
if strings.HasPrefix(pathName, scope) {
return strings.TrimPrefix(pathName, scope)
}
return ""
}
func containsSearchTerm(pathName string, searchTerm string, options SearchOptions, isDir bool) (bool, map[string]bool) {
conditions := options.Conditions
path := getLastPathComponent(pathName)
// Convert to lowercase once
lowerPath := path
lowerSearchTerm := searchTerm
if !conditions["exact"] {
path = strings.ToLower(path)
searchTerm = strings.ToLower(searchTerm)
lowerPath = strings.ToLower(path)
lowerSearchTerm = strings.ToLower(searchTerm)
}
if strings.Contains(path, searchTerm) {
fileTypes := map[string]bool{}
fileSize := getFileSize(pathName)
matchesCondition := false
if strings.Contains(lowerPath, lowerSearchTerm) {
// Reuse the fileTypes map and clear its values
fileTypes := map[string]bool{
"audio": false,
"image": false,
"video": false,
"doc": false,
"archive": false,
"dir": false,
}
// Calculate fileSize only if needed
var fileSize int64
if conditions["larger"] || conditions["smaller"] {
fileSize = getFileSize(pathName)
}
matchesAllConditions := true
extension := filepath.Ext(strings.ToLower(path))
extension := filepath.Ext(lowerPath)
mimetype := mime.TypeByExtension(extension)
fileTypes["audio"] = strings.HasPrefix(mimetype, "audio")
fileTypes["image"] = strings.HasPrefix(mimetype, "image")
@ -194,20 +227,27 @@ func containsSearchTerm(pathName string, searchTerm string, options searchOption
fileTypes["archive"] = isArchive(extension)
fileTypes["dir"] = isDir
for t,v := range conditions {
switch t {
case "exact" : continue
case "larger" : matchesCondition = fileSize > int64(options.Size) * 1000000
case "smaller" : matchesCondition = fileSize < int64(options.Size) * 1000000
default : matchesCondition = v == fileTypes[t]
for t, v := range conditions {
if t == "exact" {
continue
}
if (!matchesCondition) {
var matchesCondition bool
switch t {
case "larger":
matchesCondition = fileSize > int64(options.Size)*1000000
case "smaller":
matchesCondition = fileSize < int64(options.Size)*1000000
default:
matchesCondition = v == fileTypes[t]
}
if !matchesCondition {
matchesAllConditions = false
}
}
return matchesAllConditions, fileTypes
}
// Clear variables and return
return false, map[string]bool{}
}
@ -221,7 +261,7 @@ func isDoc(extension string) bool {
}
func getFileSize(filepath string) int64 {
fileInfo, err := os.Stat(rootPath+"/"+filepath)
fileInfo, err := os.Stat(rootPath + "/" + filepath)
if err != nil {
return 0
}

View File

@ -0,0 +1,150 @@
package search
import (
"encoding/json"
"fmt"
"math/rand"
"reflect"
"testing"
"time"
)
// loop over test files and compare output
func TestParseSearch(t *testing.T) {
value := ParseSearch("my test search")
want := &SearchOptions{
Conditions: map[string]bool{
"exact": false,
},
Terms: []string{"my test search"},
}
if !reflect.DeepEqual(value, want) {
t.Fatalf("\n got: %+v\n want: %+v", value, want)
}
value = ParseSearch("case:exact my|test|search")
want = &SearchOptions{
Conditions: map[string]bool{
"exact": true,
},
Terms: []string{"my", "test", "search"},
}
if !reflect.DeepEqual(value, want) {
t.Fatalf("\n got: %+v\n want: %+v", value, want)
}
}
// BenchmarkSearchAllIndexes measures search throughput over a mock index
// using randomly generated query terms; each iteration runs every term once.
func BenchmarkSearchAllIndexes(b *testing.B) {
	indexes = make(map[string][]string)
	// Create mock data
	createMockData(500, 3) // 500 dirs, 3 files per dir
	// Generate 100 random search terms
	searchTerms := generateRandomSearchTerms(100)
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		// Execute the SearchAllIndexes function
		for _, term := range searchTerms {
			SearchAllIndexes(term, "/")
		}
	}
	printBenchmarkResults(b)
}
// BenchmarkFillIndex measures how quickly the in-memory index can be
// populated with mock directories and files.
func BenchmarkFillIndex(b *testing.B) {
	indexes = make(map[string][]string)
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		createMockData(10000, 10) // 10000 dirs, 10 files per dir
	}
	// Log the indexed directory names. Use b.Log (not b.Logf) because the
	// key is data, not a format string — `go vet` flags non-constant
	// format strings passed to Logf. `for a := range` replaces the
	// redundant `for a, _ := range` form flagged by gofmt/staticcheck.
	for a := range indexes {
		b.Log(a)
	}
	printBenchmarkResults(b)
}
// createMockData fills the index with numDirs randomly named directories,
// each holding numFilesPerDir randomly named files.
func createMockData(numDirs, numFilesPerDir int) {
	for d := 0; d < numDirs; d++ {
		dir := getRandomTerm()
		// Register the directory itself under the root.
		addToIndex("/", dir, true)
		for f := 0; f < numFilesPerDir; f++ {
			name := "file-" + getRandomTerm() + getRandomExtension()
			addToIndex("/"+dir, name, false)
		}
	}
}
// getRandomTerm returns one word chosen at random from a small word bank.
func getRandomTerm() string {
	wordbank := []string{
		"hi", "test", "other", "name",
		"cool", "things", "more", "items",
	}
	// The global rand source is automatically seeded (and rand.Seed is
	// deprecated) since Go 1.20. Re-seeding with time.Now().UnixNano() on
	// every call was also a correctness hazard: calls within the same
	// nanosecond would produce identical "random" values.
	return wordbank[rand.Intn(len(wordbank))]
}
// getRandomExtension returns one file extension chosen at random from a
// small bank of common extensions.
func getRandomExtension() string {
	wordbank := []string{
		".txt", ".mp3", ".mov", ".doc",
		".mp4", ".bak", ".zip", ".jpg",
	}
	// The global rand source is automatically seeded (and rand.Seed is
	// deprecated) since Go 1.20; per-call time-based seeding could repeat
	// values for calls within the same nanosecond.
	return wordbank[rand.Intn(len(wordbank))]
}
// generateRandomSearchTerms builds a slice of numTerms randomly chosen
// search terms.
func generateRandomSearchTerms(numTerms int) []string {
	terms := make([]string, 0, numTerms)
	for len(terms) < numTerms {
		terms = append(terms, getRandomTerm())
	}
	return terms
}
// JSONBytesEqual reports whether two byte slices contain equivalent JSON
// documents, ignoring formatting and object key order. It returns an error
// if either slice is not valid JSON.
func JSONBytesEqual(a, b []byte) (bool, error) {
	var va, vb interface{}
	if err := json.Unmarshal(a, &va); err != nil {
		return false, err
	}
	if err := json.Unmarshal(b, &vb); err != nil {
		return false, err
	}
	return reflect.DeepEqual(vb, va), nil
}
// passedFunc logs a short success message tagged with the running test's name.
func passedFunc(t *testing.T) {
	name := t.Name()
	t.Logf("%s passed!", name)
}
func formatDuration(duration time.Duration) string {
if duration >= time.Second {
return fmt.Sprintf("%.2f seconds", duration.Seconds())
} else if duration >= time.Millisecond {
return fmt.Sprintf("%.2f ms", float64(duration.Milliseconds()))
}
return fmt.Sprintf("%.2f ns", float64(duration.Nanoseconds()))
}
// formatMemory renders a byte count using the largest unit (B..TB) that
// keeps the integer-truncated value at least 1.
func formatMemory(bytes int64) string {
	units := []string{"B", "KB", "MB", "GB", "TB"}
	idx := 0
	for bytes >= 1024 && idx < len(units)-1 {
		bytes >>= 10 // same as /= 1024 for the non-negative values reached here
		idx++
	}
	return fmt.Sprintf("%d %s", bytes, units[idx])
}
// printBenchmarkResults prints the iteration count, total elapsed time, and
// average time per operation in human-readable units.
func printBenchmarkResults(b *testing.B) {
	elapsed := b.Elapsed()
	average := elapsed / time.Duration(b.N)
	fmt.Printf("\nIterations : %d\n", b.N)
	fmt.Printf("Total time : %s\n", formatDuration(elapsed))
	fmt.Printf("Avg time per op : %s\n", formatDuration(average))
}

View File

@ -305,8 +305,11 @@ func computeMinimumCopyLength(start_cost float32, nodes []zopfliNode, num_bytes
return uint(len)
}
/* REQUIRES: nodes[pos].cost < kInfinity
REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */
/*
REQUIRES: nodes[pos].cost < kInfinity
REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant".
*/
func computeDistanceShortcut(block_start uint, pos uint, max_backward_limit uint, gap uint, nodes []zopfliNode) uint32 {
var clen uint = uint(zopfliNodeCopyLength(&nodes[pos]))
var ilen uint = uint(nodes[pos].dcode_insert_length & 0x7FFFFFF)
@ -326,13 +329,16 @@ func computeDistanceShortcut(block_start uint, pos uint, max_backward_limit uint
}
}
/* Fills in dist_cache[0..3] with the last four distances (as defined by
/*
Fills in dist_cache[0..3] with the last four distances (as defined by
Section 4. of the Spec) that would be used at (block_start + pos) if we
used the shortest path of commands from block_start, computed from
nodes[0..pos]. The last four distances at block_start are in
starting_dist_cache[0..3].
REQUIRES: nodes[pos].cost < kInfinity
REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */
REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant".
*/
func computeDistanceCache(pos uint, starting_dist_cache []int, nodes []zopfliNode, dist_cache []int) {
var idx int = 0
var p uint = uint(nodes[pos].u.shortcut)
@ -353,8 +359,11 @@ func computeDistanceCache(pos uint, starting_dist_cache []int, nodes []zopfliNod
}
}
/* Maintains "ZopfliNode array invariant" and pushes node to the queue, if it
is eligible. */
/*
Maintains "ZopfliNode array invariant" and pushes node to the queue, if it
is eligible.
*/
func evaluateNode(block_start uint, pos uint, max_backward_limit uint, gap uint, starting_dist_cache []int, model *zopfliCostModel, queue *startPosQueue, nodes []zopfliNode) {
/* Save cost, because ComputeDistanceCache invalidates it. */
var node_cost float32 = nodes[pos].u.cost
@ -606,7 +615,9 @@ func zopfliIterate(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_
return computeShortestPathFromNodes(num_bytes, nodes)
}
/* Computes the shortest path of commands from position to at most
/*
Computes the shortest path of commands from position to at most
position + num_bytes.
On return, path->size() is the number of commands found and path[i] is the
@ -620,7 +631,8 @@ func zopfliIterate(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_
(2) nodes[i].command_length() <= i and
(3) nodes[i - nodes[i].command_length()].cost < kInfinity
REQUIRES: nodes != nil and len(nodes) >= num_bytes + 1 */
REQUIRES: nodes != nil and len(nodes) >= num_bytes + 1
*/
func zopfliComputeShortestPath(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, dist_cache []int, hasher *h10, nodes []zopfliNode) uint {
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
var max_zopfli_len uint = maxZopfliLen(params)

View File

@ -70,11 +70,14 @@ type bitReaderState struct {
/* Initializes the BrotliBitReader fields. */
/* Ensures that accumulator is not empty.
/*
Ensures that accumulator is not empty.
May consume up to sizeof(brotli_reg_t) - 1 bytes of input.
Returns false if data is required but there is no input available.
For BROTLI_ALIGNED_READ this function also prepares bit reader for aligned
reading. */
reading.
*/
func bitReaderSaveState(from *bitReader, to *bitReaderState) {
to.val_ = from.val_
to.bit_pos_ = from.bit_pos_
@ -95,22 +98,31 @@ func getAvailableBits(br *bitReader) uint32 {
return 64 - br.bit_pos_
}
/* Returns amount of unread bytes the bit reader still has buffered from the
BrotliInput, including whole bytes in br->val_. */
/*
Returns amount of unread bytes the bit reader still has buffered from the
BrotliInput, including whole bytes in br->val_.
*/
func getRemainingBytes(br *bitReader) uint {
return uint(uint32(br.input_len-br.byte_pos) + (getAvailableBits(br) >> 3))
}
/* Checks if there is at least |num| bytes left in the input ring-buffer
(excluding the bits remaining in br->val_). */
/*
Checks if there is at least |num| bytes left in the input ring-buffer
(excluding the bits remaining in br->val_).
*/
func checkInputAmount(br *bitReader, num uint) bool {
return br.input_len-br.byte_pos >= num
}
/* Guarantees that there are at least |n_bits| + 1 bits in accumulator.
/*
Guarantees that there are at least |n_bits| + 1 bits in accumulator.
Precondition: accumulator contains at least 1 bit.
|n_bits| should be in the range [1..24] for regular build. For portable
non-64-bit little-endian build only 16 bits are safe to request. */
non-64-bit little-endian build only 16 bits are safe to request.
*/
func fillBitWindow(br *bitReader, n_bits uint32) {
if br.bit_pos_ >= 32 {
br.val_ >>= 32
@ -120,14 +132,20 @@ func fillBitWindow(br *bitReader, n_bits uint32) {
}
}
/* Mostly like BrotliFillBitWindow, but guarantees only 16 bits and reads no
more than BROTLI_SHORT_FILL_BIT_WINDOW_READ bytes of input. */
/*
Mostly like BrotliFillBitWindow, but guarantees only 16 bits and reads no
more than BROTLI_SHORT_FILL_BIT_WINDOW_READ bytes of input.
*/
func fillBitWindow16(br *bitReader) {
fillBitWindow(br, 17)
}
/* Tries to pull one byte of input to accumulator.
Returns false if there is no input available. */
/*
Tries to pull one byte of input to accumulator.
Returns false if there is no input available.
*/
func pullByte(br *bitReader) bool {
if br.byte_pos == br.input_len {
return false
@ -140,28 +158,40 @@ func pullByte(br *bitReader) bool {
return true
}
/* Returns currently available bits.
The number of valid bits could be calculated by BrotliGetAvailableBits. */
/*
Returns currently available bits.
The number of valid bits could be calculated by BrotliGetAvailableBits.
*/
func getBitsUnmasked(br *bitReader) uint64 {
return br.val_ >> br.bit_pos_
}
/* Like BrotliGetBits, but does not mask the result.
The result contains at least 16 valid bits. */
/*
Like BrotliGetBits, but does not mask the result.
The result contains at least 16 valid bits.
*/
func get16BitsUnmasked(br *bitReader) uint32 {
fillBitWindow(br, 16)
return uint32(getBitsUnmasked(br))
}
/* Returns the specified number of bits from |br| without advancing bit
position. */
/*
Returns the specified number of bits from |br| without advancing bit
position.
*/
func getBits(br *bitReader, n_bits uint32) uint32 {
fillBitWindow(br, n_bits)
return uint32(getBitsUnmasked(br)) & bitMask(n_bits)
}
/* Tries to peek the specified amount of bits. Returns false, if there
is not enough input. */
/*
Tries to peek the specified amount of bits. Returns false, if there
is not enough input.
*/
func safeGetBits(br *bitReader, n_bits uint32, val *uint32) bool {
for getAvailableBits(br) < n_bits {
if !pullByte(br) {
@ -191,15 +221,21 @@ func bitReaderUnload(br *bitReader) {
br.bit_pos_ += unused_bits
}
/* Reads the specified number of bits from |br| and advances the bit pos.
Precondition: accumulator MUST contain at least |n_bits|. */
/*
Reads the specified number of bits from |br| and advances the bit pos.
Precondition: accumulator MUST contain at least |n_bits|.
*/
func takeBits(br *bitReader, n_bits uint32, val *uint32) {
*val = uint32(getBitsUnmasked(br)) & bitMask(n_bits)
dropBits(br, n_bits)
}
/* Reads the specified number of bits from |br| and advances the bit pos.
Assumes that there is enough input to perform BrotliFillBitWindow. */
/*
Reads the specified number of bits from |br| and advances the bit pos.
Assumes that there is enough input to perform BrotliFillBitWindow.
*/
func readBits(br *bitReader, n_bits uint32) uint32 {
var val uint32
fillBitWindow(br, n_bits)
@ -207,8 +243,11 @@ func readBits(br *bitReader, n_bits uint32) uint32 {
return val
}
/* Tries to read the specified amount of bits. Returns false, if there
is not enough input. |n_bits| MUST be positive. */
/*
Tries to read the specified amount of bits. Returns false, if there
is not enough input. |n_bits| MUST be positive.
*/
func safeReadBits(br *bitReader, n_bits uint32, val *uint32) bool {
for getAvailableBits(br) < n_bits {
if !pullByte(br) {
@ -220,8 +259,11 @@ func safeReadBits(br *bitReader, n_bits uint32, val *uint32) bool {
return true
}
/* Advances the bit reader position to the next byte boundary and verifies
that any skipped bits are set to zero. */
/*
Advances the bit reader position to the next byte boundary and verifies
that any skipped bits are set to zero.
*/
func bitReaderJumpToByteBoundary(br *bitReader) bool {
var pad_bits_count uint32 = getAvailableBits(br) & 0x7
var pad_bits uint32 = 0
@ -232,9 +274,12 @@ func bitReaderJumpToByteBoundary(br *bitReader) bool {
return pad_bits == 0
}
/* Copies remaining input bytes stored in the bit reader to the output. Value
/*
Copies remaining input bytes stored in the bit reader to the output. Value
|num| may not be larger than BrotliGetRemainingBytes. The bit reader must be
warmed up again after this. */
warmed up again after this.
*/
func copyBytes(dest []byte, br *bitReader, num uint) {
for getAvailableBits(br) >= 8 && num > 0 {
dest[0] = byte(getBitsUnmasked(br))

View File

@ -51,9 +51,12 @@ func refineEntropyCodesCommand(data []uint16, length uint, stride uint, num_hist
}
}
/* Assigns a block id from the range [0, num_histograms) to each data element
/*
Assigns a block id from the range [0, num_histograms) to each data element
in data[0..length) and fills in block_id[0..length) with the assigned values.
Returns the number of blocks, i.e. one plus the number of block switches. */
Returns the number of blocks, i.e. one plus the number of block switches.
*/
func findBlocksCommand(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramCommand, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint {
var data_size uint = histogramDataSizeCommand()
var bitmaplen uint = (num_histograms + 7) >> 3

View File

@ -51,9 +51,12 @@ func refineEntropyCodesDistance(data []uint16, length uint, stride uint, num_his
}
}
/* Assigns a block id from the range [0, num_histograms) to each data element
/*
Assigns a block id from the range [0, num_histograms) to each data element
in data[0..length) and fills in block_id[0..length) with the assigned values.
Returns the number of blocks, i.e. one plus the number of block switches. */
Returns the number of blocks, i.e. one plus the number of block switches.
*/
func findBlocksDistance(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramDistance, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint {
var data_size uint = histogramDataSizeDistance()
var bitmaplen uint = (num_histograms + 7) >> 3

View File

@ -51,9 +51,12 @@ func refineEntropyCodesLiteral(data []byte, length uint, stride uint, num_histog
}
}
/* Assigns a block id from the range [0, num_histograms) to each data element
/*
Assigns a block id from the range [0, num_histograms) to each data element
in data[0..length) and fills in block_id[0..length) with the assigned values.
Returns the number of blocks, i.e. one plus the number of block switches. */
Returns the number of blocks, i.e. one plus the number of block switches.
*/
func findBlocksLiteral(data []byte, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramLiteral, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint {
var data_size uint = histogramDataSizeLiteral()
var bitmaplen uint = (num_histograms + 7) >> 3

View File

@ -7,12 +7,18 @@ import (
const maxHuffmanTreeSize = (2*numCommandSymbols + 1)
/* The maximum size of Huffman dictionary for distances assuming that
NPOSTFIX = 0 and NDIRECT = 0. */
/*
The maximum size of Huffman dictionary for distances assuming that
NPOSTFIX = 0 and NDIRECT = 0.
*/
const maxSimpleDistanceAlphabetSize = 140
/* Represents the range of values belonging to a prefix code:
[offset, offset + 2^nbits) */
/*
Represents the range of values belonging to a prefix code:
[offset, offset + 2^nbits)
*/
type prefixCodeRange struct {
offset uint32
nbits uint32
@ -96,9 +102,12 @@ func nextBlockTypeCode(calculator *blockTypeCodeCalculator, type_ byte) uint {
return type_code
}
/* |nibblesbits| represents the 2 bits to encode MNIBBLES (0-3)
/*
|nibblesbits| represents the 2 bits to encode MNIBBLES (0-3)
REQUIRES: length > 0
REQUIRES: length <= (1 << 24) */
REQUIRES: length <= (1 << 24)
*/
func encodeMlen(length uint, bits *uint64, numbits *uint, nibblesbits *uint64) {
var lg uint
if length == 1 {
@ -132,8 +141,11 @@ func storeCommandExtra(cmd *command, bw *bitWriter) {
bw.writeBits(uint(insnumextra+getCopyExtra(copycode)), bits)
}
/* Data structure that stores almost everything that is needed to encode each
block switch command. */
/*
Data structure that stores almost everything that is needed to encode each
block switch command.
*/
type blockSplitCode struct {
type_code_calculator blockTypeCodeCalculator
type_depths [maxBlockTypeSymbols]byte
@ -154,9 +166,12 @@ func storeVarLenUint8(n uint, bw *bitWriter) {
}
}
/* Stores the compressed meta-block header.
/*
Stores the compressed meta-block header.
REQUIRES: length > 0
REQUIRES: length <= (1 << 24) */
REQUIRES: length <= (1 << 24)
*/
func storeCompressedMetaBlockHeader(is_final_block bool, length uint, bw *bitWriter) {
var lenbits uint64
var nlenbits uint
@ -186,9 +201,12 @@ func storeCompressedMetaBlockHeader(is_final_block bool, length uint, bw *bitWri
}
}
/* Stores the uncompressed meta-block header.
/*
Stores the uncompressed meta-block header.
REQUIRES: length > 0
REQUIRES: length <= (1 << 24) */
REQUIRES: length <= (1 << 24)
*/
func storeUncompressedMetaBlockHeader(length uint, bw *bitWriter) {
var lenbits uint64
var nlenbits uint
@ -312,8 +330,11 @@ func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max
}
}
/* num = alphabet size
depths = symbol depths */
/*
num = alphabet size
depths = symbol depths
*/
func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, bw *bitWriter) {
var huffman_tree [numCommandSymbols]byte
var huffman_tree_extra_bits [numCommandSymbols]byte
@ -367,8 +388,11 @@ func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, bw *bitWriter
storeHuffmanTreeToBitMask(huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:], code_length_bitdepth[:], code_length_bitdepth_symbols[:], bw)
}
/* Builds a Huffman tree from histogram[0:length] into depth[0:length] and
bits[0:length] and stores the encoded tree to the bit stream. */
/*
Builds a Huffman tree from histogram[0:length] into depth[0:length] and
bits[0:length] and stores the encoded tree to the bit stream.
*/
func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabet_size uint, tree []huffmanTree, depth []byte, bits []uint16, bw *bitWriter) {
var count uint = 0
var s4 = [4]uint{0}
@ -668,12 +692,15 @@ func moveToFrontTransform(v_in []uint32, v_size uint, v_out []uint32) {
}
}
/* Finds runs of zeros in v[0..in_size) and replaces them with a prefix code of
/*
Finds runs of zeros in v[0..in_size) and replaces them with a prefix code of
the run length plus extra bits (lower 9 bits is the prefix code and the rest
are the extra bits). Non-zero values in v[] are shifted by
*max_length_prefix. Will not create prefix codes bigger than the initial
value of *max_run_length_prefix. The prefix code of run length L is simply
Log2Floor(L) and the number of extra bits is the same as the prefix code. */
Log2Floor(L) and the number of extra bits is the same as the prefix code.
*/
func runLengthCodeZeros(in_size uint, v []uint32, out_size *uint, max_run_length_prefix *uint32) {
var max_reps uint32 = 0
var i uint
@ -793,8 +820,11 @@ func storeBlockSwitch(code *blockSplitCode, block_len uint32, block_type byte, i
bw.writeBits(uint(len_nextra), uint64(len_extra))
}
/* Builds a BlockSplitCode data structure from the block split given by the
vector of block types and block lengths and stores it to the bit stream. */
/*
Builds a BlockSplitCode data structure from the block split given by the
vector of block types and block lengths and stores it to the bit stream.
*/
func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint, num_types uint, tree []huffmanTree, code *blockSplitCode, bw *bitWriter) {
var type_histo [maxBlockTypeSymbols]uint32
var length_histo [numBlockLenSymbols]uint32
@ -913,14 +943,20 @@ func cleanupBlockEncoder(self *blockEncoder) {
blockEncoderPool.Put(self)
}
/* Creates entropy codes of block lengths and block types and stores them
to the bit stream. */
/*
Creates entropy codes of block lengths and block types and stores them
to the bit stream.
*/
func buildAndStoreBlockSwitchEntropyCodes(self *blockEncoder, tree []huffmanTree, bw *bitWriter) {
buildAndStoreBlockSplitCode(self.block_types_, self.block_lengths_, self.num_blocks_, self.num_block_types_, tree, &self.block_split_code_, bw)
}
/* Stores the next symbol with the entropy code of the current block type.
Updates the block type and block length at block boundaries. */
/*
Stores the next symbol with the entropy code of the current block type.
Updates the block type and block length at block boundaries.
*/
func storeSymbol(self *blockEncoder, symbol uint, bw *bitWriter) {
if self.block_len_ == 0 {
self.block_ix_++
@ -939,9 +975,12 @@ func storeSymbol(self *blockEncoder, symbol uint, bw *bitWriter) {
}
}
/* Stores the next symbol with the entropy code of the current block type and
/*
Stores the next symbol with the entropy code of the current block type and
context value.
Updates the block type and block length at block boundaries. */
Updates the block type and block length at block boundaries.
*/
func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, context_map []uint32, bw *bitWriter, context_bits uint) {
if self.block_len_ == 0 {
self.block_ix_++
@ -1257,8 +1296,11 @@ func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is
}
}
/* This is for storing uncompressed blocks (simple raw storage of
bytes-as-bytes). */
/*
This is for storing uncompressed blocks (simple raw storage of
bytes-as-bytes).
*/
func storeUncompressedMetaBlock(is_final_block bool, input []byte, position uint, mask uint, len uint, bw *bitWriter) {
var masked_pos uint = position & mask
storeUncompressedMetaBlockHeader(uint(len), bw)

View File

@ -8,8 +8,11 @@ import "math"
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */
/*
Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue.
*/
func compareAndPushToQueueCommand(out []histogramCommand, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) {
var is_good_pair bool = false
var p histogramPair
@ -165,10 +168,13 @@ func histogramBitCostDistanceCommand(histogram *histogramCommand, candidate *his
}
}
/* Find the best 'out' histogram for each of the 'in' histograms.
/*
Find the best 'out' histogram for each of the 'in' histograms.
When called, clusters[0..num_clusters) contains the unique values from
symbols[0..in_size), but this property is not preserved in this function.
Note: we assume that out[]->bit_cost_ is already up-to-date. */
Note: we assume that out[]->bit_cost_ is already up-to-date.
*/
func histogramRemapCommand(in []histogramCommand, in_size uint, clusters []uint32, num_clusters uint, out []histogramCommand, symbols []uint32) {
var i uint
for i = 0; i < in_size; i++ {

View File

@ -8,8 +8,11 @@ import "math"
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */
/*
Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue.
*/
func compareAndPushToQueueDistance(out []histogramDistance, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) {
var is_good_pair bool = false
var p histogramPair
@ -165,10 +168,13 @@ func histogramBitCostDistanceDistance(histogram *histogramDistance, candidate *h
}
}
/* Find the best 'out' histogram for each of the 'in' histograms.
/*
Find the best 'out' histogram for each of the 'in' histograms.
When called, clusters[0..num_clusters) contains the unique values from
symbols[0..in_size), but this property is not preserved in this function.
Note: we assume that out[]->bit_cost_ is already up-to-date. */
Note: we assume that out[]->bit_cost_ is already up-to-date.
*/
func histogramRemapDistance(in []histogramDistance, in_size uint, clusters []uint32, num_clusters uint, out []histogramDistance, symbols []uint32) {
var i uint
for i = 0; i < in_size; i++ {

View File

@ -8,8 +8,11 @@ import "math"
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */
/*
Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue.
*/
func compareAndPushToQueueLiteral(out []histogramLiteral, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) {
var is_good_pair bool = false
var p histogramPair
@ -165,10 +168,13 @@ func histogramBitCostDistanceLiteral(histogram *histogramLiteral, candidate *his
}
}
/* Find the best 'out' histogram for each of the 'in' histograms.
/*
Find the best 'out' histogram for each of the 'in' histograms.
When called, clusters[0..num_clusters) contains the unique values from
symbols[0..in_size), but this property is not preserved in this function.
Note: we assume that out[]->bit_cost_ is already up-to-date. */
Note: we assume that out[]->bit_cost_ is already up-to-date.
*/
func histogramRemapLiteral(in []histogramLiteral, in_size uint, clusters []uint32, num_clusters uint, out []histogramLiteral, symbols []uint32) {
var i uint
for i = 0; i < in_size; i++ {

View File

@ -37,14 +37,17 @@ func isMatch5(p1 []byte, p2 []byte) bool {
p1[4] == p2[4]
}
/* Builds a literal prefix code into "depths" and "bits" based on the statistics
/*
Builds a literal prefix code into "depths" and "bits" based on the statistics
of the "input" string and stores it into the bit stream.
Note that the prefix code here is built from the pre-LZ77 input, therefore
we can only approximate the statistics of the actual literal stream.
Moreover, for long inputs we build a histogram from a sample of the input
and thus have to assign a non-zero depth for each literal.
Returns estimated compression ratio millibytes/char for encoding given input
with generated code. */
with generated code.
*/
func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte, bits []uint16, bw *bitWriter) uint {
var histogram = [256]uint32{0}
var histogram_total uint
@ -96,8 +99,11 @@ func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte
}
}
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
"bits" based on "histogram" and stores it into the bit stream. */
/*
Builds a command and distance prefix code (each 64 symbols) into "depth" and
"bits" based on "histogram" and stores it into the bit stream.
*/
func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []uint16, bw *bitWriter) {
var tree [129]huffmanTree
var cmd_depth = [numCommandSymbols]byte{0}
@ -637,7 +643,8 @@ next_block:
}
}
/* Compresses "input" string to bw as one or more complete meta-blocks.
/*
Compresses "input" string to bw as one or more complete meta-blocks.
If "is_last" is 1, emits an additional empty last meta-block.
@ -657,7 +664,8 @@ next_block:
REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
REQUIRES: "table_size" is an odd (9, 11, 13, 15) power of two
OUTPUT: maximal copy distance <= |input_size|
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18)
*/
func compressFragmentFast(input []byte, input_size uint, is_last bool, table []int, table_size uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, bw *bitWriter) {
var initial_storage_ix uint = bw.getPos()
var table_bits uint = uint(log2FloorNonZero(table_size))

View File

@ -39,8 +39,11 @@ func isMatch1(p1 []byte, p2 []byte, length uint) bool {
return p1[4] == p2[4] && p1[5] == p2[5]
}
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
"bits" based on "histogram" and stores it into the bit stream. */
/*
Builds a command and distance prefix code (each 64 symbols) into "depth" and
"bits" based on "histogram" and stores it into the bit stream.
*/
func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, bw *bitWriter) {
var tree [129]huffmanTree
var cmd_depth = [numCommandSymbols]byte{0}
@ -558,7 +561,8 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co
}
}
/* Compresses "input" string to bw as one or more complete meta-blocks.
/*
Compresses "input" string to bw as one or more complete meta-blocks.
If "is_last" is 1, emits an additional empty last meta-block.
@ -569,7 +573,8 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co
REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
REQUIRES: "table_size" is a power of two
OUTPUT: maximal copy distance <= |input_size|
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18)
*/
func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, bw *bitWriter) {
var initial_storage_ix uint = bw.getPos()
var table_bits uint = uint(log2FloorNonZero(table_size))

View File

@ -69,9 +69,11 @@ const huffmanTableBits = 8
const huffmanTableMask = 0xFF
/* We need the slack region for the following reasons:
/*
We need the slack region for the following reasons:
- doing up to two 16-byte copies for fast backward copying
- inserting transformed dictionary word (5 prefix + 24 base + 8 suffix) */
- inserting transformed dictionary word (5 prefix + 24 base + 8 suffix)
*/
const kRingBufferWriteAheadSlack uint32 = 42
var kCodeLengthCodeOrder = [codeLengthCodes]byte{1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15}
@ -121,8 +123,11 @@ func saveErrorCode(s *Reader, e int) int {
}
}
/* Decodes WBITS by reading 1 - 7 bits, or 0x11 for "Large Window Brotli".
Precondition: bit-reader accumulator has at least 8 bits. */
/*
Decodes WBITS by reading 1 - 7 bits, or 0x11 for "Large Window Brotli".
Precondition: bit-reader accumulator has at least 8 bits.
*/
func decodeWindowBits(s *Reader, br *bitReader) int {
var n uint32
var large_window bool = s.large_window
@ -361,10 +366,13 @@ func decodeMetaBlockLength(s *Reader, br *bitReader) int {
}
}
/* Decodes the Huffman code.
/*
Decodes the Huffman code.
This method doesn't read data from the bit reader, BUT drops the amount of
bits that correspond to the decoded symbol.
bits MUST contain at least 15 (BROTLI_HUFFMAN_MAX_CODE_LENGTH) valid bits. */
bits MUST contain at least 15 (BROTLI_HUFFMAN_MAX_CODE_LENGTH) valid bits.
*/
func decodeSymbol(bits uint32, table []huffmanCode, br *bitReader) uint32 {
table = table[bits&huffmanTableMask:]
if table[0].bits > huffmanTableBits {
@ -377,14 +385,20 @@ func decodeSymbol(bits uint32, table []huffmanCode, br *bitReader) uint32 {
return uint32(table[0].value)
}
/* Reads and decodes the next Huffman code from bit-stream.
This method peeks 16 bits of input and drops 0 - 15 of them. */
/*
Reads and decodes the next Huffman code from bit-stream.
This method peeks 16 bits of input and drops 0 - 15 of them.
*/
// readSymbol reads and decodes the next Huffman code from the bit stream:
// it peeks 16 bits of input and lets decodeSymbol drop however many of them
// (0..15) the decoded symbol consumed.
func readSymbol(table []huffmanCode, br *bitReader) uint32 {
	peeked := get16BitsUnmasked(br)
	return decodeSymbol(peeked, table, br)
}
/* Same as DecodeSymbol, but it is known that there is less than 15 bits of
input are currently available. */
/*
Same as DecodeSymbol, but it is known that there is less than 15 bits of
input are currently available.
*/
func safeDecodeSymbol(table []huffmanCode, br *bitReader, result *uint32) bool {
var val uint32
var available_bits uint32 = getAvailableBits(br)
@ -448,8 +462,11 @@ func preloadSymbol(safe int, table []huffmanCode, br *bitReader, bits *uint32, v
*value = uint32(table[0].value)
}
/* Decodes the next Huffman code using data prepared by PreloadSymbol.
Reads 0 - 15 bits. Also peeks 8 following bits. */
/*
Decodes the next Huffman code using data prepared by PreloadSymbol.
Reads 0 - 15 bits. Also peeks 8 following bits.
*/
func readPreloadedSymbol(table []huffmanCode, br *bitReader, bits *uint32, value *uint32) uint32 {
var result uint32 = *value
var ext []huffmanCode
@ -479,9 +496,12 @@ func log2Floor(x uint32) uint32 {
return result
}
/* Reads (s->symbol + 1) symbols.
/*
Reads (s->symbol + 1) symbols.
Totally 1..4 symbols are read, 1..11 bits each.
The list of symbols MUST NOT contain duplicates. */
The list of symbols MUST NOT contain duplicates.
*/
func readSimpleHuffmanSymbols(alphabet_size uint32, max_symbol uint32, s *Reader) int {
var br *bitReader = &s.br
var max_bits uint32 = log2Floor(alphabet_size - 1)
@ -517,12 +537,15 @@ func readSimpleHuffmanSymbols(alphabet_size uint32, max_symbol uint32, s *Reader
return decoderSuccess
}
/* Process single decoded symbol code length:
/*
Process single decoded symbol code length:
A) reset the repeat variable
B) remember code length (if it is not 0)
C) extend corresponding index-chain
D) reduce the Huffman space
E) update the histogram */
E) update the histogram
*/
func processSingleCodeLength(code_len uint32, symbol *uint32, repeat *uint32, space *uint32, prev_code_len *uint32, symbol_lists symbolList, code_length_histo []uint16, next_symbol []int) {
*repeat = 0
if code_len != 0 { /* code_len == 1..15 */
@ -536,7 +559,9 @@ func processSingleCodeLength(code_len uint32, symbol *uint32, repeat *uint32, sp
(*symbol)++
}
/* Process repeated symbol code length.
/*
Process repeated symbol code length.
A) Check if it is the extension of previous repeat sequence; if the decoded
value is not BROTLI_REPEAT_PREVIOUS_CODE_LENGTH, then it is a new
symbol-skip
@ -545,7 +570,8 @@ func processSingleCodeLength(code_len uint32, symbol *uint32, repeat *uint32, sp
D) For each symbol do the same operations as in ProcessSingleCodeLength
PRECONDITION: code_len == BROTLI_REPEAT_PREVIOUS_CODE_LENGTH or
code_len == BROTLI_REPEAT_ZERO_CODE_LENGTH */
code_len == BROTLI_REPEAT_ZERO_CODE_LENGTH
*/
func processRepeatedCodeLength(code_len uint32, repeat_delta uint32, alphabet_size uint32, symbol *uint32, repeat *uint32, space *uint32, prev_code_len *uint32, repeat_code_len *uint32, symbol_lists symbolList, code_length_histo []uint16, next_symbol []int) {
var old_repeat uint32 /* for BROTLI_REPEAT_ZERO_CODE_LENGTH */ /* for BROTLI_REPEAT_ZERO_CODE_LENGTH */
var extra_bits uint32 = 3
@ -688,8 +714,11 @@ func safeReadSymbolCodeLengths(alphabet_size uint32, s *Reader) int {
return decoderSuccess
}
/* Reads and decodes 15..18 codes using static prefix code.
Each code is 2..4 bits long. In total 30..72 bits are used. */
/*
Reads and decodes 15..18 codes using static prefix code.
Each code is 2..4 bits long. In total 30..72 bits are used.
*/
func readCodeLengthCodeLengths(s *Reader) int {
var br *bitReader = &s.br
var num_codes uint32 = s.repeat
@ -737,7 +766,9 @@ func readCodeLengthCodeLengths(s *Reader) int {
return decoderSuccess
}
/* Decodes the Huffman tables.
/*
Decodes the Huffman tables.
There are 2 scenarios:
A) Huffman code contains only few symbols (1..4). Those symbols are read
directly; their code lengths are defined by the number of symbols.
@ -747,7 +778,8 @@ func readCodeLengthCodeLengths(s *Reader) int {
B.1) Small Huffman table is decoded; it is specified with code lengths
encoded with predefined entropy code. 32 - 74 bits are used.
B.2) Decoded table is used to decode code lengths of symbols in resulting
Huffman table. In worst case 3520 bits are read. */
Huffman table. In worst case 3520 bits are read.
*/
func readHuffmanCode(alphabet_size uint32, max_symbol uint32, table []huffmanCode, opt_table_size *uint32, s *Reader) int {
var br *bitReader = &s.br
@ -887,8 +919,11 @@ func readBlockLength(table []huffmanCode, br *bitReader) uint32 {
return kBlockLengthPrefixCode[code].offset + readBits(br, nbits)
}
/* WARNING: if state is not BROTLI_STATE_READ_BLOCK_LENGTH_NONE, then
reading can't be continued with ReadBlockLength. */
/*
WARNING: if state is not BROTLI_STATE_READ_BLOCK_LENGTH_NONE, then
reading can't be continued with ReadBlockLength.
*/
func safeReadBlockLength(s *Reader, result *uint32, table []huffmanCode, br *bitReader) bool {
var index uint32
if s.substate_read_block_length == stateReadBlockLengthNone {
@ -913,9 +948,12 @@ func safeReadBlockLength(s *Reader, result *uint32, table []huffmanCode, br *bit
}
}
/* Transform:
1) initialize list L with values 0, 1,... 255
2) For each input element X:
/*
Transform:
1. initialize list L with values 0, 1,... 255
2. For each input element X:
2.1) let Y = L[X]
2.2) remove X-th element from L
2.3) prepend Y to L
@ -926,7 +964,8 @@ func safeReadBlockLength(s *Reader, result *uint32, table []huffmanCode, br *bit
of Y values, and reinitialize only first elements in L.
Most of input values are 0 and 1. To reduce number of branches, we replace
inner for loop with do-while. */
inner for loop with do-while.
*/
func inverseMoveToFrontTransform(v []byte, v_len uint32, state *Reader) {
var mtf [256]byte
var i int
@ -973,14 +1012,17 @@ func huffmanTreeGroupDecode(group *huffmanTreeGroup, s *Reader) int {
return decoderSuccess
}
/* Decodes a context map.
/*
Decodes a context map.
Decoding is done in 4 phases:
1) Read auxiliary information (6..16 bits) and allocate memory.
In case of trivial context map, decoding is finished at this phase.
2) Decode Huffman table using ReadHuffmanCode function.
This table will be used for reading context map items.
3) Read context map items; "0" values could be run-length encoded.
4) Optionally, apply InverseMoveToFront transform to the resulting map. */
4) Optionally, apply InverseMoveToFront transform to the resulting map.
*/
func decodeContextMap(context_map_size uint32, num_htrees *uint32, context_map_arg *[]byte, s *Reader) int {
var br *bitReader = &s.br
var result int = decoderSuccess
@ -1121,8 +1163,11 @@ func decodeContextMap(context_map_size uint32, num_htrees *uint32, context_map_a
}
}
/* Decodes a command or literal and updates block type ring-buffer.
Reads 3..54 bits. */
/*
Decodes a command or literal and updates block type ring-buffer.
Reads 3..54 bits.
*/
func decodeBlockTypeAndLength(safe int, s *Reader, tree_type int) bool {
var max_block_type uint32 = s.num_block_types[tree_type]
var type_tree []huffmanCode
@ -1207,8 +1252,11 @@ func prepareLiteralDecoding(s *Reader) {
s.context_lookup = getContextLUT(int(context_mode))
}
/* Decodes the block type and updates the state for literal context.
Reads 3..54 bits. */
/*
Decodes the block type and updates the state for literal context.
Reads 3..54 bits.
*/
func decodeLiteralBlockSwitchInternal(safe int, s *Reader) bool {
if !decodeBlockTypeAndLength(safe, s, 0) {
return false
@ -1226,8 +1274,11 @@ func safeDecodeLiteralBlockSwitch(s *Reader) bool {
return decodeLiteralBlockSwitchInternal(1, s)
}
/* Block switch for insert/copy length.
Reads 3..54 bits. */
/*
Block switch for insert/copy length.
Reads 3..54 bits.
*/
func decodeCommandBlockSwitchInternal(safe int, s *Reader) bool {
if !decodeBlockTypeAndLength(safe, s, 1) {
return false
@ -1245,8 +1296,11 @@ func safeDecodeCommandBlockSwitch(s *Reader) bool {
return decodeCommandBlockSwitchInternal(1, s)
}
/* Block switch for distance codes.
Reads 3..54 bits. */
/*
Block switch for distance codes.
Reads 3..54 bits.
*/
func decodeDistanceBlockSwitchInternal(safe int, s *Reader) bool {
if !decodeBlockTypeAndLength(safe, s, 2) {
return false
@ -1276,9 +1330,12 @@ func unwrittenBytes(s *Reader, wrap bool) uint {
return partial_pos_rb - s.partial_pos_out
}
/* Dumps output.
/*
Dumps output.
Returns BROTLI_DECODER_NEEDS_MORE_OUTPUT only if there is more output to push
and either ring-buffer is as big as window size, or |force| is true. */
and either ring-buffer is as big as window size, or |force| is true.
*/
func writeRingBuffer(s *Reader, available_out *uint, next_out *[]byte, total_out *uint, force bool) int {
var start []byte
start = s.ringbuffer[s.partial_pos_out&uint(s.ringbuffer_mask):]
@ -1336,13 +1393,15 @@ func wrapRingBuffer(s *Reader) {
}
}
/* Allocates ring-buffer.
/*
Allocates ring-buffer.
s->ringbuffer_size MUST be updated by BrotliCalculateRingBufferSize before
this function is called.
Last two bytes of ring-buffer are initialized to 0, so context calculation
could be done uniformly for the first two and all other positions. */
could be done uniformly for the first two and all other positions.
*/
func ensureRingBuffer(s *Reader) bool {
var old_ringbuffer []byte = s.ringbuffer
if s.ringbuffer_size == s.new_ringbuffer_size {
@ -1429,12 +1488,14 @@ func copyUncompressedBlockToOutput(available_out *uint, next_out *[]byte, total_
}
}
/* Calculates the smallest feasible ring buffer.
/*
Calculates the smallest feasible ring buffer.
If we know the data size is small, do not allocate more ring buffer
size than needed to reduce memory usage.
When this method is called, metablock size and flags MUST be decoded. */
When this method is called, metablock size and flags MUST be decoded.
*/
func calculateRingBufferSize(s *Reader) {
var window_size int = 1 << s.window_bits
var new_ringbuffer_size int = window_size
@ -2060,7 +2121,8 @@ func maxDistanceSymbol(ndirect uint32, npostfix uint32) uint32 {
}
}
/* Invariant: input stream is never overconsumed:
/*
Invariant: input stream is never overconsumed:
- invalid input implies that the whole stream is invalid -> any amount of
input could be read and discarded
- when result is "needs more input", then at least one more byte is REQUIRED
@ -2070,7 +2132,8 @@ func maxDistanceSymbol(ndirect uint32, npostfix uint32) uint32 {
hold more than 7 bits in bit reader; this saves client from swapping input
buffer ahead of time
- when result is "success" decoder MUST return all unused data back to input
buffer; this is possible because the invariant is held on enter */
buffer; this is possible because the invariant is held on enter
*/
func decoderDecompressStream(s *Reader, available_in *uint, next_in *[]byte, available_out *uint, next_out *[]byte) int {
var result int = decoderSuccess
var br *bitReader = &s.br

View File

@ -126,8 +126,11 @@ func remainingInputBlockSize(s *Writer) uint {
return block_size - uint(delta)
}
/* Wraps 64-bit input position to 32-bit ring-buffer position preserving
"not-a-first-lap" feature. */
/*
Wraps 64-bit input position to 32-bit ring-buffer position preserving
"not-a-first-lap" feature.
*/
func wrapPosition(position uint64) uint32 {
var result uint32 = uint32(position)
var gb uint64 = position >> 30
@ -619,11 +622,11 @@ func encoderInitState(s *Writer) {
}
/*
Copies the given input data to the internal ring buffer of the compressor.
No processing of the data occurs at this time and this function can be
called multiple times before calling WriteBrotliData() to process the
accumulated input. At most input_block_size() bytes of input data can be
copied to the ring buffer, otherwise the next WriteBrotliData() will fail.
Copies the given input data to the internal ring buffer of the compressor.
No processing of the data occurs at this time and this function can be
called multiple times before calling WriteBrotliData() to process the
accumulated input. At most input_block_size() bytes of input data can be
copied to the ring buffer, otherwise the next WriteBrotliData() will fail.
*/
func copyInputToRingBuffer(s *Writer, input_size uint, input_buffer []byte) {
var ringbuffer_ *ringBuffer = &s.ringbuffer_
@ -678,8 +681,11 @@ func copyInputToRingBuffer(s *Writer, input_size uint, input_buffer []byte) {
}
}
/* Marks all input as processed.
Returns true if position wrapping occurs. */
/*
Marks all input as processed.
Returns true if position wrapping occurs.
*/
func updateLastProcessedPos(s *Writer) bool {
var wrapped_last_processed_pos uint32 = wrapPosition(s.last_processed_pos_)
var wrapped_input_pos uint32 = wrapPosition(s.input_pos_)
@ -717,15 +723,15 @@ func extendLastCommand(s *Writer, bytes *uint32, wrapped_last_processed_pos *uin
}
/*
Processes the accumulated input data and writes
the new output meta-block to s.dest, if one has been
created (otherwise the processed input data is buffered internally).
If |is_last| or |force_flush| is true, an output meta-block is
always created. However, until |is_last| is true encoder may retain up
to 7 bits of the last byte of output. To force encoder to dump the remaining
bits use WriteMetadata() to append an empty meta-data block.
Returns false if the size of the input data is larger than
input_block_size().
Processes the accumulated input data and writes
the new output meta-block to s.dest, if one has been
created (otherwise the processed input data is buffered internally).
If |is_last| or |force_flush| is true, an output meta-block is
always created. However, until |is_last| is true encoder may retain up
to 7 bits of the last byte of output. To force encoder to dump the remaining
bits use WriteMetadata() to append an empty meta-data block.
Returns false if the size of the input data is larger than
input_block_size().
*/
func encodeData(s *Writer, is_last bool, force_flush bool) bool {
var delta uint64 = unprocessedInputSize(s)
@ -883,8 +889,11 @@ func encodeData(s *Writer, is_last bool, force_flush bool) bool {
}
}
/* Dumps remaining output bits and metadata header to s.bw.
REQUIRED: |block_size| <= (1 << 24). */
/*
Dumps remaining output bits and metadata header to s.bw.
REQUIRED: |block_size| <= (1 << 24).
*/
func writeMetadataHeader(s *Writer, block_size uint) {
bw := &s.bw

View File

@ -112,7 +112,8 @@ func sortHuffmanTree(v0 huffmanTree, v1 huffmanTree) bool {
return v0.index_right_or_value_ > v1.index_right_or_value_
}
/* This function will create a Huffman tree.
/*
This function will create a Huffman tree.
The catch here is that the tree cannot be arbitrarily deep.
Brotli specifies a maximum depth of 15 bits for "code trees"
@ -126,7 +127,8 @@ func sortHuffmanTree(v0 huffmanTree, v1 huffmanTree) bool {
especially when population counts are longer than 2**tree_limit, but
we are not planning to use this with extremely long blocks.
See http://en.wikipedia.org/wiki/Huffman_coding */
See http://en.wikipedia.org/wiki/Huffman_coding
*/
func createHuffmanTree(data []uint32, length uint, tree_limit int, tree []huffmanTree, depth []byte) {
var count_limit uint32
var sentinel huffmanTree
@ -297,13 +299,16 @@ func writeHuffmanTreeRepetitionsZeros(repetitions uint, tree_size *uint, tree []
}
}
/* Change the population counts in a way that the consequent
/*
Change the population counts in a way that the consequent
Huffman tree compression, especially its RLE-part will be more
likely to compress this data more efficiently.
length contains the size of the histogram.
counts contains the population counts.
good_for_rle is a buffer of at least length size */
good_for_rle is a buffer of at least length size
*/
func optimizeHuffmanCountsForRLE(length uint, counts []uint32, good_for_rle []byte) {
var nonzero_count uint = 0
var stride uint
@ -481,9 +486,12 @@ func decideOverRLEUse(depth []byte, length uint, use_rle_for_non_zero *bool, use
*use_rle_for_zero = total_reps_zero > count_reps_zero*2
}
/* Write a Huffman tree from bit depths into the bit-stream representation
/*
Write a Huffman tree from bit depths into the bit-stream representation
of a Huffman tree. The generated Huffman tree is to be compressed once
more using a Huffman tree */
more using a Huffman tree
*/
func writeHuffmanTree(depth []byte, length uint, tree_size *uint, tree []byte, extra_bits_data []byte) {
var previous_value byte = initialRepeatedCodeLength
var i uint

View File

@ -23,10 +23,13 @@ func log2FloorNonZero(n uint) uint32 {
return result
}
/* A lookup table for small values of log2(int) to be used in entropy
/*
A lookup table for small values of log2(int) to be used in entropy
computation.
", ".join(["%.16ff" % x for x in [0.0]+[log2(x) for x in range(1, 256)]]) */
", ".join(["%.16ff" % x for x in [0.0]+[log2(x) for x in range(1, 256)]])
*/
var kLog2Table = []float32{
0.0000000000000000,
0.0000000000000000,

View File

@ -24,12 +24,15 @@ func hashBytesH10(data []byte) uint32 {
return h >> (32 - 17)
}
/* A (forgetful) hash table where each hash bucket contains a binary tree of
/*
A (forgetful) hash table where each hash bucket contains a binary tree of
sequences whose first 4 bytes share the same hash code.
Each sequence is 128 long and is identified by its starting
position in the input data. The binary tree is sorted by the lexicographic
order of the sequences, and it is also a max-heap with respect to the
starting positions. */
starting positions.
*/
type h10 struct {
hasherCommon
window_mask_ uint
@ -61,7 +64,9 @@ func rightChildIndexH10(self *h10, pos uint) uint {
return 2*(pos&self.window_mask_) + 1
}
/* Stores the hash of the next 4 bytes and in a single tree-traversal, the
/*
Stores the hash of the next 4 bytes and in a single tree-traversal, the
hash bucket's binary tree is searched for matches and is re-rooted at the
current position.
@ -70,7 +75,8 @@ func rightChildIndexH10(self *h10, pos uint) uint {
is not changed, since we can not know the final sorting order of the
current (incomplete) sequence.
This function must be called with increasing cur_ix positions. */
This function must be called with increasing cur_ix positions.
*/
func storeAndFindMatchesH10(self *h10, data []byte, cur_ix uint, ring_buffer_mask uint, max_length uint, max_backward uint, best_len *uint, matches []backwardMatch) []backwardMatch {
var cur_ix_masked uint = cur_ix & ring_buffer_mask
var max_comp_len uint = brotli_min_size_t(max_length, 128)
@ -152,13 +158,16 @@ func storeAndFindMatchesH10(self *h10, data []byte, cur_ix uint, ring_buffer_mas
return matches
}
/* Finds all backward matches of &data[cur_ix & ring_buffer_mask] up to the
/*
Finds all backward matches of &data[cur_ix & ring_buffer_mask] up to the
length of max_length and stores the position cur_ix in the hash table.
Sets *num_matches to the number of matches found, and stores the found
matches in matches[0] to matches[*num_matches - 1]. The matches will be
sorted by strictly increasing length and (non-strictly) increasing
distance. */
distance.
*/
func findAllMatchesH10(handle *h10, dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, cur_ix uint, max_length uint, max_backward uint, gap uint, params *encoderParams, matches []backwardMatch) uint {
var orig_matches []backwardMatch = matches
var cur_ix_masked uint = cur_ix & ring_buffer_mask
@ -224,9 +233,12 @@ func findAllMatchesH10(handle *h10, dictionary *encoderDictionary, data []byte,
return uint(-cap(matches) + cap(orig_matches))
}
/* Stores the hash of the next 4 bytes and re-roots the binary tree at the
/*
Stores the hash of the next 4 bytes and re-roots the binary tree at the
current sequence, without returning any matches.
REQUIRES: ix + 128 <= end-of-current-block */
REQUIRES: ix + 128 <= end-of-current-block
*/
func (h *h10) Store(data []byte, mask uint, ix uint) {
var max_backward uint = h.window_mask_ - windowGap + 1
/* Maximum distance is window size - 16, see section 9.1. of the spec. */

View File

@ -8,12 +8,15 @@ import "encoding/binary"
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* A (forgetful) hash table to the data seen by the compressor, to
/*
A (forgetful) hash table to the data seen by the compressor, to
help create backward references to previous data.
This is a hash map of fixed size (bucket_size_) to a ring buffer of
fixed size (block_size_). The ring buffer contains the last block_size_
index positions of the given hash key in the compressed data. */
index positions of the given hash key in the compressed data.
*/
// HashTypeLength reports how many input bytes the h5 hasher consumes when
// computing a hash key (4 bytes at &data[ix & mask]).
func (*h5) HashTypeLength() uint {
	const h5HashBytes = 4
	return h5HashBytes
}
@ -67,8 +70,11 @@ func (h *h5) Prepare(one_shot bool, input_size uint, data []byte) {
}
}
/* Look at 4 bytes at &data[ix & mask].
Compute a hash from these, and store the value of ix at that position. */
/*
Look at 4 bytes at &data[ix & mask].
Compute a hash from these, and store the value of ix at that position.
*/
func (h *h5) Store(data []byte, mask uint, ix uint) {
var num []uint16 = h.num
var key uint32 = hashBytesH5(data[ix&mask:], h.hash_shift_)
@ -100,7 +106,9 @@ func (h *h5) PrepareDistanceCache(distance_cache []int) {
prepareDistanceCache(distance_cache, h.params.num_last_distances_to_check)
}
/* Find a longest backward match of &data[cur_ix] up to the length of
/*
Find a longest backward match of &data[cur_ix] up to the length of
max_length and stores the position cur_ix in the hash table.
REQUIRES: PrepareDistanceCacheH5 must be invoked for current distance cache
@ -110,7 +118,8 @@ func (h *h5) PrepareDistanceCache(distance_cache []int) {
Does not look for matches longer than max_length.
Does not look for matches further away than max_backward.
Writes the best match into |out|.
|out|->score is updated only if a better match is found. */
|out|->score is updated only if a better match is found.
*/
func (h *h5) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
var num []uint16 = h.num
var buckets []uint32 = h.buckets

View File

@ -8,12 +8,15 @@ import "encoding/binary"
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* A (forgetful) hash table to the data seen by the compressor, to
/*
A (forgetful) hash table to the data seen by the compressor, to
help create backward references to previous data.
This is a hash map of fixed size (bucket_size_) to a ring buffer of
fixed size (block_size_). The ring buffer contains the last block_size_
index positions of the given hash key in the compressed data. */
index positions of the given hash key in the compressed data.
*/
func (*h6) HashTypeLength() uint {
return 8
}
@ -69,8 +72,11 @@ func (h *h6) Prepare(one_shot bool, input_size uint, data []byte) {
}
}
/* Look at 4 bytes at &data[ix & mask].
Compute a hash from these, and store the value of ix at that position. */
/*
Look at 4 bytes at &data[ix & mask].
Compute a hash from these, and store the value of ix at that position.
*/
func (h *h6) Store(data []byte, mask uint, ix uint) {
var num []uint16 = h.num
var key uint32 = hashBytesH6(data[ix&mask:], h.hash_mask_, h.hash_shift_)
@ -102,7 +108,9 @@ func (h *h6) PrepareDistanceCache(distance_cache []int) {
prepareDistanceCache(distance_cache, h.params.num_last_distances_to_check)
}
/* Find a longest backward match of &data[cur_ix] up to the length of
/*
Find a longest backward match of &data[cur_ix] up to the length of
max_length and stores the position cur_ix in the hash table.
REQUIRES: PrepareDistanceCacheH6 must be invoked for current distance cache
@ -112,7 +120,8 @@ func (h *h6) PrepareDistanceCache(distance_cache []int) {
Does not look for matches longer than max_length.
Does not look for matches further away than max_backward.
Writes the best match into |out|.
|out|->score is updated only if a better match is found. */
|out|->score is updated only if a better match is found.
*/
func (h *h6) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
var num []uint16 = h.num
var buckets []uint32 = h.buckets

View File

@ -44,12 +44,14 @@ type hasherSearchResult struct {
len_code_delta int
}
/* kHashMul32 multiplier has these properties:
* The multiplier must be odd. Otherwise we may lose the highest bit.
* No long streaks of ones or zeros.
* There is no effort to ensure that it is a prime, the oddity is enough
/*
kHashMul32 multiplier has these properties:
- The multiplier must be odd. Otherwise we may lose the highest bit.
- No long streaks of ones or zeros.
- There is no effort to ensure that it is a prime, the oddity is enough
for this use.
* The number has been tuned heuristically against compression benchmarks. */
- The number has been tuned heuristically against compression benchmarks.
*/
const kHashMul32 uint32 = 0x1E35A7BD
const kHashMul64 uint64 = 0x1E35A7BD1E35A7BD
@ -92,7 +94,9 @@ const distanceBitPenalty = 30
/* Score must be positive after applying maximal penalty. */
const scoreBase = (distanceBitPenalty * 8 * 8)
/* Usually, we always choose the longest backward reference. This function
/*
Usually, we always choose the longest backward reference. This function
allows for the exception of that rule.
If we choose a backward reference that is further away, it will
@ -107,7 +111,8 @@ const scoreBase = (distanceBitPenalty * 8 * 8)
when it is not much longer and the bit cost for encoding it is more
than the saved literals.
backward_reference_offset MUST be positive. */
backward_reference_offset MUST be positive.
*/
func backwardReferenceScore(copy_length uint, backward_reference_offset uint) uint {
return scoreBase + literalByteScore*uint(copy_length) - distanceBitPenalty*uint(log2FloorNonZero(backward_reference_offset))
}

View File

@ -26,8 +26,11 @@ func (h *hashComposite) StoreLookahead() uint {
}
}
/* Composite hasher: This hasher allows to combine two other hashers, HASHER_A
and HASHER_B. */
/*
Composite hasher: This hasher allows to combine two other hashers, HASHER_A
and HASHER_B.
*/
type hashComposite struct {
hasherCommon
ha hasherHandle
@ -39,10 +42,13 @@ func (h *hashComposite) Initialize(params *encoderParams) {
h.params = params
}
/* TODO: Initialize of the hashers is defered to Prepare (and params
/*
TODO: Initialize of the hashers is defered to Prepare (and params
remembered here) because we don't get the one_shot and input_size params
here that are needed to know the memory size of them. Instead provide
those params to all hashers InitializehashComposite */
those params to all hashers InitializehashComposite
*/
func (h *hashComposite) Prepare(one_shot bool, input_size uint, data []byte) {
if h.ha == nil {
var common_a *hasherCommon

View File

@ -30,12 +30,15 @@ type slot struct {
next uint16
}
/* A (forgetful) hash table to the data seen by the compressor, to
/*
A (forgetful) hash table to the data seen by the compressor, to
help create backward references to previous data.
Hashes are stored in chains which are bucketed to groups. Group of chains
share a storage "bank". When more than "bank size" chain nodes are added,
oldest nodes are replaced; this way several chains may share a tail. */
oldest nodes are replaced; this way several chains may share a tail.
*/
type hashForgetfulChain struct {
hasherCommon
@ -105,8 +108,11 @@ func (h *hashForgetfulChain) Prepare(one_shot bool, input_size uint, data []byte
}
}
/* Look at 4 bytes at &data[ix & mask]. Compute a hash from these, and prepend
node to corresponding chain; also update tiny_hash for current position. */
/*
Look at 4 bytes at &data[ix & mask]. Compute a hash from these, and prepend
node to corresponding chain; also update tiny_hash for current position.
*/
func (h *hashForgetfulChain) Store(data []byte, mask uint, ix uint) {
var key uint = h.HashBytes(data[ix&mask:])
var bank uint = key & (h.numBanks - 1)
@ -146,7 +152,9 @@ func (h *hashForgetfulChain) PrepareDistanceCache(distance_cache []int) {
prepareDistanceCache(distance_cache, h.numLastDistancesToCheck)
}
/* Find a longest backward match of &data[cur_ix] up to the length of
/*
Find a longest backward match of &data[cur_ix] up to the length of
max_length and stores the position cur_ix in the hash table.
REQUIRES: PrepareDistanceCachehashForgetfulChain must be invoked for current distance cache
@ -156,7 +164,8 @@ func (h *hashForgetfulChain) PrepareDistanceCache(distance_cache []int) {
Does not look for matches longer than max_length.
Does not look for matches further away than max_backward.
Writes the best match into |out|.
|out|->score is updated only if a better match is found. */
|out|->score is updated only if a better match is found.
*/
func (h *hashForgetfulChain) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
var cur_ix_masked uint = cur_ix & ring_buffer_mask
var min_score uint = out.score

View File

@ -20,9 +20,12 @@ func (*hashLongestMatchQuickly) StoreLookahead() uint {
return 8
}
/* HashBytes is the function that chooses the bucket to place
/*
HashBytes is the function that chooses the bucket to place
the address in. The HashLongestMatch and hashLongestMatchQuickly
classes have separate, different implementations of hashing. */
classes have separate, different implementations of hashing.
*/
func (h *hashLongestMatchQuickly) HashBytes(data []byte) uint32 {
var hash uint64 = ((binary.LittleEndian.Uint64(data) << (64 - 8*h.hashLen)) * kHashMul64)
@ -31,11 +34,14 @@ func (h *hashLongestMatchQuickly) HashBytes(data []byte) uint32 {
return uint32(hash >> (64 - h.bucketBits))
}
/* A (forgetful) hash table to the data seen by the compressor, to
/*
A (forgetful) hash table to the data seen by the compressor, to
help create backward references to previous data.
This is a hash map of fixed size (1 << 16). Starting from the
given index, 1 buckets are used to store values of a key. */
given index, 1 buckets are used to store values of a key.
*/
type hashLongestMatchQuickly struct {
hasherCommon
@ -73,9 +79,12 @@ func (h *hashLongestMatchQuickly) Prepare(one_shot bool, input_size uint, data [
}
}
/* Look at 5 bytes at &data[ix & mask].
/*
Look at 5 bytes at &data[ix & mask].
Compute a hash from these, and store the value somewhere within
[ix .. ix+3]. */
[ix .. ix+3].
*/
func (h *hashLongestMatchQuickly) Store(data []byte, mask uint, ix uint) {
var key uint32 = h.HashBytes(data[ix&mask:])
var off uint32 = uint32(ix>>3) % uint32(h.bucketSweep)
@ -104,14 +113,17 @@ func (h *hashLongestMatchQuickly) StitchToPreviousBlock(num_bytes uint, position
func (*hashLongestMatchQuickly) PrepareDistanceCache(distance_cache []int) {
}
/* Find a longest backward match of &data[cur_ix & ring_buffer_mask]
/*
Find a longest backward match of &data[cur_ix & ring_buffer_mask]
up to the length of max_length and stores the position cur_ix in the
hash table.
Does not look for matches longer than max_length.
Does not look for matches further away than max_backward.
Writes the best match into |out|.
|out|->score is updated only if a better match is found. */
|out|->score is updated only if a better match is found.
*/
func (h *hashLongestMatchQuickly) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
var best_len_in uint = out.len
var cur_ix_masked uint = cur_ix & ring_buffer_mask

View File

@ -13,9 +13,12 @@ const kRollingHashMul32 uint32 = 69069
const kInvalidPosHashRolling uint32 = 0xffffffff
/* This hasher uses a longer forward length, but returning a higher value here
/*
This hasher uses a longer forward length, but returning a higher value here
will hurt compression by the main hasher when combined with a composite
hasher. The hasher tests for forward itself instead. */
hasher. The hasher tests for forward itself instead.
*/
func (*hashRolling) HashTypeLength() uint {
return 4
}
@ -24,8 +27,11 @@ func (*hashRolling) StoreLookahead() uint {
return 4
}
/* Computes a code from a single byte. A lookup table of 256 values could be
used, but simply adding 1 works about as good. */
/*
Computes a code from a single byte. A lookup table of 256 values could be
used, but simply adding 1 works about as good.
*/
func (*hashRolling) HashByte(b byte) uint32 {
return uint32(b) + 1
}
@ -38,8 +44,11 @@ func (h *hashRolling) HashRollingFunction(state uint32, add byte, rem byte, fact
return uint32(factor*state + h.HashByte(add) - factor_remove*h.HashByte(rem))
}
/* Rolling hash for long distance long string matches. Stores one position
per bucket, bucket key is computed over a long region. */
/*
Rolling hash for long distance long string matches. Stores one position
per bucket, bucket key is computed over a long region.
*/
type hashRolling struct {
hasherCommon

View File

@ -10,8 +10,11 @@ package brotli
const huffmanMaxCodeLength = 15
/* Maximum possible Huffman table size for an alphabet size of (index * 32),
max code length 15 and root table bits 8. */
/*
Maximum possible Huffman table size for an alphabet size of (index * 32),
max code length 15 and root table bits 8.
*/
var kMaxHuffmanTableSize = []uint16{
256,
402,
@ -363,9 +366,12 @@ var kReverseBits = [1 << reverseBitsMax]byte{
const reverseBitsLowest = (uint64(1) << (reverseBitsMax - 1 + reverseBitsBase))
/* Returns reverse(num >> BROTLI_REVERSE_BITS_BASE, BROTLI_REVERSE_BITS_MAX),
/*
Returns reverse(num >> BROTLI_REVERSE_BITS_BASE, BROTLI_REVERSE_BITS_MAX),
where reverse(value, len) is the bit-wise reversal of the len least
significant bits of value. */
significant bits of value.
*/
func reverseBits8(num uint64) uint64 {
return uint64(kReverseBits[num])
}
@ -382,9 +388,12 @@ func replicateValue(table []huffmanCode, step int, end int, code huffmanCode) {
}
}
/* Returns the table width of the next 2nd level table. |count| is the histogram
/*
Returns the table width of the next 2nd level table. |count| is the histogram
of bit lengths for the remaining symbols, |len| is the code length of the
next processed symbol. */
next processed symbol.
*/
func nextTableBitSize(count []uint16, len int, root_bits int) int {
var left int = 1 << uint(len-root_bits)
for len < huffmanMaxCodeLength {

View File

@ -268,8 +268,11 @@ func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParam
const maxStaticContexts = 13
/* Greedy block splitter for one block category (literal, command or distance).
Gathers histograms for all context buckets. */
/*
Greedy block splitter for one block category (literal, command or distance).
Gathers histograms for all context buckets.
*/
type contextBlockSplitter struct {
alphabet_size_ uint
num_contexts_ uint
@ -328,10 +331,13 @@ func initContextBlockSplitter(self *contextBlockSplitter, alphabet_size uint, nu
self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
}
/* Does either of three things:
/*
Does either of three things:
(1) emits the current block with a new block type;
(2) emits the current block with the type of the second last block;
(3) merges the current block with the last block. */
(3) merges the current block with the last block.
*/
func contextBlockSplitterFinishBlock(self *contextBlockSplitter, is_final bool) {
var split *blockSplit = self.split_
var num_contexts uint = self.num_contexts_
@ -459,8 +465,11 @@ func contextBlockSplitterFinishBlock(self *contextBlockSplitter, is_final bool)
}
}
/* Adds the next symbol to the current block type and context. When the
current block reaches the target size, decides on merging the block. */
/*
Adds the next symbol to the current block type and context. When the
current block reaches the target size, decides on merging the block.
*/
func contextBlockSplitterAddSymbol(self *contextBlockSplitter, symbol uint, context uint) {
histogramAddLiteral(&self.histograms_[self.curr_histogram_ix_+context], symbol)
self.block_size_++

View File

@ -58,10 +58,13 @@ func initBlockSplitterCommand(self *blockSplitterCommand, alphabet_size uint, mi
self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
}
/* Does either of three things:
/*
Does either of three things:
(1) emits the current block with a new block type;
(2) emits the current block with the type of the second last block;
(3) merges the current block with the last block. */
(3) merges the current block with the last block.
*/
func blockSplitterFinishBlockCommand(self *blockSplitterCommand, is_final bool) {
var split *blockSplit = self.split_
var last_entropy []float64 = self.last_entropy_[:]
@ -154,8 +157,11 @@ func blockSplitterFinishBlockCommand(self *blockSplitterCommand, is_final bool)
}
}
/* Adds the next symbol to the current histogram. When the current histogram
reaches the target size, decides on merging the block. */
/*
Adds the next symbol to the current histogram. When the current histogram
reaches the target size, decides on merging the block.
*/
func blockSplitterAddSymbolCommand(self *blockSplitterCommand, symbol uint) {
histogramAddCommand(&self.histograms_[self.curr_histogram_ix_], symbol)
self.block_size_++

View File

@ -58,10 +58,13 @@ func initBlockSplitterDistance(self *blockSplitterDistance, alphabet_size uint,
self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
}
/* Does either of three things:
/*
Does either of three things:
(1) emits the current block with a new block type;
(2) emits the current block with the type of the second last block;
(3) merges the current block with the last block. */
(3) merges the current block with the last block.
*/
func blockSplitterFinishBlockDistance(self *blockSplitterDistance, is_final bool) {
var split *blockSplit = self.split_
var last_entropy []float64 = self.last_entropy_[:]
@ -154,8 +157,11 @@ func blockSplitterFinishBlockDistance(self *blockSplitterDistance, is_final bool
}
}
/* Adds the next symbol to the current histogram. When the current histogram
reaches the target size, decides on merging the block. */
/*
Adds the next symbol to the current histogram. When the current histogram
reaches the target size, decides on merging the block.
*/
func blockSplitterAddSymbolDistance(self *blockSplitterDistance, symbol uint) {
histogramAddDistance(&self.histograms_[self.curr_histogram_ix_], symbol)
self.block_size_++

View File

@ -58,10 +58,13 @@ func initBlockSplitterLiteral(self *blockSplitterLiteral, alphabet_size uint, mi
self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
}
/* Does either of three things:
/*
Does either of three things:
(1) emits the current block with a new block type;
(2) emits the current block with the type of the second last block;
(3) merges the current block with the last block. */
(3) merges the current block with the last block.
*/
func blockSplitterFinishBlockLiteral(self *blockSplitterLiteral, is_final bool) {
var split *blockSplit = self.split_
var last_entropy []float64 = self.last_entropy_[:]
@ -154,8 +157,11 @@ func blockSplitterFinishBlockLiteral(self *blockSplitterLiteral, is_final bool)
}
}
/* Adds the next symbol to the current histogram. When the current histogram
reaches the target size, decides on merging the block. */
/*
Adds the next symbol to the current histogram. When the current histogram
reaches the target size, decides on merging the block.
*/
func blockSplitterAddSymbolLiteral(self *blockSplitterLiteral, symbol uint) {
histogramAddLiteral(&self.histograms_[self.curr_histogram_ix_], symbol)
self.block_size_++

View File

@ -9,8 +9,11 @@ package brotli
/* Functions for encoding of integers into prefix codes the amount of extra
bits, and the actual values of the extra bits. */
/* Here distance_code is an intermediate code, i.e. one of the special codes or
the actual distance increased by BROTLI_NUM_DISTANCE_SHORT_CODES - 1. */
/*
Here distance_code is an intermediate code, i.e. one of the special codes or
the actual distance increased by BROTLI_NUM_DISTANCE_SHORT_CODES - 1.
*/
func prefixEncodeCopyDistance(distance_code uint, num_direct_codes uint, postfix_bits uint, code *uint16, extra_bits *uint32) {
if distance_code < numDistanceShortCodes+num_direct_codes {
*code = uint16(distance_code)

View File

@ -24,8 +24,11 @@ const minQualityForHqContextModeling = 7
const minQualityForHqBlockSplitting = 10
/* For quality below MIN_QUALITY_FOR_BLOCK_SPLIT there is no block splitting,
so we buffer at most this much literals and commands. */
/*
For quality below MIN_QUALITY_FOR_BLOCK_SPLIT there is no block splitting,
so we buffer at most this much literals and commands.
*/
const maxNumDelayedSymbols = 0x2FFF
/* Returns hash-table size for quality levels 0 and 1. */
@ -102,11 +105,14 @@ func computeLgBlock(params *encoderParams) int {
return lgblock
}
/* Returns log2 of the size of main ring buffer area.
/*
Returns log2 of the size of main ring buffer area.
Allocate at least lgwin + 1 bits for the ring buffer so that the newly
added block fits there completely and we still get lgwin bits and at least
read_block_size_bits + 1 bits because the copy tail length needs to be
smaller than ring-buffer size. */
smaller than ring-buffer size.
*/
func computeRbBits(params *encoderParams) int {
return 1 + brotli_max_int(int(params.lgwin), params.lgblock)
}
@ -116,12 +122,15 @@ func maxMetablockSize(params *encoderParams) uint {
return uint(1) << uint(bits)
}
/* When searching for backward references and have not seen matches for a long
/*
When searching for backward references and have not seen matches for a long
time, we can skip some match lookups. Unsuccessful match lookups are very
expensive and this kind of a heuristic speeds up compression quite a lot.
At first 8 byte strides are taken and every second byte is put to hasher.
After 4x more literals stride by 16 bytes, every put 4-th byte to hasher.
Applied only to qualities 2 to 9. */
Applied only to qualities 2 to 9.
*/
func literalSpreeLengthForSparseSearch(params *encoderParams) uint {
if params.quality < 9 {
return 64

View File

@ -6,7 +6,9 @@ package brotli
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* A ringBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of
/*
A ringBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of
data in a circular manner: writing a byte writes it to:
`position() % (1 << window_bits)'.
For convenience, the ringBuffer array contains another copy of the
@ -14,7 +16,8 @@ package brotli
buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits),
and another copy of the last two bytes:
buffer_[-1] == buffer_[(1 << window_bits) - 1] and
buffer_[-2] == buffer_[(1 << window_bits) - 2]. */
buffer_[-2] == buffer_[(1 << window_bits) - 2].
*/
type ringBuffer struct {
size_ uint32
mask_ uint32
@ -41,8 +44,11 @@ func ringBufferSetup(params *encoderParams, rb *ringBuffer) {
const kSlackForEightByteHashingEverywhere uint = 7
/* Allocates or re-allocates data_ to the given length + plus some slack
region before and after. Fills the slack regions with zeros. */
/*
Allocates or re-allocates data_ to the given length + plus some slack
region before and after. Fills the slack regions with zeros.
*/
func ringBufferInitBuffer(buflen uint32, rb *ringBuffer) {
var new_data []byte
var i uint

View File

@ -14,7 +14,8 @@ const maxStaticDictionaryMatchLen = 37
const kInvalidMatch uint32 = 0xFFFFFFF
/* Copyright 2013 Google Inc. All Rights Reserved.
/*
Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT

View File

@ -10,9 +10,12 @@ package brotli
const kMinUTF8Ratio float64 = 0.75
/* Returns 1 if at least min_fraction of the bytes between pos and
/*
Returns 1 if at least min_fraction of the bytes between pos and
pos + length in the (data, mask) ring-buffer is UTF8-encoded, otherwise
returns 0. */
returns 0.
*/
func parseAsUTF8(symbol *int, input []byte, size uint) uint {
/* ASCII */
if input[0]&0x80 == 0 {

View File

@ -1,12 +1,12 @@
package storm
import (
"reflect"
"sort"
"time"
"github.com/asdine/storm/v3/index"
"github.com/asdine/storm/v3/q"
bolt "go.etcd.io/bbolt"
"reflect"
"sort"
"time"
)
type item struct {

View File

@ -1,3 +1,4 @@
//go:build !go1.8
// +build !go1.8
package storm

View File

@ -1,3 +1,4 @@
//go:build go1.8
// +build go1.8
package storm

View File

@ -18,6 +18,7 @@
// tag is deprecated and thus should not be used.
// Go versions prior to 1.4 are disabled because they use a different layout
// for interfaces which make the implementation of unsafeReflectValue more complex.
//go:build !js && !appengine && !safe && !disableunsafe && go1.4
// +build !js,!appengine,!safe,!disableunsafe,go1.4
package spew

View File

@ -16,6 +16,7 @@
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
//go:build js || appengine || safe || disableunsafe || !go1.4
// +build js appengine safe disableunsafe !go1.4
package spew

View File

@ -254,14 +254,14 @@ pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
- Pointers are dereferenced and followed
- Circular data structures are detected and handled properly
- Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
- Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
- Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by modifying the public members

View File

@ -21,35 +21,36 @@ debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types are as follows:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
- Pointers are dereferenced and followed
- Circular data structures are detected and handled properly
- Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
- Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
- Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
There are two different approaches spew allows for dumping Go data structures:
* Dump style which prints with newlines, customizable indentation,
- Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
* A custom Formatter interface that integrates cleanly with the standard fmt
- A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
Quick Start
# Quick Start
This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
@ -58,12 +59,13 @@ Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
Configuration Options
# Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
@ -74,37 +76,38 @@ equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
The following configuration options are available:
* Indent
- Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
- MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
- DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
- DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
- DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
- DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
* ContinueOnMethod
- ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
- SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
@ -113,12 +116,12 @@ The following configuration options are available:
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
* SpewKeys
- SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
Dump Usage
# Dump Usage
Simply call spew.Dump with a list of variables you want to dump:
@ -133,7 +136,7 @@ A third option is to call spew.Sdump to get the formatted output as a string:
str := spew.Sdump(myVar1, myVar2, ...)
Sample Dump Output
# Sample Dump Output
See the Dump example for details on the setup of the types and variables being
shown here.
@ -150,13 +153,14 @@ shown here.
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
([]uint8) (len=32 cap=32) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
Custom Formatter
# Custom Formatter
Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
@ -170,7 +174,7 @@ standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Custom Formatter Usage
# Custom Formatter Usage
The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Printf. The
@ -184,15 +188,17 @@ functions have syntax you are most likely already familiar with:
See the Index for the full list convenience functions.
Sample Formatter Output
# Sample Formatter Output
Double pointer to a uint8:
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
Pointer to circular struct with a uint8 field and a pointer to itself:
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
@ -201,7 +207,7 @@ Pointer to circular struct with a uint8 field and a pointer to itself:
See the Printf example for details on the setup of variables being shown
here.
Errors
# Errors
Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information

View File

@ -488,14 +488,14 @@ pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
- Pointers are dereferenced and followed
- Circular data structures are detected and handled properly
- Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
- Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
- Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by an exported package global,

View File

@ -58,9 +58,9 @@ func Invert(img image.Image) *image.NRGBA {
// The percentage = -100 gives the image with the saturation value zeroed for each pixel (grayscale).
//
// Examples:
//
// dstImage = imaging.AdjustSaturation(srcImage, 25) // Increase image saturation by 25%.
// dstImage = imaging.AdjustSaturation(srcImage, -10) // Decrease image saturation by 10%.
//
func AdjustSaturation(img image.Image, percentage float64) *image.NRGBA {
percentage = math.Min(math.Max(percentage, -100), 100)
multiplier := 1 + percentage/100
@ -84,7 +84,6 @@ func AdjustSaturation(img image.Image, percentage float64) *image.NRGBA {
//
// dstImage = imaging.AdjustContrast(srcImage, -10) // Decrease image contrast by 10%.
// dstImage = imaging.AdjustContrast(srcImage, 20) // Increase image contrast by 20%.
//
func AdjustContrast(img image.Image, percentage float64) *image.NRGBA {
percentage = math.Min(math.Max(percentage, -100.0), 100.0)
lut := make([]uint8, 256)
@ -112,7 +111,6 @@ func AdjustContrast(img image.Image, percentage float64) *image.NRGBA {
//
// dstImage = imaging.AdjustBrightness(srcImage, -15) // Decrease image brightness by 15%.
// dstImage = imaging.AdjustBrightness(srcImage, 10) // Increase image brightness by 10%.
//
func AdjustBrightness(img image.Image, percentage float64) *image.NRGBA {
percentage = math.Min(math.Max(percentage, -100.0), 100.0)
lut := make([]uint8, 256)
@ -132,7 +130,6 @@ func AdjustBrightness(img image.Image, percentage float64) *image.NRGBA {
// Example:
//
// dstImage = imaging.AdjustGamma(srcImage, 0.7)
//
func AdjustGamma(img image.Image, gamma float64) *image.NRGBA {
e := 1.0 / math.Max(gamma, 0.0001)
lut := make([]uint8, 256)
@ -154,7 +151,6 @@ func AdjustGamma(img image.Image, gamma float64) *image.NRGBA {
//
// dstImage = imaging.AdjustSigmoid(srcImage, 0.5, 3.0) // Increase the contrast.
// dstImage = imaging.AdjustSigmoid(srcImage, 0.5, -3.0) // Decrease the contrast.
//
func AdjustSigmoid(img image.Image, midpoint, factor float64) *image.NRGBA {
if factor == 0 {
return Clone(img)
@ -226,7 +222,6 @@ func adjustLUT(img image.Image, lut []uint8) *image.NRGBA {
// return color.NRGBA{uint8(r), c.G, c.B, c.A}
// }
// )
//
func AdjustFunc(img image.Image, fn func(c color.NRGBA) color.NRGBA) *image.NRGBA {
src := newScanner(img)
dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))

View File

@ -15,7 +15,6 @@ func gaussianBlurKernel(x, sigma float64) float64 {
// Example:
//
// dstImage := imaging.Blur(srcImage, 3.5)
//
func Blur(img image.Image, sigma float64) *image.NRGBA {
if sigma <= 0 {
return Clone(img)
@ -137,7 +136,6 @@ func blurVertical(img image.Image, kernel []float64) *image.NRGBA {
// Example:
//
// dstImage := imaging.Sharpen(srcImage, 3.5)
//
func Sharpen(img image.Image, sigma float64) *image.NRGBA {
if sigma <= 0 {
return Clone(img)

View File

@ -91,7 +91,6 @@ func Decode(r io.Reader, opts ...DecodeOption) (image.Image, error) {
//
// // Load an image and transform it depending on the EXIF orientation tag (if present).
// img, err := imaging.Open("test.jpg", imaging.AutoOrientation(true))
//
func Open(filename string, opts ...DecodeOption) (image.Image, error) {
file, err := fs.Open(filename)
if err != nil {
@ -264,7 +263,6 @@ func Encode(w io.Writer, img image.Image, format Format, opts ...EncodeOption) e
//
// // Save the image as JPEG with optional quality parameter set to 80.
// err := imaging.Save(img, "out.jpg", imaging.JPEGQuality(80))
//
func Save(img image.Image, filename string, opts ...EncodeOption) (err error) {
f, err := FormatFromFilename(filename)
if err != nil {

View File

@ -61,7 +61,6 @@ func precomputeWeights(dstSize, srcSize int, filter ResampleFilter) [][]indexWei
// Example:
//
// dstImage := imaging.Resize(srcImage, 800, 600, imaging.Lanczos)
//
func Resize(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
dstW, dstH := width, height
if dstW < 0 || dstH < 0 {
@ -218,7 +217,6 @@ func resizeNearest(img image.Image, width, height int) *image.NRGBA {
// Example:
//
// dstImage := imaging.Fit(srcImage, 800, 600, imaging.Lanczos)
//
func Fit(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
maxW, maxH := width, height
@ -259,7 +257,6 @@ func Fit(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA
// Example:
//
// dstImage := imaging.Fill(srcImage, 800, 600, imaging.Center, imaging.Lanczos)
//
func Fill(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA {
dstW, dstH := width, height
@ -338,7 +335,6 @@ func resizeAndCrop(img image.Image, width, height int, anchor Anchor, filter Res
// Example:
//
// dstImage := imaging.Thumbnail(srcImage, 100, 100, imaging.Lanczos)
//
func Thumbnail(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
return Fill(img, width, height, Center, filter)
}
@ -365,7 +361,6 @@ func Thumbnail(img image.Image, width, height int, filter ResampleFilter) *image
//
// - NearestNeighbor
// Fastest resampling filter, no antialiasing.
//
type ResampleFilter struct {
Support float64
Kernel func(float64) float64

View File

@ -176,7 +176,6 @@ func PasteCenter(background, img image.Image) *image.NRGBA {
//
// // Blend two opaque images of the same size.
// dstImage := imaging.Overlay(imageOne, imageTwo, image.Pt(0, 0), 0.5)
//
func Overlay(background, img image.Image, pos image.Point, opacity float64) *image.NRGBA {
opacity = math.Min(math.Max(opacity, 0.0), 1.0) // Ensure 0.0 <= opacity <= 1.0.
dst := Clone(background)

View File

@ -15,6 +15,7 @@ import "github.com/dsnet/compress/bzip2/internal/sais"
// Transform, such that a SA can be converted to a BWT in O(n) time.
//
// References:
//
// http://www.hpl.hp.com/techreports/Compaq-DEC/SRC-RR-124.pdf
// https://github.com/cscott/compressjs/blob/master/lib/BWT.js
// https://www.quora.com/How-can-I-optimize-burrows-wheeler-transform-and-inverse-transform-to-work-in-O-n-time-O-n-space

View File

@ -5,9 +5,11 @@
// Package bzip2 implements the BZip2 compressed data format.
//
// Canonical C implementation:
//
// http://bzip.org
//
// Unofficial format specification:
//
// https://github.com/dsnet/compress/blob/master/doc/bzip2-format.pdf
package bzip2

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
//go:build !gofuzz
// +build !gofuzz
// This file exists to suppress fuzzing details from release builds.

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
//go:build gofuzz
// +build gofuzz
// This file exists to export internal implementation details for fuzz testing.

View File

@ -14,6 +14,7 @@ import "github.com/dsnet/compress/internal/errors"
// normal two's complement arithmetic. The methodology for doing so is below.
//
// Assuming the following:
//
// num: The value being encoded by RLE encoding.
// run: A sequence of RUNA and RUNB symbols represented as a binary integer,
// where RUNA is the 0 bit, RUNB is the 1 bit, and least-significant RUN
@ -21,6 +22,7 @@ import "github.com/dsnet/compress/internal/errors"
// cnt: The number of RUNA and RUNB symbols.
//
// Then the RLE encoding used by bzip2 has this mathematical property:
//
// num+1 == (1<<cnt) | run
type moveToFront struct {
dictBuf [256]uint8

View File

@ -32,7 +32,6 @@ const (
// 11110 <=> 4
// 111110 <=> 5
// 111111 <=> 6 Invalid tree index, so should fail
//
var encSel, decSel = func() (e prefix.Encoder, d prefix.Decoder) {
var selCodes [maxNumTrees + 1]prefix.PrefixCode
for i := range selCodes {
@ -150,6 +149,7 @@ func (pw *prefixWriter) WritePrefixCodes(codes []prefix.PrefixCodes, trees []pre
// handleDegenerateCodes converts a degenerate tree into a canonical tree.
//
// For example, when the input is an under-subscribed tree:
//
// input: []PrefixCode{
// {Sym: 0, Len: 3},
// {Sym: 1, Len: 4},
@ -165,6 +165,7 @@ func (pw *prefixWriter) WritePrefixCodes(codes []prefix.PrefixCodes, trees []pre
// }
//
// For example, when the input is an over-subscribed tree:
//
// input: []PrefixCode{
// {Sym: 0, Len: 1},
// {Sym: 1, Len: 3},

View File

@ -17,9 +17,11 @@ var rleDone = errorf(errors.Unknown, "RLE1 stage is completed")
// run lengths of 256..259. The decoder can handle the latter case.
//
// For example, if the input was:
//
// input: "AAAAAAABBBBCCCD"
//
// Then the output will be:
//
// output: "AAAA\x03BBBB\x00CCCD"
type runLengthEncoding struct {
buf []byte

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
//go:build debug && !gofuzz
// +build debug,!gofuzz
package internal

View File

@ -17,6 +17,7 @@
// recover from errors only generated from within this repository.
//
// Example usage:
//
// func Foo() (err error) {
// defer errors.Recover(&err)
//
@ -28,7 +29,6 @@
// errors.Panic(errors.New("whoopsie"))
// }
// }
//
package errors
import "strings"

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
//go:build gofuzz
// +build gofuzz
package internal

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
//go:build debug
// +build debug
package prefix

View File

@ -37,6 +37,7 @@ func (rcs RangeCodes) End() uint32 { return rcs[len(rcs)-1].End() }
// checkValid reports whether the RangeCodes is valid. In order to be valid,
// the following must hold true:
//
// rcs[i-1].Base <= rcs[i].Base
// rcs[i-1].End <= rcs[i].End
// rcs[i-1].End >= rcs[i].Base

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.
//go:build !debug && !gofuzz
// +build !debug,!gofuzz
package internal

View File

@ -67,7 +67,6 @@ func (ve *ValueEncoder) encodeAscii(value string) (ed EncodedData, err error) {
//
// 2. The presence of this method allows us to completely test the complimentary
// no-nul parser.
//
func (ve *ValueEncoder) encodeAsciiNoNul(value string) (ed EncodedData, err error) {
ed.Type = TypeAsciiNoNul
ed.Encoded = []byte(value)

View File

@ -128,6 +128,7 @@ type LogAdapter interface {
}
// TODO(dustin): !! Also populate whether we've bypassed an exception so that
//
// we can add a template macro to prefix an exclamation of
// some sort.
type MessageContext struct {

View File

@ -1,3 +1,4 @@
//go:build go1.13
// +build go1.13
package errors

View File

@ -1,3 +1,4 @@
//go:build !go1.13
// +build !go1.13
package errors

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package oleutil

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package oleutil

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package oleutil

View File

@ -1,6 +1,7 @@
// This file is here so go get succeeds as without it errors with:
// no buildable Go source files in ...
//
//go:build !windows
// +build !windows
package oleutil

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

Some files were not shown because too many files have changed in this diff Show More