updated things

Graham Steffaniak 2023-08-12 11:30:41 -05:00
parent 6f83300f92
commit 4e01929dc8
228 changed files with 2444 additions and 1627 deletions

.gitignore (vendored, 2 changes)

@@ -7,7 +7,7 @@ rice-box.go
 /filebrowser.exe
 /frontend/dist
 /backend/vendor
+/backend/*.cov
 .DS_Store
 node_modules

CHANGELOG.md

@@ -2,6 +2,16 @@
 All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.
+# v0.1.4
+- various UI fixes
+- Added download button back to toolbar
+- Added upload button to side menu
+- breadcrumb spacing fix
+- Added "compact" view option
+- various backend fixes
+- search no longer searches by word with spaces, includes space in searches
+- prepared for full json configuration
+-
 ## v0.1.3
 - improved styling, colors, transparency, blur

Dockerfile

@@ -5,7 +5,7 @@ RUN npm i
 COPY ./frontend/ ./
 RUN npm run build
-FROM golang:alpine as base
+FROM golang:1.21-alpine as base
 WORKDIR /app
 COPY ./backend ./
 RUN go build -ldflags="-w -s" -o filebrowser .
@@ -15,7 +15,7 @@ RUN apk --no-cache add \
     ca-certificates \
     mailcap
 VOLUME /srv
-EXPOSE 80
+EXPOSE 8080
 WORKDIR /
 COPY --from=base /app/.filebrowser.json /.filebrowser.json
 COPY --from=base /app/filebrowser /filebrowser

backend/http (search handler)

@@ -1,25 +1,25 @@
 package http
 import (
-    "net/http"
     "github.com/gtsteffaniak/filebrowser/search"
+    "net/http"
 )
 var searchHandler = withUser(func(w http.ResponseWriter, r *http.Request, d *data) (int, error) {
     response := []map[string]interface{}{}
     query := r.URL.Query().Get("query")
     indexInfo, fileTypes := search.SearchAllIndexes(query, r.URL.Path)
-    for _,path := range(indexInfo){
+    for _, path := range indexInfo {
         f := fileTypes[path]
         responseObj := map[string]interface{}{
-            "path" : path,
+            "path": path,
         }
-        for filterType,_ := range(f) {
+        for filterType, _ := range f {
             if f[filterType] {
                 responseObj[filterType] = f[filterType]
             }
         }
-        response = append(response,responseObj)
+        response = append(response, responseObj)
     }
     return renderJSON(w, r, response)
 })
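For context, a minimal sketch of the JSON shape this handler produces after the change: each hit is an object that always carries its `path`, plus a boolean flag for every filter type that matched. The example paths below are illustrative, not from the commit.

package main

import (
    "encoding/json"
    "fmt"
)

// Mirrors the handler's response construction above: one map per hit,
// "path" always set, filter-type flags only when true.
func main() {
    response := []map[string]interface{}{
        {"path": "docs/"},                       // a directory hit, no type flags
        {"path": "docs/notes.txt", "doc": true}, // a file hit matching "doc"
    }
    out, _ := json.Marshal(response)
    fmt.Println(string(out))
    // Output: [{"path":"docs/"},{"doc":true,"path":"docs/notes.txt"}]
}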

backend/render.yml (new empty file)

backend/run_benchmark.sh (new executable file, 18 lines)

@@ -0,0 +1,18 @@
#!/bin/sh
## TEST file used by docker testing containers
touch render.yml
checkExit() {
if [ "$?" -ne 0 ];then
exit 1
fi
}
if command -v go &> /dev/null
then
printf "\n == Running benchmark (sends to results.txt) == \n"
go test -bench=. -benchmem ./...
checkExit
else
echo "ERROR: unable to perform tests"
exit 1
fi

backend/run_check_coverage.sh (new executable file, 3 lines)

@@ -0,0 +1,3 @@
#!/bin/bash
go test -race -v -coverpkg=./... -coverprofile=coverage.cov ./...
go tool cover -html=coverage.cov

backend/run_fmt.sh (new executable file, 2 lines)

@@ -0,0 +1,2 @@
#!/bin/bash
for i in $(find $(pwd) -name '*.go');do gofmt -w $i;done

backend/run_tests.sh (new executable file, 21 lines)

@@ -0,0 +1,21 @@
#!/bin/sh
## TEST file used by docker testing containers
touch render.yml
checkExit() {
if [ "$?" -ne 0 ];then
exit 1
fi
}
if command -v go &> /dev/null
then
printf "\n == Running tests == \n"
go test -race -v ./...
checkExit
printf "\n == Running benchmark (sends to results.txt) == \n"
go test -bench=. -benchtime=100x -benchmem ./...
checkExit
else
echo "ERROR: unable to perform tests"
exit 1
fi

backend/search (search options and parsing)

@@ -25,14 +25,14 @@ var compressedFile = []string{
     ".tar.xz",
 }
-type searchOptions struct {
+type SearchOptions struct {
     Conditions map[string]bool
     Size       int
     Terms      []string
 }
-func ParseSearch(value string) *searchOptions {
-    opts := &searchOptions{
+func ParseSearch(value string) *SearchOptions {
+    opts := &SearchOptions{
         Conditions: map[string]bool{
             "exact": strings.Contains(value, "case:exact"),
         },
@@ -79,8 +79,8 @@ func ParseSearch(value string) *searchOptions {
     }
     if len(types) > 0 {
-        // Remove the fields from the search value, including added space
-        value = typeRegexp.ReplaceAllString(value+" ", "")
+        // Remove the fields from the search value
+        value = typeRegexp.ReplaceAllString(value, "")
     }
     if value == "" {
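The changelog entry "search no longer searches by word with spaces" corresponds to the Terms handling here: the whole query stays one term unless `|` separates alternatives (see the new tests further down in this commit). A minimal sketch of that behavior; `splitTerms` is a hypothetical stand-in, the real logic lives inside ParseSearch.

package main

import (
    "fmt"
    "strings"
)

// splitTerms: spaces stay inside a term, '|' separates alternative terms.
func splitTerms(value string) []string {
    return strings.Split(value, "|")
}

func main() {
    fmt.Printf("%q\n", splitTerms("my test search")) // ["my test search"]
    fmt.Printf("%q\n", splitTerms("my|test|search")) // ["my" "test" "search"]
}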

backend/search (indexing and search)

@@ -2,29 +2,27 @@ package search
 import (
     "log"
+    "math/rand"
+    "mime"
     "os"
     "path/filepath"
     "sort"
     "strings"
     "sync"
     "time"
-    "mime"
-    "math/rand"
 )
 var (
     sessionInProgress sync.Map // Track IPs with requests in progress
     rootPath          string = "/srv"
     indexes           map[string][]string
     mutex             sync.RWMutex
     lastIndexed       time.Time
 )
 func InitializeIndex(intervalMinutes uint32) {
     // Initialize the indexes map
     indexes = make(map[string][]string)
-    indexes["dirs"] = []string{}
-    indexes["files"] = []string{}
     var numFiles, numDirs int
     log.Println("Indexing files...")
     lastIndexedStart := time.Now()
@@ -97,15 +95,14 @@ func addToIndex(path string, fileName string, isDir bool) {
     mutex.Lock()
     defer mutex.Unlock()
     path = strings.TrimPrefix(path, rootPath+"/")
-    path = strings.TrimSuffix(path, "/")
-    adjustedPath := path + "/" + fileName
-    if path == rootPath {
-        adjustedPath = fileName
+    path = strings.TrimSuffix(path, "/") + "/"
+    if path == "" {
+        path = "/"
     }
     if isDir {
-        indexes["dirs"] = append(indexes["dirs"], adjustedPath)
-    }else{
-        indexes["files"] = append(indexes["files"], adjustedPath)
+        indexes[path] = []string{}
+    } else {
+        indexes[path] = append(indexes[path], fileName)
     }
 }
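The hunk above replaces the two flat `dirs`/`files` slices with a single map keyed by directory path, whose value is the list of file names in that directory. A minimal sketch of the resulting layout (paths illustrative):

package main

import "fmt"

func main() {
    // Old layout: indexes["dirs"] and indexes["files"] were flat slices.
    // New layout: one entry per directory, holding its file names.
    indexes := make(map[string][]string)
    indexes["docs/"] = []string{}                                      // what addToIndex does for a dir
    indexes["docs/"] = append(indexes["docs/"], "readme.txt", "a.mp3") // and for files under it
    for dir, files := range indexes {
        fmt.Println(dir, files) // docs/ [readme.txt a.mp3]
    }
}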
@@ -119,37 +116,52 @@ func SearchAllIndexes(search string, scope string) ([]string, map[string]map[string]bool) {
     defer mutex.RUnlock()
     fileListTypes := make(map[string]map[string]bool)
     var matching []string
-    maximum := 100
+    // 250 items total seems like a reasonable limit
+    maximum := 250
     for _, searchTerm := range searchOptions.Terms {
         if searchTerm == "" {
             continue
         }
+        // Create a reused fileType map
+        reusedFileType := map[string]bool{}
         // Iterate over the indexes
-        for _,i := range([]string{"dirs","files"}) {
-            isdir := i == "dirs"
-            count := 0
-            for _, path := range indexes[i] {
-                value, found := sessionInProgress.Load(sourceSession)
-                if !found || value != runningHash {
-                    return []string{}, map[string]map[string]bool{}
-                }
-                if count > maximum {
-                    break
-                }
-                pathName := scopedPathNameFilter(path, scope)
-                if pathName == "" {
-                    continue
-                }
-                matches, fileType := containsSearchTerm(path, searchTerm, *searchOptions, isdir)
-                if !matches {
-                    continue
-                }
-                if isdir {
-                    pathName = pathName+"/"
-                }
-                matching = append(matching, pathName)
-                fileListTypes[pathName] = fileType
-                count++
-            }
-        }
+        count := 0
+        for pathName, files := range indexes {
+            if count > maximum {
+                break
+            }
+            // this is here to terminate a search if a new one has started
+            // currently limited to one search per container, should be session based
+            value, found := sessionInProgress.Load(sourceSession)
+            if !found || value != runningHash {
+                return []string{}, map[string]map[string]bool{}
+            }
+            pathName = scopedPathNameFilter(pathName, scope)
+            if pathName == "" {
+                continue
+            }
+            // check if dir matches
+            matches, fileType := containsSearchTerm(pathName, searchTerm, *searchOptions, false)
+            if matches {
+                matching = append(matching, pathName)
+                fileListTypes[pathName] = fileType
+                count++
+            }
+            for _, fileName := range files {
+                // check if file matches
+                matches, fileType := containsSearchTerm(pathName+fileName, searchTerm, *searchOptions, false)
+                if !matches {
+                    continue
+                }
+                matching = append(matching, pathName+fileName)
+                // Clear and reuse the fileType map
+                for key := range reusedFileType {
+                    delete(reusedFileType, key)
+                }
+                for key, value := range fileType {
+                    reusedFileType[key] = value
+                }
+                fileListTypes[pathName] = copyFileTypeMap(reusedFileType)
+                count++
+            }
+        }
     }
@@ -163,51 +175,79 @@ func SearchAllIndexes(search string, scope string) ([]string, map[string]map[string]bool) {
     return matching, fileListTypes
 }
+func copyFileTypeMap(src map[string]bool) map[string]bool {
+    dest := make(map[string]bool, len(src))
+    for key, value := range src {
+        dest[key] = value
+    }
+    return dest
+}
 func scopedPathNameFilter(pathName string, scope string) string {
-    scope = strings.TrimPrefix(scope, "/")
     if strings.HasPrefix(pathName, scope) {
-        pathName = strings.TrimPrefix(pathName, scope)
-    } else {
-        pathName = ""
+        return strings.TrimPrefix(pathName, scope)
     }
-    return pathName
+    return ""
 }
-func containsSearchTerm(pathName string, searchTerm string, options searchOptions, isDir bool) (bool, map[string]bool) {
+func containsSearchTerm(pathName string, searchTerm string, options SearchOptions, isDir bool) (bool, map[string]bool) {
     conditions := options.Conditions
     path := getLastPathComponent(pathName)
-    if !conditions["exact"] {
-        path = strings.ToLower(path)
-        searchTerm = strings.ToLower(searchTerm)
-    }
-    if strings.Contains(path, searchTerm) {
-        fileTypes := map[string]bool{}
-        fileSize := getFileSize(pathName)
-        matchesCondition := false
-        matchesAllConditions := true
-        extension := filepath.Ext(strings.ToLower(path))
-        mimetype := mime.TypeByExtension(extension)
-        fileTypes["audio"] = strings.HasPrefix(mimetype, "audio")
-        fileTypes["image"] = strings.HasPrefix(mimetype, "image")
-        fileTypes["video"] = strings.HasPrefix(mimetype, "video")
-        fileTypes["doc"] = isDoc(extension)
-        fileTypes["archive"] = isArchive(extension)
-        fileTypes["dir"] = isDir
-        for t,v := range conditions {
-            switch t {
-            case "exact" : continue
-            case "larger" : matchesCondition = fileSize > int64(options.Size) * 1000000
-            case "smaller" : matchesCondition = fileSize < int64(options.Size) * 1000000
-            default : matchesCondition = v == fileTypes[t]
+    // Convert to lowercase once
+    lowerPath := path
+    lowerSearchTerm := searchTerm
+    if !conditions["exact"] {
+        lowerPath = strings.ToLower(path)
+        lowerSearchTerm = strings.ToLower(searchTerm)
+    }
+    if strings.Contains(lowerPath, lowerSearchTerm) {
+        // Reuse the fileTypes map and clear its values
+        fileTypes := map[string]bool{
+            "audio":   false,
+            "image":   false,
+            "video":   false,
+            "doc":     false,
+            "archive": false,
+            "dir":     false,
+        }
+        // Calculate fileSize only if needed
+        var fileSize int64
+        if conditions["larger"] || conditions["smaller"] {
+            fileSize = getFileSize(pathName)
+        }
+        matchesAllConditions := true
+        extension := filepath.Ext(lowerPath)
+        mimetype := mime.TypeByExtension(extension)
+        fileTypes["audio"] = strings.HasPrefix(mimetype, "audio")
+        fileTypes["image"] = strings.HasPrefix(mimetype, "image")
+        fileTypes["video"] = strings.HasPrefix(mimetype, "video")
+        fileTypes["doc"] = isDoc(extension)
+        fileTypes["archive"] = isArchive(extension)
+        fileTypes["dir"] = isDir
+        for t, v := range conditions {
+            if t == "exact" {
+                continue
             }
-            if (!matchesCondition) {
+            var matchesCondition bool
+            switch t {
+            case "larger":
+                matchesCondition = fileSize > int64(options.Size)*1000000
+            case "smaller":
+                matchesCondition = fileSize < int64(options.Size)*1000000
+            default:
+                matchesCondition = v == fileTypes[t]
+            }
+            if !matchesCondition {
                 matchesAllConditions = false
             }
         }
         return matchesAllConditions, fileTypes
     }
+    // Clear variables and return
     return false, map[string]bool{}
 }
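One detail worth noting in the hunk above: `larger` and `smaller` compare the file size against `options.Size` scaled by 1,000,000, so the size filter works in decimal megabytes (MB, not MiB). A quick worked check:

package main

import "fmt"

func main() {
    // size condition with options.Size = 5: threshold is 5 * 1000000 bytes
    optionsSize := 5
    fileSize := int64(6_000_001)                       // a 6 MB file
    fmt.Println(fileSize > int64(optionsSize)*1000000) // true: matches "larger"
}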
@@ -221,7 +261,7 @@ func isDoc(extension string) bool {
     }
 }
 func getFileSize(filepath string) int64 {
-    fileInfo, err := os.Stat(rootPath+"/"+filepath)
+    fileInfo, err := os.Stat(rootPath + "/" + filepath)
     if err != nil {
         return 0
     }
@@ -259,4 +299,4 @@ func stringExistsInArray(target string, strings []string) bool {
         }
     }
     return false
 }
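The `sessionInProgress` check inside the search loop is what lets a newer request cancel an in-flight one: the assumption (the Store side is not shown in this hunk) is that each search stores a fresh hash under its source session, and the loop returns early as soon as its own hash is no longer the stored value. A minimal sketch of that pattern, with names simplified:

package main

import (
    "fmt"
    "sync"
)

var sessionInProgress sync.Map // source session -> hash of the latest search

// search aborts once a newer search has replaced its hash.
func search(source string, hash int, items []string) []string {
    sessionInProgress.Store(source, hash)
    var out []string
    for _, it := range items {
        v, ok := sessionInProgress.Load(source)
        if !ok || v != hash {
            return nil // superseded by a newer search
        }
        out = append(out, it)
    }
    return out
}

func main() {
    fmt.Println(search("1.2.3.4", 42, []string{"a", "b"})) // [a b]
}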

backend/search (new test file, 150 lines)

@@ -0,0 +1,150 @@
package search
import (
"encoding/json"
"fmt"
"math/rand"
"reflect"
"testing"
"time"
)
// loop over test files and compare output
func TestParseSearch(t *testing.T) {
value := ParseSearch("my test search")
want := &SearchOptions{
Conditions: map[string]bool{
"exact": false,
},
Terms: []string{"my test search"},
}
if !reflect.DeepEqual(value, want) {
t.Fatalf("\n got: %+v\n want: %+v", value, want)
}
value = ParseSearch("case:exact my|test|search")
want = &SearchOptions{
Conditions: map[string]bool{
"exact": true,
},
Terms: []string{"my", "test", "search"},
}
if !reflect.DeepEqual(value, want) {
t.Fatalf("\n got: %+v\n want: %+v", value, want)
}
}
func BenchmarkSearchAllIndexes(b *testing.B) {
indexes = make(map[string][]string)
// Create mock data
createMockData(500, 3) // 500 dirs, 3 files per dir
// Generate 100 random search terms
searchTerms := generateRandomSearchTerms(100)
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
// Execute the SearchAllIndexes function
for _, term := range searchTerms {
SearchAllIndexes(term, "/")
}
}
printBenchmarkResults(b)
}
func BenchmarkFillIndex(b *testing.B) {
indexes = make(map[string][]string)
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
createMockData(10000, 10) // 10000 dirs, 10 files per dir
}
for a, _ := range indexes {
b.Logf(a)
}
printBenchmarkResults(b)
}
func createMockData(numDirs, numFilesPerDir int) {
for i := 0; i < numDirs; i++ {
dirName := getRandomTerm()
addToIndex("/", dirName, true)
for j := 0; j < numFilesPerDir; j++ {
fileName := "file-" + getRandomTerm() + getRandomExtension()
addToIndex("/"+dirName, fileName, false)
}
}
}
func getRandomTerm() string {
wordbank := []string{
"hi", "test", "other", "name",
"cool", "things", "more", "items",
}
rand.Seed(time.Now().UnixNano())
index := rand.Intn(len(wordbank))
return wordbank[index]
}
func getRandomExtension() string {
wordbank := []string{
".txt", ".mp3", ".mov", ".doc",
".mp4", ".bak", ".zip", ".jpg",
}
rand.Seed(time.Now().UnixNano())
index := rand.Intn(len(wordbank))
return wordbank[index]
}
func generateRandomSearchTerms(numTerms int) []string {
// Generate random search terms
searchTerms := make([]string, numTerms)
for i := 0; i < numTerms; i++ {
searchTerms[i] = getRandomTerm()
}
return searchTerms
}
// JSONBytesEqual compares the JSON in two byte slices.
func JSONBytesEqual(a, b []byte) (bool, error) {
var j, j2 interface{}
if err := json.Unmarshal(a, &j); err != nil {
return false, err
}
if err := json.Unmarshal(b, &j2); err != nil {
return false, err
}
return reflect.DeepEqual(j2, j), nil
}
func passedFunc(t *testing.T) {
t.Logf("%s passed!", t.Name())
}
func formatDuration(duration time.Duration) string {
if duration >= time.Second {
return fmt.Sprintf("%.2f seconds", duration.Seconds())
} else if duration >= time.Millisecond {
return fmt.Sprintf("%.2f ms", float64(duration.Milliseconds()))
}
return fmt.Sprintf("%.2f ns", float64(duration.Nanoseconds()))
}
func formatMemory(bytes int64) string {
sizes := []string{"B", "KB", "MB", "GB", "TB"}
i := 0
for bytes >= 1024 && i < len(sizes)-1 {
bytes /= 1024
i++
}
return fmt.Sprintf("%d %s", bytes, sizes[i])
}
// Output the benchmark results with human-readable units
func printBenchmarkResults(b *testing.B) {
averageTimePerIteration := b.Elapsed() / time.Duration(b.N)
fmt.Printf("\nIterations : %d\n", b.N)
fmt.Printf("Total time : %s\n", formatDuration(b.Elapsed()))
fmt.Printf("Avg time per op : %s\n", formatDuration(averageTimePerIteration))
}
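A side note on the helpers above: formatMemory integer-divides at every step, so fractional sizes are truncated rather than rounded. A quick check of what it returns:

package main

import "fmt"

// Same logic as formatMemory in the test file above: repeated integer
// division by 1024 truncates any fraction.
func formatMemory(bytes int64) string {
    sizes := []string{"B", "KB", "MB", "GB", "TB"}
    i := 0
    for bytes >= 1024 && i < len(sizes)-1 {
        bytes /= 1024
        i++
    }
    return fmt.Sprintf("%d %s", bytes, sizes[i])
}

func main() {
    fmt.Println(formatMemory(512))  // 512 B
    fmt.Println(formatMemory(2048)) // 2 KB
    fmt.Println(formatMemory(1536)) // 1 KB (1.5 KB truncated)
}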

vendored brotli: backward_references_hq.go

@@ -305,8 +305,11 @@ func computeMinimumCopyLength(start_cost float32, nodes []zopfliNode, num_bytes
     return uint(len)
 }
-/* REQUIRES: nodes[pos].cost < kInfinity
-   REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */
+/*
+REQUIRES: nodes[pos].cost < kInfinity
+REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant".
+*/
 func computeDistanceShortcut(block_start uint, pos uint, max_backward_limit uint, gap uint, nodes []zopfliNode) uint32 {
     var clen uint = uint(zopfliNodeCopyLength(&nodes[pos]))
     var ilen uint = uint(nodes[pos].dcode_insert_length & 0x7FFFFFF)
@@ -326,13 +329,16 @@ func computeDistanceShortcut(block_start uint, pos uint, max_backward_limit uint
     }
 }
-/* Fills in dist_cache[0..3] with the last four distances (as defined by
-   Section 4. of the Spec) that would be used at (block_start + pos) if we
-   used the shortest path of commands from block_start, computed from
-   nodes[0..pos]. The last four distances at block_start are in
-   starting_dist_cache[0..3].
-   REQUIRES: nodes[pos].cost < kInfinity
-   REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */
+/*
+Fills in dist_cache[0..3] with the last four distances (as defined by
+Section 4. of the Spec) that would be used at (block_start + pos) if we
+used the shortest path of commands from block_start, computed from
+nodes[0..pos]. The last four distances at block_start are in
+starting_dist_cache[0..3].
+REQUIRES: nodes[pos].cost < kInfinity
+REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant".
+*/
 func computeDistanceCache(pos uint, starting_dist_cache []int, nodes []zopfliNode, dist_cache []int) {
     var idx int = 0
     var p uint = uint(nodes[pos].u.shortcut)
@@ -353,8 +359,11 @@ func computeDistanceCache(pos uint, starting_dist_cache []int, nodes []zopfliNod
     }
 }
-/* Maintains "ZopfliNode array invariant" and pushes node to the queue, if it
-   is eligible. */
+/*
+Maintains "ZopfliNode array invariant" and pushes node to the queue, if it
+is eligible.
+*/
 func evaluateNode(block_start uint, pos uint, max_backward_limit uint, gap uint, starting_dist_cache []int, model *zopfliCostModel, queue *startPosQueue, nodes []zopfliNode) {
     /* Save cost, because ComputeDistanceCache invalidates it. */
     var node_cost float32 = nodes[pos].u.cost
@@ -606,21 +615,24 @@ func zopfliIterate(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_
     return computeShortestPathFromNodes(num_bytes, nodes)
 }
-/* Computes the shortest path of commands from position to at most
-   position + num_bytes.
-   On return, path->size() is the number of commands found and path[i] is the
-   length of the i-th command (copy length plus insert length).
-   Note that the sum of the lengths of all commands can be less than num_bytes.
-   On return, the nodes[0..num_bytes] array will have the following
-   "ZopfliNode array invariant":
-   For each i in [1..num_bytes], if nodes[i].cost < kInfinity, then
-   (1) nodes[i].copy_length() >= 2
-   (2) nodes[i].command_length() <= i and
-   (3) nodes[i - nodes[i].command_length()].cost < kInfinity
-   REQUIRES: nodes != nil and len(nodes) >= num_bytes + 1 */
+/*
+Computes the shortest path of commands from position to at most
+position + num_bytes.
+On return, path->size() is the number of commands found and path[i] is the
+length of the i-th command (copy length plus insert length).
+Note that the sum of the lengths of all commands can be less than num_bytes.
+On return, the nodes[0..num_bytes] array will have the following
+"ZopfliNode array invariant":
+For each i in [1..num_bytes], if nodes[i].cost < kInfinity, then
+(1) nodes[i].copy_length() >= 2
+(2) nodes[i].command_length() <= i and
+(3) nodes[i - nodes[i].command_length()].cost < kInfinity
+REQUIRES: nodes != nil and len(nodes) >= num_bytes + 1
+*/
 func zopfliComputeShortestPath(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, dist_cache []int, hasher *h10, nodes []zopfliNode) uint {
     var max_backward_limit uint = maxBackwardLimit(params.lgwin)
     var max_zopfli_len uint = maxZopfliLen(params)

vendored brotli: bit_reader.go

@@ -70,11 +70,14 @@ type bitReaderState struct {
 /* Initializes the BrotliBitReader fields. */
-/* Ensures that accumulator is not empty.
-   May consume up to sizeof(brotli_reg_t) - 1 bytes of input.
-   Returns false if data is required but there is no input available.
-   For BROTLI_ALIGNED_READ this function also prepares bit reader for aligned
-   reading. */
+/*
+Ensures that accumulator is not empty.
+May consume up to sizeof(brotli_reg_t) - 1 bytes of input.
+Returns false if data is required but there is no input available.
+For BROTLI_ALIGNED_READ this function also prepares bit reader for aligned
+reading.
+*/
 func bitReaderSaveState(from *bitReader, to *bitReaderState) {
     to.val_ = from.val_
     to.bit_pos_ = from.bit_pos_
@@ -95,22 +98,31 @@ func getAvailableBits(br *bitReader) uint32 {
     return 64 - br.bit_pos_
 }
-/* Returns amount of unread bytes the bit reader still has buffered from the
-   BrotliInput, including whole bytes in br->val_. */
+/*
+Returns amount of unread bytes the bit reader still has buffered from the
+BrotliInput, including whole bytes in br->val_.
+*/
 func getRemainingBytes(br *bitReader) uint {
     return uint(uint32(br.input_len-br.byte_pos) + (getAvailableBits(br) >> 3))
 }
-/* Checks if there is at least |num| bytes left in the input ring-buffer
-   (excluding the bits remaining in br->val_). */
+/*
+Checks if there is at least |num| bytes left in the input ring-buffer
+(excluding the bits remaining in br->val_).
+*/
 func checkInputAmount(br *bitReader, num uint) bool {
     return br.input_len-br.byte_pos >= num
 }
-/* Guarantees that there are at least |n_bits| + 1 bits in accumulator.
-   Precondition: accumulator contains at least 1 bit.
-   |n_bits| should be in the range [1..24] for regular build. For portable
-   non-64-bit little-endian build only 16 bits are safe to request. */
+/*
+Guarantees that there are at least |n_bits| + 1 bits in accumulator.
+Precondition: accumulator contains at least 1 bit.
+|n_bits| should be in the range [1..24] for regular build. For portable
+non-64-bit little-endian build only 16 bits are safe to request.
+*/
 func fillBitWindow(br *bitReader, n_bits uint32) {
     if br.bit_pos_ >= 32 {
         br.val_ >>= 32
@@ -120,14 +132,20 @@ func fillBitWindow(br *bitReader, n_bits uint32) {
     }
 }
-/* Mostly like BrotliFillBitWindow, but guarantees only 16 bits and reads no
-   more than BROTLI_SHORT_FILL_BIT_WINDOW_READ bytes of input. */
+/*
+Mostly like BrotliFillBitWindow, but guarantees only 16 bits and reads no
+more than BROTLI_SHORT_FILL_BIT_WINDOW_READ bytes of input.
+*/
 func fillBitWindow16(br *bitReader) {
     fillBitWindow(br, 17)
 }
-/* Tries to pull one byte of input to accumulator.
-   Returns false if there is no input available. */
+/*
+Tries to pull one byte of input to accumulator.
+Returns false if there is no input available.
+*/
 func pullByte(br *bitReader) bool {
     if br.byte_pos == br.input_len {
         return false
@@ -140,28 +158,40 @@ func pullByte(br *bitReader) bool {
     return true
 }
-/* Returns currently available bits.
-   The number of valid bits could be calculated by BrotliGetAvailableBits. */
+/*
+Returns currently available bits.
+The number of valid bits could be calculated by BrotliGetAvailableBits.
+*/
 func getBitsUnmasked(br *bitReader) uint64 {
     return br.val_ >> br.bit_pos_
 }
-/* Like BrotliGetBits, but does not mask the result.
-   The result contains at least 16 valid bits. */
+/*
+Like BrotliGetBits, but does not mask the result.
+The result contains at least 16 valid bits.
+*/
 func get16BitsUnmasked(br *bitReader) uint32 {
     fillBitWindow(br, 16)
     return uint32(getBitsUnmasked(br))
 }
-/* Returns the specified number of bits from |br| without advancing bit
-   position. */
+/*
+Returns the specified number of bits from |br| without advancing bit
+position.
+*/
 func getBits(br *bitReader, n_bits uint32) uint32 {
     fillBitWindow(br, n_bits)
     return uint32(getBitsUnmasked(br)) & bitMask(n_bits)
 }
-/* Tries to peek the specified amount of bits. Returns false, if there
-   is not enough input. */
+/*
+Tries to peek the specified amount of bits. Returns false, if there
+is not enough input.
+*/
 func safeGetBits(br *bitReader, n_bits uint32, val *uint32) bool {
     for getAvailableBits(br) < n_bits {
         if !pullByte(br) {
@@ -191,15 +221,21 @@ func bitReaderUnload(br *bitReader) {
     br.bit_pos_ += unused_bits
 }
-/* Reads the specified number of bits from |br| and advances the bit pos.
-   Precondition: accumulator MUST contain at least |n_bits|. */
+/*
+Reads the specified number of bits from |br| and advances the bit pos.
+Precondition: accumulator MUST contain at least |n_bits|.
+*/
 func takeBits(br *bitReader, n_bits uint32, val *uint32) {
     *val = uint32(getBitsUnmasked(br)) & bitMask(n_bits)
     dropBits(br, n_bits)
 }
-/* Reads the specified number of bits from |br| and advances the bit pos.
-   Assumes that there is enough input to perform BrotliFillBitWindow. */
+/*
+Reads the specified number of bits from |br| and advances the bit pos.
+Assumes that there is enough input to perform BrotliFillBitWindow.
+*/
 func readBits(br *bitReader, n_bits uint32) uint32 {
     var val uint32
     fillBitWindow(br, n_bits)
@@ -207,8 +243,11 @@ func readBits(br *bitReader, n_bits uint32) uint32 {
     return val
 }
-/* Tries to read the specified amount of bits. Returns false, if there
-   is not enough input. |n_bits| MUST be positive. */
+/*
+Tries to read the specified amount of bits. Returns false, if there
+is not enough input. |n_bits| MUST be positive.
+*/
 func safeReadBits(br *bitReader, n_bits uint32, val *uint32) bool {
     for getAvailableBits(br) < n_bits {
         if !pullByte(br) {
@@ -220,8 +259,11 @@ func safeReadBits(br *bitReader, n_bits uint32, val *uint32) bool {
     return true
 }
-/* Advances the bit reader position to the next byte boundary and verifies
-   that any skipped bits are set to zero. */
+/*
+Advances the bit reader position to the next byte boundary and verifies
+that any skipped bits are set to zero.
+*/
 func bitReaderJumpToByteBoundary(br *bitReader) bool {
     var pad_bits_count uint32 = getAvailableBits(br) & 0x7
     var pad_bits uint32 = 0
@@ -232,9 +274,12 @@ func bitReaderJumpToByteBoundary(br *bitReader) bool {
     return pad_bits == 0
 }
-/* Copies remaining input bytes stored in the bit reader to the output. Value
-   |num| may not be larger than BrotliGetRemainingBytes. The bit reader must be
-   warmed up again after this. */
+/*
+Copies remaining input bytes stored in the bit reader to the output. Value
+|num| may not be larger than BrotliGetRemainingBytes. The bit reader must be
+warmed up again after this.
+*/
 func copyBytes(dest []byte, br *bitReader, num uint) {
     for getAvailableBits(br) >= 8 && num > 0 {
         dest[0] = byte(getBitsUnmasked(br))

vendored brotli: block_splitter_command.go

@@ -51,9 +51,12 @@ func refineEntropyCodesCommand(data []uint16, length uint, stride uint, num_hist
     }
 }
-/* Assigns a block id from the range [0, num_histograms) to each data element
-   in data[0..length) and fills in block_id[0..length) with the assigned values.
-   Returns the number of blocks, i.e. one plus the number of block switches. */
+/*
+Assigns a block id from the range [0, num_histograms) to each data element
+in data[0..length) and fills in block_id[0..length) with the assigned values.
+Returns the number of blocks, i.e. one plus the number of block switches.
+*/
 func findBlocksCommand(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramCommand, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint {
     var data_size uint = histogramDataSizeCommand()
     var bitmaplen uint = (num_histograms + 7) >> 3

vendored brotli: block_splitter_distance.go

@@ -51,9 +51,12 @@ func refineEntropyCodesDistance(data []uint16, length uint, stride uint, num_his
     }
 }
-/* Assigns a block id from the range [0, num_histograms) to each data element
-   in data[0..length) and fills in block_id[0..length) with the assigned values.
-   Returns the number of blocks, i.e. one plus the number of block switches. */
+/*
+Assigns a block id from the range [0, num_histograms) to each data element
+in data[0..length) and fills in block_id[0..length) with the assigned values.
+Returns the number of blocks, i.e. one plus the number of block switches.
+*/
 func findBlocksDistance(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramDistance, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint {
     var data_size uint = histogramDataSizeDistance()
     var bitmaplen uint = (num_histograms + 7) >> 3

vendored brotli: block_splitter_literal.go

@@ -51,9 +51,12 @@ func refineEntropyCodesLiteral(data []byte, length uint, stride uint, num_histog
     }
 }
-/* Assigns a block id from the range [0, num_histograms) to each data element
-   in data[0..length) and fills in block_id[0..length) with the assigned values.
-   Returns the number of blocks, i.e. one plus the number of block switches. */
+/*
+Assigns a block id from the range [0, num_histograms) to each data element
+in data[0..length) and fills in block_id[0..length) with the assigned values.
+Returns the number of blocks, i.e. one plus the number of block switches.
+*/
 func findBlocksLiteral(data []byte, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramLiteral, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint {
     var data_size uint = histogramDataSizeLiteral()
     var bitmaplen uint = (num_histograms + 7) >> 3

vendored brotli: brotli_bit_stream.go

@@ -7,12 +7,18 @@ import (
 const maxHuffmanTreeSize = (2*numCommandSymbols + 1)
-/* The maximum size of Huffman dictionary for distances assuming that
-   NPOSTFIX = 0 and NDIRECT = 0. */
+/*
+The maximum size of Huffman dictionary for distances assuming that
+NPOSTFIX = 0 and NDIRECT = 0.
+*/
 const maxSimpleDistanceAlphabetSize = 140
-/* Represents the range of values belonging to a prefix code:
-   [offset, offset + 2^nbits) */
+/*
+Represents the range of values belonging to a prefix code:
+[offset, offset + 2^nbits)
+*/
 type prefixCodeRange struct {
     offset uint32
     nbits  uint32
@@ -96,9 +102,12 @@ func nextBlockTypeCode(calculator *blockTypeCodeCalculator, type_ byte) uint {
     return type_code
 }
-/* |nibblesbits| represents the 2 bits to encode MNIBBLES (0-3)
-   REQUIRES: length > 0
-   REQUIRES: length <= (1 << 24) */
+/*
+|nibblesbits| represents the 2 bits to encode MNIBBLES (0-3)
+REQUIRES: length > 0
+REQUIRES: length <= (1 << 24)
+*/
 func encodeMlen(length uint, bits *uint64, numbits *uint, nibblesbits *uint64) {
     var lg uint
     if length == 1 {
@@ -132,8 +141,11 @@ func storeCommandExtra(cmd *command, bw *bitWriter) {
     bw.writeBits(uint(insnumextra+getCopyExtra(copycode)), bits)
 }
-/* Data structure that stores almost everything that is needed to encode each
-   block switch command. */
+/*
+Data structure that stores almost everything that is needed to encode each
+block switch command.
+*/
 type blockSplitCode struct {
     type_code_calculator blockTypeCodeCalculator
     type_depths          [maxBlockTypeSymbols]byte
@@ -154,9 +166,12 @@ func storeVarLenUint8(n uint, bw *bitWriter) {
     }
 }
-/* Stores the compressed meta-block header.
-   REQUIRES: length > 0
-   REQUIRES: length <= (1 << 24) */
+/*
+Stores the compressed meta-block header.
+REQUIRES: length > 0
+REQUIRES: length <= (1 << 24)
+*/
 func storeCompressedMetaBlockHeader(is_final_block bool, length uint, bw *bitWriter) {
     var lenbits uint64
     var nlenbits uint
@@ -186,9 +201,12 @@ func storeCompressedMetaBlockHeader(is_final_block bool, length uint, bw *bitWri
     }
 }
-/* Stores the uncompressed meta-block header.
-   REQUIRES: length > 0
-   REQUIRES: length <= (1 << 24) */
+/*
+Stores the uncompressed meta-block header.
+REQUIRES: length > 0
+REQUIRES: length <= (1 << 24)
+*/
 func storeUncompressedMetaBlockHeader(length uint, bw *bitWriter) {
     var lenbits uint64
     var nlenbits uint
@@ -312,8 +330,11 @@ func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max
     }
 }
-/* num = alphabet size
-   depths = symbol depths */
+/*
+num = alphabet size
+depths = symbol depths
+*/
 func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, bw *bitWriter) {
     var huffman_tree [numCommandSymbols]byte
     var huffman_tree_extra_bits [numCommandSymbols]byte
@@ -367,8 +388,11 @@ func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, bw *bitWriter
     storeHuffmanTreeToBitMask(huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:], code_length_bitdepth[:], code_length_bitdepth_symbols[:], bw)
 }
-/* Builds a Huffman tree from histogram[0:length] into depth[0:length] and
-   bits[0:length] and stores the encoded tree to the bit stream. */
+/*
+Builds a Huffman tree from histogram[0:length] into depth[0:length] and
+bits[0:length] and stores the encoded tree to the bit stream.
+*/
 func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabet_size uint, tree []huffmanTree, depth []byte, bits []uint16, bw *bitWriter) {
     var count uint = 0
     var s4 = [4]uint{0}
@@ -668,12 +692,15 @@ func moveToFrontTransform(v_in []uint32, v_size uint, v_out []uint32) {
     }
 }
-/* Finds runs of zeros in v[0..in_size) and replaces them with a prefix code of
-   the run length plus extra bits (lower 9 bits is the prefix code and the rest
-   are the extra bits). Non-zero values in v[] are shifted by
-   *max_length_prefix. Will not create prefix codes bigger than the initial
-   value of *max_run_length_prefix. The prefix code of run length L is simply
-   Log2Floor(L) and the number of extra bits is the same as the prefix code. */
+/*
+Finds runs of zeros in v[0..in_size) and replaces them with a prefix code of
+the run length plus extra bits (lower 9 bits is the prefix code and the rest
+are the extra bits). Non-zero values in v[] are shifted by
+*max_length_prefix. Will not create prefix codes bigger than the initial
+value of *max_run_length_prefix. The prefix code of run length L is simply
+Log2Floor(L) and the number of extra bits is the same as the prefix code.
+*/
 func runLengthCodeZeros(in_size uint, v []uint32, out_size *uint, max_run_length_prefix *uint32) {
     var max_reps uint32 = 0
     var i uint
@@ -793,8 +820,11 @@ func storeBlockSwitch(code *blockSplitCode, block_len uint32, block_type byte, i
     bw.writeBits(uint(len_nextra), uint64(len_extra))
 }
-/* Builds a BlockSplitCode data structure from the block split given by the
-   vector of block types and block lengths and stores it to the bit stream. */
+/*
+Builds a BlockSplitCode data structure from the block split given by the
+vector of block types and block lengths and stores it to the bit stream.
+*/
 func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint, num_types uint, tree []huffmanTree, code *blockSplitCode, bw *bitWriter) {
     var type_histo [maxBlockTypeSymbols]uint32
     var length_histo [numBlockLenSymbols]uint32
@@ -913,14 +943,20 @@ func cleanupBlockEncoder(self *blockEncoder) {
     blockEncoderPool.Put(self)
 }
-/* Creates entropy codes of block lengths and block types and stores them
-   to the bit stream. */
+/*
+Creates entropy codes of block lengths and block types and stores them
+to the bit stream.
+*/
 func buildAndStoreBlockSwitchEntropyCodes(self *blockEncoder, tree []huffmanTree, bw *bitWriter) {
     buildAndStoreBlockSplitCode(self.block_types_, self.block_lengths_, self.num_blocks_, self.num_block_types_, tree, &self.block_split_code_, bw)
 }
-/* Stores the next symbol with the entropy code of the current block type.
-   Updates the block type and block length at block boundaries. */
+/*
+Stores the next symbol with the entropy code of the current block type.
+Updates the block type and block length at block boundaries.
+*/
 func storeSymbol(self *blockEncoder, symbol uint, bw *bitWriter) {
     if self.block_len_ == 0 {
         self.block_ix_++
@@ -939,9 +975,12 @@ func storeSymbol(self *blockEncoder, symbol uint, bw *bitWriter) {
     }
 }
-/* Stores the next symbol with the entropy code of the current block type and
-   context value.
-   Updates the block type and block length at block boundaries. */
+/*
+Stores the next symbol with the entropy code of the current block type and
+context value.
+Updates the block type and block length at block boundaries.
+*/
 func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, context_map []uint32, bw *bitWriter, context_bits uint) {
     if self.block_len_ == 0 {
         self.block_ix_++
@@ -1257,8 +1296,11 @@ func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is
     }
 }
-/* This is for storing uncompressed blocks (simple raw storage of
-   bytes-as-bytes). */
+/*
+This is for storing uncompressed blocks (simple raw storage of
+bytes-as-bytes).
+*/
 func storeUncompressedMetaBlock(is_final_block bool, input []byte, position uint, mask uint, len uint, bw *bitWriter) {
     var masked_pos uint = position & mask
     storeUncompressedMetaBlockHeader(uint(len), bw)

vendored brotli: cluster_command.go

@@ -8,8 +8,11 @@ import "math"
 See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
 */
-/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
-   it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */
+/*
+Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
+it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue.
+*/
 func compareAndPushToQueueCommand(out []histogramCommand, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) {
     var is_good_pair bool = false
     var p histogramPair
@@ -165,10 +168,13 @@ func histogramBitCostDistanceCommand(histogram *histogramCommand, candidate *his
     }
 }
-/* Find the best 'out' histogram for each of the 'in' histograms.
-   When called, clusters[0..num_clusters) contains the unique values from
-   symbols[0..in_size), but this property is not preserved in this function.
-   Note: we assume that out[]->bit_cost_ is already up-to-date. */
+/*
+Find the best 'out' histogram for each of the 'in' histograms.
+When called, clusters[0..num_clusters) contains the unique values from
+symbols[0..in_size), but this property is not preserved in this function.
+Note: we assume that out[]->bit_cost_ is already up-to-date.
+*/
 func histogramRemapCommand(in []histogramCommand, in_size uint, clusters []uint32, num_clusters uint, out []histogramCommand, symbols []uint32) {
     var i uint
     for i = 0; i < in_size; i++ {

vendored brotli: cluster_distance.go

@@ -8,8 +8,11 @@ import "math"
 See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
 */
-/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
-   it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */
+/*
+Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
+it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue.
+*/
 func compareAndPushToQueueDistance(out []histogramDistance, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) {
     var is_good_pair bool = false
     var p histogramPair
@@ -165,10 +168,13 @@ func histogramBitCostDistanceDistance(histogram *histogramDistance, candidate *h
     }
 }
-/* Find the best 'out' histogram for each of the 'in' histograms.
-   When called, clusters[0..num_clusters) contains the unique values from
-   symbols[0..in_size), but this property is not preserved in this function.
-   Note: we assume that out[]->bit_cost_ is already up-to-date. */
+/*
+Find the best 'out' histogram for each of the 'in' histograms.
+When called, clusters[0..num_clusters) contains the unique values from
+symbols[0..in_size), but this property is not preserved in this function.
+Note: we assume that out[]->bit_cost_ is already up-to-date.
+*/
 func histogramRemapDistance(in []histogramDistance, in_size uint, clusters []uint32, num_clusters uint, out []histogramDistance, symbols []uint32) {
     var i uint
     for i = 0; i < in_size; i++ {

vendored brotli: cluster_literal.go

@@ -8,8 +8,11 @@ import "math"
 See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
 */
-/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
-   it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */
+/*
+Computes the bit cost reduction by combining out[idx1] and out[idx2] and if
+it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue.
+*/
 func compareAndPushToQueueLiteral(out []histogramLiteral, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) {
     var is_good_pair bool = false
     var p histogramPair
@@ -165,10 +168,13 @@ func histogramBitCostDistanceLiteral(histogram *histogramLiteral, candidate *his
     }
 }
-/* Find the best 'out' histogram for each of the 'in' histograms.
-   When called, clusters[0..num_clusters) contains the unique values from
-   symbols[0..in_size), but this property is not preserved in this function.
-   Note: we assume that out[]->bit_cost_ is already up-to-date. */
+/*
+Find the best 'out' histogram for each of the 'in' histograms.
+When called, clusters[0..num_clusters) contains the unique values from
+symbols[0..in_size), but this property is not preserved in this function.
+Note: we assume that out[]->bit_cost_ is already up-to-date.
+*/
 func histogramRemapLiteral(in []histogramLiteral, in_size uint, clusters []uint32, num_clusters uint, out []histogramLiteral, symbols []uint32) {
     var i uint
     for i = 0; i < in_size; i++ {

vendored brotli: compress_fragment.go

@@ -37,14 +37,17 @@ func isMatch5(p1 []byte, p2 []byte) bool {
     p1[4] == p2[4]
 }
-/* Builds a literal prefix code into "depths" and "bits" based on the statistics
-   of the "input" string and stores it into the bit stream.
-   Note that the prefix code here is built from the pre-LZ77 input, therefore
-   we can only approximate the statistics of the actual literal stream.
-   Moreover, for long inputs we build a histogram from a sample of the input
-   and thus have to assign a non-zero depth for each literal.
-   Returns estimated compression ratio millibytes/char for encoding given input
-   with generated code. */
+/*
+Builds a literal prefix code into "depths" and "bits" based on the statistics
+of the "input" string and stores it into the bit stream.
+Note that the prefix code here is built from the pre-LZ77 input, therefore
+we can only approximate the statistics of the actual literal stream.
+Moreover, for long inputs we build a histogram from a sample of the input
+and thus have to assign a non-zero depth for each literal.
+Returns estimated compression ratio millibytes/char for encoding given input
+with generated code.
+*/
 func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte, bits []uint16, bw *bitWriter) uint {
     var histogram = [256]uint32{0}
     var histogram_total uint
@@ -96,8 +99,11 @@ func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte
     }
 }
-/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
-   "bits" based on "histogram" and stores it into the bit stream. */
+/*
+Builds a command and distance prefix code (each 64 symbols) into "depth" and
+"bits" based on "histogram" and stores it into the bit stream.
+*/
 func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []uint16, bw *bitWriter) {
     var tree [129]huffmanTree
     var cmd_depth = [numCommandSymbols]byte{0}
@@ -637,27 +643,29 @@ next_block:
     }
 }
-/* Compresses "input" string to bw as one or more complete meta-blocks.
-   If "is_last" is 1, emits an additional empty last meta-block.
-   "cmd_depth" and "cmd_bits" contain the command and distance prefix codes
-   (see comment in encode.h) used for the encoding of this input fragment.
-   If "is_last" is 0, they are updated to reflect the statistics
-   of this input fragment, to be used for the encoding of the next fragment.
-   "*cmd_code_numbits" is the number of bits of the compressed representation
-   of the command and distance prefix codes, and "cmd_code" is an array of
-   at least "(*cmd_code_numbits + 7) >> 3" size that contains the compressed
-   command and distance prefix codes. If "is_last" is 0, these are also
-   updated to represent the updated "cmd_depth" and "cmd_bits".
-   REQUIRES: "input_size" is greater than zero, or "is_last" is 1.
-   REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24).
-   REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
-   REQUIRES: "table_size" is an odd (9, 11, 13, 15) power of two
-   OUTPUT: maximal copy distance <= |input_size|
-   OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
+/*
+Compresses "input" string to bw as one or more complete meta-blocks.
+If "is_last" is 1, emits an additional empty last meta-block.
+"cmd_depth" and "cmd_bits" contain the command and distance prefix codes
+(see comment in encode.h) used for the encoding of this input fragment.
+If "is_last" is 0, they are updated to reflect the statistics
+of this input fragment, to be used for the encoding of the next fragment.
+"*cmd_code_numbits" is the number of bits of the compressed representation
+of the command and distance prefix codes, and "cmd_code" is an array of
+at least "(*cmd_code_numbits + 7) >> 3" size that contains the compressed
+command and distance prefix codes. If "is_last" is 0, these are also
+updated to represent the updated "cmd_depth" and "cmd_bits".
+REQUIRES: "input_size" is greater than zero, or "is_last" is 1.
+REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24).
+REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
+REQUIRES: "table_size" is an odd (9, 11, 13, 15) power of two
+OUTPUT: maximal copy distance <= |input_size|
+OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18)
+*/
 func compressFragmentFast(input []byte, input_size uint, is_last bool, table []int, table_size uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, bw *bitWriter) {
     var initial_storage_ix uint = bw.getPos()
     var table_bits uint = uint(log2FloorNonZero(table_size))

vendored brotli: compress_fragment_two_pass.go

@@ -39,8 +39,11 @@ func isMatch1(p1 []byte, p2 []byte, length uint) bool {
     return p1[4] == p2[4] && p1[5] == p2[5]
 }
-/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
-   "bits" based on "histogram" and stores it into the bit stream. */
+/*
+Builds a command and distance prefix code (each 64 symbols) into "depth" and
+"bits" based on "histogram" and stores it into the bit stream.
+*/
 func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, bw *bitWriter) {
     var tree [129]huffmanTree
     var cmd_depth = [numCommandSymbols]byte{0}
@@ -558,18 +561,20 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co
     }
 }
-/* Compresses "input" string to bw as one or more complete meta-blocks.
-   If "is_last" is 1, emits an additional empty last meta-block.
-   REQUIRES: "input_size" is greater than zero, or "is_last" is 1.
-   REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24).
-   REQUIRES: "command_buf" and "literal_buf" point to at least
-   kCompressFragmentTwoPassBlockSize long arrays.
-   REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
-   REQUIRES: "table_size" is a power of two
-   OUTPUT: maximal copy distance <= |input_size|
-   OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
+/*
+Compresses "input" string to bw as one or more complete meta-blocks.
+If "is_last" is 1, emits an additional empty last meta-block.
+REQUIRES: "input_size" is greater than zero, or "is_last" is 1.
+REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24).
+REQUIRES: "command_buf" and "literal_buf" point to at least
+kCompressFragmentTwoPassBlockSize long arrays.
+REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
+REQUIRES: "table_size" is a power of two
+OUTPUT: maximal copy distance <= |input_size|
+OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18)
+*/
 func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, bw *bitWriter) {
     var initial_storage_ix uint = bw.getPos()
     var table_bits uint = uint(log2FloorNonZero(table_size))
View File
@ -69,9 +69,11 @@ const huffmanTableBits = 8
const huffmanTableMask = 0xFF
/*
We need the slack region for the following reasons:
- doing up to two 16-byte copies for fast backward copying
- inserting transformed dictionary word (5 prefix + 24 base + 8 suffix)
*/
const kRingBufferWriteAheadSlack uint32 = 42
var kCodeLengthCodeOrder = [codeLengthCodes]byte{1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15}
@ -121,8 +123,11 @@ func saveErrorCode(s *Reader, e int) int {
	}
}
/*
Decodes WBITS by reading 1 - 7 bits, or 0x11 for "Large Window Brotli".
Precondition: bit-reader accumulator has at least 8 bits.
*/
func decodeWindowBits(s *Reader, br *bitReader) int {
	var n uint32
	var large_window bool = s.large_window
@ -361,10 +366,13 @@ func decodeMetaBlockLength(s *Reader, br *bitReader) int {
	}
}
/*
Decodes the Huffman code.

This method doesn't read data from the bit reader, BUT drops the number of
bits that correspond to the decoded symbol.
bits MUST contain at least 15 (BROTLI_HUFFMAN_MAX_CODE_LENGTH) valid bits.
*/
func decodeSymbol(bits uint32, table []huffmanCode, br *bitReader) uint32 {
	table = table[bits&huffmanTableMask:]
	if table[0].bits > huffmanTableBits {
@ -377,14 +385,20 @@ func decodeSymbol(bits uint32, table []huffmanCode, br *bitReader) uint32 {
	return uint32(table[0].value)
}
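The shape of that two-level lookup, as a self-contained sketch: the root table is indexed with huffmanTableBits (8) low bits, and entries whose bit length exceeds 8 point at a second-level table. The offset arithmetic and field layout below are illustrative assumptions, not the package's exact representation:

type hcode struct {
	bits  byte   // code length; > 8 means "continue in a 2nd-level table"
	value uint16 // decoded symbol, or offset of the 2nd-level table
}

// lookup resolves a symbol from up to 15 peeked bits, assuming 2nd-level
// tables are appended after the 1<<8 root entries (hypothetical layout).
func lookup(table []hcode, bits uint32) uint16 {
	e := table[bits&0xFF] // root index: low 8 bits
	if e.bits > 8 {
		sub := table[1<<8+int(e.value):]
		mask := uint32(1)<<uint(e.bits-8) - 1 // remaining bits of the code
		e = sub[(bits>>8)&mask]
	}
	return e.value
}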
/*
Reads and decodes the next Huffman code from bit-stream.
This method peeks 16 bits of input and drops 0 - 15 of them.
*/
func readSymbol(table []huffmanCode, br *bitReader) uint32 {
	return decodeSymbol(get16BitsUnmasked(br), table, br)
}
/*
Same as DecodeSymbol, but it is known that fewer than 15 bits of
input are currently available.
*/
func safeDecodeSymbol(table []huffmanCode, br *bitReader, result *uint32) bool {
	var val uint32
	var available_bits uint32 = getAvailableBits(br)
@ -448,8 +462,11 @@ func preloadSymbol(safe int, table []huffmanCode, br *bitReader, bits *uint32, v
	*value = uint32(table[0].value)
}
/*
Decodes the next Huffman code using data prepared by PreloadSymbol.
Reads 0 - 15 bits. Also peeks 8 following bits.
*/
func readPreloadedSymbol(table []huffmanCode, br *bitReader, bits *uint32, value *uint32) uint32 {
	var result uint32 = *value
	var ext []huffmanCode
@ -479,9 +496,12 @@ func log2Floor(x uint32) uint32 {
	return result
}
/*
Reads (s->symbol + 1) symbols.

In total 1..4 symbols are read, 1..11 bits each.
The list of symbols MUST NOT contain duplicates.
*/
func readSimpleHuffmanSymbols(alphabet_size uint32, max_symbol uint32, s *Reader) int {
	var br *bitReader = &s.br
	var max_bits uint32 = log2Floor(alphabet_size - 1)
@ -517,12 +537,15 @@ func readSimpleHuffmanSymbols(alphabet_size uint32, max_symbol uint32, s *Reader
	return decoderSuccess
}
/*
Process single decoded symbol code length:

A) reset the repeat variable
B) remember code length (if it is not 0)
C) extend corresponding index-chain
D) reduce the Huffman space
E) update the histogram
*/
func processSingleCodeLength(code_len uint32, symbol *uint32, repeat *uint32, space *uint32, prev_code_len *uint32, symbol_lists symbolList, code_length_histo []uint16, next_symbol []int) {
	*repeat = 0
	if code_len != 0 { /* code_len == 1..15 */
@ -536,16 +559,19 @@ func processSingleCodeLength(code_len uint32, symbol *uint32, repeat *uint32, sp
	(*symbol)++
}
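Step D ("reduce the Huffman space") refers to Kraft-inequality bookkeeping: a complete prefix code with maximum length 15 satisfies the property that the per-symbol terms 2^(15-len) sum to exactly 2^15, so the decoder can subtract each symbol's contribution and reject invalid codes early. A minimal standalone sketch of that idea (not the package's code):

// kraftComplete reports whether the non-zero code lengths form a complete
// prefix code of maximum length 15, mirroring the "space" accounting above.
func kraftComplete(codeLens []uint32) bool {
	space := uint32(1 << 15)
	for _, l := range codeLens {
		if l == 0 {
			continue // symbol not present in the code
		}
		step := uint32(1<<15) >> l
		if step > space {
			return false // code is over-subscribed
		}
		space -= step
	}
	return space == 0 // zero remaining space == complete code
}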
/*
Process repeated symbol code length.

A) Check if it is the extension of previous repeat sequence; if the decoded
value is not BROTLI_REPEAT_PREVIOUS_CODE_LENGTH, then it is a new
symbol-skip
B) Update repeat variable
C) Check if operation is feasible (fits alphabet)
D) For each symbol do the same operations as in ProcessSingleCodeLength

PRECONDITION: code_len == BROTLI_REPEAT_PREVIOUS_CODE_LENGTH or
code_len == BROTLI_REPEAT_ZERO_CODE_LENGTH
*/
func processRepeatedCodeLength(code_len uint32, repeat_delta uint32, alphabet_size uint32, symbol *uint32, repeat *uint32, space *uint32, prev_code_len *uint32, repeat_code_len *uint32, symbol_lists symbolList, code_length_histo []uint16, next_symbol []int) {
	var old_repeat uint32 /* for BROTLI_REPEAT_ZERO_CODE_LENGTH */
	var extra_bits uint32 = 3
@ -688,8 +714,11 @@ func safeReadSymbolCodeLengths(alphabet_size uint32, s *Reader) int {
	return decoderSuccess
}
/*
Reads and decodes 15..18 codes using static prefix code.
Each code is 2..4 bits long. In total 30..72 bits are used.
*/
func readCodeLengthCodeLengths(s *Reader) int {
	var br *bitReader = &s.br
	var num_codes uint32 = s.repeat
@ -737,17 +766,20 @@ func readCodeLengthCodeLengths(s *Reader) int {
	return decoderSuccess
}
/*
Decodes the Huffman tables.

There are 2 scenarios:
A) Huffman code contains only a few symbols (1..4). Those symbols are read
directly; their code lengths are defined by the number of symbols.
For this scenario 4 - 49 bits will be read.

B) 2-phase decoding:
B.1) Small Huffman table is decoded; it is specified with code lengths
encoded with predefined entropy code. 32 - 74 bits are used.
B.2) Decoded table is used to decode code lengths of symbols in resulting
Huffman table. In the worst case 3520 bits are read.
*/
func readHuffmanCode(alphabet_size uint32, max_symbol uint32, table []huffmanCode, opt_table_size *uint32, s *Reader) int {
	var br *bitReader = &s.br
@ -887,8 +919,11 @@ func readBlockLength(table []huffmanCode, br *bitReader) uint32 {
	return kBlockLengthPrefixCode[code].offset + readBits(br, nbits)
}
/*
WARNING: if state is not BROTLI_STATE_READ_BLOCK_LENGTH_NONE, then
reading can't be continued with ReadBlockLength.
*/
func safeReadBlockLength(s *Reader, result *uint32, table []huffmanCode, br *bitReader) bool {
	var index uint32
	if s.substate_read_block_length == stateReadBlockLengthNone {
@ -913,20 +948,24 @@ func safeReadBlockLength(s *Reader, result *uint32, table []huffmanCode, br *bit
	}
}
/*
Transform:

1. initialize list L with values 0, 1,... 255
2. For each input element X:
2.1) let Y = L[X]
2.2) remove X-th element from L
2.3) prepend Y to L
2.4) append Y to output
In most cases max(Y) <= 7, so most of L remains intact.
To reduce the cost of initialization, we reuse L, remember the upper bound
of Y values, and reinitialize only first elements in L.
Most of input values are 0 and 1. To reduce number of branches, we replace
inner for loop with do-while.
*/
func inverseMoveToFrontTransform(v []byte, v_len uint32, state *Reader) {
	var mtf [256]byte
	var i int
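The numbered steps above translate almost directly into code; here is a self-contained sketch of the plain transform, without the package's optimization of reinitializing only the first elements of L (illustrative, import-free):

// inverseMTF applies the inverse move-to-front transform in place:
// for each input X, output Y = L[X], then move Y to the front of L.
func inverseMTF(v []byte) {
	var l [256]byte
	for i := range l {
		l[i] = byte(i) // 1. initialize list L with values 0, 1, ... 255
	}
	for i, x := range v {
		y := l[x]                       // 2.1) let Y = L[X]
		copy(l[1:int(x)+1], l[:int(x)]) // 2.2) remove X-th element from L ...
		l[0] = y                        // 2.3) ... and prepend Y to L
		v[i] = y                        // 2.4) append Y to output
	}
}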
@ -973,14 +1012,17 @@ func huffmanTreeGroupDecode(group *huffmanTreeGroup, s *Reader) int {
	return decoderSuccess
}
/*
Decodes a context map.

Decoding is done in 4 phases:
1) Read auxiliary information (6..16 bits) and allocate memory.
In case of trivial context map, decoding is finished at this phase.
2) Decode Huffman table using ReadHuffmanCode function.
This table will be used for reading context map items.
3) Read context map items; "0" values could be run-length encoded.
4) Optionally, apply InverseMoveToFront transform to the resulting map.
*/
func decodeContextMap(context_map_size uint32, num_htrees *uint32, context_map_arg *[]byte, s *Reader) int {
	var br *bitReader = &s.br
	var result int = decoderSuccess
@ -1121,8 +1163,11 @@ func decodeContextMap(context_map_size uint32, num_htrees *uint32, context_map_a
	}
}
/*
Decodes a command or literal and updates block type ring-buffer.
Reads 3..54 bits.
*/
func decodeBlockTypeAndLength(safe int, s *Reader, tree_type int) bool {
	var max_block_type uint32 = s.num_block_types[tree_type]
	var type_tree []huffmanCode
@ -1207,8 +1252,11 @@ func prepareLiteralDecoding(s *Reader) {
	s.context_lookup = getContextLUT(int(context_mode))
}
/*
Decodes the block type and updates the state for literal context.
Reads 3..54 bits.
*/
func decodeLiteralBlockSwitchInternal(safe int, s *Reader) bool {
	if !decodeBlockTypeAndLength(safe, s, 0) {
		return false
@ -1226,8 +1274,11 @@ func safeDecodeLiteralBlockSwitch(s *Reader) bool {
	return decodeLiteralBlockSwitchInternal(1, s)
}
/*
Block switch for insert/copy length.
Reads 3..54 bits.
*/
func decodeCommandBlockSwitchInternal(safe int, s *Reader) bool {
	if !decodeBlockTypeAndLength(safe, s, 1) {
		return false
@ -1245,8 +1296,11 @@ func safeDecodeCommandBlockSwitch(s *Reader) bool {
	return decodeCommandBlockSwitchInternal(1, s)
}
/*
Block switch for distance codes.
Reads 3..54 bits.
*/
func decodeDistanceBlockSwitchInternal(safe int, s *Reader) bool {
	if !decodeBlockTypeAndLength(safe, s, 2) {
		return false
@ -1276,9 +1330,12 @@ func unwrittenBytes(s *Reader, wrap bool) uint {
	return partial_pos_rb - s.partial_pos_out
}
/*
Dumps output.

Returns BROTLI_DECODER_NEEDS_MORE_OUTPUT only if there is more output to push
and either ring-buffer is as big as window size, or |force| is true.
*/
func writeRingBuffer(s *Reader, available_out *uint, next_out *[]byte, total_out *uint, force bool) int {
	var start []byte
	start = s.ringbuffer[s.partial_pos_out&uint(s.ringbuffer_mask):]
@ -1336,13 +1393,15 @@ func wrapRingBuffer(s *Reader) {
	}
}
/*
Allocates ring-buffer.

s->ringbuffer_size MUST be updated by BrotliCalculateRingBufferSize before
this function is called.

Last two bytes of ring-buffer are initialized to 0, so context calculation
could be done uniformly for the first two and all other positions.
*/
func ensureRingBuffer(s *Reader) bool {
	var old_ringbuffer []byte = s.ringbuffer
	if s.ringbuffer_size == s.new_ringbuffer_size {
@ -1429,12 +1488,14 @@ func copyUncompressedBlockToOutput(available_out *uint, next_out *[]byte, total_
	}
}
/*
Calculates the smallest feasible ring buffer.

If we know the data size is small, do not allocate more ring buffer
size than needed to reduce memory usage.

When this method is called, metablock size and flags MUST be decoded.
*/
func calculateRingBufferSize(s *Reader) {
	var window_size int = 1 << s.window_bits
	var new_ringbuffer_size int = window_size
@ -2060,17 +2121,19 @@ func maxDistanceSymbol(ndirect uint32, npostfix uint32) uint32 {
	}
}
/*
Invariant: input stream is never overconsumed:
- invalid input implies that the whole stream is invalid -> any amount of
input could be read and discarded
- when result is "needs more input", then at least one more byte is REQUIRED
to complete decoding; all input data MUST be consumed by decoder, so
client could swap the input buffer
- when result is "needs more output" decoder MUST ensure that it doesn't
hold more than 7 bits in bit reader; this saves client from swapping input
buffer ahead of time
- when result is "success" decoder MUST return all unused data back to input
buffer; this is possible because the invariant is held on enter
*/
func decoderDecompressStream(s *Reader, available_in *uint, next_in *[]byte, available_out *uint, next_out *[]byte) int {
	var result int = decoderSuccess
	var br *bitReader = &s.br
View File
@ -126,8 +126,11 @@ func remainingInputBlockSize(s *Writer) uint {
	return block_size - uint(delta)
}
/*
Wraps 64-bit input position to 32-bit ring-buffer position preserving
"not-a-first-lap" feature.
*/
func wrapPosition(position uint64) uint32 {
	var result uint32 = uint32(position)
	var gb uint64 = position >> 30
@ -619,11 +622,11 @@ func encoderInitState(s *Writer) {
}
/*
Copies the given input data to the internal ring buffer of the compressor.
No processing of the data occurs at this time and this function can be
called multiple times before calling WriteBrotliData() to process the
accumulated input. At most input_block_size() bytes of input data can be
copied to the ring buffer, otherwise the next WriteBrotliData() will fail.
*/
func copyInputToRingBuffer(s *Writer, input_size uint, input_buffer []byte) {
	var ringbuffer_ *ringBuffer = &s.ringbuffer_
@ -678,8 +681,11 @@ func copyInputToRingBuffer(s *Writer, input_size uint, input_buffer []byte) {
	}
}
/*
Marks all input as processed.
Returns true if position wrapping occurs.
*/
func updateLastProcessedPos(s *Writer) bool {
	var wrapped_last_processed_pos uint32 = wrapPosition(s.last_processed_pos_)
	var wrapped_input_pos uint32 = wrapPosition(s.input_pos_)
@ -717,15 +723,15 @@ func extendLastCommand(s *Writer, bytes *uint32, wrapped_last_processed_pos *uin
}
/*
Processes the accumulated input data and writes
the new output meta-block to s.dest, if one has been
created (otherwise the processed input data is buffered internally).
If |is_last| or |force_flush| is true, an output meta-block is
always created. However, until |is_last| is true the encoder may retain up
to 7 bits of the last byte of output. To force the encoder to dump the remaining
bits use WriteMetadata() to append an empty meta-data block.
Returns false if the size of the input data is larger than
input_block_size().
*/
func encodeData(s *Writer, is_last bool, force_flush bool) bool {
	var delta uint64 = unprocessedInputSize(s)
@ -883,8 +889,11 @@ func encodeData(s *Writer, is_last bool, force_flush bool) bool {
	}
}
/*
Dumps remaining output bits and metadata header to s.bw.
REQUIRED: |block_size| <= (1 << 24).
*/
func writeMetadataHeader(s *Writer, block_size uint) {
	bw := &s.bw
View File
@ -112,21 +112,23 @@ func sortHuffmanTree(v0 huffmanTree, v1 huffmanTree) bool {
	return v0.index_right_or_value_ > v1.index_right_or_value_
}
/*
This function will create a Huffman tree.

The catch here is that the tree cannot be arbitrarily deep.
Brotli specifies a maximum depth of 15 bits for "code trees"
and 7 bits for "code length code trees."
count_limit is the value that is to be faked as the minimum value
and this minimum value is raised until the tree matches the
maximum length requirement.
This algorithm does not perform well for very long data blocks,
especially when population counts are longer than 2**tree_limit, but
we are not planning to use this with extremely long blocks.
See http://en.wikipedia.org/wiki/Huffman_coding
*/
func createHuffmanTree(data []uint32, length uint, tree_limit int, tree []huffmanTree, depth []byte) {
	var count_limit uint32
	var sentinel huffmanTree
@ -297,13 +299,16 @@ func writeHuffmanTreeRepetitionsZeros(repetitions uint, tree_size *uint, tree []
	}
}
/*
Change the population counts in a way that the subsequent
Huffman tree compression, especially its RLE-part, will be more
likely to compress this data more efficiently.

length contains the size of the histogram.
counts contains the population counts.
good_for_rle is a buffer of at least length size
*/
func optimizeHuffmanCountsForRLE(length uint, counts []uint32, good_for_rle []byte) {
	var nonzero_count uint = 0
	var stride uint
@ -481,9 +486,12 @@ func decideOverRLEUse(depth []byte, length uint, use_rle_for_non_zero *bool, use
	*use_rle_for_zero = total_reps_zero > count_reps_zero*2
}
/*
Write a Huffman tree from bit depths into the bit-stream representation
of a Huffman tree. The generated Huffman tree is to be compressed once
more using a Huffman tree
*/
func writeHuffmanTree(depth []byte, length uint, tree_size *uint, tree []byte, extra_bits_data []byte) {
	var previous_value byte = initialRepeatedCodeLength
	var i uint
View File
@ -23,10 +23,13 @@ func log2FloorNonZero(n uint) uint32 {
	return result
}
/*
A lookup table for small values of log2(int) to be used in entropy
computation.
", ".join(["%.16ff" % x for x in [0.0]+[log2(x) for x in range(1, 256)]])
*/
var kLog2Table = []float32{
	0.0000000000000000,
	0.0000000000000000,
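The comment above records the Python one-liner that generated the table; an equivalent throwaway Go generator (assumption: the table is exactly log2 of 0..255, with index 0 pinned to 0, matching the first entries shown):

package main

import (
	"fmt"
	"math"
)

func main() {
	fmt.Println("0.0000000000000000,") // index 0 is defined as 0
	for i := 1; i < 256; i++ {
		fmt.Printf("%.16f,\n", math.Log2(float64(i))) // log2(1) == 0, hence the second zero entry
	}
}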
View File
@ -24,12 +24,15 @@ func hashBytesH10(data []byte) uint32 {
	return h >> (32 - 17)
}
/*
A (forgetful) hash table where each hash bucket contains a binary tree of
sequences whose first 4 bytes share the same hash code.

Each sequence is 128 bytes long and is identified by its starting
position in the input data. The binary tree is sorted by the lexicographic
order of the sequences, and it is also a max-heap with respect to the
starting positions.
*/
type h10 struct {
	hasherCommon
	window_mask_ uint
@ -61,16 +64,19 @@ func rightChildIndexH10(self *h10, pos uint) uint {
	return 2*(pos&self.window_mask_) + 1
}
/*
Stores the hash of the next 4 bytes; in a single tree-traversal, the
hash bucket's binary tree is searched for matches and is re-rooted at the
current position.

If fewer than 128 bytes of data are available, the hash bucket of the
current position is searched for matches, but the state of the hash table
is not changed, since we cannot know the final sorting order of the
current (incomplete) sequence.

This function must be called with increasing cur_ix positions.
*/
func storeAndFindMatchesH10(self *h10, data []byte, cur_ix uint, ring_buffer_mask uint, max_length uint, max_backward uint, best_len *uint, matches []backwardMatch) []backwardMatch {
	var cur_ix_masked uint = cur_ix & ring_buffer_mask
	var max_comp_len uint = brotli_min_size_t(max_length, 128)
@ -152,13 +158,16 @@ func storeAndFindMatchesH10(self *h10, data []byte, cur_ix uint, ring_buffer_mas
	return matches
}
/*
Finds all backward matches of &data[cur_ix & ring_buffer_mask] up to the
length of max_length and stores the position cur_ix in the hash table.

Sets *num_matches to the number of matches found, and stores the found
matches in matches[0] to matches[*num_matches - 1]. The matches will be
sorted by strictly increasing length and (non-strictly) increasing
distance.
*/
func findAllMatchesH10(handle *h10, dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, cur_ix uint, max_length uint, max_backward uint, gap uint, params *encoderParams, matches []backwardMatch) uint {
	var orig_matches []backwardMatch = matches
	var cur_ix_masked uint = cur_ix & ring_buffer_mask
@ -224,9 +233,12 @@ func findAllMatchesH10(handle *h10, dictionary *encoderDictionary, data []byte,
	return uint(-cap(matches) + cap(orig_matches))
}
/*
Stores the hash of the next 4 bytes and re-roots the binary tree at the
current sequence, without returning any matches.

REQUIRES: ix + 128 <= end-of-current-block
*/
func (h *h10) Store(data []byte, mask uint, ix uint) {
	var max_backward uint = h.window_mask_ - windowGap + 1
	/* Maximum distance is window size - 16, see section 9.1. of the spec. */
View File
@ -8,12 +8,15 @@ import "encoding/binary"
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/*
A (forgetful) hash table to the data seen by the compressor, to
help create backward references to previous data.

This is a hash map of fixed size (bucket_size_) to a ring buffer of
fixed size (block_size_). The ring buffer contains the last block_size_
index positions of the given hash key in the compressed data.
*/
func (*h5) HashTypeLength() uint {
	return 4
}
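In miniature, the bucket/ring-buffer layout described above looks like this (field names are illustrative, not the package's exact ones):

// positionIndex keeps, per hash key, the last blockSize positions at which
// that key occurred; when the ring is full the oldest entry is overwritten.
type positionIndex struct {
	num       []uint16 // per-bucket insertion counter
	positions []uint32 // bucketCount * blockSize position slots
	blockSize uint
	blockMask uint // blockSize - 1 (blockSize is a power of two)
}

func (p *positionIndex) add(key uint32, pos uint32) {
	slot := uint(p.num[key]) & p.blockMask
	p.positions[uint(key)*p.blockSize+slot] = pos
	p.num[key]++ // uint16 wraparound is harmless: only the low bits are used
}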
@ -67,8 +70,11 @@ func (h *h5) Prepare(one_shot bool, input_size uint, data []byte) {
	}
}
/*
Look at 4 bytes at &data[ix & mask].
Compute a hash from these, and store the value of ix at that position.
*/
func (h *h5) Store(data []byte, mask uint, ix uint) {
	var num []uint16 = h.num
	var key uint32 = hashBytesH5(data[ix&mask:], h.hash_shift_)
@ -100,17 +106,20 @@ func (h *h5) PrepareDistanceCache(distance_cache []int) {
	prepareDistanceCache(distance_cache, h.params.num_last_distances_to_check)
}
/*
Find a longest backward match of &data[cur_ix] up to the length of
max_length and stores the position cur_ix in the hash table.

REQUIRES: PrepareDistanceCacheH5 must be invoked for current distance cache
values; if this method is invoked repeatedly with the same distance
cache values, it is enough to invoke PrepareDistanceCacheH5 once.

Does not look for matches longer than max_length.
Does not look for matches further away than max_backward.
Writes the best match into |out|.
|out|->score is updated only if a better match is found.
*/
func (h *h5) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
	var num []uint16 = h.num
	var buckets []uint32 = h.buckets
View File
@ -8,12 +8,15 @@ import "encoding/binary"
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/*
A (forgetful) hash table to the data seen by the compressor, to
help create backward references to previous data.

This is a hash map of fixed size (bucket_size_) to a ring buffer of
fixed size (block_size_). The ring buffer contains the last block_size_
index positions of the given hash key in the compressed data.
*/
func (*h6) HashTypeLength() uint {
	return 8
}
@ -69,8 +72,11 @@ func (h *h6) Prepare(one_shot bool, input_size uint, data []byte) {
	}
}
/*
Look at 4 bytes at &data[ix & mask].
Compute a hash from these, and store the value of ix at that position.
*/
func (h *h6) Store(data []byte, mask uint, ix uint) {
	var num []uint16 = h.num
	var key uint32 = hashBytesH6(data[ix&mask:], h.hash_mask_, h.hash_shift_)
@ -102,17 +108,20 @@ func (h *h6) PrepareDistanceCache(distance_cache []int) {
	prepareDistanceCache(distance_cache, h.params.num_last_distances_to_check)
}
/*
Find a longest backward match of &data[cur_ix] up to the length of
max_length and stores the position cur_ix in the hash table.

REQUIRES: PrepareDistanceCacheH6 must be invoked for current distance cache
values; if this method is invoked repeatedly with the same distance
cache values, it is enough to invoke PrepareDistanceCacheH6 once.

Does not look for matches longer than max_length.
Does not look for matches further away than max_backward.
Writes the best match into |out|.
|out|->score is updated only if a better match is found.
*/
func (h *h6) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
	var num []uint16 = h.num
	var buckets []uint32 = h.buckets
View File
@ -44,12 +44,14 @@ type hasherSearchResult struct {
	len_code_delta int
}
/*
kHashMul32 multiplier has these properties:
- The multiplier must be odd. Otherwise we may lose the highest bit.
- No long streaks of ones or zeros.
- There is no effort to ensure that it is a prime, the oddity is enough
for this use.
- The number has been tuned heuristically against compression benchmarks.
*/
const kHashMul32 uint32 = 0x1E35A7BD
const kHashMul64 uint64 = 0x1E35A7BD1E35A7BD
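A minimal sketch of the multiplicative hashing these constants serve (the package's real helpers, such as hashBytesH5 above, may differ in details like masking; this only shows why the top bits are the ones kept):

// hash4 mixes 4 little-endian bytes with kHashMul32; the odd multiplier
// spreads low-byte entropy into the high bits, so the top bits make a good
// bucket key. Needs: import "encoding/binary".
func hash4(data []byte, shift uint) uint32 {
	h := binary.LittleEndian.Uint32(data) * kHashMul32
	return h >> shift
}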
@ -92,22 +94,25 @@ const distanceBitPenalty = 30
/* Score must be positive after applying maximal penalty. */
const scoreBase = (distanceBitPenalty * 8 * 8)
/*
Usually, we always choose the longest backward reference. This function
allows for the exception of that rule.

If we choose a backward reference that is further away, it will
usually be coded with more bits. We approximate this by assuming
log2(distance). If the distance can be expressed in terms of the
last four distances, we use some heuristic constants to estimate
the bits cost. For the first up to four literals we use the bit
cost of the literals from the literal cost model, after that we
use the average bit cost of the cost model.

This function is used to sometimes discard a longer backward reference
when it is not much longer and the bit cost for encoding it is more
than the saved literals.

backward_reference_offset MUST be positive.
*/
func backwardReferenceScore(copy_length uint, backward_reference_offset uint) uint {
	return scoreBase + literalByteScore*uint(copy_length) - distanceBitPenalty*uint(log2FloorNonZero(backward_reference_offset))
}
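Worked example (literalByteScore is defined elsewhere in the package, so it stays symbolic here; distanceBitPenalty is 30, making scoreBase 30*8*8 = 1920): a 16-byte copy at distance 64 scores 1920 + 16*literalByteScore - 30*log2FloorNonZero(64) = 1920 + 16*literalByteScore - 180, while the same copy at distance 2 loses only 30 points, so of two equal-length matches the nearer one always scores higher.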
View File
@ -26,8 +26,11 @@ func (h *hashComposite) StoreLookahead() uint {
	}
}
/*
Composite hasher: This hasher allows combining two other hashers, HASHER_A
and HASHER_B.
*/
type hashComposite struct {
	hasherCommon
	ha hasherHandle
@ -39,10 +42,13 @@ func (h *hashComposite) Initialize(params *encoderParams) {
	h.params = params
}
/*
TODO: Initialization of the hashers is deferred to Prepare (and params
remembered here) because we don't get the one_shot and input_size params
here that are needed to know the memory size of them. Instead provide
those params to all hashers in InitializehashComposite
*/
func (h *hashComposite) Prepare(one_shot bool, input_size uint, data []byte) {
	if h.ha == nil {
		var common_a *hasherCommon
View File
@ -30,12 +30,15 @@ type slot struct {
	next uint16
}
/*
A (forgetful) hash table to the data seen by the compressor, to
help create backward references to previous data.

Hashes are stored in chains which are bucketed to groups. Groups of chains
share a storage "bank". When more than "bank size" chain nodes are added,
oldest nodes are replaced; this way several chains may share a tail.
*/
type hashForgetfulChain struct {
	hasherCommon
@ -105,8 +108,11 @@ func (h *hashForgetfulChain) Prepare(one_shot bool, input_size uint, data []byte
	}
}
/*
Look at 4 bytes at &data[ix & mask]. Compute a hash from these, and prepend
node to corresponding chain; also update tiny_hash for current position.
*/
func (h *hashForgetfulChain) Store(data []byte, mask uint, ix uint) {
	var key uint = h.HashBytes(data[ix&mask:])
	var bank uint = key & (h.numBanks - 1)
@ -146,17 +152,20 @@ func (h *hashForgetfulChain) PrepareDistanceCache(distance_cache []int) {
	prepareDistanceCache(distance_cache, h.numLastDistancesToCheck)
}
/*
Find a longest backward match of &data[cur_ix] up to the length of
max_length and stores the position cur_ix in the hash table.

REQUIRES: PrepareDistanceCachehashForgetfulChain must be invoked for current distance cache
values; if this method is invoked repeatedly with the same distance
cache values, it is enough to invoke PrepareDistanceCachehashForgetfulChain once.

Does not look for matches longer than max_length.
Does not look for matches further away than max_backward.
Writes the best match into |out|.
|out|->score is updated only if a better match is found.
*/
func (h *hashForgetfulChain) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
	var cur_ix_masked uint = cur_ix & ring_buffer_mask
	var min_score uint = out.score
View File
@ -20,9 +20,12 @@ func (*hashLongestMatchQuickly) StoreLookahead() uint {
	return 8
}
/*
HashBytes is the function that chooses the bucket to place
the address in. The HashLongestMatch and hashLongestMatchQuickly
classes have separate, different implementations of hashing.
*/
func (h *hashLongestMatchQuickly) HashBytes(data []byte) uint32 {
	var hash uint64 = ((binary.LittleEndian.Uint64(data) << (64 - 8*h.hashLen)) * kHashMul64)
@ -31,11 +34,14 @@ func (h *hashLongestMatchQuickly) HashBytes(data []byte) uint32 {
	return uint32(hash >> (64 - h.bucketBits))
}
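A self-contained illustration of that bucket computation (hashLen == 5 and bucketBits == 16 are assumed example parameters here, not necessarily the configured ones):

package main

import (
	"encoding/binary"
	"fmt"
)

const kHashMul64 uint64 = 0x1E35A7BD1E35A7BD

func main() {
	data := []byte("exampledata") // needs at least 8 bytes for Uint64
	hashLen, bucketBits := uint(5), uint(16)
	// keep only the low hashLen bytes, multiply, then take the top bits
	h := (binary.LittleEndian.Uint64(data) << (64 - 8*hashLen)) * kHashMul64
	fmt.Println(h >> (64 - bucketBits)) // bucket index in [0, 1<<16)
}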
/*
A (forgetful) hash table to the data seen by the compressor, to
help create backward references to previous data.

This is a hash map of fixed size (1 << 16). Starting from the
given index, bucketSweep buckets are used to store values of a key.
*/
type hashLongestMatchQuickly struct {
	hasherCommon
@ -73,9 +79,12 @@ func (h *hashLongestMatchQuickly) Prepare(one_shot bool, input_size uint, data [
	}
}
/*
Look at 5 bytes at &data[ix & mask].
Compute a hash from these, and store the value somewhere within
[ix .. ix+3].
*/
func (h *hashLongestMatchQuickly) Store(data []byte, mask uint, ix uint) {
	var key uint32 = h.HashBytes(data[ix&mask:])
	var off uint32 = uint32(ix>>3) % uint32(h.bucketSweep)
@ -104,14 +113,17 @@ func (h *hashLongestMatchQuickly) StitchToPreviousBlock(num_bytes uint, position
func (*hashLongestMatchQuickly) PrepareDistanceCache(distance_cache []int) {
}
/*
Find a longest backward match of &data[cur_ix & ring_buffer_mask]
up to the length of max_length and stores the position cur_ix in the
hash table.

Does not look for matches longer than max_length.
Does not look for matches further away than max_backward.
Writes the best match into |out|.
|out|->score is updated only if a better match is found.
*/
func (h *hashLongestMatchQuickly) FindLongestMatch(dictionary *encoderDictionary, data []byte, ring_buffer_mask uint, distance_cache []int, cur_ix uint, max_length uint, max_backward uint, gap uint, max_distance uint, out *hasherSearchResult) {
	var best_len_in uint = out.len
	var cur_ix_masked uint = cur_ix & ring_buffer_mask
View File
@ -13,9 +13,12 @@ const kRollingHashMul32 uint32 = 69069
const kInvalidPosHashRolling uint32 = 0xffffffff
/*
This hasher uses a longer forward length, but returning a higher value here
will hurt compression by the main hasher when combined with a composite
hasher. The hasher tests for forward itself instead.
*/
func (*hashRolling) HashTypeLength() uint {
	return 4
}
@ -24,8 +27,11 @@ func (*hashRolling) StoreLookahead() uint {
	return 4
}
/*
Computes a code from a single byte. A lookup table of 256 values could be
used, but simply adding 1 works about as well.
*/
func (*hashRolling) HashByte(b byte) uint32 {
	return uint32(b) + 1
}
@ -38,8 +44,11 @@ func (h *hashRolling) HashRollingFunction(state uint32, add byte, rem byte, fact
	return uint32(factor*state + h.HashByte(add) - factor_remove*h.HashByte(rem))
}
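How such an update is typically used: if factor_remove equals factor^n (mod 2^32) for a window of length n, the state can slide one byte at a time in O(1) instead of rehashing each window. A standalone sketch under that assumption (names are illustrative, not the package's):

// rollingHashes returns the hash of every length-n window of data,
// reusing the previous state via the add/remove update shown above.
func rollingHashes(data []byte, n int) []uint32 {
	const factor = kRollingHashMul32 // 69069, defined above
	hashByte := func(b byte) uint32 { return uint32(b) + 1 }
	var state, factorRemove uint32 = 0, 1
	for i := 0; i < n; i++ {
		state = state*factor + hashByte(data[i])
		factorRemove *= factor // equals factor^n after the loop
	}
	out := []uint32{state}
	for i := n; i < len(data); i++ {
		state = factor*state + hashByte(data[i]) - factorRemove*hashByte(data[i-n])
		out = append(out, state)
	}
	return out
}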
/*
Rolling hash for long distance long string matches. Stores one position
per bucket, bucket key is computed over a long region.
*/
type hashRolling struct {
	hasherCommon
View File
@ -10,8 +10,11 @@ package brotli
const huffmanMaxCodeLength = 15
/*
Maximum possible Huffman table size for an alphabet size of (index * 32),
max code length 15 and root table bits 8.
*/
var kMaxHuffmanTableSize = []uint16{
	256,
	402,
@ -363,9 +366,12 @@ var kReverseBits = [1 << reverseBitsMax]byte{
const reverseBitsLowest = (uint64(1) << (reverseBitsMax - 1 + reverseBitsBase))
/*
Returns reverse(num >> BROTLI_REVERSE_BITS_BASE, BROTLI_REVERSE_BITS_MAX),
where reverse(value, len) is the bit-wise reversal of the len least
significant bits of value.
*/
func reverseBits8(num uint64) uint64 {
	return uint64(kReverseBits[num])
}
@ -382,9 +388,12 @@ func replicateValue(table []huffmanCode, step int, end int, code huffmanCode) {
	}
}
/*
Returns the table width of the next 2nd level table. |count| is the histogram
of bit lengths for the remaining symbols, |len| is the code length of the
next processed symbol.
*/
func nextTableBitSize(count []uint16, len int, root_bits int) int {
	var left int = 1 << uint(len-root_bits)
	for len < huffmanMaxCodeLength {
View File
@ -268,8 +268,11 @@ func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParam
const maxStaticContexts = 13
/*
Greedy block splitter for one block category (literal, command or distance).
Gathers histograms for all context buckets.
*/
type contextBlockSplitter struct {
	alphabet_size_ uint
	num_contexts_ uint
@ -328,10 +331,13 @@ func initContextBlockSplitter(self *contextBlockSplitter, alphabet_size uint, nu
	self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
}

/*
Does either of three things:

	(1) emits the current block with a new block type;
	(2) emits the current block with the type of the second last block;
	(3) merges the current block with the last block.
*/
func contextBlockSplitterFinishBlock(self *contextBlockSplitter, is_final bool) {
	var split *blockSplit = self.split_
	var num_contexts uint = self.num_contexts_
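The same three-way decision drives every splitter below. A rough standalone sketch of the idea, using a plain Shannon-entropy cost model (an assumption for illustration; the vendored code uses brotli's histogram cost estimates and also considers the second-to-last block type):

package main

import (
	"fmt"
	"math"
)

// bitsEntropy returns the Shannon cost in bits of emitting a histogram.
func bitsEntropy(hist []float64) float64 {
	var total, bits float64
	for _, c := range hist {
		total += c
	}
	for _, c := range hist {
		if c > 0 {
			bits -= c * math.Log2(c/total)
		}
	}
	return bits
}

func main() {
	last := []float64{90, 5, 5}   // histogram of the previous block
	curr := []float64{10, 45, 45} // histogram of the block being finished

	merged := make([]float64, len(last))
	for i := range last {
		merged[i] = last[i] + curr[i]
	}

	separate := bitsEntropy(last) + bitsEntropy(curr)
	combined := bitsEntropy(merged)
	// If merging costs no more than keeping the blocks apart, merge;
	// otherwise emit the current block with its own block type.
	fmt.Printf("separate=%.1f bits, merged=%.1f bits, merge=%v\n",
		separate, combined, combined <= separate)
}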
@ -459,8 +465,11 @@ func contextBlockSplitterFinishBlock(self *contextBlockSplitter, is_final bool)
	}
}

/*
Adds the next symbol to the current block type and context. When the
current block reaches the target size, decides on merging the block.
*/
func contextBlockSplitterAddSymbol(self *contextBlockSplitter, symbol uint, context uint) {
	histogramAddLiteral(&self.histograms_[self.curr_histogram_ix_+context], symbol)
	self.block_size_++

View File

@ -58,10 +58,13 @@ func initBlockSplitterCommand(self *blockSplitterCommand, alphabet_size uint, mi
	self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
}

/*
Does either of three things:

	(1) emits the current block with a new block type;
	(2) emits the current block with the type of the second last block;
	(3) merges the current block with the last block.
*/
func blockSplitterFinishBlockCommand(self *blockSplitterCommand, is_final bool) {
	var split *blockSplit = self.split_
	var last_entropy []float64 = self.last_entropy_[:]
@ -154,8 +157,11 @@ func blockSplitterFinishBlockCommand(self *blockSplitterCommand, is_final bool)
	}
}

/*
Adds the next symbol to the current histogram. When the current histogram
reaches the target size, decides on merging the block.
*/
func blockSplitterAddSymbolCommand(self *blockSplitterCommand, symbol uint) {
	histogramAddCommand(&self.histograms_[self.curr_histogram_ix_], symbol)
	self.block_size_++

View File

@ -58,10 +58,13 @@ func initBlockSplitterDistance(self *blockSplitterDistance, alphabet_size uint,
	self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
}

/*
Does either of three things:

	(1) emits the current block with a new block type;
	(2) emits the current block with the type of the second last block;
	(3) merges the current block with the last block.
*/
func blockSplitterFinishBlockDistance(self *blockSplitterDistance, is_final bool) {
	var split *blockSplit = self.split_
	var last_entropy []float64 = self.last_entropy_[:]
@ -154,8 +157,11 @@ func blockSplitterFinishBlockDistance(self *blockSplitterDistance, is_final bool
	}
}

/*
Adds the next symbol to the current histogram. When the current histogram
reaches the target size, decides on merging the block.
*/
func blockSplitterAddSymbolDistance(self *blockSplitterDistance, symbol uint) {
	histogramAddDistance(&self.histograms_[self.curr_histogram_ix_], symbol)
	self.block_size_++

View File

@ -58,10 +58,13 @@ func initBlockSplitterLiteral(self *blockSplitterLiteral, alphabet_size uint, mi
	self.last_histogram_ix_[0] = self.last_histogram_ix_[1]
}

/*
Does either of three things:

	(1) emits the current block with a new block type;
	(2) emits the current block with the type of the second last block;
	(3) merges the current block with the last block.
*/
func blockSplitterFinishBlockLiteral(self *blockSplitterLiteral, is_final bool) {
	var split *blockSplit = self.split_
	var last_entropy []float64 = self.last_entropy_[:]
@ -154,8 +157,11 @@ func blockSplitterFinishBlockLiteral(self *blockSplitterLiteral, is_final bool)
	}
}

/*
Adds the next symbol to the current histogram. When the current histogram
reaches the target size, decides on merging the block.
*/
func blockSplitterAddSymbolLiteral(self *blockSplitterLiteral, symbol uint) {
	histogramAddLiteral(&self.histograms_[self.curr_histogram_ix_], symbol)
	self.block_size_++

View File

@ -9,8 +9,11 @@ package brotli
/* Functions for encoding of integers into prefix codes the amount of extra
   bits, and the actual values of the extra bits. */

/*
Here distance_code is an intermediate code, i.e. one of the special codes or
the actual distance increased by BROTLI_NUM_DISTANCE_SHORT_CODES - 1.
*/
func prefixEncodeCopyDistance(distance_code uint, num_direct_codes uint, postfix_bits uint, code *uint16, extra_bits *uint32) {
	if distance_code < numDistanceShortCodes+num_direct_codes {
		*code = uint16(distance_code)

View File

@ -24,8 +24,11 @@ const minQualityForHqContextModeling = 7
const minQualityForHqBlockSplitting = 10

/*
For quality below MIN_QUALITY_FOR_BLOCK_SPLIT there is no block splitting,
so we buffer at most this much literals and commands.
*/
const maxNumDelayedSymbols = 0x2FFF

/* Returns hash-table size for quality levels 0 and 1. */
@ -102,11 +105,14 @@ func computeLgBlock(params *encoderParams) int {
	return lgblock
}

/*
Returns log2 of the size of main ring buffer area.
Allocate at least lgwin + 1 bits for the ring buffer so that the newly
added block fits there completely and we still get lgwin bits and at least
read_block_size_bits + 1 bits because the copy tail length needs to be
smaller than ring-buffer size.
*/
func computeRbBits(params *encoderParams) int {
	return 1 + brotli_max_int(int(params.lgwin), params.lgblock)
}
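A quick worked check of that sizing rule (the values here are illustrative, not from the commit): with lgwin = 22 and lgblock = 16 it yields 1 + max(22, 16) = 23, i.e. an 8 MiB ring-buffer area.

package main

import "fmt"

// computeRbBits mirrors the one-line rule above: one extra bit over the
// larger of the window and block sizes, so a new block always fits.
func computeRbBits(lgwin, lgblock int) int {
	if lgblock > lgwin {
		return 1 + lgblock
	}
	return 1 + lgwin
}

func main() {
	fmt.Println(computeRbBits(22, 16)) // 23, i.e. a 1<<23 = 8 MiB area
}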
@ -116,12 +122,15 @@ func maxMetablockSize(params *encoderParams) uint {
	return uint(1) << uint(bits)
}

/*
When searching for backward references and have not seen matches for a long
time, we can skip some match lookups. Unsuccessful match lookups are very
expensive and this kind of a heuristic speeds up compression quite a lot.
At first 8 byte strides are taken and every second byte is put to hasher.
After 4x more literals, stride by 16 bytes and put every 4-th byte to hasher.
Applied only to qualities 2 to 9.
*/
func literalSpreeLengthForSparseSearch(params *encoderParams) uint {
	if params.quality < 9 {
		return 64
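A loose standalone sketch of that skipping heuristic (a hypothetical simplification; the thresholds and the hasher wiring in the real encoder differ):

package main

import "fmt"

// strideFor returns how sparsely positions are inserted into the hasher
// after sinceMatch literals without a match (hypothetical thresholds).
func strideFor(sinceMatch, spree int) int {
	switch {
	case sinceMatch > 5*spree:
		return 16 // later escalation: 16-byte strides
	case sinceMatch > spree:
		return 8 // first escalation: 8-byte strides
	default:
		return 1 // normal operation: hash every position
	}
}

func main() {
	for _, n := range []int{10, 100, 400} {
		fmt.Println(n, "literals since match -> stride", strideFor(n, 64))
	}
}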

View File

@ -6,15 +6,18 @@ package brotli
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/*
A ringBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of
data in a circular manner: writing a byte writes it to:
`position() % (1 << window_bits)'.
For convenience, the ringBuffer array contains another copy of the
first `1 << tail_bits' bytes:
buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits),
and another copy of the last two bytes:
buffer_[-1] == buffer_[(1 << window_bits) - 1] and
buffer_[-2] == buffer_[(1 << window_bits) - 2].
*/
type ringBuffer struct {
	size_ uint32
	mask_ uint32
@ -41,8 +44,11 @@ func ringBufferSetup(params *encoderParams, rb *ringBuffer) {
const kSlackForEightByteHashingEverywhere uint = 7

/*
Allocates or re-allocates data_ to the given length plus some slack
region before and after. Fills the slack regions with zeros.
*/
func ringBufferInitBuffer(buflen uint32, rb *ringBuffer) {
	var new_data []byte
	var i uint
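The mirroring described in the ringBuffer comment is easy to model standalone (window_bits = 4 and tail_bits = 2 below are arbitrary demo values, not taken from the encoder):

package main

import "fmt"

func main() {
	const windowBits, tailBits = 4, 2
	size := 1 << windowBits
	tail := 1 << tailBits

	// Allocate the window plus the mirrored tail region.
	buf := make([]byte, size+tail)
	write := func(pos int, b byte) {
		i := pos % size
		buf[i] = b
		if i < tail {
			buf[i+size] = b // keep the mirror in sync
		}
	}

	for pos := 0; pos < 40; pos++ {
		write(pos, byte('a'+pos%26))
	}
	// buf[i] == buf[i+size] for i < tail, so reads may run past the
	// wrap-around point without an explicit modulo.
	fmt.Println(buf[0] == buf[size], buf[1] == buf[size+1]) // true true
}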

View File

@ -14,10 +14,11 @@ const maxStaticDictionaryMatchLen = 37
const kInvalidMatch uint32 = 0xFFFFFFF

/*
Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
func hash(data []byte) uint32 {
	var h uint32 = binary.LittleEndian.Uint32(data) * kDictHashMul32

View File

@ -10,9 +10,12 @@ package brotli
const kMinUTF8Ratio float64 = 0.75

/*
Returns 1 if at least min_fraction of the bytes between pos and
pos + length in the (data, mask) ring-buffer is UTF8-encoded, otherwise
returns 0.
*/
func parseAsUTF8(symbol *int, input []byte, size uint) uint {
	/* ASCII */
	if input[0]&0x80 == 0 {
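A standalone approximation of that min-fraction check using the standard library (the vendored code hand-rolls the UTF-8 parsing and works over the ring buffer instead):

package main

import (
	"fmt"
	"unicode/utf8"
)

// isMostlyUTF8 reports whether at least minFraction of data decodes as
// valid UTF-8 sequences (counting bytes covered by valid runes).
func isMostlyUTF8(data []byte, minFraction float64) bool {
	good := 0
	for i := 0; i < len(data); {
		r, size := utf8.DecodeRune(data[i:])
		if r != utf8.RuneError || size > 1 {
			good += size
		}
		i += size
	}
	return float64(good) >= minFraction*float64(len(data))
}

func main() {
	fmt.Println(isMostlyUTF8([]byte("héllo wörld"), 0.75))        // true
	fmt.Println(isMostlyUTF8([]byte{0xff, 0xfe, 0xfd, 'a'}, 0.75)) // false
}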

View File

@ -1,12 +1,12 @@
package storm

import (
	"github.com/asdine/storm/v3/index"
	"github.com/asdine/storm/v3/q"
	bolt "go.etcd.io/bbolt"
	"reflect"
	"sort"
	"time"
)

type item struct {

View File

@ -1,3 +1,4 @@
//go:build !go1.8
// +build !go1.8

package storm

View File

@ -1,3 +1,4 @@
//go:build go1.8
// +build go1.8

package storm

View File

@ -18,6 +18,7 @@
// tag is deprecated and thus should not be used.
// Go versions prior to 1.4 are disabled because they use a different layout
// for interfaces which make the implementation of unsafeReflectValue more complex.
//go:build !js && !appengine && !safe && !disableunsafe && go1.4
// +build !js,!appengine,!safe,!disableunsafe,go1.4

package spew

View File

@ -16,6 +16,7 @@
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line.  The "disableunsafe"
// tag is deprecated and thus should not be used.
//go:build js || appengine || safe || disableunsafe || !go1.4
// +build js appengine safe disableunsafe !go1.4

package spew

View File

@ -254,15 +254,15 @@ pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:

	- Pointers are dereferenced and followed
	- Circular data structures are detected and handled properly
	- Custom Stringer/error interfaces are optionally invoked, including
	  on unexported types
	- Custom types which only implement the Stringer/error interfaces via
	  a pointer receiver are optionally invoked when passing non-pointer
	  variables
	- Byte arrays and slices are dumped like the hexdump -C command which
	  includes offsets, byte values in hex, and ASCII output

The configuration options are controlled by modifying the public members
of c.  See ConfigState for options documentation.
@ -295,12 +295,12 @@ func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{})
// NewDefaultConfig returns a ConfigState with the following default settings.
//
//	Indent: " "
//	MaxDepth: 0
//	DisableMethods: false
//	DisablePointerMethods: false
//	ContinueOnMethod: false
//	SortKeys: false
func NewDefaultConfig() *ConfigState {
	return &ConfigState{Indent: " "}
}

View File

@ -21,35 +21,36 @@ debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types are as follows:

	- Pointers are dereferenced and followed
	- Circular data structures are detected and handled properly
	- Custom Stringer/error interfaces are optionally invoked, including
	  on unexported types
	- Custom types which only implement the Stringer/error interfaces via
	  a pointer receiver are optionally invoked when passing non-pointer
	  variables
	- Byte arrays and slices are dumped like the hexdump -C command which
	  includes offsets, byte values in hex, and ASCII output (only when using
	  Dump style)

There are two different approaches spew allows for dumping Go data structures:

	- Dump style which prints with newlines, customizable indentation,
	  and additional debug information such as types and all pointer addresses
	  used to indirect to the final value
	- A custom Formatter interface that integrates cleanly with the standard fmt
	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
	  similar to the default %v while providing the additional functionality
	  outlined above and passing unsupported format verbs such as %x and %q
	  along to fmt

# Quick Start

This section demonstrates how to quickly get started with spew.  See the
sections below for further details on formatting and configuration options.

To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:

	spew.Dump(myVar1, myVar2, ...)
	spew.Fdump(someWriter, myVar1, myVar2, ...)
	str := spew.Sdump(myVar1, myVar2, ...)
@ -58,12 +59,13 @@ Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):

	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)

# Configuration Options

Configuration of spew is handled by fields in the ConfigState type.  For
convenience, all of the top-level functions use a global state available
@ -74,51 +76,52 @@ equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.

The following configuration options are available:

	- Indent
		String to use for each indentation level for Dump functions.
		It is a single space by default.  A popular alternative is "\t".

	- MaxDepth
		Maximum number of levels to descend into nested data structures.
		There is no limit by default.

	- DisableMethods
		Disables invocation of error and Stringer interface methods.
		Method invocation is enabled by default.

	- DisablePointerMethods
		Disables invocation of error and Stringer interface methods on types
		which only accept pointer receivers from non-pointer variables.
		Pointer method invocation is enabled by default.

	- DisablePointerAddresses
		DisablePointerAddresses specifies whether to disable the printing of
		pointer addresses.  This is useful when diffing data structures in tests.

	- DisableCapacities
		DisableCapacities specifies whether to disable the printing of
		capacities for arrays, slices, maps and channels.  This is useful when
		diffing data structures in tests.

	- ContinueOnMethod
		Enables recursion into types after invoking error and Stringer interface
		methods.  Recursion after method invocation is disabled by default.

	- SortKeys
		Specifies map keys should be sorted before being printed.  Use
		this to have a more deterministic, diffable output.  Note that
		only native types (bool, int, uint, floats, uintptr and string)
		and types which implement error or Stringer interfaces are
		supported with other types sorted according to the
		reflect.Value.String() output which guarantees display
		stability.  Natural map order is used by default.

	- SpewKeys
		Specifies that, as a last resort attempt, map keys should be
		spewed to strings and sorted by those strings.  This is only
		considered if SortKeys is true.

# Dump Usage

Simply call spew.Dump with a list of variables you want to dump:
@ -133,7 +136,7 @@ A third option is to call spew.Sdump to get the formatted output as a string:
	str := spew.Sdump(myVar1, myVar2, ...)

# Sample Dump Output

See the Dump example for details on the setup of the types and variables being
shown here.
@ -150,13 +153,14 @@ shown here.
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.

	([]uint8) (len=32 cap=32) {
	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
	 00000020  31 32                                             |12|
	}
# Custom Formatter

Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
@ -170,7 +174,7 @@ standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

# Custom Formatter Usage

The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Printf. The
@ -184,15 +188,17 @@ functions have syntax you are most likely already familiar with:
See the Index for the full list of convenience functions.

# Sample Formatter Output

Double pointer to a uint8:

	%v: <**>5
	%+v: <**>(0xf8400420d0->0xf8400420c8)5
	%#v: (**uint8)5
	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5

Pointer to circular struct with a uint8 field and a pointer to itself:

	%v: <*>{1 <*><shown>}
	%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
	%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
@ -201,7 +207,7 @@ Pointer to circular struct with a uint8 field and a pointer to itself:
See the Printf example for details on the setup of variables being shown
here.

# Errors

Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information

View File

@ -488,15 +488,15 @@ pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:

	- Pointers are dereferenced and followed
	- Circular data structures are detected and handled properly
	- Custom Stringer/error interfaces are optionally invoked, including
	  on unexported types
	- Custom types which only implement the Stringer/error interfaces via
	  a pointer receiver are optionally invoked when passing non-pointer
	  variables
	- Byte arrays and slices are dumped like the hexdump -C command which
	  includes offsets, byte values in hex, and ASCII output

The configuration options are controlled by an exported package global,
spew.Config.  See ConfigState for options documentation.

View File

@ -58,9 +58,9 @@ func Invert(img image.Image) *image.NRGBA {
// The percentage = -100 gives the image with the saturation value zeroed for each pixel (grayscale).
//
// Examples:
//
//	dstImage = imaging.AdjustSaturation(srcImage, 25)  // Increase image saturation by 25%.
//	dstImage = imaging.AdjustSaturation(srcImage, -10) // Decrease image saturation by 10%.
func AdjustSaturation(img image.Image, percentage float64) *image.NRGBA {
	percentage = math.Min(math.Max(percentage, -100), 100)
	multiplier := 1 + percentage/100
@ -84,7 +84,6 @@ func AdjustSaturation(img image.Image, percentage float64) *image.NRGBA {
//
//	dstImage = imaging.AdjustContrast(srcImage, -10) // Decrease image contrast by 10%.
//	dstImage = imaging.AdjustContrast(srcImage, 20)  // Increase image contrast by 20%.
func AdjustContrast(img image.Image, percentage float64) *image.NRGBA {
	percentage = math.Min(math.Max(percentage, -100.0), 100.0)
	lut := make([]uint8, 256)
@ -112,7 +111,6 @@ func AdjustContrast(img image.Image, percentage float64) *image.NRGBA {
//
//	dstImage = imaging.AdjustBrightness(srcImage, -15) // Decrease image brightness by 15%.
//	dstImage = imaging.AdjustBrightness(srcImage, 10)  // Increase image brightness by 10%.
func AdjustBrightness(img image.Image, percentage float64) *image.NRGBA {
	percentage = math.Min(math.Max(percentage, -100.0), 100.0)
	lut := make([]uint8, 256)
@ -132,7 +130,6 @@ func AdjustBrightness(img image.Image, percentage float64) *image.NRGBA {
// Example:
//
//	dstImage = imaging.AdjustGamma(srcImage, 0.7)
func AdjustGamma(img image.Image, gamma float64) *image.NRGBA {
	e := 1.0 / math.Max(gamma, 0.0001)
	lut := make([]uint8, 256)
@ -154,7 +151,6 @@ func AdjustGamma(img image.Image, gamma float64) *image.NRGBA {
//
//	dstImage = imaging.AdjustSigmoid(srcImage, 0.5, 3.0)  // Increase the contrast.
//	dstImage = imaging.AdjustSigmoid(srcImage, 0.5, -3.0) // Decrease the contrast.
func AdjustSigmoid(img image.Image, midpoint, factor float64) *image.NRGBA {
	if factor == 0 {
		return Clone(img)
@ -226,7 +222,6 @@ func adjustLUT(img image.Image, lut []uint8) *image.NRGBA {
//			return color.NRGBA{uint8(r), c.G, c.B, c.A}
//		}
//	)
func AdjustFunc(img image.Image, fn func(c color.NRGBA) color.NRGBA) *image.NRGBA {
	src := newScanner(img)
	dst := image.NewNRGBA(image.Rect(0, 0, src.w, src.h))

View File

@ -15,7 +15,6 @@ func gaussianBlurKernel(x, sigma float64) float64 {
// Example:
//
//	dstImage := imaging.Blur(srcImage, 3.5)
func Blur(img image.Image, sigma float64) *image.NRGBA {
	if sigma <= 0 {
		return Clone(img)
@ -137,7 +136,6 @@ func blurVertical(img image.Image, kernel []float64) *image.NRGBA {
// Example:
//
//	dstImage := imaging.Sharpen(srcImage, 3.5)
func Sharpen(img image.Image, sigma float64) *image.NRGBA {
	if sigma <= 0 {
		return Clone(img)

View File

@ -91,7 +91,6 @@ func Decode(r io.Reader, opts ...DecodeOption) (image.Image, error) {
//
//	// Load an image and transform it depending on the EXIF orientation tag (if present).
//	img, err := imaging.Open("test.jpg", imaging.AutoOrientation(true))
func Open(filename string, opts ...DecodeOption) (image.Image, error) {
	file, err := fs.Open(filename)
	if err != nil {
@ -264,7 +263,6 @@ func Encode(w io.Writer, img image.Image, format Format, opts ...EncodeOption) e
//
//	// Save the image as JPEG with optional quality parameter set to 80.
//	err := imaging.Save(img, "out.jpg", imaging.JPEGQuality(80))
func Save(img image.Image, filename string, opts ...EncodeOption) (err error) {
	f, err := FormatFromFilename(filename)
	if err != nil {

View File

@ -61,7 +61,6 @@ func precomputeWeights(dstSize, srcSize int, filter ResampleFilter) [][]indexWei
// Example:
//
//	dstImage := imaging.Resize(srcImage, 800, 600, imaging.Lanczos)
func Resize(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
	dstW, dstH := width, height
	if dstW < 0 || dstH < 0 {
@ -218,7 +217,6 @@ func resizeNearest(img image.Image, width, height int) *image.NRGBA {
// Example:
//
//	dstImage := imaging.Fit(srcImage, 800, 600, imaging.Lanczos)
func Fit(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
	maxW, maxH := width, height
@ -259,7 +257,6 @@ func Fit(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA
// Example:
//
//	dstImage := imaging.Fill(srcImage, 800, 600, imaging.Center, imaging.Lanczos)
func Fill(img image.Image, width, height int, anchor Anchor, filter ResampleFilter) *image.NRGBA {
	dstW, dstH := width, height
@ -338,7 +335,6 @@ func resizeAndCrop(img image.Image, width, height int, anchor Anchor, filter Res
// Example:
//
//	dstImage := imaging.Thumbnail(srcImage, 100, 100, imaging.Lanczos)
func Thumbnail(img image.Image, width, height int, filter ResampleFilter) *image.NRGBA {
	return Fill(img, width, height, Center, filter)
}
@ -365,7 +361,6 @@ func Thumbnail(img image.Image, width, height int, filter ResampleFilter) *image
//
//   - NearestNeighbor
//     Fastest resampling filter, no antialiasing.
type ResampleFilter struct {
	Support float64
	Kernel  func(float64) float64

View File

@ -176,7 +176,6 @@ func PasteCenter(background, img image.Image) *image.NRGBA {
//
//	// Blend two opaque images of the same size.
//	dstImage := imaging.Overlay(imageOne, imageTwo, image.Pt(0, 0), 0.5)
func Overlay(background, img image.Image, pos image.Point, opacity float64) *image.NRGBA {
	opacity = math.Min(math.Max(opacity, 0.0), 1.0) // Ensure 0.0 <= opacity <= 1.0.
	dst := Clone(background)

View File

@ -15,6 +15,7 @@ import "github.com/dsnet/compress/bzip2/internal/sais"
// Transform, such that a SA can be converted to a BWT in O(n) time.
//
// References:
//
//	http://www.hpl.hp.com/techreports/Compaq-DEC/SRC-RR-124.pdf
//	https://github.com/cscott/compressjs/blob/master/lib/BWT.js
//	https://www.quora.com/How-can-I-optimize-burrows-wheeler-transform-and-inverse-transform-to-work-in-O-n-time-O-n-space

View File

@ -5,9 +5,11 @@
// Package bzip2 implements the BZip2 compressed data format.
//
// Canonical C implementation:
//
//	http://bzip.org
//
// Unofficial format specification:
//
//	https://github.com/dsnet/compress/blob/master/doc/bzip2-format.pdf
package bzip2

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

//go:build !gofuzz
// +build !gofuzz

// This file exists to suppress fuzzing details from release builds.

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

//go:build gofuzz
// +build gofuzz

// This file exists to export internal implementation details for fuzz testing.

View File

@ -14,6 +14,7 @@ import "github.com/dsnet/compress/internal/errors"
// normal two's complement arithmetic. The methodology for doing so is below.
//
// Assuming the following:
//
//	num: The value being encoded by RLE encoding.
//	run: A sequence of RUNA and RUNB symbols represented as a binary integer,
//	     where RUNA is the 0 bit, RUNB is the 1 bit, and least-significant RUN
@ -21,6 +22,7 @@ import "github.com/dsnet/compress/internal/errors"
//	cnt: The number of RUNA and RUNB symbols.
//
// Then the RLE encoding used by bzip2 has this mathematical property:
//
//	num+1 == (1<<cnt) | run
type moveToFront struct {
	dictBuf [256]uint8
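That property is easy to sanity-check. Encoding num = 10: num+1 = 11 = 0b1011, so cnt = 3 and run = 0b011, i.e. the symbols RUNB, RUNB, RUNA reading from least significant. A small round-trip sketch (standalone, not the vendored implementation):

package main

import "fmt"

// encodeRun converts num into a sequence of RUNA (0) / RUNB (1) symbols
// using the bzip2 property num+1 == (1<<cnt) | run.
func encodeRun(num uint) []uint {
	v := num + 1
	var syms []uint
	for v > 1 { // stop before the implicit leading 1 bit
		syms = append(syms, v&1)
		v >>= 1
	}
	return syms
}

// decodeRun is the inverse: reassemble run and cnt, then num = ((1<<cnt)|run)-1.
func decodeRun(syms []uint) uint {
	var run, cnt uint
	for i, s := range syms {
		run |= s << uint(i)
		cnt++
	}
	return ((1 << cnt) | run) - 1
}

func main() {
	syms := encodeRun(10)
	fmt.Println(syms, decodeRun(syms)) // [1 1 0] 10
}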

View File

@ -32,7 +32,6 @@ const (
//	11110  <=> 4
//	111110 <=> 5
//	111111 <=> 6	Invalid tree index, so should fail
var encSel, decSel = func() (e prefix.Encoder, d prefix.Decoder) {
	var selCodes [maxNumTrees + 1]prefix.PrefixCode
	for i := range selCodes {
@ -150,6 +149,7 @@ func (pw *prefixWriter) WritePrefixCodes(codes []prefix.PrefixCodes, trees []pre
// handleDegenerateCodes converts a degenerate tree into a canonical tree.
//
// For example, when the input is an under-subscribed tree:
//
//	input: []PrefixCode{
//		{Sym: 0, Len: 3},
//		{Sym: 1, Len: 4},
@ -165,6 +165,7 @@ func (pw *prefixWriter) WritePrefixCodes(codes []prefix.PrefixCodes, trees []pre
//	}
//
// For example, when the input is an over-subscribed tree:
//
//	input: []PrefixCode{
//		{Sym: 0, Len: 1},
//		{Sym: 1, Len: 3},

View File

@ -17,9 +17,11 @@ var rleDone = errorf(errors.Unknown, "RLE1 stage is completed")
// run lengths of 256..259. The decoder can handle the latter case.
//
// For example, if the input was:
//
//	input:  "AAAAAAABBBBCCCD"
//
// Then the output will be:
//
//	output: "AAAA\x03BBBB\x00CCCD"
type runLengthEncoding struct {
	buf []byte
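A minimal sketch of that first RLE stage which reproduces the example above (assuming only the basic 4..255 extra-count handling; the vendored coder also deals with the 256..259 corner case mentioned):

package main

import "fmt"

// rle1 applies bzip2's first run-length stage: runs of 4 or more identical
// bytes are emitted as the first 4 bytes followed by a count of the extras.
func rle1(in []byte) []byte {
	var out []byte
	for i := 0; i < len(in); {
		j := i
		for j < len(in) && in[j] == in[i] && j-i < 255+4 {
			j++
		}
		n := j - i
		if n >= 4 {
			out = append(out, in[i], in[i], in[i], in[i], byte(n-4))
		} else {
			out = append(out, in[i:j]...)
		}
		i = j
	}
	return out
}

func main() {
	fmt.Printf("%q\n", rle1([]byte("AAAAAAABBBBCCCD"))) // "AAAA\x03BBBB\x00CCCD"
}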

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

//go:build debug && !gofuzz
// +build debug,!gofuzz

package internal

View File

@ -17,6 +17,7 @@
// recover from errors only generated from within this repository.
//
// Example usage:
//
//	func Foo() (err error) {
//		defer errors.Recover(&err)
//
@ -28,7 +29,6 @@
//			errors.Panic(errors.New("whoopsie"))
//		}
//	}
package errors

import "strings"

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

//go:build gofuzz
// +build gofuzz

package internal

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

//go:build debug
// +build debug

package prefix

View File

@ -91,8 +91,8 @@ func (pc PrefixCodes) checkPrefixes() bool {
// checkCanonical reports whether all codes are canonical.
// That is, they have the following properties:
//
//  1. All codes of a given bit-length are consecutive values.
//  2. Shorter codes lexicographically precede longer codes.
//
// The codes must have unique symbols and be sorted by the symbol.
// The Len and Val fields in each code must be populated.
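Those two properties are exactly what the textbook canonical-code construction yields. A standalone sketch (independent of this package's PrefixCode type) that assigns canonical codes from bit lengths:

package main

import "fmt"

// canonicalCodes assigns codes to symbols given their bit lengths, so that
// equal-length codes are consecutive values and shorter codes
// lexicographically precede longer ones.
func canonicalCodes(lens []uint) []uint32 {
	maxLen := uint(0)
	for _, l := range lens {
		if l > maxLen {
			maxLen = l
		}
	}
	codes := make([]uint32, len(lens))
	var next uint32
	for l := uint(1); l <= maxLen; l++ {
		next <<= 1 // shorter prefixes come first
		for i, cl := range lens {
			if cl == l {
				codes[i] = next
				next++
			}
		}
	}
	return codes
}

func main() {
	// Lengths {1, 2, 3, 3} yield the codes 0, 10, 110, 111.
	lens := []uint{1, 2, 3, 3}
	for i, c := range canonicalCodes(lens) {
		fmt.Printf("sym %d: %0*b\n", i, int(lens[i]), c)
	}
}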

View File

@ -37,6 +37,7 @@ func (rcs RangeCodes) End() uint32 { return rcs[len(rcs)-1].End() }
// checkValid reports whether the RangeCodes is valid. In order to be valid,
// the following must hold true:
//
//	rcs[i-1].Base <= rcs[i].Base
//	rcs[i-1].End <= rcs[i].End
//	rcs[i-1].End >= rcs[i].Base
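Transcribing those invariants directly (rangeCode below is a stand-in struct for illustration, not the package's real type):

package main

import "fmt"

// rangeCode is a stand-in: a run of values starting at Base, Len wide.
type rangeCode struct {
	Base, Len uint32
}

func (rc rangeCode) End() uint32 { return rc.Base + rc.Len }

// checkValid enforces: bases non-decreasing, ends non-decreasing, and
// adjacent ranges contiguous or overlapping (no gaps between them).
func checkValid(rcs []rangeCode) bool {
	for i := 1; i < len(rcs); i++ {
		if rcs[i-1].Base > rcs[i].Base ||
			rcs[i-1].End() > rcs[i].End() ||
			rcs[i-1].End() < rcs[i].Base {
			return false
		}
	}
	return true
}

func main() {
	ok := []rangeCode{{0, 4}, {4, 4}, {8, 8}}
	gap := []rangeCode{{0, 4}, {9, 4}} // hole between 4 and 9
	fmt.Println(checkValid(ok), checkValid(gap)) // true false
}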

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.md file.

//go:build !debug && !gofuzz
// +build !debug,!gofuzz

package internal

View File

@ -60,14 +60,13 @@ func (ve *ValueEncoder) encodeAscii(value string) (ed EncodedData, err error) {
//
// Note that:
//
//  1. This type can not be automatically encoded using `Encode()`. The default
//     mode is to encode *with* a trailing NUL byte using `encodeAscii`. Only
//     certain undefined-type tags using an unterminated ASCII string and these
//     are exceptional in nature.
//
//  2. The presence of this method allows us to completely test the complementary
//     no-nul parser.
func (ve *ValueEncoder) encodeAsciiNoNul(value string) (ed EncodedData, err error) {
	ed.Type = TypeAsciiNoNul
	ed.Encoded = []byte(value)

View File

@ -478,13 +478,13 @@ func (ib *IfdBuilder) Tags() (tags []*BuilderTag) {
//
// NOTES:
//
//   - We don't manage any facet of the thumbnail data. This is the
//     responsibility of the user/developer.
//   - This method will fail unless the thumbnail is set on the root IFD.
//     However, in order to be valid, it must be set on the second one, linked to
//     by the first, as per the EXIF/TIFF specification.
//   - We set the offset to (0) now but will allocate the data and properly assign
//     the offset when the IB is encoded (later).
func (ib *IfdBuilder) SetThumbnail(data []byte) (err error) {
	defer func() {
		if state := recover(); state != nil {

View File

@ -128,8 +128,9 @@ type LogAdapter interface {
}

// TODO(dustin): !! Also populate whether we've bypassed an exception so that
//
//	we can add a template macro to prefix an exclamation of
//	some sort.
type MessageContext struct {
	Level *string
	Noun  *string

View File

@ -11,11 +11,11 @@ const (
// GracefulCopy will copy while enduring lesser normal issues.
//
//   - We'll ignore EOF if the read byte-count is more than zero. Only an EOF when
//     zero bytes were read will terminate the loop.
//
//   - Ignore short-writes. If less bytes were written than the bytes that were
//     given, we'll keep trying until done.
func GracefulCopy(w io.Writer, r io.Reader, buffer []byte) (copyCount int, err error) {
	if buffer == nil {
		buffer = make([]byte, defaultCopyBufferSize)
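A self-contained sketch of the contract described above (this mirrors the documented semantics; it is not the vendored implementation):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// gracefulCopy mirrors the documented contract: EOF is ignored while data
// is still arriving, and short writes are retried until all bytes land.
func gracefulCopy(w io.Writer, r io.Reader, buf []byte) (int, error) {
	if buf == nil {
		buf = make([]byte, 32*1024)
	}
	total := 0
	for {
		n, err := r.Read(buf)
		for written := 0; written < n; {
			m, werr := w.Write(buf[written:n])
			if werr != nil {
				return total + written + m, werr
			}
			written += m
		}
		total += n
		if err == io.EOF {
			if n == 0 {
				return total, nil // only a zero-byte EOF ends the loop
			}
			continue // EOF alongside data: keep reading
		}
		if err != nil {
			return total, err
		}
	}
}

func main() {
	var out bytes.Buffer
	n, err := gracefulCopy(&out, strings.NewReader("hello"), nil)
	fmt.Println(n, err, out.String()) // 5 <nil> hello
}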

View File

@ -17,9 +17,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
//	fp := os.Open("file")
//	os.Remove("file")        // Triggers Chmod
//	fp.Close()               // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@ -33,16 +33,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
//	# Default values on Linux 5.18
//	sysctl fs.inotify.max_user_watches=124983
//	sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
//	fs.inotify.max_user_watches=124983
//	fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.

View File

@ -26,9 +26,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
//	fp := os.Open("file")
//	os.Remove("file")        // Triggers Chmod
//	fp.Close()               // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@ -42,16 +42,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
//	# Default values on Linux 5.18
//	sysctl fs.inotify.max_user_watches=124983
//	sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
//	fs.inotify.max_user_watches=124983
//	fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.

View File

@ -24,9 +24,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
//	fp := os.Open("file")
//	os.Remove("file")        // Triggers Chmod
//	fp.Close()               // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@ -40,16 +40,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
//	# Default values on Linux 5.18
//	sysctl fs.inotify.max_user_watches=124983
//	sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
//	fs.inotify.max_user_watches=124983
//	fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.

View File

@ -27,9 +27,9 @@ import (
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
//     fp := os.Open("file")
//     os.Remove("file") // Triggers Chmod
//     fp.Close()        // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
@ -43,16 +43,16 @@ import (
//
// To increase them you can use sysctl or write the value to the /proc file:
//
//     # Default values on Linux 5.18
//     sysctl fs.inotify.max_user_watches=124983
//     sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
//     fs.inotify.max_user_watches=124983
//     fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.

View File

@ -9,36 +9,36 @@
//
// For example:
//
//     package crashy
//
//     import "github.com/go-errors/errors"
//
//     var Crashed = errors.Errorf("oh dear")
//
//     func Crash() error {
//         return errors.New(Crashed)
//     }
//
// This can be called as follows:
//
//     package main
//
//     import (
//         "crashy"
//         "fmt"
//         "github.com/go-errors/errors"
//     )
//
//     func main() {
//         err := crashy.Crash()
//         if err != nil {
//             if errors.Is(err, crashy.Crashed) {
//                 fmt.Println(err.(*errors.Error).ErrorStack())
//             } else {
//                 panic(err)
//             }
//         }
//     }
//
// This package was originally written to allow reporting to Bugsnag,
// but after I found similar packages by Facebook and Dropbox, it

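The doc comment above already carries a full two-package example; as a compact, single-file variant it can be condensed as below (assuming the go-errors API it shows: Errorf, the *Error type, and ErrorStack):

package main

import (
	"fmt"

	"github.com/go-errors/errors"
)

func doWork() error {
	// Errorf records a stack trace at the point of creation.
	return errors.Errorf("oh dear")
}

func main() {
	if err := doWork(); err != nil {
		// ErrorStack prints the message followed by the recorded stack.
		fmt.Println(err.(*errors.Error).ErrorStack())
	}
}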
View File

@ -1,3 +1,4 @@
//go:build go1.13
// +build go1.13

package errors

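This hunk (and the matching ones below) adds the //go:build directive introduced in Go 1.17 next to the legacy // +build comment. A sketch of the resulting file header, with explanatory comments that are mine rather than the diff's:

//go:build go1.13
// +build go1.13

// Both constraint lines express the same condition and must agree;
// gofmt keeps them in sync. Go 1.17+ toolchains read //go:build,
// older ones read // +build, and a blank line must separate the
// constraints from the package clause.
package errors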
View File

@ -1,3 +1,4 @@
//go:build !go1.13
// +build !go1.13

package errors

View File

@ -75,8 +75,8 @@ func ParsePanic(text string) (*Error, error) {
// The lines we're passing look like this:
//
//     main.(*foo).destruct(0xc208067e98)
//         /0/go/src/github.com/bugsnag/bugsnag-go/pan/main.go:22 +0x151
func parsePanicFrame(name string, line string, createdBy bool) (*StackFrame, error) {
	idx := strings.LastIndex(name, "(")
	if idx == -1 && !createdBy {

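parsePanicFrame is the per-frame helper behind the package's exported ParsePanic. A hedged usage sketch follows; the trace string is a hypothetical capture in the two-line frame format shown above, not output from this repository.

package main

import (
	"fmt"

	"github.com/go-errors/errors"
)

func main() {
	// A hypothetical panic trace: message, goroutine header, then
	// one name line and one source line per frame.
	trace := "panic: oh dear\n\n" +
		"goroutine 1 [running]:\n" +
		"main.(*foo).destruct(0xc208067e98)\n" +
		"\t/0/go/src/github.com/bugsnag/bugsnag-go/pan/main.go:22 +0x151\n"

	parsed, err := errors.ParsePanic(trace)
	if err != nil {
		fmt.Println("could not parse trace:", err)
		return
	}
	fmt.Println(parsed.ErrorStack())
}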
View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows

package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows

package ole

View File

@ -108,9 +108,9 @@ type GUID struct {
//
// The supplied string may be in any of these formats:
//
//     XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
//     XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
//     {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}
//
// The conversion of the supplied string is not case-sensitive.
func NewGUID(guid string) *GUID {
@ -216,11 +216,11 @@ func decodeHexChar(c byte) (byte, bool) {
// String converts the GUID to string form. It will adhere to this pattern:
//
//     {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}
//
// If the GUID is nil, the string representation of an empty GUID is returned:
//
//     {00000000-0000-0000-0000-000000000000}
func (guid *GUID) String() string {
	if guid == nil {
		return emptyGUID

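To make the accepted spellings concrete, a small sketch against go-ole's exported NewGUID and String; the GUID value is an arbitrary example, not taken from this diff.

package main

import (
	"fmt"

	ole "github.com/go-ole/go-ole"
)

func main() {
	// All three accepted spellings parse to the same GUID.
	for _, s := range []string{
		"6B29FC40CA471067B31D00DD010662DA",
		"6B29FC40-CA47-1067-B31D-00DD010662DA",
		"{6B29FC40-CA47-1067-B31D-00DD010662DA}",
	} {
		g := ole.NewGUID(s) // returns nil if the string is malformed
		// String always renders the braced, hyphenated form.
		fmt.Println(g.String())
	}
}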
View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows

package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows

package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows

package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows

package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows

package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

package ole

Some files were not shown because too many files have changed in this diff.