Update Go-Git to take advantage of LargeObjectThreshold (#16316)

Following the merging of https://github.com/go-git/go-git/pull/330 we
can now add a setting that stops go-git from reading and caching large objects in memory.

Signed-off-by: Andrew Thornton <art27@cantab.net>
zeripath, 2021-06-30 21:58:45 +01:00 (committed by GitHub)
parent 4f26e0ac0e
commit 9979983283
17 changed files with 507 additions and 45 deletions

View File

@ -573,6 +573,9 @@ PATH =
;;
;; Respond to pushes to a non-default branch with a URL for creating a Pull Request (if the repository has them enabled)
;PULL_REQUEST_PUSH_MESSAGE = true
;;
;; (Go-Git only) Don't cache objects greater than this in memory. (Set to 0 to disable.)
;LARGE_OBJECT_THRESHOLD = 1048576
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

View File

@ -837,7 +837,7 @@ NB: You must have `DISABLE_ROUTER_LOG` set to `false` for this option to take ef
- `PULL_REQUEST_PUSH_MESSAGE`: **true**: Respond to pushes to a non-default branch with a URL for creating a Pull Request (if the repository has them enabled)
- `VERBOSE_PUSH`: **true**: Print status information about pushes as they are being processed.
- `VERBOSE_PUSH_DELAY`: **5s**: Only print verbose information if push takes longer than this delay.
- `LARGE_OBJECT_THRESHOLD`: **1048576**: (Go-Git only), don't cache objects greater than this in memory. (Set to 0 to disable.)

## Git - Timeout settings (`git.timeout`)

- `DEFAUlT`: **360**: Git operations default timeout seconds.
- `MIGRATE`: **600**: Migrate external repositories timeout seconds.

go.mod
View File

@ -41,7 +41,7 @@ require (
github.com/go-chi/cors v1.2.0
github.com/go-enry/go-enry/v2 v2.7.0
github.com/go-git/go-billy/v5 v5.3.1
-github.com/go-git/go-git/v5 v5.4.2
+github.com/go-git/go-git/v5 v5.4.3-0.20210630082519-b4368b2a2ca4
github.com/go-ldap/ldap/v3 v3.3.0
github.com/go-redis/redis/v8 v8.10.0
github.com/go-sql-driver/mysql v1.6.0
@ -123,9 +123,9 @@ require (
go.uber.org/multierr v1.7.0 // indirect
go.uber.org/zap v1.17.0 // indirect
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
golang.org/x/text v0.3.6
golang.org/x/time v0.0.0-20210608053304-ed9ce3a009e4 // indirect
golang.org/x/tools v0.1.0

go.sum
View File

@ -313,8 +313,8 @@ github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Ai
github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8=
github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0=
-github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4=
-github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc=
+github.com/go-git/go-git/v5 v5.4.3-0.20210630082519-b4368b2a2ca4 h1:1RSUwVK7VjTeA82kcLIqz1EU70QRwFdZUlJW58gP4GY=
+github.com/go-git/go-git/v5 v5.4.3-0.20210630082519-b4368b2a2ca4/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@ -1233,8 +1233,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
golang.org/x/net v0.0.0-20210331060903-cb1fcc7394e5/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1339,8 +1339,9 @@ golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

View File

@ -12,6 +12,8 @@ import (
"path/filepath" "path/filepath"
gitealog "code.gitea.io/gitea/modules/log" gitealog "code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"github.com/go-git/go-billy/v5/osfs" "github.com/go-git/go-billy/v5/osfs"
gogit "github.com/go-git/go-git/v5" gogit "github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing/cache" "github.com/go-git/go-git/v5/plumbing/cache"
@ -46,7 +48,7 @@ func OpenRepository(repoPath string) (*Repository, error) {
return nil, err
}
}
-storage := filesystem.NewStorageWithOptions(fs, cache.NewObjectLRUDefault(), filesystem.Options{KeepDescriptors: true})
+storage := filesystem.NewStorageWithOptions(fs, cache.NewObjectLRUDefault(), filesystem.Options{KeepDescriptors: true, LargeObjectThreshold: setting.Git.LargeObjectThreshold})
gogitRepo, err := gogit.Open(storage, fs)
if err != nil {
return nil, err
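
For context, the equivalent wiring when using go-git directly looks like the sketch below; the repository path and the 1 MiB threshold are illustrative, not part of this change.

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	gogit "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/cache"
	"github.com/go-git/go-git/v5/storage/filesystem"
)

func main() {
	// Open the .git directory, mirroring OpenRepository above.
	fs := osfs.New("/tmp/repo.git") // illustrative path
	storage := filesystem.NewStorageWithOptions(fs, cache.NewObjectLRUDefault(), filesystem.Options{
		KeepDescriptors:      true,
		LargeObjectThreshold: 1024 * 1024, // objects larger than this are streamed, not cached
	})
	repo, err := gogit.Open(storage, fs)
	if err != nil {
		panic(err)
	}
	fmt.Println(repo != nil)
}
```

Setting LargeObjectThreshold to 0 keeps the old behaviour of decoding every object into memory.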

View File

@ -25,6 +25,7 @@ var (
GCArgs []string `ini:"GC_ARGS" delim:" "`
EnableAutoGitWireProtocol bool
PullRequestPushMessage bool
LargeObjectThreshold int64
Timeout struct {
Default int
Migrate int
@ -45,6 +46,7 @@ var (
GCArgs: []string{},
EnableAutoGitWireProtocol: true,
PullRequestPushMessage: true,
LargeObjectThreshold: 1024 * 1024,
Timeout: struct {
Default int
Migrate int
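
For illustration only, the new field can be filled from a `[git]` section the same way the tagged fields above are; this sketch uses gopkg.in/ini.v1 with an explicit key tag and is not Gitea's actual settings loader.

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

// Illustrative mirror of the settings struct: the tag pins the ini key,
// just as GC_ARGS is pinned above.
type gitSettings struct {
	LargeObjectThreshold int64 `ini:"LARGE_OBJECT_THRESHOLD"`
}

func main() {
	cfg, err := ini.Load([]byte("[git]\nLARGE_OBJECT_THRESHOLD = 1048576\n"))
	if err != nil {
		panic(err)
	}
	s := gitSettings{LargeObjectThreshold: 1024 * 1024} // default from the diff
	if err := cfg.Section("git").MapTo(&s); err != nil {
		panic(err)
	}
	fmt.Println(s.LargeObjectThreshold) // 1048576
}
```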

View File

@ -7,19 +7,21 @@ import (
"github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/cache" "github.com/go-git/go-git/v5/plumbing/cache"
"github.com/go-git/go-git/v5/plumbing/format/idxfile" "github.com/go-git/go-git/v5/plumbing/format/idxfile"
"github.com/go-git/go-git/v5/utils/ioutil"
) )
// FSObject is an object from the packfile on the filesystem. // FSObject is an object from the packfile on the filesystem.
type FSObject struct { type FSObject struct {
hash plumbing.Hash hash plumbing.Hash
h *ObjectHeader h *ObjectHeader
offset int64 offset int64
size int64 size int64
typ plumbing.ObjectType typ plumbing.ObjectType
index idxfile.Index index idxfile.Index
fs billy.Filesystem fs billy.Filesystem
path string path string
cache cache.Object cache cache.Object
largeObjectThreshold int64
} }
// NewFSObject creates a new filesystem object. // NewFSObject creates a new filesystem object.
@ -32,16 +34,18 @@ func NewFSObject(
fs billy.Filesystem,
path string,
cache cache.Object,
largeObjectThreshold int64,
) *FSObject {
return &FSObject{
hash: hash,
offset: offset,
size: contentSize,
typ: finalType,
index: index,
fs: fs,
path: path,
cache: cache,
largeObjectThreshold: largeObjectThreshold,
}
}
@ -62,7 +66,21 @@ func (o *FSObject) Reader() (io.ReadCloser, error) {
return nil, err
}
-p := NewPackfileWithCache(o.index, nil, f, o.cache)
+p := NewPackfileWithCache(o.index, nil, f, o.cache, o.largeObjectThreshold)
if o.largeObjectThreshold > 0 && o.size > o.largeObjectThreshold {
// We have a big object
h, err := p.objectHeaderAtOffset(o.offset)
if err != nil {
return nil, err
}
r, err := p.getReaderDirect(h)
if err != nil {
_ = f.Close()
return nil, err
}
return ioutil.NewReadCloserWithCloser(r, f.Close), nil
}
r, err := p.getObjectContent(o.offset)
if err != nil {
_ = f.Close()
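
The practical effect of the new branch in Reader(): callers that stream an over-threshold object never see it decoded into memory. A minimal consumer-side sketch using go-git's public API (the repository path and hash are placeholders):

```go
package main

import (
	"io"
	"os"

	gogit "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	// Assumes the repository was opened with LargeObjectThreshold set,
	// e.g. via filesystem.NewStorageWithOptions as shown earlier;
	// PlainOpen is used here only to keep the sketch short.
	repo, err := gogit.PlainOpen("/tmp/repo") // placeholder path
	if err != nil {
		panic(err)
	}
	blob, err := repo.BlobObject(plumbing.NewHash("0000000000000000000000000000000000000000")) // placeholder hash
	if err != nil {
		panic(err)
	}
	r, err := blob.Reader()
	if err != nil {
		panic(err)
	}
	defer r.Close()
	_, _ = io.Copy(os.Stdout, r) // stream the content instead of buffering it
}
```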

View File

@ -2,6 +2,8 @@ package packfile
import (
"bytes"
"compress/zlib"
"fmt"
"io"
"os"
@ -35,11 +37,12 @@ const smallObjectThreshold = 16 * 1024
// Packfile allows retrieving information from inside a packfile.
type Packfile struct {
idxfile.Index
fs billy.Filesystem
file billy.File
s *Scanner
deltaBaseCache cache.Object
offsetToType map[int64]plumbing.ObjectType
largeObjectThreshold int64
}
// NewPackfileWithCache creates a new Packfile with the given object cache.
@ -50,6 +53,7 @@ func NewPackfileWithCache(
fs billy.Filesystem,
file billy.File,
cache cache.Object,
largeObjectThreshold int64,
) *Packfile {
s := NewScanner(file)
return &Packfile{
@ -59,6 +63,7 @@ func NewPackfileWithCache(
s,
cache,
make(map[int64]plumbing.ObjectType),
largeObjectThreshold,
}
}
@ -66,8 +71,8 @@ func NewPackfileWithCache(
// and packfile idx.
// If the filesystem is provided, the packfile will return FSObjects, otherwise
// it will return MemoryObjects.
-func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile {
-return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault())
+func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File, largeObjectThreshold int64) *Packfile {
+return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault(), largeObjectThreshold)
}
// Get retrieves the encoded object in the packfile with the given hash.
@ -263,6 +268,7 @@ func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.
p.fs,
p.file.Name(),
p.deltaBaseCache,
p.largeObjectThreshold,
), nil
}
@ -282,6 +288,50 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
return obj.Reader()
}
func asyncReader(p *Packfile) (io.ReadCloser, error) {
reader := ioutil.NewReaderUsingReaderAt(p.file, p.s.r.offset)
zr := zlibReaderPool.Get().(io.ReadCloser)
if err := zr.(zlib.Resetter).Reset(reader, nil); err != nil {
return nil, fmt.Errorf("zlib reset error: %s", err)
}
return ioutil.NewReadCloserWithCloser(zr, func() error {
zlibReaderPool.Put(zr)
return nil
}), nil
}
func (p *Packfile) getReaderDirect(h *ObjectHeader) (io.ReadCloser, error) {
switch h.Type {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
return asyncReader(p)
case plumbing.REFDeltaObject:
deltaRc, err := asyncReader(p)
if err != nil {
return nil, err
}
r, err := p.readREFDeltaObjectContent(h, deltaRc)
if err != nil {
return nil, err
}
return r, nil
case plumbing.OFSDeltaObject:
deltaRc, err := asyncReader(p)
if err != nil {
return nil, err
}
r, err := p.readOFSDeltaObjectContent(h, deltaRc)
if err != nil {
return nil, err
}
return r, nil
default:
return nil, ErrInvalidObject.AddDetails("type %q", h.Type)
}
}
func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
var obj = new(plumbing.MemoryObject)
obj.SetSize(h.Length)
@ -334,6 +384,20 @@ func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plu
return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
}
func (p *Packfile) readREFDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) {
var err error
base, ok := p.cacheGet(h.Reference)
if !ok {
base, err = p.Get(h.Reference)
if err != nil {
return nil, err
}
}
return ReaderFromDelta(base, deltaRC)
}
func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error {
var err error
@ -364,6 +428,20 @@ func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset
return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
}
func (p *Packfile) readOFSDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) {
hash, err := p.FindHash(h.OffsetReference)
if err != nil {
return nil, err
}
base, err := p.objectAtOffset(h.OffsetReference, hash)
if err != nil {
return nil, err
}
return ReaderFromDelta(base, deltaRC)
}
func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error {
hash, err := p.FindHash(offset)
if err != nil {
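
asyncReader above (and Scanner.ReadObject further below) borrows zlib readers from the packfile package's existing zlibReaderPool instead of allocating one per object. A standalone sketch of that pool-and-Reset pattern (the pool here is local to the example):

```go
package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io"
	"sync"
)

// A tiny valid zlib stream used only to initialise pooled readers.
var zlibInit = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}

var zlibReaderPool = sync.Pool{
	New: func() interface{} {
		r, _ := zlib.NewReader(bytes.NewReader(zlibInit))
		return r
	},
}

func main() {
	// Compress some data to stand in for a packed object.
	var buf bytes.Buffer
	w := zlib.NewWriter(&buf)
	_, _ = w.Write([]byte("hello large object"))
	_ = w.Close()

	// Borrow a reader and re-point it at the new stream, as asyncReader does.
	zr := zlibReaderPool.Get().(io.ReadCloser)
	defer zlibReaderPool.Put(zr)
	if err := zr.(zlib.Resetter).Reset(bytes.NewReader(buf.Bytes()), nil); err != nil {
		panic(err)
	}
	out, _ := io.ReadAll(zr)
	fmt.Println(string(out)) // hello large object
}
```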

View File

@ -1,9 +1,11 @@
package packfile
import (
"bufio"
"bytes"
"errors"
"io"
"math"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/ioutil"
@ -73,6 +75,131 @@ func PatchDelta(src, delta []byte) ([]byte, error) {
return b.Bytes(), nil
}
func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadCloser, error) {
deltaBuf := bufio.NewReaderSize(deltaRC, 1024)
srcSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return nil, ErrInvalidDelta
}
return nil, err
}
if srcSz != uint(base.Size()) {
return nil, ErrInvalidDelta
}
targetSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return nil, ErrInvalidDelta
}
return nil, err
}
remainingTargetSz := targetSz
dstRd, dstWr := io.Pipe()
go func() {
baseRd, err := base.Reader()
if err != nil {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
defer baseRd.Close()
baseBuf := bufio.NewReader(baseRd)
basePos := uint(0)
for {
cmd, err := deltaBuf.ReadByte()
if err == io.EOF {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
if isCopyFromSrc(cmd) {
offset, err := decodeOffsetByteReader(cmd, deltaBuf)
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
sz, err := decodeSizeByteReader(cmd, deltaBuf)
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
if invalidSize(sz, targetSz) ||
invalidOffsetSize(offset, sz, srcSz) {
_ = dstWr.Close()
return
}
discard := offset - basePos
if basePos > offset {
_ = baseRd.Close()
baseRd, err = base.Reader()
if err != nil {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
baseBuf.Reset(baseRd)
discard = offset
}
for discard > math.MaxInt32 {
n, err := baseBuf.Discard(math.MaxInt32)
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
basePos += uint(n)
discard -= uint(n)
}
for discard > 0 {
n, err := baseBuf.Discard(int(discard))
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
basePos += uint(n)
discard -= uint(n)
}
if _, err := io.Copy(dstWr, io.LimitReader(baseBuf, int64(sz))); err != nil {
_ = dstWr.CloseWithError(err)
return
}
remainingTargetSz -= sz
basePos += sz
} else if isCopyFromDelta(cmd) {
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
if _, err := io.Copy(dstWr, io.LimitReader(deltaBuf, int64(sz))); err != nil {
_ = dstWr.CloseWithError(err)
return
}
remainingTargetSz -= sz
} else {
_ = dstWr.CloseWithError(ErrDeltaCmd)
return
}
if remainingTargetSz <= 0 {
_ = dstWr.Close()
return
}
}
}()
return dstRd, nil
}
func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
if len(delta) < deltaSizeMin {
return ErrInvalidDelta
@ -161,6 +288,25 @@ func decodeLEB128(input []byte) (uint, []byte) {
return num, input[sz:]
}
func decodeLEB128ByteReader(input io.ByteReader) (uint, error) {
var num, sz uint
for {
b, err := input.ReadByte()
if err != nil {
return 0, err
}
num |= (uint(b) & payload) << (sz * 7) // concats 7 bits chunks
sz++
if uint(b)&continuation == 0 {
break
}
}
return num, nil
}
const (
payload = 0x7f // 0111 1111
continuation = 0x80 // 1000 0000
@ -174,6 +320,40 @@ func isCopyFromDelta(cmd byte) bool {
return (cmd&0x80) == 0 && cmd != 0
}
func decodeOffsetByteReader(cmd byte, delta io.ByteReader) (uint, error) {
var offset uint
if (cmd & 0x01) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset = uint(next)
}
if (cmd & 0x02) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset |= uint(next) << 8
}
if (cmd & 0x04) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset |= uint(next) << 16
}
if (cmd & 0x08) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset |= uint(next) << 24
}
return offset, nil
}
func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
var offset uint
if (cmd & 0x01) != 0 {
@ -208,6 +388,36 @@ func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
return offset, delta, nil
}
func decodeSizeByteReader(cmd byte, delta io.ByteReader) (uint, error) {
var sz uint
if (cmd & 0x10) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
sz = uint(next)
}
if (cmd & 0x20) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
sz |= uint(next) << 8
}
if (cmd & 0x40) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
sz |= uint(next) << 16
}
if sz == 0 {
sz = 0x10000
}
return sz, nil
}
func decodeSize(cmd byte, delta []byte) (uint, []byte, error) {
var sz uint
if (cmd & 0x10) != 0 {
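
decodeLEB128ByteReader above reads the source and target sizes of a delta as little-endian base-128 varints: each byte contributes 7 payload bits and the high bit says whether another byte follows. A self-contained sketch with a worked value (the helper is local to the example):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

const (
	payload      = 0x7f // 0111 1111: low 7 bits carry data
	continuation = 0x80 // 1000 0000: high bit means another byte follows
)

// decodeLEB128 mirrors decodeLEB128ByteReader above: least-significant
// 7-bit group first, stop at the first byte without the continuation bit.
func decodeLEB128(r io.ByteReader) (uint, error) {
	var num, sz uint
	for {
		b, err := r.ReadByte()
		if err != nil {
			return 0, err
		}
		num |= (uint(b) & payload) << (sz * 7)
		sz++
		if uint(b)&continuation == 0 {
			return num, nil
		}
	}
}

func main() {
	// 0xE5 0x8E 0x26 encodes 624485:
	// 0x65 + (0x0E << 7) + (0x26 << 14) = 101 + 1792 + 622592.
	n, _ := decodeLEB128(bytes.NewReader([]byte{0xE5, 0x8E, 0x26}))
	fmt.Println(n) // 624485
}
```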

View File

@ -320,6 +320,21 @@ func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err erro
return
}
// ReadObject returns a reader for the object content and an error
func (s *Scanner) ReadObject() (io.ReadCloser, error) {
s.pendingObject = nil
zr := zlibReaderPool.Get().(io.ReadCloser)
if err := zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
return nil, fmt.Errorf("zlib reset error: %s", err)
}
return ioutil.NewReadCloserWithCloser(zr, func() error {
zlibReaderPool.Put(zr)
return nil
}), nil
}
// ReadRegularObject reads and write a non-deltified object
// from it zlib stream in an object entry in the packfile.
func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {

View File

@ -0,0 +1,79 @@
package dotgit
import (
"fmt"
"io"
"os"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/format/objfile"
"github.com/go-git/go-git/v5/utils/ioutil"
)
var _ (plumbing.EncodedObject) = &EncodedObject{}
type EncodedObject struct {
dir *DotGit
h plumbing.Hash
t plumbing.ObjectType
sz int64
}
func (e *EncodedObject) Hash() plumbing.Hash {
return e.h
}
func (e *EncodedObject) Reader() (io.ReadCloser, error) {
f, err := e.dir.Object(e.h)
if err != nil {
if os.IsNotExist(err) {
return nil, plumbing.ErrObjectNotFound
}
return nil, err
}
r, err := objfile.NewReader(f)
if err != nil {
return nil, err
}
t, size, err := r.Header()
if err != nil {
_ = r.Close()
return nil, err
}
if t != e.t {
_ = r.Close()
return nil, objfile.ErrHeader
}
if size != e.sz {
_ = r.Close()
return nil, objfile.ErrHeader
}
return ioutil.NewReadCloserWithCloser(r, f.Close), nil
}
func (e *EncodedObject) SetType(plumbing.ObjectType) {}
func (e *EncodedObject) Type() plumbing.ObjectType {
return e.t
}
func (e *EncodedObject) Size() int64 {
return e.sz
}
func (e *EncodedObject) SetSize(int64) {}
func (e *EncodedObject) Writer() (io.WriteCloser, error) {
return nil, fmt.Errorf("Not supported")
}
func NewEncodedObject(dir *DotGit, h plumbing.Hash, t plumbing.ObjectType, size int64) *EncodedObject {
return &EncodedObject{
dir: dir,
h: h,
t: t,
sz: size,
}
}

View File

@ -204,9 +204,9 @@ func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfi
var p *packfile.Packfile
if s.objectCache != nil {
-p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
+p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache, s.options.LargeObjectThreshold)
} else {
-p = packfile.NewPackfile(idx, s.dir.Fs(), f)
+p = packfile.NewPackfile(idx, s.dir.Fs(), f, s.options.LargeObjectThreshold)
}
return p, s.storePackfileInCache(pack, p)
@ -389,7 +389,6 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
return cacheObj, nil
}
-obj = s.NewEncodedObject()
r, err := objfile.NewReader(f)
if err != nil {
return nil, err
@ -402,6 +401,13 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
return nil, err
}
if s.options.LargeObjectThreshold > 0 && size > s.options.LargeObjectThreshold {
obj = dotgit.NewEncodedObject(s.dir, h, t, size)
return obj, nil
}
obj = s.NewEncodedObject()
obj.SetType(t)
obj.SetSize(size)
w, err := obj.Writer()
@ -595,6 +601,7 @@ func (s *ObjectStorage) buildPackfileIters(
return newPackfileIter(
s.dir.Fs(), pack, t, seen, s.index[h],
s.objectCache, s.options.KeepDescriptors,
s.options.LargeObjectThreshold,
)
},
}, nil
@ -684,6 +691,7 @@ func NewPackfileIter(
idxFile billy.File,
t plumbing.ObjectType,
keepPack bool,
largeObjectThreshold int64,
) (storer.EncodedObjectIter, error) {
idx := idxfile.NewMemoryIndex()
if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
@ -695,7 +703,7 @@ func NewPackfileIter(
}
seen := make(map[plumbing.Hash]struct{})
-return newPackfileIter(fs, f, t, seen, idx, nil, keepPack)
+return newPackfileIter(fs, f, t, seen, idx, nil, keepPack, largeObjectThreshold)
}
func newPackfileIter(
@ -706,12 +714,13 @@ func newPackfileIter(
index idxfile.Index,
cache cache.Object,
keepPack bool,
largeObjectThreshold int64,
) (storer.EncodedObjectIter, error) {
var p *packfile.Packfile
if cache != nil {
-p = packfile.NewPackfileWithCache(index, fs, f, cache)
+p = packfile.NewPackfileWithCache(index, fs, f, cache, largeObjectThreshold)
} else {
-p = packfile.NewPackfile(index, fs, f)
+p = packfile.NewPackfile(index, fs, f, largeObjectThreshold)
}
iter, err := p.GetByType(t)

View File

@ -34,6 +34,9 @@ type Options struct {
// MaxOpenDescriptors is the max number of file descriptors to keep
// open. If KeepDescriptors is true, all file descriptors will remain open.
MaxOpenDescriptors int
// LargeObjectThreshold maximum object size (in bytes) that will be read in to memory.
// If left unset or set to 0 there is no limit
LargeObjectThreshold int64
}
// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.

View File

@ -55,6 +55,28 @@ func NewReadCloser(r io.Reader, c io.Closer) io.ReadCloser {
return &readCloser{Reader: r, closer: c}
}
type readCloserCloser struct {
io.ReadCloser
closer func() error
}
func (r *readCloserCloser) Close() (err error) {
defer func() {
if err == nil {
err = r.closer()
return
}
_ = r.closer()
}()
return r.ReadCloser.Close()
}
// NewReadCloserWithCloser creates an `io.ReadCloser` with the given `io.ReaderCloser` and
// `io.Closer` that ensures that the closer is closed on close
func NewReadCloserWithCloser(r io.ReadCloser, c func() error) io.ReadCloser {
return &readCloserCloser{ReadCloser: r, closer: c}
}
type writeCloser struct {
io.Writer
closer io.Closer
@ -82,6 +104,24 @@ func WriteNopCloser(w io.Writer) io.WriteCloser {
return writeNopCloser{w}
}
type readerAtAsReader struct {
io.ReaderAt
offset int64
}
func (r *readerAtAsReader) Read(bs []byte) (int, error) {
n, err := r.ReaderAt.ReadAt(bs, r.offset)
r.offset += int64(n)
return n, err
}
func NewReaderUsingReaderAt(r io.ReaderAt, offset int64) io.Reader {
return &readerAtAsReader{
ReaderAt: r,
offset: offset,
}
}
// CheckClose calls Close on the given io.Closer. If the given *error points to
// nil, it will be assigned the error returned by Close. Otherwise, any error
// returned by Close will be ignored. CheckClose is usually called with defer.
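
NewReaderUsingReaderAt is what lets asyncReader start a zlib stream at an arbitrary packfile offset without seeking the shared file handle. A minimal sketch of the same adapter idea (types and names are local to the example):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// readerAt pairs an io.ReaderAt with its own cursor so it can be used
// wherever an io.Reader is expected, mirroring readerAtAsReader above.
type readerAt struct {
	r      io.ReaderAt
	offset int64
}

func (r *readerAt) Read(p []byte) (int, error) {
	n, err := r.r.ReadAt(p, r.offset)
	r.offset += int64(n)
	return n, err
}

func main() {
	src := strings.NewReader("....object data starts here")
	// Read from byte 4 onwards; the underlying ReaderAt keeps no shared
	// position, so concurrent readers of the same file stay independent.
	out, _ := io.ReadAll(&readerAt{r: src, offset: 4})
	fmt.Println(string(out)) // object data starts here
}
```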

View File

@ -1773,6 +1773,8 @@ const (
NFPROTO_NUMPROTO = 0xd
)
const SO_ORIGINAL_DST = 0x50
type Nfgenmsg struct {
Nfgen_family uint8
Version uint8

View File

@ -680,7 +680,7 @@ const (
WTD_CHOICE_CERT = 5
WTD_STATEACTION_IGNORE = 0x00000000
-WTD_STATEACTION_VERIFY = 0x00000010
+WTD_STATEACTION_VERIFY = 0x00000001
WTD_STATEACTION_CLOSE = 0x00000002
WTD_STATEACTION_AUTO_CACHE = 0x00000003
WTD_STATEACTION_AUTO_CACHE_FLUSH = 0x00000004

vendor/modules.txt
View File

@ -305,7 +305,7 @@ github.com/go-git/go-billy/v5/helper/polyfill
github.com/go-git/go-billy/v5/memfs
github.com/go-git/go-billy/v5/osfs
github.com/go-git/go-billy/v5/util
-# github.com/go-git/go-git/v5 v5.4.2
+# github.com/go-git/go-git/v5 v5.4.3-0.20210630082519-b4368b2a2ca4
## explicit
github.com/go-git/go-git/v5
github.com/go-git/go-git/v5/config
@ -887,7 +887,7 @@ golang.org/x/crypto/ssh/knownhosts
# golang.org/x/mod v0.4.2
golang.org/x/mod/module
golang.org/x/mod/semver
-# golang.org/x/net v0.0.0-20210525063256-abc453219eb5
+# golang.org/x/net v0.0.0-20210614182718-04defd469f4e
## explicit
golang.org/x/net/bpf
golang.org/x/net/context
@ -913,7 +913,7 @@ golang.org/x/oauth2/google/internal/externalaccount
golang.org/x/oauth2/internal
golang.org/x/oauth2/jws
golang.org/x/oauth2/jwt
-# golang.org/x/sys v0.0.0-20210616094352-59db8d763f22
+# golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
## explicit
golang.org/x/sys/cpu
golang.org/x/sys/execabs