go/nix/bcacheup: init utility for uploading things to a binary cache

Luke Granger-Brown 2022-10-09 16:46:55 +01:00
parent a0400126fe
commit 98f53c5cd6
23 changed files with 1040 additions and 8 deletions

go.mod

@@ -4,7 +4,7 @@
module hg.lukegb.com/lukegb/depot/go
-go 1.14
+go 1.18
require (
github.com/coreos/go-systemd/v22 v22.3.2
@@ -25,3 +25,8 @@ require (
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
)
+require (
+github.com/mattn/go-sqlite3 v1.14.15 // indirect
+github.com/ulikunitz/xz v0.5.10 // indirect
+)

go.sum

@@ -509,6 +509,8 @@ github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2y
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI=
+github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
@@ -606,6 +608,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
+github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=

go/nix/bcacheup/bcacheup.go Normal file

@@ -0,0 +1,455 @@
// Binary bcacheup uploads store paths (and their references) to a Nix binary cache.
package main
import (
"context"
"flag"
"fmt"
"io"
"log"
"net/http"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"sync"
"time"
"github.com/numtide/go-nix/nixbase32"
"github.com/ulikunitz/xz"
"gocloud.dev/blob"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/singleflight"
"hg.lukegb.com/lukegb/depot/go/nix/nar"
"hg.lukegb.com/lukegb/depot/go/nix/nar/narinfo"
"hg.lukegb.com/lukegb/depot/go/nix/nixstore"
_ "gocloud.dev/blob/fileblob"
_ "gocloud.dev/blob/gcsblob"
)
var (
blobURLFlag = flag.String("cache_url", "", "Cache URL")
)
var (
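// hashExtractRegexp matches the 32-character nixbase32 hash component of a
// store path; the nixbase32 alphabet omits the letters e, o, t and u.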
hashExtractRegexp = regexp.MustCompile(`(^|/)([0-9a-df-np-sv-z]{32})([-.].*)?$`)
trustedCaches = []string{
"https://cache.nixos.org",
}
)
func hashExtract(s string) string {
res := hashExtractRegexp.FindStringSubmatch(s)
if len(res) == 0 {
return ""
}
return res[2]
}
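// state is the lifecycle of a single store path as it moves through the
// uploader; a Terminal state is never left once entered.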
type state int
const (
stateUnknown state = iota
stateCheckingShouldUpload
stateUploadingReferences
stateUploadingContent
stateCopyingContent
stateUploadingNarinfo
stateSkipped
stateFailed
stateUploaded
stateMax
)
func (s state) Terminal() bool {
return s == stateSkipped || s == stateUploaded || s == stateFailed
}
func (s state) String() string {
return map[state]string{
stateUnknown: "unknown",
stateCheckingShouldUpload: "determining if upload required",
stateUploadingReferences: "uploading references",
stateUploadingContent: "uploading content",
stateCopyingContent: "copying content",
stateUploadingNarinfo: "uploading narinfo",
stateSkipped: "skipped",
stateFailed: "failed",
stateUploaded: "uploaded",
}[s]
}
type stateInfo struct {
Current state
Since time.Time
Path string
}
type stateTracker struct {
mu sync.Mutex
pathState map[string]stateInfo
}
func (t *stateTracker) SetState(p string, s state) {
si := stateInfo{
Current: s,
Since: time.Now(),
Path: p,
}
t.mu.Lock()
if t.pathState == nil {
t.pathState = make(map[string]stateInfo)
}
t.pathState[p] = si
t.mu.Unlock()
}
func (t *stateTracker) CurrentState() map[string]stateInfo {
t.mu.Lock()
defer t.mu.Unlock()
out := make(map[string]stateInfo, len(t.pathState))
for k, v := range t.pathState {
out[k] = v
}
return out
}
func (t *stateTracker) StateSummary() string {
states := t.CurrentState()
countByState := map[state]int{}
var oldestActive []stateInfo
for _, s := range states {
countByState[s.Current]++
if !s.Current.Terminal() && s.Current != stateUploadingReferences {
oldestActive = append(oldestActive, s)
}
}
sort.Slice(oldestActive, func(i, j int) bool {
a, b := oldestActive[i], oldestActive[j]
return a.Since.Before(b.Since)
})
var firstLineBits []string
for n := stateUnknown; n < stateMax; n++ {
c := countByState[n]
if c != 0 {
firstLineBits = append(firstLineBits, fmt.Sprintf("%d %s", c, n))
}
}
lines := []string{
strings.Join(firstLineBits, ", "),
}
for n := 0; n < len(oldestActive) && n < 20; n++ {
si := oldestActive[n]
lines = append(lines, fmt.Sprintf("\t%s: %s (for %s)", si.Path, si.Current, time.Since(si.Since).Truncate(time.Second)))
}
return strings.Join(lines, "\n")
}
type uploader struct {
bucket *blob.Bucket
store *nixstore.DB
storePath string
st stateTracker
uploadSF singleflight.Group
}
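// byteCounterWriter counts the bytes written through it; it records the
// compressed file size for the narinfo without buffering the NAR.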
type byteCounterWriter struct{ n uint64 }
func (w *byteCounterWriter) Write(b []byte) (int, error) {
w.n += uint64(len(b))
return len(b), nil
}
func (u *uploader) inStore(ctx context.Context, path string) (bool, error) {
// Check if the narinfo exists.
key, err := keyForPath(path)
if err != nil {
return false, fmt.Errorf("computing narinfo key for %v: %w", path, err)
}
return u.bucket.Exists(ctx, key)
}
func (u *uploader) inTrustedCaches(ctx context.Context, path string) (bool, error) {
key, err := keyForPath(path)
if err != nil {
return false, fmt.Errorf("computing narinfo key for %v: %w", path, err)
}
for _, c := range trustedCaches {
req, err := http.NewRequestWithContext(ctx, "HEAD", fmt.Sprintf("%v/%v", c, key), nil)
if err != nil {
return false, fmt.Errorf("constructing request for %v/%v: %v", c, key, err)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return false, fmt.Errorf("making request for %v/%v: %v", c, key, err)
}
resp.Body.Close()
if resp.StatusCode == http.StatusOK {
return true, nil
}
}
return false, nil
}
func (u *uploader) shouldUpload(ctx context.Context, path string) (bool, error) {
inStore, err := u.inStore(ctx, path)
if err != nil {
return false, err
}
if inStore {
return false, nil
}
inTrustedCaches, err := u.inTrustedCaches(ctx, path)
if err != nil {
return false, err
}
if inTrustedCaches {
return false, nil
}
return true, nil
}
func (u *uploader) uploadContent(ctx context.Context, ni *narinfo.NarInfo, path string, dst io.Writer) error {
if !ni.NarHash.Valid() {
return fmt.Errorf("nar hash for %v is not valid", path)
}
narHasher := ni.NarHash.Algorithm.Hash()
fileHasher := ni.NarHash.Algorithm.Hash()
fileByteCounter := &byteCounterWriter{}
xzWriter, err := xz.NewWriter(io.MultiWriter(fileHasher, fileByteCounter, dst))
if err != nil {
return fmt.Errorf("creating xz writer: %v", err)
}
w := io.MultiWriter(narHasher, xzWriter)
narSize, err := nar.Pack(w, nar.DirFS(u.storePath), filepath.Base(path))
if err != nil {
return fmt.Errorf("packing %v as NAR: %w", path, err)
}
if err := xzWriter.Close(); err != nil {
return fmt.Errorf("compressing with xz: %w", err)
}
// Check the NAR size and hash match the narinfo.
if uint64(narSize) != ni.NarSize {
return fmt.Errorf("uploaded nar was %d bytes; expected %d bytes", narSize, ni.NarSize)
}
narHash := narinfo.Hash{
Hash: narHasher.Sum(nil),
Algorithm: ni.NarHash.Algorithm,
}
if len(narHash.Hash) != len(ni.NarHash.Hash) {
return fmt.Errorf("uploaded nar hash length was %d bytes; expected %d bytes", len(narHash.Hash), len(ni.NarHash.Hash))
}
if got, want := narHash.String(), ni.NarHash.String(); got != want {
return fmt.Errorf("uploaded nar hash was %v; wanted %v", got, want)
}
ni.Compression = narinfo.CompressionXz
ni.FileHash = narinfo.Hash{
Hash: fileHasher.Sum(nil),
Algorithm: ni.NarHash.Algorithm,
}
ni.FileSize = fileByteCounter.n
return nil
}
func keyForPath(storePath string) (string, error) {
fileHash := hashExtract(storePath)
if fileHash == "" {
return "", fmt.Errorf("store path %v seems to be invalid: couldn't extract hash", storePath)
}
return fmt.Sprintf("%s.narinfo", fileHash), nil
}
func (u *uploader) uploadNARInfo(ctx context.Context, ni *narinfo.NarInfo) error {
key, err := keyForPath(ni.StorePath)
if err != nil {
return err
}
return u.bucket.WriteAll(ctx, key, []byte(ni.String()), nil)
}
func (u *uploader) uploadRefs(ctx context.Context, current string, refs []string) error {
if len(refs) == 0 {
return nil
}
eg, egctx := errgroup.WithContext(ctx)
for _, ref := range refs {
refPath := filepath.Join(u.storePath, ref)
if current == refPath {
// We depend on ourselves, which is fine.
continue
}
eg.Go(func() error {
return u.Upload(egctx, refPath)
})
}
return eg.Wait()
}
func (u *uploader) upload(ctx context.Context, path string) error {
u.st.SetState(path, stateCheckingShouldUpload)
if ok, err := u.shouldUpload(ctx, path); err != nil {
u.st.SetState(path, stateFailed)
return fmt.Errorf("determining if we should upload %v: %w", path, err)
} else if !ok {
u.st.SetState(path, stateSkipped)
return nil
}
log.Printf("Uploading %v", path)
ni, err := u.store.NARInfo(path)
if err != nil {
u.st.SetState(path, stateFailed)
return fmt.Errorf("getting narinfo for %v: %w", path, err)
}
u.st.SetState(path, stateUploadingReferences)
if err := u.uploadRefs(ctx, ni.StorePath, ni.References); err != nil {
u.st.SetState(path, stateFailed)
return fmt.Errorf("uploading references for %v: %w", path, err)
}
u.st.SetState(path, stateUploadingContent)
if !ni.NarHash.Valid() {
u.st.SetState(path, stateFailed)
return fmt.Errorf("nar hash is invalid")
}
tmpPath := fmt.Sprintf("tmp-uploading/%s", filepath.Base(path))
dst, err := u.bucket.NewWriter(ctx, tmpPath, nil)
if err != nil {
u.st.SetState(path, stateFailed)
return fmt.Errorf("creating new writer for upload of %v: %w", path, err)
}
defer dst.Close()
if err := u.uploadContent(ctx, ni, path, dst); err != nil {
u.st.SetState(path, stateFailed)
if err := dst.Close(); err == nil {
u.bucket.Delete(ctx, tmpPath)
}
return err
}
if err := dst.Close(); err != nil {
u.bucket.Delete(ctx, tmpPath)
u.st.SetState(path, stateFailed)
return fmt.Errorf("completing tmp write of %v: %w", path, err)
}
// Copy to the "correct" place.
u.st.SetState(path, stateCopyingContent)
finalDstKey := fmt.Sprintf("nar/%s.nar.xz", nixbase32.EncodeToString(ni.FileHash.Hash))
if err := u.bucket.Copy(ctx, finalDstKey, tmpPath, nil); err != nil {
u.bucket.Delete(ctx, tmpPath)
u.st.SetState(path, stateFailed)
return fmt.Errorf("copying tmp write of %v from %v to %v: %w", path, tmpPath, finalDstKey, err)
}
if err := u.bucket.Delete(ctx, tmpPath); err != nil {
u.bucket.Delete(ctx, finalDstKey)
u.st.SetState(path, stateFailed)
return fmt.Errorf("cleaning up tmp write of %v at %v: %w", path, tmpPath, err)
}
ni.URL = finalDstKey
u.st.SetState(path, stateUploadingNarinfo)
if err := u.uploadNARInfo(ctx, ni); err != nil {
u.bucket.Delete(ctx, finalDstKey)
u.st.SetState(path, stateFailed)
return fmt.Errorf("uploading narinfo for %v: %w", path, err)
}
u.st.SetState(path, stateUploaded)
return nil
}
func (u *uploader) Upload(ctx context.Context, path string) error {
resCh := u.uploadSF.DoChan(path, func() (any, error) {
err := u.upload(ctx, path)
if err != nil {
log.Printf("Uploading %v: %v", path, err)
}
return nil, err
})
select {
case <-ctx.Done():
return ctx.Err()
case res := <-resCh:
return res.Err
}
}
func main() {
flag.Parse()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
log.Printf("Using cache URL %q", *blobURLFlag)
bucket, err := blob.OpenBucket(ctx, *blobURLFlag)
if err != nil {
log.Fatalf("opening bucket %q: %v", *blobURLFlag, err)
}
defer bucket.Close()
store, err := nixstore.Open(nixstore.DefaultStoreDB)
if err != nil {
log.Fatalf("opening Nix store: %v", err)
}
defer store.Close()
u := &uploader{
bucket: bucket,
store: store,
storePath: "/nix/store",
}
go func() {
t := time.NewTicker(1 * time.Second)
defer t.Stop()
for range t.C {
log.Print(u.st.StateSummary())
}
}()
for _, p := range flag.Args() {
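// Each argument is expected to be a symlink into the store (such as a
// ./result link from nix-build): we resolve it and upload its target.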
realPath, err := os.Readlink(p)
if err != nil {
log.Fatalf("Readlink(%q): %v", p, err)
}
if err := u.Upload(ctx, realPath); err != nil {
log.Fatalf("upload(%q): %v", p, err)
}
}
}
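
The upload path above stages each NAR under a tmp-uploading/ key, copies it to its content-addressed nar/<filehash>.nar.xz key, and only then writes the .narinfo, so an interrupted run never publishes a narinfo pointing at a missing NAR. A minimal sketch of that ordering with gocloud.dev, assuming a local file:// bucket whose directory already exists (the keys and payload here are illustrative, not from the commit):

package main

import (
	"context"
	"log"

	"gocloud.dev/blob"
	_ "gocloud.dev/blob/fileblob"
)

func main() {
	ctx := context.Background()
	b, err := blob.OpenBucket(ctx, "file:///tmp/cache")
	if err != nil {
		log.Fatal(err)
	}
	defer b.Close()

	// 1. Stage the (hypothetical) NAR bytes under a temporary key.
	const tmpKey = "tmp-uploading/example"
	if err := b.WriteAll(ctx, tmpKey, []byte("nar bytes"), nil); err != nil {
		log.Fatal(err)
	}
	// 2. Promote it to its final key, then drop the staging copy.
	const finalKey = "nar/example.nar.xz"
	if err := b.Copy(ctx, finalKey, tmpKey, nil); err != nil {
		log.Fatal(err)
	}
	if err := b.Delete(ctx, tmpKey); err != nil {
		log.Fatal(err)
	}
	// 3. Only now publish the narinfo that references finalKey.
	if err := b.WriteAll(ctx, "example.narinfo", []byte("URL: "+finalKey+"\n"), nil); err != nil {
		log.Fatal(err)
	}
}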

go/nix/bcacheup/default.nix Normal file

@@ -0,0 +1,22 @@
# SPDX-FileCopyrightText: 2022 Luke Granger-Brown <depot@lukegb.com>
#
# SPDX-License-Identifier: Apache-2.0
{ depot, ... }:
depot.third_party.buildGo.program {
name = "bcacheup";
srcs = [
./bcacheup.go
];
deps = with depot; [
third_party.gopkgs."gocloud.dev".blob
third_party.gopkgs."gocloud.dev".blob.fileblob
third_party.gopkgs."gocloud.dev".blob.gcsblob
third_party.gopkgs."golang.org".x.sync.errgroup
third_party.gopkgs."golang.org".x.sync.singleflight
third_party.gopkgs."github.com".ulikunitz.xz
go.nix.nar
go.nix.nar.narinfo
go.nix.nixstore
];
}

go/nix/default.nix

@@ -5,5 +5,7 @@
args:
{
nar = import ./nar args;
+nixstore = import ./nixstore args;
bcachegc = import ./bcachegc args;
+bcacheup = import ./bcacheup args;
}

go/nix/nar/default.nix

@@ -2,7 +2,14 @@
#
# SPDX-License-Identifier: Apache-2.0
-args:
-{
+{ depot, ... }@args:
+(depot.third_party.buildGo.package {
+name = "nar";
+path = "hg.lukegb.com/lukegb/depot/go/nix/nar";
+srcs = [
+./nar.go
+./dirfs.go
+];
+}) // {
narinfo = import ./narinfo args;
}

go/nix/nar/dirfs.go Normal file

@@ -0,0 +1,56 @@
package nar
import (
"io/fs"
"os"
)
type DirFS string
func (dir DirFS) Open(name string) (fs.File, error) {
if !fs.ValidPath(name) {
return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid}
}
f, err := os.Open(dir.Join(name))
if err != nil {
return nil, err
}
return f, nil
}
func (dir DirFS) Stat(name string) (fs.FileInfo, error) {
if !fs.ValidPath(name) {
return nil, &fs.PathError{Op: "stat", Path: name, Err: fs.ErrInvalid}
}
f, err := os.Stat(dir.Join(name))
if err != nil {
return nil, err
}
return f, nil
}
func (dir DirFS) Lstat(name string) (fs.FileInfo, error) {
if !fs.ValidPath(name) {
return nil, &fs.PathError{Op: "lstat", Path: name, Err: fs.ErrInvalid}
}
f, err := os.Lstat(dir.Join(name))
if err != nil {
return nil, err
}
return f, nil
}
func (dir DirFS) Readlink(name string) (string, error) {
if !fs.ValidPath(name) {
return "", &fs.PathError{Op: "readlink", Path: name, Err: fs.ErrInvalid}
}
f, err := os.Readlink(dir.Join(name))
if err != nil {
return "", err
}
return f, nil
}
func (dir DirFS) Join(name string) string {
return string(dir) + string(os.PathSeparator) + name
}
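
DirFS exists because io/fs (as of the go 1.18 toolchain this module targets) offers no interface for Lstat or Readlink, and NAR packing must serialize a symlink as a symlink rather than follow it. A small sketch of the distinction, using a throwaway directory:

package main

import (
	"fmt"
	"log"
	"os"

	"hg.lukegb.com/lukegb/depot/go/nix/nar"
)

func main() {
	dir, err := os.MkdirTemp("", "dirfs")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)
	if err := os.WriteFile(dir+"/f.txt", []byte("hello\n"), 0o644); err != nil {
		log.Fatal(err)
	}
	if err := os.Symlink("f.txt", dir+"/link"); err != nil {
		log.Fatal(err)
	}

	fsys := nar.DirFS(dir)
	st, _ := fsys.Stat("link")   // follows the link: a regular file
	lst, _ := fsys.Lstat("link") // does not: the symlink itself
	fmt.Println(st.Mode(), lst.Mode())
	target, _ := fsys.Readlink("link")
	fmt.Println("link ->", target) // "link -> f.txt"
}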

go/nix/nar/nar.go Normal file

@@ -0,0 +1,196 @@
package nar
import (
"encoding/binary"
"fmt"
"io"
"io/fs"
"path"
"sort"
)
type serializeWriter struct {
io.Writer
}
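// WritePadding writes zero bytes to round the preceding n bytes up to the
// next 8-byte boundary; every string in a NAR is padded this way.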
func (w serializeWriter) WritePadding(n int64) (int64, error) {
if n%8 > 0 {
n, err := w.Write(make([]byte, 8-(n%8)))
return int64(n), err
}
return 0, nil
}
func (w serializeWriter) WriteUint64(n uint64) (int64, error) {
buf := make([]byte, 8)
binary.LittleEndian.PutUint64(buf, n)
wrote, err := w.Write(buf)
return int64(wrote), err
}
func (w serializeWriter) WriteString(s string) (int64, error) {
nSize, err := w.WriteUint64(uint64(len(s)))
if err != nil {
return int64(nSize), err
}
nData, err := w.Write([]byte(s))
if err != nil {
return int64(nSize) + int64(nData), err
}
nPad, err := w.WritePadding(int64(len(s)))
return int64(nSize) + int64(nData) + int64(nPad), err
}
type FS interface {
Open(string) (fs.File, error)
Stat(string) (fs.FileInfo, error)
Lstat(string) (fs.FileInfo, error)
Readlink(string) (string, error)
}
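// packFile serializes one filesystem object as a parenthesized NAR node:
// directories recurse over their entries in byte-sorted order, symlinks
// record their target, and regular files record an executable marker and
// their contents.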
func packFile(sw serializeWriter, root FS, fn string, stat fs.FileInfo) (int64, error) {
var nSoFar int64
write := func(data ...any) (int64, error) {
for _, datum := range data {
var n int64
var err error
switch datum := datum.(type) {
case string:
n, err = sw.WriteString(datum)
case uint64:
n, err = sw.WriteUint64(datum)
default:
return nSoFar, fmt.Errorf("unknown data type %T (%s)", datum, err)
}
if err != nil {
return nSoFar + n, err
}
nSoFar += n
}
return 0, nil
}
if n, err := write("("); err != nil {
return n, err
}
switch {
case stat.Mode()&fs.ModeDir != 0:
// Directory.
if n, err := write("type", "directory"); err != nil {
return n, err
}
f, err := root.Open(fn)
if err != nil {
return 0, err
}
defer f.Close()
dirF, ok := f.(fs.ReadDirFile)
if !ok {
return nSoFar, fmt.Errorf("%v didn't get me a ReadDirFile", fn)
}
dents, err := dirF.ReadDir(-1)
if err != nil {
return nSoFar, fmt.Errorf("reading dents from %v: %w", fn, err)
}
sort.Slice(dents, func(i, j int) bool {
return dents[i].Name() < dents[j].Name()
})
for _, dent := range dents {
if n, err := write("entry", "(", "name", dent.Name(), "node"); err != nil {
return n, err
}
dentStat, err := dent.Info()
if err != nil {
return nSoFar, fmt.Errorf("stat for %v: %w", path.Join(fn, dent.Name()), err)
}
n, err := packFile(sw, root, path.Join(fn, dent.Name()), dentStat)
if err != nil {
return nSoFar + n, err
}
nSoFar += n
if n, err := write(")"); err != nil {
return n, err
}
}
case stat.Mode()&fs.ModeSymlink != 0:
// Symlink.
target, err := root.Readlink(fn)
if err != nil {
return nSoFar, err
}
if n, err := write("type", "symlink", "target", target); err != nil {
return n, err
}
case stat.Mode().Type() != 0:
return 0, fmt.Errorf("not implemented (other: %s)", stat.Mode())
default:
// Regular file.
if n, err := write("type", "regular"); err != nil {
return n, err
}
if stat.Mode()&0o100 != 0 {
// Executable.
if n, err := write("executable", ""); err != nil {
return n, err
}
}
if n, err := write("contents", uint64(stat.Size())); err != nil {
return n, err
}
f, err := root.Open(fn)
if err != nil {
return 0, err
}
defer f.Close()
wrote, err := io.Copy(sw, f)
if err != nil {
return nSoFar + wrote, err
}
nSoFar += wrote
n, err := sw.WritePadding(wrote)
if err != nil {
return nSoFar + n, err
}
nSoFar += n
}
if n, err := write(")"); err != nil {
return n, err
}
return nSoFar, nil
}
func Pack(w io.Writer, fs FS, fn string) (int64, error) {
sw := serializeWriter{w}
n, err := sw.WriteString("nix-archive-1")
if err != nil {
return n, err
}
stat, err := fs.Lstat(fn)
if err != nil {
return n, fmt.Errorf("lstat(%q): %w", fn, err)
}
npf, err := packFile(sw, fs, fn, stat)
return npf + n, err
}
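
Pack streams the archive rather than buffering it, so a caller can hash a NAR without ever materializing it; this is how bcacheup verifies NarHash while uploading. A minimal sketch, reusing the testdata directory exercised by the tests below:

package main

import (
	"crypto/sha256"
	"fmt"
	"log"

	"hg.lukegb.com/lukegb/depot/go/nix/nar"
)

func main() {
	h := sha256.New()
	// Serialize testdata/dir as a NAR directly into the hash.
	n, err := nar.Pack(h, nar.DirFS("testdata"), "dir")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("nar is %d bytes, sha256:%x\n", n, h.Sum(nil))
}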

go/nix/nar/nar_test.go Normal file

@@ -0,0 +1,79 @@
package nar
import (
"bytes"
"encoding/hex"
"io"
"testing"
"github.com/google/go-cmp/cmp"
)
func TestHeader(t *testing.T) {
var b bytes.Buffer
sw := serializeWriter{&b}
wrote, err := sw.WriteString("nix-archive-1")
if err != nil {
t.Fatalf("WriteString: %v", err)
}
if want := int64(0x18); wrote != want {
t.Errorf("wrote = 0x%x; want 0x%x", wrote, want)
}
wantBuf := append(append([]byte{
// Length
0x0d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}, []byte("nix-archive-1")...), []byte{
// Padding
0x00, 0x00, 0x00,
}...)
if diff := cmp.Diff(b.Bytes(), wantBuf); diff != "" {
t.Errorf("b.Bytes() diff (-got +want):\n%s", diff)
}
t.Logf("\n%s", hex.Dump(b.Bytes()))
}
func TestPack(t *testing.T) {
fs := DirFS("testdata")
for _, tc := range []struct {
f string
golden string
}{{
f: "f.txt",
golden: "f.txt.nar",
}, {
f: "dir",
golden: "dir.nar",
}, {
f: "dirWithBadSymlink",
golden: "dirWithBadSymlink.nar",
}} {
t.Run(tc.f, func(t *testing.T) {
var b bytes.Buffer
wrote, err := Pack(&b, fs, tc.f)
if err != nil {
t.Fatalf("Pack: %v", err)
}
golden, err := fs.Open(tc.golden)
if err != nil {
t.Fatalf("opening golden %v: %v", tc.golden, err)
}
defer golden.Close()
want, err := io.ReadAll(golden)
if err != nil {
t.Fatalf("reading golden %v: %v", tc.golden, err)
}
if wrote != int64(len(b.Bytes())) {
t.Errorf("wrote (%d) != len(b.Bytes()) (%d)", wrote, int64(len(b.Bytes())))
}
if diff := cmp.Diff(b.Bytes(), want); diff != "" {
t.Errorf("b.Bytes() diff (-got +want):\n%s", diff)
}
})
}
}

go/nix/nar/narinfo/narinfo.go

@@ -94,7 +94,7 @@ func (a HashAlgorithm) String() string {
return "!!unknown!!"
}
-func (a HashAlgorithm) hash() hash.Hash {
+func (a HashAlgorithm) Hash() hash.Hash {
switch a {
case HashMd5:
return md5.New()
@@ -109,7 +109,7 @@ func (a HashAlgorithm) hash() hash.Hash {
}
func (a HashAlgorithm) sizes() sizes {
-sz := a.hash().Size()
+sz := a.Hash().Size()
return sizes{
rawLen: sz,
base16Len: hex.EncodedLen(sz),
@@ -144,7 +144,7 @@
return fmt.Sprintf("%s:%s", h.Algorithm, nixbase32.EncodeToString(h.Hash))
}
-func hashFromString(s string) (Hash, error) {
+func HashFromString(s string) (Hash, error) {
var h Hash
idx := strings.IndexAny(s, "-:")
if idx == -1 {
@@ -316,7 +316,7 @@ func ParseNarInfo(r io.Reader) (*NarInfo, error) {
return nil, fmt.Errorf("unknown compression method %q", value)
}
case "FileHash":
-h, err := hashFromString(value)
+h, err := HashFromString(value)
if err != nil {
return nil, fmt.Errorf("parsing %q as FileHash: %w", value, err)
}
@@ -328,7 +328,7 @@ func ParseNarInfo(r io.Reader) (*NarInfo, error) {
return nil, fmt.Errorf("parsing %q as FileSize: %w", value, err)
}
case "NarHash":
-h, err := hashFromString(value)
+h, err := HashFromString(value)
if err != nil {
return nil, fmt.Errorf("parsing %q as NarHash: %w", value, err)
}

go/nix/nar/testdata/dir.nar vendored Normal file

Binary file not shown.

go/nix/nar/testdata/dir/f.txt vendored Normal file

@@ -0,0 +1 @@
hello

go/nix/nar/testdata/dir/f2.txt vendored Normal file

@@ -0,0 +1 @@
f2

go/nix/nar/testdata/dir/symlink vendored Symbolic link

@@ -0,0 +1 @@
f.txt

go/nix/nar/testdata/dirWithBadSymlink.nar vendored Normal file

Binary file not shown.


@@ -0,0 +1 @@
/dir

go/nix/nar/testdata/f.txt vendored Normal file

@@ -0,0 +1 @@
hello

go/nix/nar/testdata/f.txt.nar vendored Normal file

Binary file not shown.

go/nix/nixstore/default.nix Normal file

@@ -0,0 +1,16 @@
# SPDX-FileCopyrightText: 2022 Luke Granger-Brown <depot@lukegb.com>
#
# SPDX-License-Identifier: Apache-2.0
{ depot, ... }:
depot.third_party.buildGo.package {
name = "nixstore";
path = "hg.lukegb.com/lukegb/depot/go/nix/nixstore";
srcs = [
./nixstore.go
];
deps = with depot; [
go.nix.nar.narinfo
third_party.gopkgs."github.com".mattn.go-sqlite3
];
}

go/nix/nixstore/nixstore.go Normal file

@@ -0,0 +1,130 @@
package nixstore
import (
"database/sql"
"encoding/base64"
"fmt"
"path"
"strings"
"hg.lukegb.com/lukegb/depot/go/nix/nar/narinfo"
_ "github.com/mattn/go-sqlite3"
)
const DefaultStoreDB = "/nix/var/nix/db/db.sqlite"
type DB struct {
db *sql.DB
}
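// NARInfo reconstructs the narinfo metadata for a store path from Nix's own
// SQLite database (the ValidPaths table plus its Refs edges).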
func (d *DB) NARInfo(storePath string) (*narinfo.NarInfo, error) {
stmt, err := d.db.Prepare(`
SELECT
vp.id,
vp.path,
vp.hash,
vp.deriver,
vp.narSize,
vp.sigs,
1 FROM
ValidPaths vp
WHERE 1=1
AND vp.path = ?
`)
if err != nil {
return nil, fmt.Errorf("preparing initial statement: %w", err)
}
defer stmt.Close()
ni := narinfo.NarInfo{}
var storePathID int
var dummy int
var hashStr string
var deriverStr *string
var sigsStr *string
err = stmt.QueryRow(storePath).Scan(
&storePathID,
&ni.StorePath,
&hashStr,
&deriverStr,
&ni.NarSize,
&sigsStr,
&dummy)
if err != nil {
return nil, fmt.Errorf("scanning initial statement: %w", err)
}
ni.NarHash, err = narinfo.HashFromString(hashStr)
if err != nil {
return nil, fmt.Errorf("parsing hash %q: %w", hashStr, err)
}
if deriverStr != nil {
ni.Deriver = path.Base(*deriverStr)
}
if sigsStr != nil {
sigsBits := strings.Fields(*sigsStr)
sigs := make(map[string][]byte)
for _, sigsBit := range sigsBits {
sigsPieces := strings.Split(sigsBit, ":")
if len(sigsPieces) != 2 {
return nil, fmt.Errorf("parsing signature %q: wrong number of : separated pieces (%d)", sigsBit, len(sigsPieces))
}
var err error
sigs[sigsPieces[0]], err = base64.StdEncoding.DecodeString(sigsPieces[1])
if err != nil {
return nil, fmt.Errorf("parsing signature %q: invalid base64: %w", sigsBit, err)
}
}
ni.Sig = sigs
}
referencesStmt, err := d.db.Prepare(`
SELECT
refedvp.path
FROM
Refs r
INNER JOIN
ValidPaths refedvp ON refedvp.id = r.reference
WHERE
r.referrer = ?
ORDER BY 1
`)
if err != nil {
return nil, fmt.Errorf("preparing references statement: %w", err)
}
defer referencesStmt.Close()
referencesRows, err := referencesStmt.Query(storePathID)
if err != nil {
return nil, fmt.Errorf("querying references: %w", err)
}
defer referencesRows.Close()
for referencesRows.Next() {
var refStorePath string
if err := referencesRows.Scan(&refStorePath); err != nil {
return nil, fmt.Errorf("scanning references: %w", err)
}
ni.References = append(ni.References, path.Base(refStorePath))
}
return &ni, nil
}
func (d *DB) Close() error {
return d.db.Close()
}
func Open(dbPath string) (*DB, error) {
sqlDB, err := sql.Open("sqlite3", dbPath)
if err != nil {
return nil, err
}
return &DB{
db: sqlDB,
}, nil
}
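
Typical read-only use, as in bcacheup (the store path here is the machine-specific one from the test below; Open needs read access to the Nix database):

package main

import (
	"fmt"
	"log"

	"hg.lukegb.com/lukegb/depot/go/nix/nixstore"
)

func main() {
	db, err := nixstore.Open(nixstore.DefaultStoreDB)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	ni, err := db.NARInfo("/nix/store/yk8ps7v1jhwpj82pigmqjb68ln7bgjbn-acl-2.3.1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ni.StorePath, ni.NarSize, ni.References)
}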

go/nix/nixstore/nixstore_test.go Normal file

@@ -0,0 +1,18 @@
package nixstore
import "testing"
func TestNARInfo(t *testing.T) {
db, err := Open(DefaultStoreDB)
if err != nil {
t.Fatalf("Open: %v", err)
}
ni, err := db.NARInfo("/nix/store/yk8ps7v1jhwpj82pigmqjb68ln7bgjbn-acl-2.3.1")
if err != nil {
t.Fatalf("NARInfo: %v", err)
}
t.Logf("%#v", ni)
t.Log(ni.String())
t.Error("meep")
}

third_party/gopkgs/github.com/mattn/go-sqlite3/default.nix Normal file

@@ -0,0 +1,23 @@
# SPDX-FileCopyrightText: 2021 Luke Granger-Brown <depot@lukegb.com>
#
# SPDX-License-Identifier: Apache-2.0
{ depot, pkgs, ... }:
depot.third_party.buildGo.external {
path = "github.com/mattn/go-sqlite3";
src = depot.third_party.nixpkgs.fetchFromGitHub {
owner = "mattn";
repo = "go-sqlite3";
rev = "v1.14.15";
hash = "sha256:0rkila12zj2q3bzyljg3l6jya9n1i0z625pjblzgmm0xyrkk1i1v";
};
tags = [ "linux" "libsqlite3" ];
cgo = true;
cgodeps = with pkgs; [
sqlite
];
deps = with depot.third_party; [
#gopkgs."github.com".mattn.go-isatty
];
}

third_party/gopkgs/github.com/ulikunitz/xz/default.nix Normal file

@@ -0,0 +1,14 @@
# SPDX-FileCopyrightText: 2022 Luke Granger-Brown <depot@lukegb.com>
#
# SPDX-License-Identifier: Apache-2.0
{ depot, ... }:
depot.third_party.buildGo.external {
path = "github.com/ulikunitz/xz";
src = depot.third_party.nixpkgs.fetchFromGitHub {
owner = "ulikunitz";
repo = "xz";
rev = "v0.5.10";
hash = "sha256:07vynk0sh8i8g7x9p9x04dj8wylvxaf8ypbi43yvcv7j6zd63c72";
};
}