Additions to domain/ and workspace/ chron-note packages

This commit is contained in:
2026-03-11 14:03:52 +08:00
parent 8edeb3ac99
commit 34d18e244e
16 changed files with 744 additions and 3 deletions

View File

@@ -1,3 +1,81 @@
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"git.michelsen.id/phill/chron/chron-note/internal/domain"
	"git.michelsen.id/phill/chron/chron-note/internal/workspace"
)

// must panics on a non-nil error. Acceptable in this demo binary,
// which has no recovery path and wants loud, immediate failure.
func must(err error) {
	if err != nil {
		panic(err)
	}
}

// main exercises the workspace package end to end: open a workspace,
// create three objects, list them, read them back, and show where they
// live on disk. The workspace root defaults to "." and can be
// overridden by the first CLI argument.
//
// Fix: the original file had a stray duplicate `func main() {}`
// between the package clause and the import block, which is invalid Go
// (declarations may not precede imports, and main was defined twice).
func main() {
	root := "."
	if len(os.Args) > 1 {
		root = os.Args[1]
	}

	ws, err := workspace.Open(root)
	must(err)

	fmt.Println("Workspace directory:")
	fmt.Println(ws.Dir)
	fmt.Println()

	fmt.Println("Creating objects...")
	var ids []domain.ObjectID
	for i := range 3 {
		id, err := domain.NewObjectID()
		must(err)
		content := fmt.Sprintf("object %d\nid: %s\n", i, id.String())
		must(ws.Write(id, []byte(content)))
		ids = append(ids, id)
		fmt.Printf("Created object %s\n", id)
		fmt.Printf("Path: %s\n\n", ws.ObjectPath(id))
	}

	fmt.Println("Listing workspace objects:")
	fmt.Println()
	list, err := ws.ListObjectIDs()
	must(err)
	for _, id := range list {
		fi, err := ws.Stat(id)
		must(err)
		fmt.Printf("Object: %s\n", id)
		fmt.Printf("Size: %d bytes\n", fi.Size())
		fmt.Printf("Path: %s\n", ws.ObjectPath(id))
		fmt.Println()
	}

	fmt.Println("Reading objects back:")
	fmt.Println()
	for _, id := range ids {
		data, err := ws.Read(id)
		must(err)
		fmt.Printf("Object %s contents:\n", id)
		fmt.Println(string(data))
		fmt.Println("---")
	}

	fmt.Println()
	fmt.Println("Filesystem view:")
	fmt.Println(filepath.Join(ws.Dir, "..."))
}

View File

@@ -1,2 +1,11 @@
package domain
import "errors"
// Sentinel errors returned by the domain package. Callers should
// match them with errors.Is rather than comparing strings.
var (
	ErrInvalidObjectID  = errors.New("invalid object id")  // malformed object identifier
	ErrInvalidBlobID    = errors.New("invalid blob id")    // malformed blob identifier
	ErrUnknownEventType = errors.New("unknown event type") // unrecognized event type byte
	ErrInvalidEvent     = errors.New("invalid event")      // payload inconsistent with its type
	ErrDecode           = errors.New("decode error")       // event bytes failed to decode
)

View File

@@ -1 +1,137 @@
package domain
import (
"fmt"
"git.michelsen.id/phill/chron/chron-note/internal/util"
)
// EventType discriminates the variants carried by Event.
type EventType uint8

const (
	EventObjectUpsert EventType = 1 // object created or modified
	EventObjectDelete EventType = 2 // object removed
)

// encodingVersion is the wire-format version written as the first byte
// of every encoded event; DecodeEvent rejects any other value.
const encodingVersion uint8 = 1

// ObjectUpsert is the payload for EventObjectUpsert.
type ObjectUpsert struct {
	ObjectID ObjectID // identity of the object being upserted
	Name     string   // display name; re-normalized on decode via NormalizeName
	Tags     []string // re-normalized/deduplicated on decode via NormalizeTags
	HasBlob  bool     // true when Blob carries a meaningful value
	Blob     BlobID   // content hash; only valid when HasBlob is set
}

// ObjectDelete is the payload for EventObjectDelete.
type ObjectDelete struct {
	ObjectID ObjectID
}

// Event is a tagged union: exactly the pointer matching Type is
// expected to be non-nil (EncodeEvent enforces this).
type Event struct {
	Type   EventType
	Upsert *ObjectUpsert
	Delete *ObjectDelete
}
// EncodeEvent serializes e into the versioned wire format:
//
//	u8 version | u8 type | type-specific payload
//
// Upsert payload: u8 flags (bit0 = HasBlob) | 16-byte ObjectID |
// length-prefixed Name | length-prefixed Tags | 32-byte BlobID (only
// when HasBlob is set). Delete payload: 16-byte ObjectID.
// Field order here must match DecodeEvent exactly.
// Returns ErrInvalidEvent when the payload pointer for e.Type is nil,
// or ErrUnknownEventType for an unrecognized type.
func EncodeEvent(e Event) ([]byte, error) {
	enc := util.NewEncoder(nil)
	enc.U8(encodingVersion)
	enc.U8(uint8(e.Type))
	switch e.Type {
	case EventObjectUpsert:
		if e.Upsert == nil {
			return nil, ErrInvalidEvent
		}
		u := e.Upsert
		var flags uint8
		if u.HasBlob {
			flags |= 0x01
		}
		enc.U8(flags)
		enc.BytesFixed(u.ObjectID[:])
		enc.String(u.Name)
		enc.StringSlice(u.Tags)
		// Blob bytes are written only when flagged, saving 32 bytes
		// per blob-less event; the decoder mirrors this condition.
		if u.HasBlob {
			enc.BytesFixed(u.Blob[:])
		}
	case EventObjectDelete:
		if e.Delete == nil {
			return nil, ErrInvalidEvent
		}
		enc.BytesFixed(e.Delete.ObjectID[:])
	default:
		return nil, ErrUnknownEventType
	}
	// Encoder errors are sticky; one check at the end covers all writes.
	if err := enc.Err(); err != nil {
		return nil, err
	}
	return enc.Bytes(), nil
}
// DecodeEvent parses bytes produced by EncodeEvent. It validates the
// leading version byte, dispatches on the type byte, and returns
// ErrDecode (possibly wrapped with the bad version) for truncated or
// garbled input, or ErrUnknownEventType for an unrecognized type.
// Name and Tags are re-normalized on decode so corrupt or legacy input
// cannot introduce unnormalized state into the domain.
// NOTE(review): bytes trailing a well-formed event are silently
// ignored — confirm the log framing layer guarantees exact-size input.
func DecodeEvent(b []byte) (Event, error) {
	dec := util.NewDecoder(b)
	ver := dec.U8()
	if dec.Err() != nil {
		return Event{}, ErrDecode
	}
	if ver != encodingVersion {
		return Event{}, fmt.Errorf("%w: unsupported encoding version %d", ErrDecode, ver)
	}
	typ := EventType(dec.U8())
	if dec.Err() != nil {
		return Event{}, ErrDecode
	}
	switch typ {
	case EventObjectUpsert:
		flags := dec.U8()
		var objID ObjectID
		copy(objID[:], dec.BytesFixed(len(objID)))
		name := dec.String()
		tags := dec.StringSlice()
		hasBlob := (flags & 0x01) != 0
		var blob BlobID
		if hasBlob {
			copy(blob[:], dec.BytesFixed(len(blob)))
		}
		// Decoder errors are sticky, so this one check covers every
		// read above (BytesFixed returns nil on failure; copy of nil
		// is a harmless no-op).
		if dec.Err() != nil {
			return Event{}, ErrDecode
		}
		return Event{
			Type: typ,
			Upsert: &ObjectUpsert{
				ObjectID: objID,
				Name:     NormalizeName(name),
				Tags:     NormalizeTags(tags),
				HasBlob:  hasBlob,
				Blob:     blob,
			},
		}, nil
	case EventObjectDelete:
		var objID ObjectID
		copy(objID[:], dec.BytesFixed(len(objID)))
		if dec.Err() != nil {
			return Event{}, ErrDecode
		}
		return Event{Type: typ, Delete: &ObjectDelete{ObjectID: objID}}, nil
	default:
		return Event{}, ErrUnknownEventType
	}
}

View File

@@ -1 +1,74 @@
package domain
import (
"crypto/rand"
"encoding/hex"
"strings"
)
// ObjectID is a 16-byte random identifier for a workspace object.
type ObjectID [16]byte

// BlobID is a 32-byte content-hash identifier for blob data.
type BlobID [32]byte

// NewObjectID draws a fresh identifier from crypto/rand.
func NewObjectID() (ObjectID, error) {
	var id ObjectID
	_, err := rand.Read(id[:])
	return id, err
}

// ParseObjectID parses the hex form of an ObjectID, tolerating
// surrounding whitespace. It returns ErrInvalidObjectID for anything
// that is not exactly 16 bytes of valid hex.
func ParseObjectID(s string) (ObjectID, error) {
	var id ObjectID
	raw, err := hex.DecodeString(strings.TrimSpace(s))
	if err != nil || len(raw) != len(id) {
		return ObjectID{}, ErrInvalidObjectID
	}
	copy(id[:], raw)
	return id, nil
}

// String renders the identifier as lowercase hex.
func (id ObjectID) String() string {
	return hex.EncodeToString(id[:])
}

// ParseBlobID parses the hex form of a BlobID, tolerating surrounding
// whitespace. It returns ErrInvalidBlobID for anything that is not
// exactly 32 bytes of valid hex.
func ParseBlobID(s string) (BlobID, error) {
	var id BlobID
	raw, err := hex.DecodeString(strings.TrimSpace(s))
	if err != nil || len(raw) != len(id) {
		return BlobID{}, ErrInvalidBlobID
	}
	copy(id[:], raw)
	return id, nil
}

// String renders the identifier as lowercase hex.
func (id BlobID) String() string {
	return hex.EncodeToString(id[:])
}
// ObjectState is the materialized view of one object after replaying
// its events.
type ObjectState struct {
	ID ObjectID // identity of the object
	Name string // last upserted name
	Tags []string // last upserted tag set
	Blob BlobID // content hash; only meaningful when HasBlob is true
	HasBlob bool // whether Blob is set
	Deleted bool // true once an EventObjectDelete has been applied
}
// NormalizeName canonicalizes an object name by stripping leading and
// trailing whitespace. Normalization lives in domain so that every
// layer applies the identical rule.
func NormalizeName(s string) string {
	return strings.TrimSpace(s)
}
// NormalizeTags canonicalizes a tag list: each tag is lowercased and
// whitespace-trimmed, empties are dropped, and duplicates are removed
// while preserving first-seen order. Always returns a non-nil slice.
func NormalizeTags(tags []string) []string {
	seen := make(map[string]struct{}, len(tags))
	out := make([]string, 0, len(tags))
	for _, raw := range tags {
		tag := strings.TrimSpace(strings.ToLower(raw))
		if tag == "" {
			continue
		}
		if _, dup := seen[tag]; dup {
			continue
		}
		seen[tag] = struct{}{}
		out = append(out, tag)
	}
	return out
}

View File

@@ -1 +0,0 @@
package index

View File

@@ -1 +0,0 @@
package index

View File

@@ -0,0 +1 @@
package state

View File

@@ -0,0 +1 @@
package state

View File

@@ -0,0 +1,14 @@
package cas
import (
"context"
"git.michelsen.id/phill/chron/chron-note/internal/domain"
)
// Store is a content-addressed blob store: Put derives the key from
// the data itself, so identical content always maps to the same BlobID.
// NOTE(review): the FS implementation in this commit defines Put, Get,
// and Has but no Remove is visible here — confirm Remove is
// implemented elsewhere or FS does not satisfy Store.
type Store interface {
	Put(ctx context.Context, data []byte) (domain.BlobID, error) // id = blake3-256(data)
	Get(ctx context.Context, id domain.BlobID) ([]byte, error)
	Has(ctx context.Context, id domain.BlobID) (bool, error)
	Remove(ctx context.Context, id domain.BlobID) error
}

View File

@@ -0,0 +1,93 @@
package cas
import (
"context"
"encoding/hex"
"errors"
"fmt"
"os"
"path/filepath"
"git.michelsen.id/phill/chron/chron-note/internal/domain"
"git.michelsen.id/phill/chron/chron-note/internal/util"
)
// ErrBlobNotFound is returned by Get when no blob exists for the id.
var ErrBlobNotFound = errors.New("blob not found")

// FS is a filesystem-backed content-addressed store rooted at a single
// directory, with blobs fanned out git-style (see pathFor).
type FS struct {
	root string
}

// NewFS returns a store rooted at root. The directory is created
// lazily on first Put, not here.
func NewFS(root string) *FS {
	return &FS{root: root}
}
// Put stores data under its BLAKE3-256 hash and returns the resulting
// BlobID. The write is atomic: data goes to a unique temp file first
// and is renamed into place, so readers never observe partial content.
// Storing content that already exists is a cheap no-op.
func (s *FS) Put(ctx context.Context, data []byte) (domain.BlobID, error) {
	_ = ctx // reserved; no cancellation points in this implementation
	sum := util.Hash256(data)
	var id domain.BlobID
	copy(id[:], sum[:])
	p := s.pathFor(id)
	// Fast path: the store is content-addressed, so an existing file
	// at this path already holds exactly this data.
	if _, err := os.Stat(p); err == nil {
		return id, nil
	}
	if err := os.MkdirAll(filepath.Dir(p), 0o755); err != nil {
		return domain.BlobID{}, err
	}
	// Fix: use a unique temp file per writer. The previous fixed
	// p+".tmp" name let two concurrent Puts of the same blob write to
	// the same temp file and clobber each other mid-write. This also
	// matches the CreateTemp pattern used by workspace.Write.
	f, err := os.CreateTemp(filepath.Dir(p), "put-*.tmp")
	if err != nil {
		return domain.BlobID{}, err
	}
	tmp := f.Name()
	defer func() { _ = os.Remove(tmp) }() // no-op once the rename succeeds
	if _, err := f.Write(data); err != nil {
		_ = f.Close()
		return domain.BlobID{}, err
	}
	// CreateTemp creates files 0o600; keep the store's previous 0o644.
	if err := f.Chmod(0o644); err != nil {
		_ = f.Close()
		return domain.BlobID{}, err
	}
	if err := f.Close(); err != nil {
		return domain.BlobID{}, err
	}
	if err := os.Rename(tmp, p); err != nil {
		// If another writer won the race, its content is identical
		// (same hash, same data); accept the existing file.
		if _, statErr := os.Stat(p); statErr == nil {
			return id, nil
		}
		return domain.BlobID{}, err
	}
	return id, nil
}
// Get returns the blob bytes for id, or ErrBlobNotFound when the store
// holds no such blob.
func (s *FS) Get(ctx context.Context, id domain.BlobID) ([]byte, error) {
	_ = ctx
	data, err := os.ReadFile(s.pathFor(id))
	switch {
	case err == nil:
		return data, nil
	case os.IsNotExist(err):
		return nil, ErrBlobNotFound
	default:
		return nil, err
	}
}
// Has reports whether a blob with the given id is present on disk.
func (s *FS) Has(ctx context.Context, id domain.BlobID) (bool, error) {
	_ = ctx
	switch _, err := os.Stat(s.pathFor(id)); {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, err
	}
}
// pathFor maps a blob id to <root>/aa/bb/<fullhex>, a two-level
// git-style fanout that keeps individual directories small.
func (s *FS) pathFor(id domain.BlobID) string {
	h := hex.EncodeToString(id[:])
	if len(h) < 4 {
		// Unreachable for a fixed-size BlobID; kept as a defensive guard.
		return filepath.Join(s.root, fmt.Sprintf("bad-%s", h))
	}
	return filepath.Join(s.root, h[:2], h[2:4], h)
}

View File

@@ -1 +1,133 @@
package util
import (
"encoding/binary"
"errors"
)
var ErrDecode = errors.New("decode")
type Encoder struct {
b []byte
err error
}
func NewEncoder(dst []byte) *Encoder { return &Encoder{b: dst} }
func (e *Encoder) Bytes() []byte { return e.b }
func (e *Encoder) Err() error { return e.err }
func (e *Encoder) U8(v uint8) {
if e.err != nil {
return
}
e.b = append(e.b, v)
}
func (e *Encoder) U32(v uint32) {
if e.err != nil {
return
}
var tmp [4]byte
binary.LittleEndian.PutUint32(tmp[:], v)
e.b = append(e.b, tmp[:]...)
}
func (e *Encoder) BytesFixed(p []byte) {
if e.err != nil {
return
}
e.b = append(e.b, p...)
}
func (e *Encoder) BytesLen(p []byte) {
if e.err != nil {
return
}
e.U32(uint32(len(p)))
e.b = append(e.b, p...)
}
func (e *Encoder) String(s string) {
e.BytesLen([]byte(s))
}
func (e *Encoder) StringSlice(ss []string) {
if e.err != nil {
return
}
e.U32(uint32(len(ss)))
for _, s := range ss {
e.String(s)
}
}
type Decoder struct {
b []byte
off int
err error
}
func NewDecoder(src []byte) *Decoder { return &Decoder{b: src} }
func (d *Decoder) Err() error { return d.err }
func (d *Decoder) need(n int) bool {
if d.err != nil {
return false
}
if n < 0 || d.off+n > len(d.b) {
d.err = ErrDecode
return false
}
return true
}
func (d *Decoder) U8() uint8 {
if !d.need(1) {
return 0
}
v := d.b[d.off]
d.off++
return v
}
func (d *Decoder) U32() uint32 {
if !d.need(4) {
return 0
}
v := binary.LittleEndian.Uint32(d.b[d.off : d.off+4])
d.off += 4
return v
}
func (d *Decoder) BytesFixed(n int) []byte {
if !d.need(n) {
return nil
}
out := d.b[d.off : d.off+n]
d.off += n
return out
}
func (d *Decoder) BytesLen() []byte {
n := int(d.U32())
return d.BytesFixed(n)
}
func (d *Decoder) String() string {
return string(d.BytesLen())
}
func (d *Decoder) StringSlice() []string {
n := int(d.U32())
if d.Err() != nil {
return nil
}
out := make([]string, 0, n)
for range n {
out = append(out, d.String())
if d.Err() != nil {
return nil
}
}
return out
}

View File

@@ -1 +1,8 @@
package util
import "lukechampine.com/blake3"
// Hash256 returns the BLAKE3-256 digest of b.
func Hash256(b []byte) [32]byte {
	return blake3.Sum256(b)
}

View File

@@ -0,0 +1,69 @@
package workspace
import (
"fmt"
"os"
"git.michelsen.id/phill/chron/chron-note/internal/domain"
)
// Exists reports whether an object file for id is present on disk.
func (ws *Workspace) Exists(id domain.ObjectID) (bool, error) {
	switch _, err := os.Stat(ws.ObjectPath(id)); {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, fmt.Errorf("stat object %s: %w", id.String(), err)
	}
}
// Read returns the full contents of the object file for id.
func (ws *Workspace) Read(id domain.ObjectID) ([]byte, error) {
	data, err := os.ReadFile(ws.ObjectPath(id))
	if err != nil {
		return nil, fmt.Errorf("read object %s: %w", id.String(), err)
	}
	return data, nil
}
// Write atomically replaces the object file for id with b. Data is
// written to a unique temp file in the workspace directory, synced,
// and renamed over the destination, so concurrent readers never see a
// partially written object.
//
// Fixes: the original ran `_ = os.Remove(tmp)` after a successful
// rename with a comment claiming it "prevents deferred cleanup" — it
// did not (the deferred Remove still ran) and it targeted a path that
// no longer existed. A `renamed` flag now actually skips the cleanup.
// A Sync before rename is added so a crash cannot leave dst pointing
// at an empty or truncated file.
func (ws *Workspace) Write(id domain.ObjectID, b []byte) error {
	if err := os.MkdirAll(ws.Dir, 0o755); err != nil {
		return fmt.Errorf("mkdir workspace dir: %w", err)
	}
	dst := ws.ObjectPath(id)
	f, err := os.CreateTemp(ws.Dir, id.String()+".*.tmp")
	if err != nil {
		return fmt.Errorf("create temp for object %s: %w", id.String(), err)
	}
	tmp := f.Name()
	// Best-effort cleanup on any failure path; skipped once the rename
	// has moved the temp file into place.
	renamed := false
	defer func() {
		if !renamed {
			_ = os.Remove(tmp)
		}
	}()
	if _, err := f.Write(b); err != nil {
		_ = f.Close()
		return fmt.Errorf("write temp for object %s: %w", id.String(), err)
	}
	// Flush to stable storage before the rename so the atomic-replace
	// guarantee also holds across a crash.
	if err := f.Sync(); err != nil {
		_ = f.Close()
		return fmt.Errorf("sync temp for object %s: %w", id.String(), err)
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("close temp for object %s: %w", id.String(), err)
	}
	if err := os.Rename(tmp, dst); err != nil {
		return fmt.Errorf("rename temp for object %s: %w", id.String(), err)
	}
	renamed = true
	return nil
}
// Delete removes the object file for id. A missing file is not an
// error: the desired end state (object absent) already holds.
func (ws *Workspace) Delete(id domain.ObjectID) error {
	if err := os.Remove(ws.ObjectPath(id)); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("delete object %s: %w", id.String(), err)
	}
	return nil
}

View File

@@ -0,0 +1,46 @@
package workspace
import (
"fmt"
"io/fs"
"os"
"git.michelsen.id/phill/chron/chron-note/internal/domain"
)
// ListObjectIDs scans the workspace directory and returns the ID of
// every regular file whose name parses as an ObjectID. Directories,
// non-regular files, and files with non-ID names (e.g. leftover temp
// files) are silently skipped.
func (ws *Workspace) ListObjectIDs() ([]domain.ObjectID, error) {
	entries, err := os.ReadDir(ws.Dir)
	if err != nil {
		return nil, fmt.Errorf("readdir workspace dir: %w", err)
	}
	ids := make([]domain.ObjectID, 0, len(entries))
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		info, err := entry.Info()
		if err != nil {
			return nil, fmt.Errorf("stat workspace entry: %w", err)
		}
		if !info.Mode().IsRegular() {
			continue
		}
		// Filenames that are not valid object IDs are not ours to report.
		if id, err := domain.ParseObjectID(entry.Name()); err == nil {
			ids = append(ids, id)
		}
	}
	return ids, nil
}
// Stat returns filesystem metadata for the object file backing id.
func (ws *Workspace) Stat(id domain.ObjectID) (fs.FileInfo, error) {
	info, err := os.Stat(ws.ObjectPath(id))
	if err != nil {
		return nil, fmt.Errorf("stat object %s: %w", id.String(), err)
	}
	return info, nil
}

View File

@@ -0,0 +1,52 @@
package workspace
import (
"context"
"fmt"
"os"
"git.michelsen.id/phill/chron/chron-note/internal/domain"
)
// SnapshotObject is one entry of a point-in-time workspace listing.
type SnapshotObject struct {
	ID domain.ObjectID // object identity, parsed from the filename
	Size int64 // file size in bytes at snapshot time
	ModNS int64 // modification time, nanoseconds since the Unix epoch
}
// Snapshot lists every valid object file in the workspace together
// with its size and modification time. Cancellation of ctx is honored
// between directory entries. The same filtering rules as
// ListObjectIDs apply: directories, non-regular files, and entries
// whose names do not parse as object IDs are skipped.
func (ws *Workspace) Snapshot(ctx context.Context) ([]SnapshotObject, error) {
	entries, err := os.ReadDir(ws.Dir)
	if err != nil {
		return nil, fmt.Errorf("readdir workspace dir: %w", err)
	}
	snaps := make([]SnapshotObject, 0, len(entries))
	for _, entry := range entries {
		if err := ctx.Err(); err != nil {
			return nil, err
		}
		if entry.IsDir() {
			continue
		}
		info, err := entry.Info()
		if err != nil {
			return nil, fmt.Errorf("stat workspace entry: %w", err)
		}
		if !info.Mode().IsRegular() {
			continue
		}
		id, err := domain.ParseObjectID(entry.Name())
		if err != nil {
			// Not an object file (temp file, stray entry); skip.
			continue
		}
		snaps = append(snaps, SnapshotObject{
			ID:    id,
			Size:  info.Size(),
			ModNS: info.ModTime().UnixNano(),
		})
	}
	return snaps, nil
}

View File

@@ -0,0 +1,32 @@
package workspace
import (
"fmt"
"os"
"path/filepath"
"git.michelsen.id/phill/chron/chron-note/internal/domain"
)
// Workspace is a flat on-disk directory of object files stored under
// <Root>/.chron/workspace.
type Workspace struct {
	Root string // user-supplied project root, cleaned by Open
	Dir string // object directory: Root/.chron/workspace
}
// Open prepares the workspace rooted at root, creating the
// .chron/workspace directory tree if needed, and returns a handle.
func Open(root string) (*Workspace, error) {
	cleaned := filepath.Clean(root)
	dir := filepath.Join(cleaned, ".chron", "workspace")
	if err := os.MkdirAll(dir, 0o755); err != nil {
		return nil, fmt.Errorf("mkdir workspace dir: %w", err)
	}
	ws := &Workspace{Root: cleaned, Dir: dir}
	return ws, nil
}
// ObjectPath returns the path of the file that stores id: ws.Dir
// joined with the ID's hex string, which is also how ListObjectIDs
// recognizes object files.
func (ws *Workspace) ObjectPath(id domain.ObjectID) string {
	return filepath.Join(ws.Dir, id.String())
}