-
Notifications
You must be signed in to change notification settings - Fork 3.8k
fix(op-deployer): avoid embedded artifacts recompilation #17699
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: develop
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,175 @@ | ||
package main | ||
|
||
import ( | ||
"archive/tar" | ||
"flag" | ||
"io" | ||
"log" | ||
"os" | ||
"path/filepath" | ||
"strings" | ||
|
||
"github.com/klauspost/compress/zstd" | ||
) | ||
|
||
// Command-line flags. Both are required; main exits with a usage
// message when either is left empty.
var (
	baseDir = flag.String("base", "", "directory to archive")
	outFile = flag.String("out", "", "path to output tzst")
)
|
||
// mktar creates a zstd-compressed tarball of the given base directory. | ||
// It excludes certain directories and files that are not needed for the | ||
// forge client. | ||
// | ||
// Usage: mktar -base DIR -out FILE | ||
// | ||
// Example: mktar -base ../packages/contracts-bedrock -out ./pkg/deployer/artifacts/forge-artifacts/artifacts.tzst | ||
// | ||
// The output file will be a zstd-compressed tarball of the given base directory. | ||
// Do not confuse this script with the ops/publish-artifacts.sh script, which is | ||
// used to publish the tarball to GCS. | ||
func main() { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Would be good to add some documentation (via small comment block) explaining what this program does. I'm curious what made you go with the golang program approach instead of via bash commands in the justfile as originally implemented. Maybe the tar cli command didn't provide enough configurability? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Sadly zstd is not available through mise, we would need to ask anyone to install it on their OS using a package manager or recompile it. Using a go dependency is the less troublesome way to get zstd. |
||
flag.Parse() | ||
|
||
if *baseDir == "" || *outFile == "" { | ||
log.Fatalf("usage: mktar -base DIR -out FILE") | ||
} | ||
|
||
absBase, err := filepath.Abs(*baseDir) | ||
if err != nil { | ||
log.Fatalf("resolve base: %v", err) | ||
} | ||
|
||
info, err := os.Stat(absBase) | ||
if err != nil { | ||
log.Fatalf("stat base: %v", err) | ||
} | ||
if !info.IsDir() { | ||
log.Fatalf("base must be a directory: %s", absBase) | ||
} | ||
|
||
if err := os.MkdirAll(filepath.Dir(*outFile), 0o755); err != nil { | ||
log.Fatalf("create output directory: %v", err) | ||
} | ||
|
||
f, err := os.Create(*outFile) | ||
if err != nil { | ||
log.Fatalf("create output file: %v", err) | ||
} | ||
defer f.Close() | ||
|
||
gz, err := zstd.NewWriter(f, zstd.WithEncoderLevel(zstd.SpeedBestCompression)) | ||
if err != nil { | ||
log.Fatalf("create zstd writer: %v", err) | ||
} | ||
defer func() { | ||
if err := gz.Close(); err != nil { | ||
log.Fatalf("close zstd: %v", err) | ||
} | ||
}() | ||
|
||
tw := tar.NewWriter(gz) | ||
defer func() { | ||
if err := tw.Close(); err != nil { | ||
log.Fatalf("close tar: %v", err) | ||
} | ||
}() | ||
|
||
if err := filepath.WalkDir(absBase, func(path string, d os.DirEntry, walkErr error) error { | ||
if walkErr != nil { | ||
return walkErr | ||
} | ||
|
||
rel, err := filepath.Rel(absBase, path) | ||
if err != nil { | ||
return err | ||
} | ||
|
||
if shouldExclude(rel, d) { | ||
if d.IsDir() { | ||
return filepath.SkipDir | ||
} | ||
return nil | ||
} | ||
|
||
if rel == "." { | ||
return nil | ||
} | ||
|
||
info, err := d.Info() | ||
if err != nil { | ||
return err | ||
} | ||
|
||
hdr, err := tar.FileInfoHeader(info, linkTarget(path, info)) | ||
if err != nil { | ||
return err | ||
} | ||
|
||
hdr.Name = filepath.ToSlash(rel) | ||
hdr.ModTime = info.ModTime() | ||
hdr.AccessTime = info.ModTime() | ||
|
||
// tar-like progress output | ||
log.Printf("a %s", hdr.Name) | ||
if err := tw.WriteHeader(hdr); err != nil { | ||
return err | ||
} | ||
|
||
if info.Mode().IsRegular() { | ||
file, err := os.Open(path) | ||
if err != nil { | ||
return err | ||
} | ||
defer file.Close() | ||
|
||
if _, err := io.Copy(tw, file); err != nil { | ||
return err | ||
} | ||
} | ||
|
||
return nil | ||
}); err != nil { | ||
log.Fatalf("walk: %v", err) | ||
} | ||
|
||
if err := tw.Flush(); err != nil { | ||
log.Fatalf("flush tar: %v", err) | ||
} | ||
|
||
log.Printf("wrote %s", *outFile) | ||
} | ||
|
||
// shouldExclude reports whether the entry at rel (relative to the archive
// root) should be left out of the tarball. The book and snapshots trees are
// dropped entirely, as are Solidity test contracts (*.t.sol); everything
// else — including the root itself — is kept.
func shouldExclude(rel string, d os.DirEntry) bool {
	if rel == "." {
		return false
	}

	// Normalize separators so the top-segment check is OS-independent.
	normalized := filepath.ToSlash(rel)
	top, _, _ := strings.Cut(normalized, "/")
	switch top {
	case "book", "snapshots":
		return true
	}

	// Solidity test contracts are not needed by the forge client.
	return !d.IsDir() && strings.HasSuffix(d.Name(), ".t.sol")
}
|
||
// linkTarget returns the symlink target for path when info describes a
// symlink, and the empty string for every other file type. An unreadable
// link aborts the program.
func linkTarget(path string, info os.FileInfo) string {
	if info.Mode()&os.ModeSymlink != 0 {
		target, err := os.Readlink(path)
		if err != nil {
			log.Fatalf("readlink %s: %v", path, err)
		}
		return target
	}
	return ""
}
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Currently there is a
publish-contract-artifacts
job in CI (ref). It expects to store a .tar.gz file in our GCP bucket (ref). It seems like the file-type update would break that script and anything that expects to download .tar.gz files from that bucket. I'm not sure what downstream tools depend on downloading from that GCP bucket.
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Actually, it seems that the script it calls, publish-artifacts.sh, recompresses the files itself, so this change wouldn't break it. The same goes for pull-artifacts.sh, which also still uses gzip.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Ok, got it. And op-deployer has code to decompress either file type, so it seems we're compatible with either on the op-deployer side?