diff --git a/pkg/newlog/cmd/fssagg.go b/pkg/newlog/cmd/fssagg.go
new file mode 100644
index 0000000000..fe06bf5ace
--- /dev/null
+++ b/pkg/newlog/cmd/fssagg.go
@@ -0,0 +1,95 @@
+// Copyright (c) 2024 Zededa, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+package main
+
+import (
+    "crypto/hmac"
+    "crypto/sha256"
+)
+
+// Notes:
+// This is based on the work of Ma and Tsudik:
+// Ma, Di, and Gene Tsudik. "Forward-secure sequential aggregate authentication."
+// IEEE Symposium on Security and Privacy (SP'07). IEEE, 2007.
+// https://eprint.iacr.org/2007/052.pdf
+//
+// This implementation provides forward security, content integrity, stream integrity
+// and truncation detection for each *gzipped* batch of logs. This means logs not
+// yet processed by newlogd are not protected by this scheme; the risk of an
+// attacker compromising those logs can be lowered by adjusting the batch size or
+// time interval to create batches of logs more frequently.
+//
+// The current implementation lacks batch (or tail) deletion detection; this is
+// to be implemented in the next version using the TPM.
+//
+// A pure Go implementation of FssAgg might not be entirely safe: before it is
+// garbage collected, the initial key might hang around in memory and get swapped
+// to disk if the attacker puts the system under memory pressure. Bear in mind this
+// is a highly unlikely scenario. Unfortunately, mlocking might not be possible to
+// implement, as the underlying Go crypto calls copy data around into unlocked memory.
+
+// h must be a collision-resistant one-way hash function (we are using SHA-256;
+// of course, given enough universes nothing is collision resistant)
+func h(data []byte) []byte {
+    h := sha256.New()
+    h.Write(data)
+    return h.Sum(nil)
+}
+
+// hsk must be a secure MAC function (here we are using HMAC with SHA-256 and a secret key)
+func hsk(key, message []byte) []byte {
+    mac := hmac.New(sha256.New, key)
+    mac.Write(message)
+    return mac.Sum(nil)
+}
+
+// fssAggUpd updates the key using the hash of the previous key
+func fssAggUpd(prevKey []byte) []byte {
+    return h(prevKey)
+}
+
+// fssAggSig takes the current key, the aggregated signature so far and a message,
+// calculates the message MAC, updates the aggregated signature and updates the key
+// for forward security. It returns the updated key and the updated aggregated signature.
+func fssAggSig(key, aggMAC, message []byte) ([]byte, []byte) {
+    var aggSig []byte
+
+    // generate MAC for current message and aggregate current MAC with previous MACs
+    curMAC := hsk(key, message)
+    aggSig = h(append(aggMAC, curMAC...))
+
+    // update key for forward security
+    nextKey := fssAggUpd(key)
+
+    return nextKey, aggSig
+}
+
+// fssAggVer verifies the aggregated signature over messages, accepting the initial
+// secret key and the aggregate signature, returning a boolean indicating the
+// verification result.
+func fssAggVer(secretKey, aggSig []byte, messages [][]byte) bool {
+    key := secretKey
+    var computedAggSig []byte
+
+    for _, message := range messages {
+        // generate MAC for current message
+        curMAC := hsk(key, message)
+        // aggregate current MAC with previous MACs
+        computedAggSig = h(append(computedAggSig, curMAC...))
+
+        // update key
+        key = fssAggUpd(key)
+    }
+
+    // this comparison is constant time, so no timing leak and no timing attack.
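+    // What is being compared here, spelled out (illustrative notation, with k0 the
+    // initial key, k_i = H(k_{i-1}) and sig_0 empty):
+    //   sig_i = H(sig_{i-1} || HMAC(k_{i-1}, m_i))   for messages m_1..m_n
+    // The recomputed sig_n must match the received aggregate signature.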
+ return hmac.Equal(computedAggSig, aggSig) +} + +func evolveKey(secretKey []byte, num uint64) []byte { + key := secretKey + for i := uint64(0); i < num; i++ { + key = fssAggUpd(key) + } + return key +} diff --git a/pkg/newlog/cmd/newlogd.go b/pkg/newlog/cmd/newlogd.go index e7b06ec467..91e9952d59 100644 --- a/pkg/newlog/cmd/newlogd.go +++ b/pkg/newlog/cmd/newlogd.go @@ -7,10 +7,12 @@ import ( "bufio" "bytes" "compress/gzip" + "encoding/base64" "encoding/json" "flag" "fmt" "io" + "math/rand" "net" "os" "path" @@ -39,33 +41,14 @@ import ( ) const ( - agentName = "newlogd" - errorTime = 3 * time.Minute - warningTime = 40 * time.Second - metricsPublishInterval = 300 * time.Second - logfileDelay = 300 // maximum delay 5 minutes for log file collection - fastlogfileDelay = 10 // faster to close log file if fastUpload is enabled - stillRunningInerval = 25 * time.Second - - collectDir = types.NewlogCollectDir - uploadDevDir = types.NewlogUploadDevDir - uploadAppDir = types.NewlogUploadAppDir - keepSentDir = types.NewlogKeepSentQueueDir - failSendDir = types.NewlogDir + "/failedUpload" - panicFileDir = types.NewlogDir + "/panicStacks" - symlinkFile = collectDir + "/current.device.log" - tmpSymlink = collectDir + "/tmp-sym.dev.log" - devPrefix = types.DevPrefix - appPrefix = types.AppPrefix - tmpPrefix = "TempFile" - skipUpload = "skipTx." - - maxLogFileSize int32 = 550000 // maximum collect file size in bytes - maxGzipFileSize int64 = 50000 // maximum gzipped file size for upload in bytes - gzipFileFooter int64 = 12 // size of gzip footer to use in calculations - defaultSyncCount = 30 // default log events flush/sync to disk file - - maxToSendMbytes uint32 = 2048 // default 2 Gbytes for log files remains on disk + agentName = "newlogd" + errorTime = 3 * time.Minute + warningTime = 40 * time.Second + metricsPublishInterval = 300 * time.Second + logfileDelay = 300 // maximum delay 5 minutes for log file collection + fastlogfileDelay = 10 // faster to close log file if fastUpload is enabled + stillRunningInerval = 25 * time.Second + maxToSendMbytes uint32 = 2048 // default 2 Gbytes for log files remains on disk ansi = "[\u0009\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))" ) @@ -110,8 +93,37 @@ var ( // goroutine sync/atomic synchronization is used. You've been warned. syslogPrio = types.SyslogKernelLogLevelNum[types.SyslogKernelDefaultLogLevel] kernelPrio = types.SyslogKernelLogLevelNum[types.SyslogKernelDefaultLogLevel] + + // these are constant in principle, but we need to be able to change them + // for testing. + collectDir = types.NewlogCollectDir + uploadDevDir = types.NewlogUploadDevDir + uploadAppDir = types.NewlogUploadAppDir + keepSentDir = types.NewlogKeepSentQueueDir + failSendDir = types.NewlogDir + "/failedUpload" + panicFileDir = types.NewlogDir + "/panicStacks" + symlinkFile = collectDir + "/current.device.log" + tmpSymlink = collectDir + "/tmp-sym.dev.log" + devPrefix = types.DevPrefix + appPrefix = types.AppPrefix + tmpPrefix = "TempFile" + skipUpload = "skipTx." 
+ + maxLogFileSize int32 = 550000 // maximum collect file size in bytes + maxGzipFileSize int64 = 50000 // maximum gzipped file size for upload in bytes + gzipFileFooter int64 = 12 // size of gzip footer to use in calculations + defaultSyncCount = 30 // default log events flush/sync to disk file + + // for log verification + secLog secLogMetadata ) +type secLogMetadata struct { + key []byte + keyIter uint64 + enabled bool +} + // for app Domain-ID mapping into UUID and DisplayName type appDomain struct { appUUID string @@ -210,6 +222,20 @@ func main() { // handle linux Syslog /dev/log messages go getSyslogMsg(loggerChan) + // initialize secure logging key + // this is fixed key, in practice we generate a random key and + // store it in the TPM just like we do the volume storage key. + secLog.key = []byte{ + 0x12, 0x34, 0x56, 0x78, + 0x9A, 0xBC, 0xDE, 0xF0, + 0x11, 0x22, 0x33, 0x44, + 0x55, 0x66, 0x77, 0x88, + } + // evolve the key, so the initial key is gone from memory. + // no need to be random, but looks cool. + secLog.keyIter = uint64(rand.Intn(100)) + secLog.key = evolveKey(secLog.key, secLog.keyIter) + stillRunning := time.NewTicker(stillRunningInerval) ps.StillRunning(agentName, warningTime, errorTime) @@ -335,7 +361,11 @@ func main() { case tmpLogfileInfo := <-movefileChan: // handle logfile to gzip conversion work - doMoveCompressFile(&ps, tmpLogfileInfo) + if secLog.enabled { + doMoveCompressFileSecure(&ps, tmpLogfileInfo) + } else { + doMoveCompressFile(&ps, tmpLogfileInfo) + } case panicBuf := <-panicFileChan: // save panic stack into files @@ -522,6 +552,10 @@ func handleGlobalConfigImp(ctxArg interface{}, key string, statusArg interface{} // parse kernel log level kernelPrioStr := gcp.GlobalValueString(types.KernelLogLevel) atomic.StoreUint32(&kernelPrio, parseLogLevel(kernelPrioStr)) + + // enable secure logging + // secLog.enabled = gcp.GlobalValueBool(types.EnableSecureLogging) + secLog.enabled = true } log.Tracef("handleGlobalConfigModify done for %s, fastupload enabled %v", key, enableFastUpload) } @@ -1121,7 +1155,7 @@ func doMoveCompressFile(ps *pubsub.PubSub, tmplogfileInfo fileChanInfo) { // note: bytesWritten may be updated eventually because of gzip implementation // potentially we cannot account maxGzipFileSize less than windowsize of gzip 32768 if underlayWriter.bytesWritten+gzipFileFooter+int64(len(newLine)) >= maxGzipFileSize { - newSize += finalizeGzipToOutTempFile(gw, oTmpFile, outfile) + newSize += finalizeGzipToOutTempFile(gw, nil, oTmpFile, outfile) logmetrics.NumBreakGZipFile++ fileID++ outfile = gzipFileNameGet(isApp, timeNowNum+fileID, dirName, appuuid, tmplogfileInfo.notUpload) @@ -1135,7 +1169,210 @@ func doMoveCompressFile(ps *pubsub.PubSub, tmplogfileInfo fileChanInfo) { if scanner.Err() != nil { log.Fatal("doMoveCompressFile: reading file failed", scanner.Err()) } - newSize += finalizeGzipToOutTempFile(gw, oTmpFile, outfile) + newSize += finalizeGzipToOutTempFile(gw, nil, oTmpFile, outfile) + fileID++ + + // store variable to check for the new file name generator + lastLogNum = timeNowNum + fileID + + if isApp { + logmetrics.AppMetrics.NumGZipBytesWrite += uint64(newSize) + if tmplogfileInfo.notUpload { + logmetrics.NumSkipUploadAppFile += uint32(fileID) + } + } else { + logmetrics.DevMetrics.NumGZipBytesWrite += uint64(newSize) + } + + _ = iFile.Close() + // done gzip conversion, get rid of the temp log file in collect directory + err = os.Remove(tmplogfileInfo.tmpfile) + if err != nil { + log.Fatal("doMoveCompressFile: remove file failed", err) + 
}
+}
+
+func sequenceLogEntry(entry []byte, seq int) ([]byte, error) {
+    jsonMap := make(map[string]interface{})
+    err := json.Unmarshal(entry, &jsonMap)
+    if err != nil {
+        return nil, err
+    }
+
+    jsonMap["seqNum"] = seq
+    out, err := json.Marshal(jsonMap)
+    if err != nil {
+        return nil, err
+    }
+
+    return out, nil
+}
+
+func addSecureLogMetadata(metadata string, aggSig []byte, keyIter uint64) (string, error) {
+    jsonMap := make(map[string]interface{})
+    err := json.Unmarshal([]byte(metadata), &jsonMap)
+    if err != nil {
+        return "", err
+    }
+
+    jsonMap["aggSig"] = base64.StdEncoding.EncodeToString(aggSig)
+    jsonMap["keyIter"] = keyIter
+    out, err := json.Marshal(jsonMap)
+    if err != nil {
+        return "", err
+    }
+
+    return string(out), nil
+}
+
+// doMoveCompressFileSecure processes logs just like doMoveCompressFile but also
+// calculates the aggregate signature over the logs and adds the necessary
+// information to the log metadata so the logs can be verified later.
+func doMoveCompressFileSecure(ps *pubsub.PubSub, tmplogfileInfo fileChanInfo) {
+    isApp := tmplogfileInfo.isApp
+    dirName, appuuid := getFileInfo(tmplogfileInfo)
+
+    now := time.Now()
+    timeNowNum := int(now.UnixNano() / int64(time.Millisecond)) // in msec
+    if timeNowNum < lastLogNum {
+        // adjust variable for file name generation to not overlap with the old one
+        timeNowNum = lastLogNum + 1
+    }
+    outfile := gzipFileNameGet(isApp, timeNowNum, dirName, appuuid, tmplogfileInfo.notUpload)
+
+    // open input file
+    iFile, err := os.Open(tmplogfileInfo.tmpfile)
+    if err != nil {
+        log.Fatal(err)
+    }
+    scanner := bufio.NewScanner(iFile)
+    // check if we cannot scan
+    // check valid json header for device log we will use later
+    if !scanner.Scan() || (!isApp && !json.Valid(scanner.Bytes())) {
+        _ = iFile.Close()
+        err = fmt.Errorf("doMoveCompressFile: can't get metadata on first line, remove %s", tmplogfileInfo.tmpfile)
+        log.Error(err)
+        if scanner.Err() != nil {
+            log.Error(scanner.Err())
+        }
+        err = os.Remove(tmplogfileInfo.tmpfile)
+        if err != nil {
+            log.Fatal("doMoveCompressFile: remove file failed", err)
+        }
+        return
+    }
+
+    // assign the metadata in the first line of the logfile
+    tmplogfileInfo.header = scanner.Text()
+
+    // prepare writers to save gzipped logs
+    gw, underlayWriter, oTmpFile := prepareGzipToOutTempFile(filepath.Dir(outfile), tmplogfileInfo, now)
+
+    fileID := 0
+    wdTime := time.Now()
+    var newSize int64
+
+    // each log is processed, tagged with a sequence number and then placed in
+    // the processedLogs slice. The logs are later written into the gzip file
+    // in a call to finalizeGzipToOutTempFile.
+    var processedLogs = make([][]byte, 0)
+    // each log entry in a batch gets a sequence number, which can be used to
+    // sort the logs for verification.
+    var logSeq = 0
+    // batchKeyIter is the iteration of the key we start with, used to calculate
+    // the log entries' HMACs and the aggregate signature.
+    var batchKeyIter = secLog.keyIter
+    // aggregateSig is the aggregate signature of all the logs processed in a
+    // batch.
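+    // It ends up in the gzip Comment via addSecureLogMetadata; with illustrative
+    // values the resulting metadata JSON looks roughly like
+    //   {"aggSig":"<base64>","keyIter":42, ...original header fields...}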
+    var aggregateSig []byte
+
+    for scanner.Scan() {
+        if time.Since(wdTime) >= (15 * time.Second) {
+            ps.StillRunning(agentName, warningTime, errorTime)
+            wdTime = time.Now()
+        }
+        newLine := scanner.Bytes()
+        // trim non-graphic symbols
+        newLine = bytes.TrimFunc(newLine, func(r rune) bool {
+            return !unicode.IsGraphic(r)
+        })
+        if len(newLine) == 0 {
+            continue
+        }
+        if !json.Valid(newLine) {
+            log.Errorf("doMoveCompressFile: found broken line: %s", string(newLine))
+            continue
+        }
+        // assume that next line is incompressible to be safe
+        // note: bytesWritten may be updated eventually because of gzip implementation
+        // potentially we cannot account maxGzipFileSize less than windowsize of gzip 32768
+        if underlayWriter.bytesWritten+gzipFileFooter+int64(len(newLine)) >= maxGzipFileSize {
+            // we have reached the size limit of the gzip file, so before starting
+            // a new batch of logs, finalize the current batch of logs, add the aggregate
+            // signature and key iteration to the gzip metadata, giving the verifier
+            // the information needed to verify the logs.
+            verifMetadata, err := addSecureLogMetadata(gw.Comment, aggregateSig, batchKeyIter)
+            if err != nil {
+                log.Errorf("doMoveCompressFile: addSecureLogMetadata failed: %v", err)
+            } else {
+                gw.Comment = verifMetadata
+            }
+
+            // write the processed logs to the gzip file and finalize the gzip file.
+            newSize += finalizeGzipToOutTempFile(gw, processedLogs, oTmpFile, outfile)
+
+            // reset the variables for the next gzip file
+            processedLogs = processedLogs[:0]
+            aggregateSig = []byte{}
+            batchKeyIter = secLog.keyIter
+            logSeq = 0
+
+            // update the metrics
+            logmetrics.NumBreakGZipFile++
+            fileID++
+
+            // start a new batch
+            outfile = gzipFileNameGet(isApp, timeNowNum+fileID, dirName, appuuid, tmplogfileInfo.notUpload)
+            gw, underlayWriter, oTmpFile = prepareGzipToOutTempFile(filepath.Dir(outfile), tmplogfileInfo, now)
+        }
+
+        // tag the log entry with a sequence number and collect it
+        sequencedEntry, err := sequenceLogEntry(newLine, logSeq)
+        if err != nil {
+            log.Fatalf("doMoveCompressFile: sequenceLogEntry failed: %v", err)
+        }
+        logSeq++
+
+        // calculate the aggregate signature of the log entry, update the key and
+        // the iteration counter.
+        secLog.key, aggregateSig = fssAggSig(secLog.key, aggregateSig, sequencedEntry)
+        secLog.keyIter++
+
+        // we are not writing the log entry to the gzip file yet, since the first
+        // write() would also write the header (metadata) of the gzip file, and we
+        // are not ready for that: there might be more logs to process, signatures
+        // to calculate and so on. So we just collect the log entries at this stage.
+        processedLogs = append(processedLogs, sequencedEntry)
+    }
+    if scanner.Err() != nil {
+        log.Fatal("doMoveCompressFile: reading file failed", scanner.Err())
+    }
+
+    // logs are processed, add the aggregate signature and key iteration to the gzip
+    // metadata, giving the verifier the information needed to verify the logs.
+    // If you're wondering whether we should sign the device ID in the log batch with
+    // the TPM-based device key: we don't. Logs are wrapped in an auth container,
+    // which ensures the cloud knows these logs are from this device.
+    verifMetadata, err := addSecureLogMetadata(gw.Comment, aggregateSig, batchKeyIter)
+    if err != nil {
+        log.Errorf("doMoveCompressFile: addSecureLogMetadata failed: %v", err)
+    } else {
+        gw.Comment = verifMetadata
+    }
+
+    // write the processed logs to the gzip file and finalize the gzip file.
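+    // At this point a finished batch looks roughly like this (illustrative):
+    //   gzip Comment: the original metadata JSON plus "aggSig" (base64) and "keyIter"
+    //   gzip body:    newline-separated log entries, each carrying a "seqNum" field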
+ newSize += finalizeGzipToOutTempFile(gw, processedLogs, oTmpFile, outfile) fileID++ // store variable to check for the new file name generator @@ -1205,7 +1442,15 @@ func prepareGzipToOutTempFile(gzipDirName string, fHdr fileChanInfo, now time.Ti return gw, writer, oTmpFile } -func finalizeGzipToOutTempFile(gw *gzip.Writer, oTmpFile *os.File, outfile string) int64 { +func finalizeGzipToOutTempFile(gw *gzip.Writer, logs [][]byte, oTmpFile *os.File, outfile string) int64 { + // this is non-empty only if when we are in secure log mode + for _, l := range logs { + _, err := gw.Write(append(l, '\n')) + if err != nil { + log.Fatal("finalizeGzipToOutTempFile: cannot write log", err) + } + } + err := gw.Close() if err != nil { log.Fatal("finalizeGzipToOutTempFile: cannot close file", err) diff --git a/pkg/newlog/cmd/seclog_perf_test.go b/pkg/newlog/cmd/seclog_perf_test.go new file mode 100644 index 0000000000..0bc3744483 --- /dev/null +++ b/pkg/newlog/cmd/seclog_perf_test.go @@ -0,0 +1,480 @@ +// Copyright (c) 2024 Zededa, Inc. +// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "bufio" + "compress/gzip" + "encoding/csv" + "encoding/json" + "fmt" + "os" + "path" + "testing" + "time" + + "github.com/lf-edge/eve/pkg/pillar/base" + "github.com/lf-edge/eve/pkg/pillar/pubsub" + "github.com/sirupsen/logrus" +) + +var ( + runs = []int{10, 20, 30, 40, 50, 100, 200, 500} + growth = 10 + keyCacheBase = 10000 + keyCacheMax = 500000 +) + +func growLogContents(filename string, times int) error { + content, err := os.ReadFile(filename) + if err != nil { + return fmt.Errorf("failed to read file: %v", err) + } + + var newContent []byte + for i := 0; i < times; i++ { + newContent = append(newContent, content...) + } + + err = os.WriteFile(filename, newContent, 0777) + if err != nil { + return fmt.Errorf("failed to write to file: %v", err) + } + + return nil +} + +func processVerifyLogs(t *testing.T) { + // get the log gzipped file path + allLogs := listFilesInDir(uploadDevDir) + for _, logGzipped := range allLogs { + rf, err := os.Open(logGzipped) + if err != nil { + t.Fatalf("Failed to open %s: %s", logGzipped, err) + } + defer rf.Close() + + gr, err := gzip.NewReader(rf) + if err != nil { + t.Fatalf("Failed to create gzip reader: %s", err) + } + defer gr.Close() + + // read the batch metadata from gzipped metadata. + aggSig, keyIter, err := getLogVerficiationMetadata(gr.Comment) + if err != nil { + t.Fatalf("Failed to read secure log metadata from %s: %s", logGzipped, err) + } + + logs := make([][]byte, 0) + scanner := bufio.NewScanner(gr) + for scanner.Scan() { + if !json.Valid(scanner.Bytes()) { + t.Fatalf("Invalid JSON: %s", scanner.Text()) + } + logs = append(logs, append([]byte(nil), scanner.Bytes()...)) + } + + verifKey := evolveKey(sk0Test, keyIter) + res := fssAggVer(verifKey, aggSig, logs) + if !res { + t.Fatalf("Failed to verify logs") + } + } +} + +func processVerifyLogsWithCachedKeys(t *testing.T) { + // get the log gzipped file path + allLogs := listFilesInDir(uploadDevDir) + for _, logGzipped := range allLogs { + rf, err := os.Open(logGzipped) + if err != nil { + t.Fatalf("Failed to open %s: %s", logGzipped, err) + } + defer rf.Close() + + gr, err := gzip.NewReader(rf) + if err != nil { + t.Fatalf("Failed to create gzip reader: %s", err) + } + defer gr.Close() + + // read the batch metadata from gzipped metadata. 
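+        // Cached-key shortcut used below (illustrative numbers): keys at every
+        // keyCacheBase-th iteration are precomputed by cacheKeys, so for, e.g.,
+        // keyIter=23456 and keyCacheBase=10000 we load key_20000 from disk and
+        // evolve it only 3456 times instead of evolving sk0Test 23456 times.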
+ aggSig, keyIter, err := getLogVerficiationMetadata(gr.Comment) + if err != nil { + t.Fatalf("Failed to read secure log metadata from %s: %s", logGzipped, err) + } + + logs := make([][]byte, 0) + scanner := bufio.NewScanner(gr) + for scanner.Scan() { + if !json.Valid(scanner.Bytes()) { + t.Fatalf("Invalid JSON: %s", scanner.Text()) + } + logs = append(logs, append([]byte(nil), scanner.Bytes()...)) + } + + // shortcut? + if keyIter > uint64(keyCacheBase) { + keyIndex := (keyIter / uint64(keyCacheBase)) * uint64(keyCacheBase) + keyFile := fmt.Sprintf("key_%d", keyIndex) + + // read the key from the file + startingKey, err := os.ReadFile(keyFile) + if err != nil { + t.Fatalf("Failed to read cached key: %s", err) + } + + verifKey := evolveKey(startingKey, (keyIter - keyIndex)) + res := fssAggVer(verifKey, aggSig, logs) + if !res { + t.Fatalf("Failed to verify log with cached key") + } + } else { + verifKey := evolveKey(sk0Test, keyIter) + res := fssAggVer(verifKey, aggSig, logs) + if !res { + t.Fatalf("Failed to verify logs") + } + } + } +} + +func processLogs(t *testing.T) { + // get the log gzipped file path + allLogs := listFilesInDir(uploadDevDir) + for _, logGzipped := range allLogs { + rf, err := os.Open(logGzipped) + if err != nil { + t.Fatalf("Failed to open %s: %s", logGzipped, err) + } + defer rf.Close() + + gr, err := gzip.NewReader(rf) + if err != nil { + t.Fatalf("Failed to create gzip reader: %s", err) + } + defer gr.Close() + + // just read the logs + logs := make([][]byte, 0) + scanner := bufio.NewScanner(gr) + for scanner.Scan() { + if !json.Valid(scanner.Bytes()) { + t.Fatalf("Invalid JSON: %s", scanner.Text()) + } + logs = append(logs, append([]byte(nil), scanner.Bytes()...)) + } + } +} + +func cacheKeys() { + // cache the keys + for i := keyCacheBase; i <= keyCacheMax; i += keyCacheBase { + key := evolveKey(sk0Test, uint64(i)) + fd, err := os.Create(fmt.Sprintf("key_%d", i)) + if err != nil { + fmt.Printf("Failed to create key file: %s", err) + } + + _, _ = fd.Write(key) + fd.Close() + } +} + +func removeCachedKeys() { + // cache the keys + for i := keyCacheBase; i <= keyCacheMax; i += keyCacheBase { + os.Remove(fmt.Sprintf("key_%d", i)) + } +} + +func TestSecureLogAggregationPerformance(t *testing.T) { + if os.Getenv("RUN_SEC_LOG_PERF_TEST") == "" { + t.Skip("Skipping performance test") + } + + logger := logrus.StandardLogger() + log := base.NewSourceLogObject(logger, "seclog_test", os.Getpid()) + ps := pubsub.New(&pubsub.EmptyDriver{}, logger, log) + + // setup the test environment + makeTestDirs() + defer removeTestDirs() + + secLog.keyIter = 0 + secLog.key = sk0Test + + var fileinfo fileChanInfo + var secureLogTotalTime, nonSecureLogTotalTime time.Duration + for _, run := range runs { + for i := 0; i < run; i++ { + scr := logFile + dst := path.Join(collectDir, "dev.log.sample") + copyFile(scr, dst) + + // grow the log contents for better performance testing + err := growLogContents(dst, growth) + if err != nil { + t.Fatalf("Failed to grow log contents: %s", err) + } + + fileinfo.notUpload = true + fileinfo.tmpfile = dst + fileinfo.inputSize = 0 + + startSecureLog := time.Now() + doMoveCompressFileSecure(ps, fileinfo) + secureLogTotalTime += time.Since(startSecureLog) + + // non-secure log aggregation + scr = logFile + dst = path.Join(collectDir, "dev.log.sample") + copyFile(scr, dst) + + // grow the log contents for better performance testing + err = growLogContents(dst, growth) + if err != nil { + t.Fatalf("Failed to grow log contents: %s", err) + } + + 
fileinfo.notUpload = true + fileinfo.tmpfile = dst + fileinfo.inputSize = 0 + + startNonSecureLog := time.Now() + doMoveCompressFile(ps, fileinfo) + nonSecureLogTotalTime += time.Since(startNonSecureLog) + } + + // Calculate averages + secureAvg := int(secureLogTotalTime.Microseconds()) / run + nonSecureAvg := int(nonSecureLogTotalTime.Microseconds()) / run + performanceCost := secureAvg - nonSecureAvg + performanceCostPercentage := (float64(performanceCost) / float64(nonSecureAvg)) * 100 + + file, err := os.OpenFile("fssaggsig_timing_results.csv", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + t.Fatalf("Error creating CSV file %v", err) + } + defer file.Close() + writer := csv.NewWriter(file) + + stat, _ := file.Stat() + if stat.Size() == 0 { + _ = writer.Write([]string{"Test Run", "Average Secure Time (us)", "Average Non-Secure Time (us)", "Performance Cost (us)", "Performance Overhead (%)"}) + } + + _ = writer.Write([]string{ + fmt.Sprintf("%d", run), + fmt.Sprintf("%d", secureAvg), + fmt.Sprintf("%d", nonSecureAvg), + fmt.Sprintf("%d", performanceCost), + fmt.Sprintf("%.2f", performanceCostPercentage), + }) + + writer.Flush() + } +} + +func TestSecureLogVerificationPerformance(t *testing.T) { + if os.Getenv("RUN_SEC_LOG_PERF_TEST") == "" { + t.Skip("Skipping performance test") + } + + logger := logrus.StandardLogger() + log := base.NewSourceLogObject(logger, "seclog_test", os.Getpid()) + ps := pubsub.New(&pubsub.EmptyDriver{}, logger, log) + + // setup the test environment + makeTestDirs() + defer removeTestDirs() + + secLog.keyIter = 0 + secLog.key = sk0Test + + var fileinfo fileChanInfo + var secureLogTotalTime, nonSecureLogTotalTime time.Duration + for _, run := range runs { + for i := 0; i < run; i++ { + makeTestDirs() + + scr := logFile + dst := path.Join(collectDir, "dev.log.sample") + copyFile(scr, dst) + + // grow the log contents for better performance testing + err := growLogContents(dst, growth) + if err != nil { + t.Fatalf("Failed to grow log contents: %s", err) + } + + fileinfo.notUpload = true + fileinfo.tmpfile = dst + fileinfo.inputSize = 0 + doMoveCompressFileSecure(ps, fileinfo) + + startSecureLog := time.Now() + processVerifyLogs(t) + secureLogTotalTime += time.Since(startSecureLog) + + removeTestDirs() + makeTestDirs() + + // non-secure log verification + scr = logFile + dst = path.Join(collectDir, "dev.log.sample") + copyFile(scr, dst) + + // grow the log contents for better performance testing + err = growLogContents(dst, growth) + if err != nil { + t.Fatalf("Failed to grow log contents: %s", err) + } + + fileinfo.notUpload = true + fileinfo.tmpfile = dst + fileinfo.inputSize = 0 + + doMoveCompressFile(ps, fileinfo) + startNonSecureLog := time.Now() + processLogs(t) + nonSecureLogTotalTime += time.Since(startNonSecureLog) + + removeTestDirs() + } + + // Calculate averages + secureAvg := int(secureLogTotalTime.Microseconds()) / run + nonSecureAvg := int(nonSecureLogTotalTime.Microseconds()) / run + performanceCost := secureAvg - nonSecureAvg + performanceCostPercentage := (float64(performanceCost) / float64(nonSecureAvg)) * 100 + + file, err := os.OpenFile("fssaggver_timing_results.csv", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + t.Fatalf("Error creating CSV file %v", err) + } + defer file.Close() + writer := csv.NewWriter(file) + + stat, _ := file.Stat() + if stat.Size() == 0 { + _ = writer.Write([]string{"Test Run", "Average Secure Time (us)", "Average Non-Secure Time (us)", "Performance Cost (us)", "Performance Overhead (%)"}) 
+ } + + _ = writer.Write([]string{ + fmt.Sprintf("%d", run), + fmt.Sprintf("%d", secureAvg), + fmt.Sprintf("%d", nonSecureAvg), + fmt.Sprintf("%d", performanceCost), + fmt.Sprintf("%.2f", performanceCostPercentage), + }) + + writer.Flush() + } +} + +func TestSecureLogCachedKeysVerificationPerformance(t *testing.T) { + if os.Getenv("RUN_SEC_LOG_PERF_TEST") == "" { + t.Skip("Skipping performance test") + } + + logger := logrus.StandardLogger() + log := base.NewSourceLogObject(logger, "seclog_test", os.Getpid()) + ps := pubsub.New(&pubsub.EmptyDriver{}, logger, log) + + // cache the keys first, this is not secure, just for testing, + // in real world, the cached keys should be securely stored just like + // the initial key. + cacheKeys() + defer removeCachedKeys() + + // setup the test environment + makeTestDirs() + defer removeTestDirs() + + secLog.keyIter = 0 + secLog.key = sk0Test + + var fileinfo fileChanInfo + var secureLogTotalTime, nonSecureLogTotalTime time.Duration + for _, run := range runs { + for i := 0; i < run; i++ { + makeTestDirs() + + scr := logFile + dst := path.Join(collectDir, "dev.log.sample") + copyFile(scr, dst) + + // grow the log contents for better performance testing + err := growLogContents(dst, growth) + if err != nil { + t.Fatalf("Failed to grow log contents: %s", err) + } + + fileinfo.notUpload = true + fileinfo.tmpfile = dst + fileinfo.inputSize = 0 + doMoveCompressFileSecure(ps, fileinfo) + + startSecureLog := time.Now() + processVerifyLogsWithCachedKeys(t) + secureLogTotalTime += time.Since(startSecureLog) + + removeTestDirs() + makeTestDirs() + + // non-secure log verification + scr = logFile + dst = path.Join(collectDir, "dev.log.sample") + copyFile(scr, dst) + + // grow the log contents for better performance testing + err = growLogContents(dst, growth) + if err != nil { + t.Fatalf("Failed to grow log contents: %s", err) + } + + fileinfo.notUpload = true + fileinfo.tmpfile = dst + fileinfo.inputSize = 0 + + doMoveCompressFile(ps, fileinfo) + startNonSecureLog := time.Now() + processLogs(t) + nonSecureLogTotalTime += time.Since(startNonSecureLog) + + removeTestDirs() + } + + // Calculate averages + secureAvg := int(secureLogTotalTime.Microseconds()) / run + nonSecureAvg := int(nonSecureLogTotalTime.Microseconds()) / run + performanceCost := secureAvg - nonSecureAvg + performanceCostPercentage := (float64(performanceCost) / float64(nonSecureAvg)) * 100 + + fileName := fmt.Sprintf("fssaggver_cached_keys_%d_timing_results.csv", keyCacheBase) + file, err := os.OpenFile(fileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + t.Fatalf("Error creating CSV file %v", err) + } + defer file.Close() + writer := csv.NewWriter(file) + + stat, _ := file.Stat() + if stat.Size() == 0 { + _ = writer.Write([]string{"Test Run", "Average Secure Time (us)", "Average Non-Secure Time (us)", "Performance Cost (us)", "Performance Overhead (%)"}) + } + + _ = writer.Write([]string{ + fmt.Sprintf("%d", run), + fmt.Sprintf("%d", secureAvg), + fmt.Sprintf("%d", nonSecureAvg), + fmt.Sprintf("%d", performanceCost), + fmt.Sprintf("%.2f", performanceCostPercentage), + }) + + writer.Flush() + } +} diff --git a/pkg/newlog/cmd/seclog_test.go b/pkg/newlog/cmd/seclog_test.go new file mode 100644 index 0000000000..58907f5e38 --- /dev/null +++ b/pkg/newlog/cmd/seclog_test.go @@ -0,0 +1,453 @@ +// Copyright (c) 2024 Zededa, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +package main + +import ( + "bufio" + "compress/gzip" + "encoding/base64" + "encoding/json" + "fmt" + "math/rand" + "os" + "path" + "testing" + + "github.com/lf-edge/eve/pkg/pillar/base" + "github.com/lf-edge/eve/pkg/pillar/pubsub" + "github.com/sirupsen/logrus" +) + +var ( + // initial key, keep it secret, keep it safe + sk0Test = []byte{ + 0x12, 0x34, 0x56, 0x78, + 0x9A, 0xBC, 0xDE, 0xF0, + 0x11, 0x22, 0x33, 0x44, + 0x55, 0x66, 0x77, 0x88, + } +) + +const ( + // TODO : fix this paths + testDirPrefix = "/tmp/seclog" + logFile = "/home/shah/shah-dev/eve/pkg/newlog/testdata/dev.log.sample" +) + +func makeTestDirs() { + uploadDevDir = path.Join(testDirPrefix, "devupload") + uploadAppDir = path.Join(testDirPrefix, "appupload") + collectDir = path.Join(testDirPrefix, "collect") + + _ = os.MkdirAll(uploadDevDir, 0777) + _ = os.MkdirAll(uploadAppDir, 0777) + _ = os.MkdirAll(collectDir, 0777) +} + +func removeTestDirs() { + os.RemoveAll(testDirPrefix) +} + +func listFilesInDir(dir string) []string { + files, _ := os.ReadDir(dir) + var filenames []string + for _, f := range files { + filenames = append(filenames, path.Join(dir, f.Name())) + } + return filenames +} + +func getLogVerficiationMetadata(gwComment string) ([]byte, uint64, error) { + var jsonMap map[string]interface{} + err := json.Unmarshal([]byte(gwComment), &jsonMap) + if err != nil { + return nil, 0, fmt.Errorf("Failed to unmarshal metadata: %s", err) + } + + macStr, ok := jsonMap["aggSig"].(string) + if !ok { + return nil, 0, fmt.Errorf("Failed to find signature in metadata") + } + + mac, err := base64.StdEncoding.DecodeString(macStr) + if err != nil { + return nil, 0, fmt.Errorf("Failed to decode signature: %s", err) + } + + iter, ok := jsonMap["keyIter"].(float64) + if !ok { + return nil, 0, fmt.Errorf("Failed to find keyIteration in metadata") + } + + return mac, uint64(iter), nil +} + +func copyFile(src string, dst string) { + data, _ := os.ReadFile(src) + _ = os.WriteFile(dst, data, 0777) +} + +func TestSingleSecureLogVerification(t *testing.T) { + logger := logrus.StandardLogger() + log := base.NewSourceLogObject(logger, "seclog_test", os.Getpid()) + ps := pubsub.New(&pubsub.EmptyDriver{}, logger, log) + + // setup the test environment + makeTestDirs() + defer removeTestDirs() + + scr := logFile + dst := path.Join(collectDir, "dev.log.sample") + copyFile(scr, dst) + + var fileinfo fileChanInfo + fileinfo.notUpload = true + fileinfo.tmpfile = dst + fileinfo.inputSize = 0 + + // evolve the key, for good measure. + intialKey := evolveKey(sk0Test, 100) + secLog.keyIter = 100 + secLog.key = intialKey + + // process the log entries and create the gzipped log file + doMoveCompressFileSecure(ps, fileinfo) + + // get the log gzipped file path + logGzipped := listFilesInDir(uploadDevDir)[0] + rf, err := os.Open(logGzipped) + if err != nil { + t.Fatalf("Failed to open %s: %s", logGzipped, err) + } + defer rf.Close() + + gr, err := gzip.NewReader(rf) + if err != nil { + t.Fatalf("Failed to create gzip reader: %s", err) + } + defer gr.Close() + + // read the batch metadata from gzipped metadata. 
+ aggSig, keyIter, err := getLogVerficiationMetadata(gr.Comment) + if err != nil { + t.Fatalf("Failed to read secure log metadata from %s: %s", logGzipped, err) + } + + logs := make([][]byte, 0) + scanner := bufio.NewScanner(gr) + for scanner.Scan() { + if !json.Valid(scanner.Bytes()) { + t.Fatalf("Invalid JSON: %s", scanner.Text()) + } + logs = append(logs, append([]byte(nil), scanner.Bytes()...)) + } + + // evolve the key to the same iteration as the one used to sign the log. + verifKey := evolveKey(sk0Test, keyIter) + // verify the log + res := fssAggVer(verifKey, aggSig, logs) + if !res { + t.Fatalf("Secure log verification failed") + } +} + +func TestMultipleSecureLogVerification(t *testing.T) { + logger := logrus.StandardLogger() + log := base.NewSourceLogObject(logger, "seclog_test", os.Getpid()) + ps := pubsub.New(&pubsub.EmptyDriver{}, logger, log) + + // setup the test environment + makeTestDirs() + defer removeTestDirs() + + scr := logFile + dst := path.Join(collectDir, "dev.log.sample") + copyFile(scr, dst) + + var fileinfo fileChanInfo + fileinfo.notUpload = true + fileinfo.tmpfile = dst + fileinfo.inputSize = 0 + + // evolve the key, for good measure. + intialKey := evolveKey(sk0Test, 100) + secLog.keyIter = 100 + secLog.key = intialKey + + // lower the maxGzipFileSize to multiple batches + maxGzipFileSize = 1000 + + // process the log entries and create the gzipped log file + doMoveCompressFileSecure(ps, fileinfo) + + // get the log gzipped file path + allLogs := listFilesInDir(uploadDevDir) + for _, logGzipped := range allLogs { + rf, err := os.Open(logGzipped) + if err != nil { + t.Fatalf("Failed to open %s: %s", logGzipped, err) + } + defer rf.Close() + + gr, err := gzip.NewReader(rf) + if err != nil { + t.Fatalf("Failed to create gzip reader: %s", err) + } + defer gr.Close() + + // read the batch metadata from gzipped metadata. + aggSig, keyIter, err := getLogVerficiationMetadata(gr.Comment) + if err != nil { + t.Fatalf("Failed to read secure log metadata from %s: %s", logGzipped, err) + } + + logs := make([][]byte, 0) + scanner := bufio.NewScanner(gr) + for scanner.Scan() { + if !json.Valid(scanner.Bytes()) { + t.Fatalf("Invalid JSON: %s", scanner.Text()) + } + logs = append(logs, append([]byte(nil), scanner.Bytes()...)) + } + + t.Logf("Verifying log with key iteration : %d", keyIter) + t.Logf("Number of log entries: %d", len(logs)) + t.Logf("Batch aggregated signature: %x", aggSig) + t.Log("----------------------------------------") + + // evolve the key to the same iteration as the one used to sign the log. + verifKey := evolveKey(sk0Test, keyIter) + // verify the log + res := fssAggVer(verifKey, aggSig, logs) + if !res { + t.Fatalf("Secure log verification failed for %s", logGzipped) + } + } +} + +func TestContentIntegritySecureLogVerification(t *testing.T) { + logger := logrus.StandardLogger() + log := base.NewSourceLogObject(logger, "seclog_test", os.Getpid()) + ps := pubsub.New(&pubsub.EmptyDriver{}, logger, log) + + // setup the test environment + makeTestDirs() + defer removeTestDirs() + + scr := logFile + dst := path.Join(collectDir, "dev.log.sample") + copyFile(scr, dst) + + var fileinfo fileChanInfo + fileinfo.notUpload = true + fileinfo.tmpfile = dst + fileinfo.inputSize = 0 + + // evolve the key, for good measure. 
+ intialKey := evolveKey(sk0Test, 100) + secLog.keyIter = 100 + secLog.key = intialKey + + // process the log entries and create the gzipped log file + doMoveCompressFileSecure(ps, fileinfo) + + // get the log gzipped file path + logGzipped := listFilesInDir(uploadDevDir)[0] + rf, err := os.Open(logGzipped) + if err != nil { + t.Fatalf("Failed to open %s: %s", logGzipped, err) + } + defer rf.Close() + + gr, err := gzip.NewReader(rf) + if err != nil { + t.Fatalf("Failed to create gzip reader: %s", err) + } + defer gr.Close() + + // read the batch metadata from gzipped metadata. + aggSig, keyIter, err := getLogVerficiationMetadata(gr.Comment) + if err != nil { + t.Fatalf("Failed to read secure log metadata from %s: %s", logGzipped, err) + } + + logs := make([][]byte, 0) + scanner := bufio.NewScanner(gr) + for scanner.Scan() { + if !json.Valid(scanner.Bytes()) { + t.Fatalf("Invalid JSON: %s", scanner.Text()) + } + logs = append(logs, append([]byte(nil), scanner.Bytes()...)) + } + + if len(logs) < 3 { + t.Fatalf("Not enough log entries to content integrity") + } + + // get a ranom log entry and corrupt it + corruptLogIdx := rand.Intn(len(logs)) + logs[corruptLogIdx] = []byte("corrupted log entry") + + // evolve the key to the same iteration as the one used to sign the log. + verifKey := evolveKey(sk0Test, keyIter) + // verify the log + res := fssAggVer(verifKey, aggSig, logs) + if res != false { + t.Fatalf("Expected secure log verification to failed") + } +} + +func TestStreamIntegritySecureLogVerification(t *testing.T) { + logger := logrus.StandardLogger() + log := base.NewSourceLogObject(logger, "seclog_test", os.Getpid()) + ps := pubsub.New(&pubsub.EmptyDriver{}, logger, log) + + // setup the test environment + makeTestDirs() + defer removeTestDirs() + + scr := logFile + dst := path.Join(collectDir, "dev.log.sample") + copyFile(scr, dst) + + var fileinfo fileChanInfo + fileinfo.notUpload = true + fileinfo.tmpfile = dst + fileinfo.inputSize = 0 + + // evolve the key, for good measure. + intialKey := evolveKey(sk0Test, 100) + secLog.keyIter = 100 + secLog.key = intialKey + + // process the log entries and create the gzipped log file + doMoveCompressFileSecure(ps, fileinfo) + + // get the log gzipped file path + logGzipped := listFilesInDir(uploadDevDir)[0] + rf, err := os.Open(logGzipped) + if err != nil { + t.Fatalf("Failed to open %s: %s", logGzipped, err) + } + defer rf.Close() + + gr, err := gzip.NewReader(rf) + if err != nil { + t.Fatalf("Failed to create gzip reader: %s", err) + } + defer gr.Close() + + // read the batch metadata from gzipped metadata. + aggSig, keyIter, err := getLogVerficiationMetadata(gr.Comment) + if err != nil { + t.Fatalf("Failed to read secure log metadata from %s: %s", logGzipped, err) + } + + logs := make([][]byte, 0) + scanner := bufio.NewScanner(gr) + for scanner.Scan() { + if !json.Valid(scanner.Bytes()) { + t.Fatalf("Invalid JSON: %s", scanner.Text()) + } + logs = append(logs, append([]byte(nil), scanner.Bytes()...)) + } + + if len(logs) < 3 { + t.Fatalf("Not enough log entries to stream integrity") + } + + // get a ranom log entry and delete it + corruptLogIdx := rand.Intn(len(logs)) + logs = append(logs[:corruptLogIdx], logs[corruptLogIdx+1:]...) + + // evolve the key to the same iteration as the one used to sign the log. 
+ verifKey := evolveKey(sk0Test, keyIter) + // verify the log + res := fssAggVer(verifKey, aggSig, logs) + if res != false { + t.Fatalf("Expected secure log verification to failed") + } +} + +func TestTruncationSecureLogVerification(t *testing.T) { + logger := logrus.StandardLogger() + log := base.NewSourceLogObject(logger, "seclog_test", os.Getpid()) + ps := pubsub.New(&pubsub.EmptyDriver{}, logger, log) + + // setup the test environment + makeTestDirs() + defer removeTestDirs() + + scr := logFile + dst := path.Join(collectDir, "dev.log.sample") + copyFile(scr, dst) + + var fileinfo fileChanInfo + fileinfo.notUpload = true + fileinfo.tmpfile = dst + fileinfo.inputSize = 0 + + // evolve the key, for good measure. + intialKey := evolveKey(sk0Test, 100) + secLog.keyIter = 100 + secLog.key = intialKey + + // process the log entries and create the gzipped log file + doMoveCompressFileSecure(ps, fileinfo) + + // get the log gzipped file path + logGzipped := listFilesInDir(uploadDevDir)[0] + rf, err := os.Open(logGzipped) + if err != nil { + t.Fatalf("Failed to open %s: %s", logGzipped, err) + } + defer rf.Close() + + gr, err := gzip.NewReader(rf) + if err != nil { + t.Fatalf("Failed to create gzip reader: %s", err) + } + defer gr.Close() + + // read the batch metadata from gzipped metadata. + aggSig, keyIter, err := getLogVerficiationMetadata(gr.Comment) + if err != nil { + t.Fatalf("Failed to read secure log metadata from %s: %s", logGzipped, err) + } + + logs := make([][]byte, 0) + scanner := bufio.NewScanner(gr) + for scanner.Scan() { + if !json.Valid(scanner.Bytes()) { + t.Fatalf("Invalid JSON: %s", scanner.Text()) + } + logs = append(logs, append([]byte(nil), scanner.Bytes()...)) + } + + if len(logs) < 3 { + t.Fatalf("Not enough log entries to test truncation") + } + + // delete a few log entries form the tail + logsTail := logs[:len(logs)-2] + + // evolve the key to the same iteration as the one used to sign the log. + verifKey := evolveKey(sk0Test, keyIter) + // verify the log + res := fssAggVer(verifKey, aggSig, logsTail) + if res != false { + t.Fatalf("Expected secure log verification to failed (tail truncation)") + } + + // delete a few log entries form the head + logsHead := logs[2:] + + // evolve the key to the same iteration as the one used to sign the log. 
+ verifKey = evolveKey(sk0Test, keyIter) + // verify the log + res = fssAggVer(verifKey, aggSig, logsHead) + if res != false { + t.Fatalf("Expected secure log verification to failed (head truncation)") + } +} diff --git a/pkg/newlog/testdata/dev.log.sample b/pkg/newlog/testdata/dev.log.sample new file mode 100644 index 0000000000..d2373588bf --- /dev/null +++ b/pkg/newlog/testdata/dev.log.sample @@ -0,0 +1,46 @@ +{"devID":"8dbec8c5-91c0-4f4b-85dc-ec67bee778c7","image":"IMGA","eveVersion":"6.8.2-kvm-amd64"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2360 [select, 17 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).serveConnection(0xc00034c600, 0x1d69ff8, 0xc00000eb98, 0x12)\\n\\t/pillar/pubsub/socketdriver/publish.go:267 +0xaed\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1\\n\\t/pillar/pubsub/socketdriver/publish.go:148 +0x274\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952576111Z\"}\n","msgid":3783,"timestamp":{"seconds":1707911329,"nanos":952576111},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2341 [select]:\\ngithub.com/lf-edge/eve/pkg/pillar/cmd/vaultmgr.Run(0xc0011e11d0, 0xc000e008c0, 0xc00195afc0, 0xc000010720)\\n\\t/pillar/cmd/vaultmgr/vaultmgr.go:858 +0xb6f\\nmain.startAgentAndDone(0x1acbaa0, 0xc000945c01, 0xc000699ef0, 0x8, 0xc0011e11d0, 0xc000e008c0, 0xc00195afc0)\\n\\t/pillar/zedbox/zedbox.go:241 +0x62\\ncreated by main.handleService\\n\\t/pillar/zedbox/zedbox.go:231 +0x3fb\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952591506Z\"}\n","msgid":3784,"timestamp":{"seconds":1707911329,"nanos":952591506},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2332 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).serveConnection(0xc000a81500, 0x1d69ff8, 0xc00000fe98, 0x0)\\n\\t/pillar/pubsub/socketdriver/publish.go:267 +0xaed\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1\\n\\t/pillar/pubsub/socketdriver/publish.go:148 +0x274\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952607158Z\"}\n","msgid":3785,"timestamp":{"seconds":1707911329,"nanos":952607158},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} 
+{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2011 [select]:\\ngithub.com/lf-edge/eve/pkg/pillar/cmd/zfsmanager.Run(0xc00142e570, 0xc000fe61c0, 0xc0015c1470, 0xc0010baf80)\\n\\t/pillar/cmd/zfsmanager/zfsmanager.go:97 +0x605\\nmain.startAgentAndDone(0x1acc070, 0xc000945c00, 0xc000520f50, 0xa, 0xc00142e570, 0xc000fe61c0, 0xc0015c1470)\\n\\t/pillar/zedbox/zedbox.go:241 +0x62\\ncreated by main.handleService\\n\\t/pillar/zedbox/zedbox.go:231 +0x3fb\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952622327Z\"}\n","msgid":3786,"timestamp":{"seconds":1707911329,"nanos":952622327},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2697 [IO wait, 126 minutes]:\\ninternal/poll.runtime_pollWait(0x7f79ba8f7dc0, 0x72, 0x0)\\n\\t/usr/lib/go/src/runtime/netpoll.go:222 +0x55\\ninternal/poll.(*pollDesc).wait(0xc000e8d418, 0x72, 0x0, 0x0, 0x1a1d5d2)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:87 +0x45\\ninternal/poll.(*pollDesc).waitRead(...)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:92\\ninternal/poll.(*FD).Accept(0xc000e8d400, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0)\\n\\t/usr/lib/go/src/internal/poll/fd_unix.go:401 +0x212\\nnet.(*netFD).accept(0xc000e8d400, 0x0, 0xc00134bf00, 0x5d0000)\\n\\t/usr/lib/go/src/net/fd_unix.go:172 +0x45\\nnet.(*UnixListener).accept(0xc0017fefc0, 0xc001529380, 0xc00134bf00, 0x20)\\n\\t/usr/lib/go/src/net/unixsock_posix.go:162 +0x32\\nnet.(*UnixListener).Accept(0xc0017fefc0, 0xc001529380, 0xc00034db00, 0x1d69ff8, 0xc001936008)\\n\\t/usr/lib/go/src/net/unixsock.go:260 +0x65\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1(0xc00034dbc0)\\n\\t/pillar/pubsub/socketdriver/publish.go:140 +0x95\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start\\n\\t/pillar/pubsub/socketdriver/publish.go:130 +0xf0\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952638765Z\"}\n","msgid":3787,"timestamp":{"seconds":1707911329,"nanos":952638765},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2164 [select, 120 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).serveConnection(0xc000a81380, 0x1d69ff8, 0xc000011890, 0x0)\\n\\t/pillar/pubsub/socketdriver/publish.go:267 +0xaed\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1\\n\\t/pillar/pubsub/socketdriver/publish.go:148 
+0x274\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952655478Z\"}\n","msgid":3788,"timestamp":{"seconds":1707911329,"nanos":952655478},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2165 [select, 1 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).serveConnection(0xc000a81440, 0x1d69ff8, 0xc0000118a8, 0x1)\\n\\t/pillar/pubsub/socketdriver/publish.go:267 +0xaed\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1\\n\\t/pillar/pubsub/socketdriver/publish.go:148 +0x274\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952712681Z\"}\n","msgid":3789,"timestamp":{"seconds":1707911329,"nanos":952712681},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2166 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).serveConnection(0xc000a80e40, 0x1d69ff8, 0xc0000118c8, 0x0)\\n\\t/pillar/pubsub/socketdriver/publish.go:267 +0xaed\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1\\n\\t/pillar/pubsub/socketdriver/publish.go:148 +0x274\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.95272817Z\"}\n","msgid":3790,"timestamp":{"seconds":1707911329,"nanos":952728170},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2167 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).serveConnection(0xc000a80fc0, 0x1d69ff8, 0xc0000118f0, 0x0)\\n\\t/pillar/pubsub/socketdriver/publish.go:267 +0xaed\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1\\n\\t/pillar/pubsub/socketdriver/publish.go:148 +0x274\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952743286Z\"}\n","msgid":3791,"timestamp":{"seconds":1707911329,"nanos":952743286},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2342 [IO wait, 17 minutes]:\\ninternal/poll.runtime_pollWait(0x7f79ba8f87b8, 0x72, 0x0)\\n\\t/usr/lib/go/src/runtime/netpoll.go:222 
+0x55\\ninternal/poll.(*pollDesc).wait(0xc001235118, 0x72, 0x0, 0x0, 0x0)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:87 +0x45\\ninternal/poll.(*pollDesc).waitRead(...)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:92\\ninternal/poll.(*FD).RawRead(0xc001235100, 0xc000e22f80, 0x0, 0x0)\\n\\t/usr/lib/go/src/internal/poll/fd_unix.go:553 +0xff\\nnet.(*rawConn).Read(0xc001ad42d8, 0xc000e22f80, 0xc001ad42d8, 0x0)\\n\\t/usr/lib/go/src/net/rawconn.go:43 +0x68\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub.ConnReadCheck(0x1d69ff8, 0xc00000ea48, 0x0, 0x0)\\n\\t/pillar/pubsub/util.go:82 +0x168\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Subscriber).connectAndRead(0xc0014bc240, 0xc001239f98, 0x6, 0xc0006983f0, 0x6, 0xc00108e000, 0x17b4, 0x17b5)\\n\\t/pillar/pubsub/socketdriver/subscribe.go:251 +0x38e\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Subscriber).watchSock(0xc0014bc240)\\n\\t/pillar/pubsub/socketdriver/subscribe.go:173 +0x45\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Subscriber).Start\\n\\t/pillar/pubsub/socketdriver/subscribe.go:132 +0x470\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952776851Z\"}\n","msgid":3792,"timestamp":{"seconds":1707911329,"nanos":952776851},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2343 [IO wait, 126 minutes]:\\ninternal/poll.runtime_pollWait(0x7f79ba8f88a0, 0x72, 0x0)\\n\\t/usr/lib/go/src/runtime/netpoll.go:222 +0x55\\ninternal/poll.(*pollDesc).wait(0xc001234e98, 0x72, 0x0, 0x0, 0x0)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:87 +0x45\\ninternal/poll.(*pollDesc).waitRead(...)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:92\\ninternal/poll.(*FD).RawRead(0xc001234e80, 0xc0008bc5c0, 0x0, 0x0)\\n\\t/usr/lib/go/src/internal/poll/fd_unix.go:553 +0xff\\nnet.(*rawConn).Read(0xc00000ecd0, 0xc0008bc5c0, 0xc00000ecd0, 0x0)\\n\\t/usr/lib/go/src/net/rawconn.go:43 +0x68\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub.ConnReadCheck(0x1d69ff8, 0xc00000ea30, 0x0, 0x0)\\n\\t/pillar/pubsub/util.go:82 +0x168\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Subscriber).connectAndRead(0xc0014bdb90, 0xc001478f98, 0x8, 0x0, 0x0, 0x0, 0x0, 0x0)\\n\\t/pillar/pubsub/socketdriver/subscribe.go:251 +0x38e\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Subscriber).watchSock(0xc0014bdb90)\\n\\t/pillar/pubsub/socketdriver/subscribe.go:173 +0x45\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Subscriber).Start\\n\\t/pillar/pubsub/socketdriver/subscribe.go:132 +0x470\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952816114Z\"}\n","msgid":3793,"timestamp":{"seconds":1707911329,"nanos":952816114},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2344 [IO wait, 126 
minutes]:\\ninternal/poll.runtime_pollWait(0x7f79ba9c95b8, 0x72, 0x0)\\n\\t/usr/lib/go/src/runtime/netpoll.go:222 +0x55\\ninternal/poll.(*pollDesc).wait(0xc001318f98, 0x72, 0x0, 0x0, 0x1a1d5d2)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:87 +0x45\\ninternal/poll.(*pollDesc).waitRead(...)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:92\\ninternal/poll.(*FD).Accept(0xc001318f80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0)\\n\\t/usr/lib/go/src/internal/poll/fd_unix.go:401 +0x212\\nnet.(*netFD).accept(0xc001318f80, 0x0, 0xc001238f00, 0x5d0000)\\n\\t/usr/lib/go/src/net/fd_unix.go:172 +0x45\\nnet.(*UnixListener).accept(0xc00131c4e0, 0xc00049ef60, 0xc001238f00, 0x20)\\n\\t/usr/lib/go/src/net/unixsock_posix.go:162 +0x32\\nnet.(*UnixListener).Accept(0xc00131c4e0, 0xc00049ef60, 0xc000ece900, 0x1d69ff8, 0xc0010bbdd0)\\n\\t/usr/lib/go/src/net/unixsock.go:260 +0x65\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1(0xc000ece900)\\n\\t/pillar/pubsub/socketdriver/publish.go:140 +0x95\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start\\n\\t/pillar/pubsub/socketdriver/publish.go:130 +0xf0\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952832812Z\"}\n","msgid":3794,"timestamp":{"seconds":1707911329,"nanos":952832812},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2345 [IO wait, 126 minutes]:\\ninternal/poll.runtime_pollWait(0x7f79ba9c94d0, 0x72, 0x0)\\n\\t/usr/lib/go/src/runtime/netpoll.go:222 +0x55\\ninternal/poll.(*pollDesc).wait(0xc001319118, 0x72, 0x0, 0x0, 0x1a1d5d2)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:87 +0x45\\ninternal/poll.(*pollDesc).waitRead(...)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:92\\ninternal/poll.(*FD).Accept(0xc001319100, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0)\\n\\t/usr/lib/go/src/internal/poll/fd_unix.go:401 +0x212\\nnet.(*netFD).accept(0xc001319100, 0x0, 0xc001588f00, 0x5d0000)\\n\\t/usr/lib/go/src/net/fd_unix.go:172 +0x45\\nnet.(*UnixListener).accept(0xc00131ca50, 0xc00049f020, 0xc001588f00, 0x20)\\n\\t/usr/lib/go/src/net/unixsock_posix.go:162 +0x32\\nnet.(*UnixListener).Accept(0xc00131ca50, 0xc00049f020, 0xc000ece900, 0x1d69ff8, 0xc0010ba1d0)\\n\\t/usr/lib/go/src/net/unixsock.go:260 +0x65\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1(0xc000ece9c0)\\n\\t/pillar/pubsub/socketdriver/publish.go:140 +0x95\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start\\n\\t/pillar/pubsub/socketdriver/publish.go:130 +0xf0\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952850619Z\"}\n","msgid":3795,"timestamp":{"seconds":1707911329,"nanos":952850619},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2364 [IO wait, 126 
minutes]:\\ninternal/poll.runtime_pollWait(0x7f79ba8f85e8, 0x72, 0x0)\\n\\t/usr/lib/go/src/runtime/netpoll.go:222 +0x55\\ninternal/poll.(*pollDesc).wait(0xc001235998, 0x72, 0x0, 0x0, 0x1a1d5d2)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:87 +0x45\\ninternal/poll.(*pollDesc).waitRead(...)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:92\\ninternal/poll.(*FD).Accept(0xc001235980, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0)\\n\\t/usr/lib/go/src/internal/poll/fd_unix.go:401 +0x212\\nnet.(*netFD).accept(0xc001235980, 0x0, 0xc0007a4f00, 0x400000)\\n\\t/usr/lib/go/src/net/fd_unix.go:172 +0x45\\nnet.(*UnixListener).accept(0xc000d19020, 0xc00093d620, 0xc0007a4f00, 0x0)\\n\\t/usr/lib/go/src/net/unixsock_posix.go:162 +0x32\\nnet.(*UnixListener).Accept(0xc000d19020, 0xc00093d620, 0x0, 0x0, 0xc0007a4f90)\\n\\t/usr/lib/go/src/net/unixsock.go:260 +0x65\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1(0xc000a80600)\\n\\t/pillar/pubsub/socketdriver/publish.go:140 +0x95\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start\\n\\t/pillar/pubsub/socketdriver/publish.go:130 +0xf0\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.95288038Z\"}\n","msgid":3796,"timestamp":{"seconds":1707911329,"nanos":952880380},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2377 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).serveConnection(0xc000ece900, 0x1d69ff8, 0xc000010d78, 0x0)\\n\\t/pillar/pubsub/socketdriver/publish.go:267 +0xaed\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1\\n\\t/pillar/pubsub/socketdriver/publish.go:148 +0x274\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952895921Z\"}\n","msgid":3797,"timestamp":{"seconds":1707911329,"nanos":952895921},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 462598 [select, 4 minutes]:\\nnet/http.(*persistConn).writeLoop(0xc000322a20)\\n\\t/usr/lib/go/src/net/http/transport.go:2382 +0xf7\\ncreated by net/http.(*Transport).dialConn\\n\\t/usr/lib/go/src/net/http/transport.go:1744 +0xc9c\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952910582Z\"}\n","msgid":3798,"timestamp":{"seconds":1707911329,"nanos":952910582},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2383 [chan receive, 126 
minutes]:\\ngoogle.golang.org/grpc.(*addrConn).resetTransport(0xc0013d3600)\\n\\t/pillar/vendor/google.golang.org/grpc/clientconn.go:1213 +0x465\\ncreated by google.golang.org/grpc.(*addrConn).connect\\n\\t/pillar/vendor/google.golang.org/grpc/clientconn.go:843 +0x12a\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952945459Z\"}\n","msgid":3799,"timestamp":{"seconds":1707911329,"nanos":952945459},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2385 [IO wait]:\\ninternal/poll.runtime_pollWait(0x7f79ba8f8418, 0x72, 0xffffffffffffffff)\\n\\t/usr/lib/go/src/runtime/netpoll.go:222 +0x55\\ninternal/poll.(*pollDesc).wait(0xc001319998, 0x72, 0x8000, 0x8000, 0xffffffffffffffff)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:87 +0x45\\ninternal/poll.(*pollDesc).waitRead(...)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:92\\ninternal/poll.(*FD).Read(0xc001319980, 0xc000e80000, 0x8000, 0x8000, 0x0, 0x0, 0x0)\\n\\t/usr/lib/go/src/internal/poll/fd_unix.go:166 +0x1d5\\nnet.(*netFD).Read(0xc001319980, 0xc000e80000, 0x8000, 0x8000, 0xacdd3e, 0x800000601, 0x0)\\n\\t/usr/lib/go/src/net/fd_posix.go:55 +0x4f\\nnet.(*conn).Read(0xc000010f28, 0xc000e80000, 0x8000, 0x8000, 0x0, 0x0, 0x0)\\n\\t/usr/lib/go/src/net/net.go:183 +0x91\\nbufio.(*Reader).Read(0xc0012af260, 0xc000618b98, 0x9, 0x9, 0xc0008ca480, 0xc00146ddd8, 0x405cc5)\\n\\t/usr/lib/go/src/bufio/bufio.go:227 +0x222\\nio.ReadAtLeast(0x1d221a0, 0xc0012af260, 0xc000618b98, 0x9, 0x9, 0x9, 0x18, 0xc000432570, 0x462780)\\n\\t/usr/lib/go/src/io/io.go:328 +0x87\\nio.ReadFull(...)\\n\\t/usr/lib/go/src/io/io.go:347\\ngolang.org/x/net/http2.readFrameHeader(0xc000618b98, 0x9, 0x9, 0x1d221a0, 0xc0012af260, 0x0, 0xc000000000, 0xc00146de3f, 0xc00146de01)\\n\\t/pillar/vendor/golang.org/x/net/http2/frame.go:237 +0x89\\ngolang.org/x/net/http2.(*Framer).ReadFrame(0xc000618b60, 0xc000975c68, 0xc000975c68, 0x0, 0x0)\\n\\t/pillar/vendor/golang.org/x/net/http2/frame.go:492 +0xa5\\ngoogle.golang.org/grpc/internal/transport.(*http2Client).reader(0xc00110bc00)\\n\\t/pillar/vendor/google.golang.org/grpc/internal/transport/http2_client.go:1330 +0x185\\ncreated by google.golang.org/grpc/internal/transport.newHTTP2Client\\n\\t/pillar/vendor/google.golang.org/grpc/internal/transport/http2_client.go:345 +0x1011\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952960808Z\"}\n","msgid":3800,"timestamp":{"seconds":1707911329,"nanos":952960808},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2386 [select]:\\ngoogle.golang.org/grpc/internal/transport.(*controlBuffer).get(0xc000afe1e0, 0x1, 0x0, 0x0, 0x0, 0x0)\\n\\t/pillar/vendor/google.golang.org/grpc/internal/transport/controlbuf.go:395 +0xff\\ngoogle.golang.org/grpc/internal/transport.(*loopyWriter).run(0xc0012af380, 0x0, 
0x0)\\n\\t/pillar/vendor/google.golang.org/grpc/internal/transport/controlbuf.go:515 +0x1dd\\ngoogle.golang.org/grpc/internal/transport.newHTTP2Client.func3(0xc00110bc00)\\n\\t/pillar/vendor/google.golang.org/grpc/internal/transport/http2_client.go:391 +0x7b\\ncreated by google.golang.org/grpc/internal/transport.newHTTP2Client\\n\\t/pillar/vendor/google.golang.org/grpc/internal/transport/http2_client.go:389 +0x11e7\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952978372Z\"}\n","msgid":3801,"timestamp":{"seconds":1707911329,"nanos":952978372},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 3071 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).serveConnection(0xc00034dbc0, 0x1d69ff8, 0xc001936008, 0x0)\\n\\t/pillar/pubsub/socketdriver/publish.go:267 +0xaed\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1\\n\\t/pillar/pubsub/socketdriver/publish.go:148 +0x274\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.952993734Z\"}\n","msgid":3802,"timestamp":{"seconds":1707911329,"nanos":952993734},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2422 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).serveConnection(0xc000ece900, 0x1d69ff8, 0xc000011430, 0x1)\\n\\t/pillar/pubsub/socketdriver/publish.go:267 +0xaed\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1\\n\\t/pillar/pubsub/socketdriver/publish.go:148 +0x274\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953045486Z\"}\n","msgid":3803,"timestamp":{"seconds":1707911329,"nanos":953045486},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2940 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).serveConnection(0xc000ece900, 0x1d69ff8, 0xc0010bbdd0, 0x6)\\n\\t/pillar/pubsub/socketdriver/publish.go:267 +0xaed\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1\\n\\t/pillar/pubsub/socketdriver/publish.go:148 
+0x274\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953060295Z\"}\n","msgid":3804,"timestamp":{"seconds":1707911329,"nanos":953060295},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2469 [chan receive, 126 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/worker.(*Pool).mergeResult(0xc000d8f800, 0x1d6d388, 0xc001551e60)\\n\\t/pillar/worker/workerpool.go:93 +0x170\\ncreated by github.com/lf-edge/eve/pkg/pillar/worker.(*Pool).TrySubmit\\n\\t/pillar/worker/workerpool.go:178 +0x3c5\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953074872Z\"}\n","msgid":3805,"timestamp":{"seconds":1707911329,"nanos":953074872},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2945 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/libs/zedUpload.(*DronaCtx).ListenAndServe(0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:132 +0xa5\\ngithub.com/lf-edge/eve/libs/zedUpload.NewDronaCtx.func1(0xc000647014, 0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:281 +0x53\\ncreated by github.com/lf-edge/eve/libs/zedUpload.NewDronaCtx\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:279 +0x171\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953089735Z\"}\n","msgid":3806,"timestamp":{"seconds":1707911329,"nanos":953089735},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2468 [chan receive, 126 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/worker.(*Single).processWork(0xc001551e60, 0x1d24a40, 0xc001056ed0, 0x171e680, 0xc0014ce3c0, 0xc000e07e00, 0xc000e07e60)\\n\\t/pillar/worker/worker.go:141 +0x2c5\\ncreated by github.com/lf-edge/eve/pkg/pillar/worker.NewWorker\\n\\t/pillar/worker/worker.go:117 +0x24c\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953122888Z\"}\n","msgid":3807,"timestamp":{"seconds":1707911329,"nanos":953122888},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2405 [select, 1 
minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/flextimer.flexTicker(0xc001490000, 0xc000d7b1a0)\\n\\t/pillar/flextimer/flextimer.go:153 +0x2f5\\ncreated by github.com/lf-edge/eve/pkg/pillar/flextimer.newFlexTicker\\n\\t/pillar/flextimer/flextimer.go:117 +0x67\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953153353Z\"}\n","msgid":3808,"timestamp":{"seconds":1707911329,"nanos":953153353},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2910 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/libs/zedUpload.(*DronaCtx).ListenAndServe(0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:132 +0xa5\\ngithub.com/lf-edge/eve/libs/zedUpload.NewDronaCtx.func1(0xc000647014, 0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:281 +0x53\\ncreated by github.com/lf-edge/eve/libs/zedUpload.NewDronaCtx\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:279 +0x171\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953209123Z\"}\n","msgid":3809,"timestamp":{"seconds":1707911329,"nanos":953209123},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2906 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/libs/zedUpload.(*DronaCtx).ListenAndServe(0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:132 +0xa5\\ngithub.com/lf-edge/eve/libs/zedUpload.NewDronaCtx.func1(0xc000647014, 0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:281 +0x53\\ncreated by github.com/lf-edge/eve/libs/zedUpload.NewDronaCtx\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:279 +0x171\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953246339Z\"}\n","msgid":3810,"timestamp":{"seconds":1707911329,"nanos":953246339},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2908 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/libs/zedUpload.(*DronaCtx).ListenAndServe(0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:132 +0xa5\\ngithub.com/lf-edge/eve/libs/zedUpload.NewDronaCtx.func1(0xc000647014, 0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:281 +0x53\\ncreated by github.com/lf-edge/eve/libs/zedUpload.NewDronaCtx\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:279 
+0x171\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953261833Z\"}\n","msgid":3811,"timestamp":{"seconds":1707911329,"nanos":953261833},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2909 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/libs/zedUpload.(*DronaCtx).ListenAndServe(0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:132 +0xa5\\ngithub.com/lf-edge/eve/libs/zedUpload.NewDronaCtx.func1(0xc000647014, 0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:281 +0x53\\ncreated by github.com/lf-edge/eve/libs/zedUpload.NewDronaCtx\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:279 +0x171\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953276606Z\"}\n","msgid":3812,"timestamp":{"seconds":1707911329,"nanos":953276606},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2946 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/libs/zedUpload.(*DronaCtx).ListenAndServe(0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:132 +0xa5\\ngithub.com/lf-edge/eve/libs/zedUpload.NewDronaCtx.func1(0xc000647014, 0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:281 +0x53\\ncreated by github.com/lf-edge/eve/libs/zedUpload.NewDronaCtx\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:279 +0x171\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953291616Z\"}\n","msgid":3813,"timestamp":{"seconds":1707911329,"nanos":953291616},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2912 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/libs/zedUpload.(*DronaCtx).ListenAndServe(0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:132 +0xa5\\ngithub.com/lf-edge/eve/libs/zedUpload.NewDronaCtx.func1(0xc000647014, 0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:281 +0x53\\ncreated by github.com/lf-edge/eve/libs/zedUpload.NewDronaCtx\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:279 
+0x171\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953308511Z\"}\n","msgid":3814,"timestamp":{"seconds":1707911329,"nanos":953308511},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2911 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/libs/zedUpload.(*DronaCtx).ListenAndServe(0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:132 +0xa5\\ngithub.com/lf-edge/eve/libs/zedUpload.NewDronaCtx.func1(0xc000647014, 0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:281 +0x53\\ncreated by github.com/lf-edge/eve/libs/zedUpload.NewDronaCtx\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:279 +0x171\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953323695Z\"}\n","msgid":3815,"timestamp":{"seconds":1707911329,"nanos":953323695},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2546 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).serveConnection(0xc000ece900, 0x1d69ff8, 0xc00000f4f0, 0x2)\\n\\t/pillar/pubsub/socketdriver/publish.go:267 +0xaed\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1\\n\\t/pillar/pubsub/socketdriver/publish.go:148 +0x274\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953342048Z\"}\n","msgid":3816,"timestamp":{"seconds":1707911329,"nanos":953342048},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2667 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).serveConnection(0xc000ece900, 0x1d69ff8, 0xc0010ba198, 0x5)\\n\\t/pillar/pubsub/socketdriver/publish.go:267 +0xaed\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1\\n\\t/pillar/pubsub/socketdriver/publish.go:148 +0x274\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953373872Z\"}\n","msgid":3817,"timestamp":{"seconds":1707911329,"nanos":953373872},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 
2668 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).serveConnection(0xc000ece9c0, 0x1d69ff8, 0xc0010ba1d0, 0x0)\\n\\t/pillar/pubsub/socketdriver/publish.go:267 +0xaed\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1\\n\\t/pillar/pubsub/socketdriver/publish.go:148 +0x274\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953388599Z\"}\n","msgid":3818,"timestamp":{"seconds":1707911329,"nanos":953388599},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2696 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).serveConnection(0xc000ece900, 0x1d69ff8, 0xc0010bb0c8, 0x4)\\n\\t/pillar/pubsub/socketdriver/publish.go:267 +0xaed\\ncreated by github.com/lf-edge/eve/pkg/pillar/pubsub/socketdriver.(*Publisher).Start.func1\\n\\t/pillar/pubsub/socketdriver/publish.go:148 +0x274\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953424684Z\"}\n","msgid":3819,"timestamp":{"seconds":1707911329,"nanos":953424684},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2698 [chan receive, 126 minutes]:\\ngithub.com/lf-edge/eve/pkg/pillar/cmd/zfsmanager.deviceWatcher(0xc001529500)\\n\\t/pillar/cmd/zfsmanager/zfsmanager.go:161 +0x234\\ncreated by github.com/lf-edge/eve/pkg/pillar/cmd/zfsmanager.Run\\n\\t/pillar/cmd/zfsmanager/zfsmanager.go:94 +0x545\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953455524Z\"}\n","msgid":3820,"timestamp":{"seconds":1707911329,"nanos":953455524},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2699 [syscall, 126 minutes]:\\nsyscall.Syscall6(0xe8, 0x162, 0xc0015a7c2c, 0x7, 0xffffffffffffffff, 0x0, 0x0, 0x6666666666666666, 0x3a646e427061430a, 0x6631303030303009)\\n\\t/usr/lib/go/src/syscall/asm_linux_amd64.s:43 +0x5\\ngolang.org/x/sys/unix.EpollWait(0x162, 0xc0015a7c2c, 0x7, 0x7, 0xffffffffffffffff, 0x530a30093a737265, 0x0, 0x0)\\n\\t/pillar/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go:77 +0x72\\ngithub.com/fsnotify/fsnotify.(*fdPoller).wait(0xc001427840, 0x0, 0x0, 0x0)\\n\\t/pillar/vendor/github.com/fsnotify/fsnotify/inotify_poller.go:86 +0x91\\ngithub.com/fsnotify/fsnotify.(*Watcher).readEvents(0xc000ae5400)\\n\\t/pillar/vendor/github.com/fsnotify/fsnotify/inotify.go:192 +0x206\\ncreated by 
github.com/fsnotify/fsnotify.NewWatcher\\n\\t/pillar/vendor/github.com/fsnotify/fsnotify/inotify.go:59 +0x1ab\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953492281Z\"}\n","msgid":3821,"timestamp":{"seconds":1707911329,"nanos":953492281},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2905 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/libs/zedUpload.(*DronaCtx).ListenAndServe(0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:132 +0xa5\\ngithub.com/lf-edge/eve/libs/zedUpload.NewDronaCtx.func1(0xc000647014, 0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:281 +0x53\\ncreated by github.com/lf-edge/eve/libs/zedUpload.NewDronaCtx\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:279 +0x171\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953508165Z\"}\n","msgid":3822,"timestamp":{"seconds":1707911329,"nanos":953508165},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 2907 [select, 126 minutes]:\\ngithub.com/lf-edge/eve/libs/zedUpload.(*DronaCtx).ListenAndServe(0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:132 +0xa5\\ngithub.com/lf-edge/eve/libs/zedUpload.NewDronaCtx.func1(0xc000647014, 0xc0006ba390)\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:281 +0x53\\ncreated by github.com/lf-edge/eve/libs/zedUpload.NewDronaCtx\\n\\t/pillar/vendor/github.com/lf-edge/eve/libs/zedUpload/datastore.go:279 +0x171\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953525183Z\"}\n","msgid":3823,"timestamp":{"seconds":1707911329,"nanos":953525183},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 448311 [IO wait, 7 minutes]:\\ninternal/poll.runtime_pollWait(0x7f79ba900478, 0x72, 0xffffffffffffffff)\\n\\t/usr/lib/go/src/runtime/netpoll.go:222 +0x55\\ninternal/poll.(*pollDesc).wait(0xc000eb3a98, 0x72, 0x1000, 0x1000, 0xffffffffffffffff)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:87 +0x45\\ninternal/poll.(*pollDesc).waitRead(...)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:92\\ninternal/poll.(*FD).Read(0xc000eb3a80, 0xc00174a000, 0x1000, 0x1000, 0x0, 0x0, 0x0)\\n\\t/usr/lib/go/src/internal/poll/fd_unix.go:166 +0x1d5\\nnet.(*netFD).Read(0xc000eb3a80, 0xc00174a000, 0x1000, 0x1000, 0x43a17c, 0xc00134dc38, 0x465b00)\\n\\t/usr/lib/go/src/net/fd_posix.go:55 
+0x4f\\nnet.(*conn).Read(0xc001937e78, 0xc00174a000, 0x1000, 0x1000, 0x0, 0x0, 0x0)\\n\\t/usr/lib/go/src/net/net.go:183 +0x91\\nnet/http.(*persistConn).Read(0xc0017da5a0, 0xc00174a000, 0x1000, 0x1000, 0xc00093daa0, 0xc00134dd40, 0x405755)\\n\\t/usr/lib/go/src/net/http/transport.go:1922 +0x77\\nbufio.(*Reader).fill(0xc00195e480)\\n\\t/usr/lib/go/src/bufio/bufio.go:101 +0x108\\nbufio.(*Reader).Peek(0xc00195e480, 0x1, 0x0, 0x1, 0x4, 0x1, 0x3)\\n\\t/usr/lib/go/src/bufio/bufio.go:139 +0x4f\\nnet/http.(*persistConn).readLoop(0xc0017da5a0)\\n\\t/usr/lib/go/src/net/http/transport.go:2083 +0x1a8\\ncreated by net/http.(*Transport).dialConn\\n\\t/usr/lib/go/src/net/http/transport.go:1743 +0xc77\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953540644Z\"}\n","msgid":3824,"timestamp":{"seconds":1707911329,"nanos":953540644},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 448312 [select, 7 minutes]:\\nnet/http.(*persistConn).writeLoop(0xc0017da5a0)\\n\\t/usr/lib/go/src/net/http/transport.go:2382 +0xf7\\ncreated by net/http.(*Transport).dialConn\\n\\t/usr/lib/go/src/net/http/transport.go:1744 +0xc9c\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953556906Z\"}\n","msgid":3825,"timestamp":{"seconds":1707911329,"nanos":953556906},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:181\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"log_event_type\":\"log\",\"msg\":\"goroutine 462597 [IO wait, 4 minutes]:\\ninternal/poll.runtime_pollWait(0x7f79ba8f7b08, 0x72, 0xffffffffffffffff)\\n\\t/usr/lib/go/src/runtime/netpoll.go:222 +0x55\\ninternal/poll.(*pollDesc).wait(0xc001882b98, 0x72, 0x1000, 0x1000, 0xffffffffffffffff)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:87 +0x45\\ninternal/poll.(*pollDesc).waitRead(...)\\n\\t/usr/lib/go/src/internal/poll/fd_poll_runtime.go:92\\ninternal/poll.(*FD).Read(0xc001882b80, 0xc00182b000, 0x1000, 0x1000, 0x0, 0x0, 0x0)\\n\\t/usr/lib/go/src/internal/poll/fd_unix.go:166 +0x1d5\\nnet.(*netFD).Read(0xc001882b80, 0xc00182b000, 0x1000, 0x1000, 0x43a17c, 0xc00158cc38, 0x465b00)\\n\\t/usr/lib/go/src/net/fd_posix.go:55 +0x4f\\nnet.(*conn).Read(0xc000f882f0, 0xc00182b000, 0x1000, 0x1000, 0x0, 0x0, 0x0)\\n\\t/usr/lib/go/src/net/net.go:183 +0x91\\nnet/http.(*persistConn).Read(0xc000322a20, 0xc00182b000, 0x1000, 0x1000, 0xc0013c6ae0, 0xc00158cd40, 0x405755)\\n\\t/usr/lib/go/src/net/http/transport.go:1922 +0x77\\nbufio.(*Reader).fill(0xc0014e9e00)\\n\\t/usr/lib/go/src/bufio/bufio.go:101 +0x108\\nbufio.(*Reader).Peek(0xc0014e9e00, 0x1, 0x0, 0x1, 0x4, 0x1, 0x3)\\n\\t/usr/lib/go/src/bufio/bufio.go:139 +0x4f\\nnet/http.(*persistConn).readLoop(0xc000322a20)\\n\\t/usr/lib/go/src/net/http/transport.go:2083 +0x1a8\\ncreated by net/http.(*Transport).dialConn\\n\\t/usr/lib/go/src/net/http/transport.go:1743 
+0xc77\\n\",\"obj_key\":\"sigusr1_stacks\",\"obj_type\":\"sigusr1_stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953571822Z\"}\n","msgid":3826,"timestamp":{"seconds":1707911329,"nanos":953571822},"filename":"/pillar/agentlog/agentlog.go:181","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} +{"severity":"warning","source":"zedbox","iid":"2003","content":"{\"file\":\"/pillar/agentlog/agentlog.go:183\",\"func\":\"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals\",\"level\":\"warning\",\"msg\":\"SIGUSR1: end of stacks\",\"pid\":2003,\"source\":\"zedbox\",\"time\":\"2024-02-14T11:48:49.953589346Z\"}\n","msgid":3827,"timestamp":{"seconds":1707911329,"nanos":953589346},"filename":"/pillar/agentlog/agentlog.go:183","function":"github.com/lf-edge/eve/pkg/pillar/agentlog.handleSignals"} diff --git a/pkg/newlog/testdata/perf/README.md b/pkg/newlog/testdata/perf/README.md new file mode 100644 index 0000000000..44f8f15164 --- /dev/null +++ b/pkg/newlog/testdata/perf/README.md @@ -0,0 +1,39 @@ +# Performance Data + +This directory contains performance data for the secure logging mechanisms introduced to `newlogd`. The algorithm used is "Forward-Secure Sequential Aggregate Authentication." + +Tests were performed with 10, 20, 30, 40, 50, 100, 200, and 500 runs. A single run is the time required to process a batch of logs. Each batch of logs contains roughly 400 log entries, with each entry varying in size. The base log file is artificially grown by appending its content to itself, so expect a small margin of error in these benchmarks compared to what you might see in the real world. + +This is a microbenchmark, measuring the performance degradation only for the log processing function `doMoveCompressFile*`. + +## FssAggSig + +This is the cost of computing the aggregated signature for each batch of logs. The results show that adding the aggregated signature to each batch of logs incurs a performance cost of 28% to 46% per processed batch of logs, with the cost dropping on higher-count runs as system caches are utilized. + +![](fssaggsig_overhead_and_cost.png) +*FssAggSig Overhead and Cost* + +![](fssaggsig_average_secure_non_secure_time.png) +*FssAggSig Timing* + +## FssAggVer + +This is the cost of verifying the aggregated signature for each batch of logs. The verification time grows quickly as the number of key iterations increases, because verification has to evolve the key from the initial secret through all previous iterations. + + +![](fssaggver_overhead_and_cost.png) +*FssAggVer Overhead and Cost* + +![](fssaggver_average_secure_non_secure_time.png) +*FssAggVer Timing* + + +## FssAggVer with Cached Keys + +This is the cost of verifying the aggregated signature for each batch of logs when the verification key is cached at every multiple of 10,000 key iterations, allowing the verification algorithm to take shortcuts and verify the logs faster. The results show that verifying each batch of logs can add a performance cost of 35% to 53%.
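As a minimal sketch of the caching idea described above (hypothetical Go code, not the newlogd implementation; the names `keyCache`, `keyAt`, `evolve`, and `checkpointInterval` are invented for illustration), a verifier can store the evolved key at every multiple of 10,000 iterations and resume from the nearest checkpoint instead of evolving the key from the initial secret every time:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

const checkpointInterval = 10000

// evolve applies one forward-secure key update (hash of the previous key).
func evolve(key []byte) []byte {
	sum := sha256.Sum256(key)
	return sum[:]
}

// keyCache remembers evolved keys at every multiple of checkpointInterval.
type keyCache struct {
	initial []byte
	cached  map[uint64][]byte // iteration count -> evolved key
}

// keyAt returns the key after n evolutions, starting from the highest cached
// checkpoint at or below n and storing any new checkpoints it crosses.
func (c *keyCache) keyAt(n uint64) []byte {
	start, key := uint64(0), c.initial
	for i := uint64(checkpointInterval); i <= n; i += checkpointInterval {
		k, ok := c.cached[i]
		if !ok {
			break
		}
		start, key = i, k
	}
	for i := start; i < n; i++ {
		key = evolve(key)
		if (i+1)%checkpointInterval == 0 {
			c.cached[i+1] = key
		}
	}
	return key
}

func main() {
	c := &keyCache{initial: []byte("initial-secret"), cached: map[uint64][]byte{}}
	k1 := c.keyAt(25000) // evolves 25000 times, caching the keys at 10000 and 20000
	k2 := c.keyAt(25000) // only 5000 evolutions, resumed from the 20000 checkpoint
	fmt.Println(fmt.Sprintf("%x", k1) == fmt.Sprintf("%x", k2))
}
```

With checkpointing like this, reaching the key for a late batch costs at most one checkpoint interval of key updates, which matches the much flatter overhead curve in the cached-keys results below.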
+ +![](fssaggver_cached_keys_10000_overhead_and_cost.png) +*FssAggVer (with cached keys) Overhead and Cost* + +![](fssaggver_cached_keys_10000_average_secure_non_secure_time.png) +*FssAggVer (with cached keys) Timing* \ No newline at end of file diff --git a/pkg/newlog/testdata/perf/fssaggsig_average_secure_non_secure_time.png b/pkg/newlog/testdata/perf/fssaggsig_average_secure_non_secure_time.png new file mode 100644 index 0000000000..ae0d5750d6 Binary files /dev/null and b/pkg/newlog/testdata/perf/fssaggsig_average_secure_non_secure_time.png differ diff --git a/pkg/newlog/testdata/perf/fssaggsig_overhead_and_cost.png b/pkg/newlog/testdata/perf/fssaggsig_overhead_and_cost.png new file mode 100644 index 0000000000..0107c8e19e Binary files /dev/null and b/pkg/newlog/testdata/perf/fssaggsig_overhead_and_cost.png differ diff --git a/pkg/newlog/testdata/perf/fssaggsig_timing_results.csv b/pkg/newlog/testdata/perf/fssaggsig_timing_results.csv new file mode 100644 index 0000000000..73044b5173 --- /dev/null +++ b/pkg/newlog/testdata/perf/fssaggsig_timing_results.csv @@ -0,0 +1,9 @@ +Test Run,Average Secure Time (us),Average Non-Secure Time (us),Performance Cost (us),Performance Overhead (%) +10,22588,17526,5062,28.88 +20,33573,25075,8498,33.89 +30,57371,39083,18288,46.79 +40,65602,45449,20153,44.34 +50,76776,53785,22991,42.75 +100,75143,51382,23761,46.24 +200,67506,46271,21235,45.89 +500,53891,37241,16650,44.71 diff --git a/pkg/newlog/testdata/perf/fssaggver_average_secure_non_secure_time.png b/pkg/newlog/testdata/perf/fssaggver_average_secure_non_secure_time.png new file mode 100644 index 0000000000..bd5caa1c1d Binary files /dev/null and b/pkg/newlog/testdata/perf/fssaggver_average_secure_non_secure_time.png differ diff --git a/pkg/newlog/testdata/perf/fssaggver_cached_keys_10000_average_secure_non_secure_time.png b/pkg/newlog/testdata/perf/fssaggver_cached_keys_10000_average_secure_non_secure_time.png new file mode 100644 index 0000000000..c20fb77d31 Binary files /dev/null and b/pkg/newlog/testdata/perf/fssaggver_cached_keys_10000_average_secure_non_secure_time.png differ diff --git a/pkg/newlog/testdata/perf/fssaggver_cached_keys_10000_overhead_and_cost.png b/pkg/newlog/testdata/perf/fssaggver_cached_keys_10000_overhead_and_cost.png new file mode 100644 index 0000000000..a2e21807e9 Binary files /dev/null and b/pkg/newlog/testdata/perf/fssaggver_cached_keys_10000_overhead_and_cost.png differ diff --git a/pkg/newlog/testdata/perf/fssaggver_cached_keys_10000_timing_results.csv b/pkg/newlog/testdata/perf/fssaggver_cached_keys_10000_timing_results.csv new file mode 100644 index 0000000000..ed41e5794a --- /dev/null +++ b/pkg/newlog/testdata/perf/fssaggver_cached_keys_10000_timing_results.csv @@ -0,0 +1,9 @@ +Test Run,Average Secure Time (us),Average Non-Secure Time (us),Performance Cost (us),Performance Overhead (%) +10,2561,1908,653,34.22 +20,3960,2892,1068,36.93 +30,5488,3892,1596,41.01 +40,7053,4765,2288,48.02 +50,10219,6904,3315,48.02 +100,8483,5542,2941,53.07 +200,8512,5603,2909,51.92 +500,7414,4856,2558,52.68 diff --git a/pkg/newlog/testdata/perf/fssaggver_overhead_and_cost.png b/pkg/newlog/testdata/perf/fssaggver_overhead_and_cost.png new file mode 100644 index 0000000000..8212c3ee85 Binary files /dev/null and b/pkg/newlog/testdata/perf/fssaggver_overhead_and_cost.png differ diff --git a/pkg/newlog/testdata/perf/fssaggver_timing_results.csv b/pkg/newlog/testdata/perf/fssaggver_timing_results.csv new file mode 100644 index 0000000000..d6c4643c90 --- /dev/null +++ 
b/pkg/newlog/testdata/perf/fssaggver_timing_results.csv @@ -0,0 +1,9 @@ +Test Run,Average Secure Time (us),Average Non-Secure Time (us),Performance Cost (us),Performance Overhead (%) +10,5013,3774,1239,32.83 +20,7843,5485,2358,42.99 +30,9567,5650,3917,69.33 +40,12660,6073,6587,108.46 +50,16831,6591,10240,155.36 +100,23883,6563,17320,263.90 +200,32799,6018,26781,445.01 +500,46149,4913,41236,839.32 diff --git a/pkg/newlog/testdata/perf/plot.py b/pkg/newlog/testdata/perf/plot.py new file mode 100644 index 0000000000..6861c3c2c5 --- /dev/null +++ b/pkg/newlog/testdata/perf/plot.py @@ -0,0 +1,62 @@ +# Copyright (c) 2024 Zededa, Inc. +# SPDX-License-Identifier: Apache-2.0 + +import pandas as pd +import matplotlib.pyplot as plt + +def plot_fssagg_average_time(df, filename): + # Plot Average Secure Time and Average Non-Secure Time for each run + plt.figure(figsize=(10, 6)) + plt.plot(df['Test Run'], df['Average Secure Time (us)'], label='Average Secure Time (us)', marker='o') + plt.plot(df['Test Run'], df['Average Non-Secure Time (us)'], label='Average Non-Secure Time (us)', marker='o') + plt.xlabel('Test Run') + plt.ylabel('Time (us)') + plt.title('Average Secure and Non-Secure Time for Each Run') + plt.legend() + plt.grid(True) + plt.tight_layout() + plt.savefig(filename) + plt.close() + +def plot_fssagg_average_cost(df, filename): + # Create a figure for Performance Overhead and Performance Cost + fig, ax1 = plt.subplots(figsize=(10, 6)) + + # Plot Performance Overhead on the left y-axis + ax1.plot(df['Test Run'], df['Performance Overhead (%)'], label='Performance Overhead (%)', marker='o', color='red') + ax1.set_xlabel('Test Run') + ax1.set_ylabel('Performance Overhead (%)', color='red') + ax1.tick_params(axis='y', labelcolor='red') + ax1.grid(True) + + # Create a second y-axis for Performance Cost on the right + ax2 = ax1.twinx() + ax2.plot(df['Test Run'], df['Performance Cost (us)'], label='Performance Cost (us)', marker='x', color='blue') + ax2.set_ylabel('Performance Cost (us)', color='blue') + ax2.tick_params(axis='y', labelcolor='blue') + + # Title and legend + plt.title('Performance Overhead and Performance Cost for Each Run') + fig.tight_layout() # Adjust the layout to avoid overlap + ax1.legend(loc='upper left') + ax2.legend(loc='upper right') + + # Save the plot as PNG + plt.savefig(filename) + plt.close() + +file_path = 'fssaggver_cached_keys_10000_timing_results.csv' +df = pd.read_csv(file_path) +plot_fssagg_average_time(df, 'fssaggver_cached_keys_10000_average_secure_non_secure_time.png') +plot_fssagg_average_cost(df, 'fssaggver_cached_keys_10000_overhead_and_cost.png') + +file_path = 'fssaggver_timing_results.csv' +df = pd.read_csv(file_path) +plot_fssagg_average_time(df, 'fssaggver_average_secure_non_secure_time.png') +plot_fssagg_average_cost(df, 'fssaggver_overhead_and_cost.png') + + +file_path = 'fssaggsig_timing_results.csv' +df = pd.read_csv(file_path) +plot_fssagg_average_time(df, 'fssaggsig_average_secure_non_secure_time.png') +plot_fssagg_average_cost(df, 'fssaggsig_overhead_and_cost.png') diff --git a/pkg/pillar/cmd/vaultmgr/vaultmgr.go b/pkg/pillar/cmd/vaultmgr/vaultmgr.go index 12c1126572..031feba135 100644 --- a/pkg/pillar/cmd/vaultmgr/vaultmgr.go +++ b/pkg/pillar/cmd/vaultmgr/vaultmgr.go @@ -435,7 +435,7 @@ func handleVaultKeyFromControllerImpl(ctxArg interface{}, key string, } // Try unlocking the vault now, in case it is not yet unlocked log.Noticef("Vault is still locked, trying to unlock") - err = etpm.SealDiskKey(log, decryptedKey, etpm.DiskKeySealingPCRs) + 
err = etpm.TpmSealData(log, decryptedKey, etpm.DiskKeySealingPCRs, etpm.TpmSealedDiskPrivHdl, etpm.TpmSealedDiskPubHdl, true) if err != nil { log.Errorf("Failed to Seal key in TPM %v", err) return diff --git a/pkg/pillar/evetpm/tpm.go b/pkg/pillar/evetpm/tpm.go index 9e70602b7c..1410bce26d 100644 --- a/pkg/pillar/evetpm/tpm.go +++ b/pkg/pillar/evetpm/tpm.go @@ -643,8 +643,8 @@ func FetchSealedVaultKey(log *base.LogObject) ([]byte, error) { } //gain some knowledge about existing environment - sealedKeyPresent := isSealedKeyPresent() - legacyKeyPresent := isLegacyKeyPresent() + sealedKeyPresent := isSealedDiskKeyPresent() + legacyKeyPresent := isLegacyDiskKeyPresent() if !sealedKeyPresent && !legacyKeyPresent { log.Noticef("neither legacy nor sealed disk key present, generating a fresh key") @@ -669,7 +669,7 @@ func FetchSealedVaultKey(log *base.LogObject) ([]byte, error) { if err != nil { return nil, fmt.Errorf("GetRandom failed: %w", err) } - err = SealDiskKey(log, key, DiskKeySealingPCRs) + err = TpmSealData(log, key, DiskKeySealingPCRs, TpmSealedDiskPrivHdl, TpmSealedDiskPubHdl, true) if err != nil { return nil, fmt.Errorf("sealing the fresh disk key failed: %w", err) } @@ -696,7 +696,7 @@ func FetchSealedVaultKey(log *base.LogObject) ([]byte, error) { log.Noticef("try to convert the legacy key into a sealed key") - err = SealDiskKey(log, key, DiskKeySealingPCRs) + err = TpmSealData(log, key, DiskKeySealingPCRs, TpmSealedDiskPrivHdl, TpmSealedDiskPubHdl, true) if err != nil { return nil, fmt.Errorf("sealing the legacy disk key into TPM failed: %w", err) } @@ -707,7 +707,7 @@ func FetchSealedVaultKey(log *base.LogObject) ([]byte, error) { log.Noticef("sealed disk key present int TPM, about to unseal it") } //By this, we have a key sealed into TPM - key, err := UnsealDiskKey(DiskKeySealingPCRs) + key, err := TpmUnsealData(DiskKeySealingPCRs, TpmSealedDiskPrivHdl, TpmSealedDiskPubHdl, true) if err == nil { // be more verbose, lets celebrate log.Noticef("successfully unsealed the disk key from TPM") @@ -716,8 +716,8 @@ func FetchSealedVaultKey(log *base.LogObject) ([]byte, error) { return key, err } -// SealDiskKey seals key into TPM2.0, with provided PCRs -func SealDiskKey(log *base.LogObject, key []byte, pcrSel tpm2.PCRSelection) error { +// TpmSealData seals private date (a key) into TPM2.0, with provided PCRs +func TpmSealData(log *base.LogObject, key []byte, pcrSel tpm2.PCRSelection, privHdl, pubHdl tpmutil.Handle, saveTpmLogs bool) error { rw, err := tpm2.OpenTPM(TpmDevicePath) if err != nil { return err @@ -725,10 +725,10 @@ func SealDiskKey(log *base.LogObject, key []byte, pcrSel tpm2.PCRSelection) erro defer rw.Close() tpm2.NVUndefineSpace(rw, EmptyPassword, - tpm2.HandleOwner, TpmSealedDiskPubHdl) + tpm2.HandleOwner, pubHdl) tpm2.NVUndefineSpace(rw, EmptyPassword, - tpm2.HandleOwner, TpmSealedDiskPrivHdl) + tpm2.HandleOwner, privHdl) //Note on any abrupt power failure at this point, and the result of it //We should be ok, since the key supplied here can be @@ -759,68 +759,70 @@ func SealDiskKey(log *base.LogObject, key []byte, pcrSel tpm2.PCRSelection) erro // Define space in NV storage and clean up afterwards or subsequent runs will fail. 
if err := tpm2.NVDefineSpace(rw, tpm2.HandleOwner, - TpmSealedDiskPrivHdl, + privHdl, EmptyPassword, EmptyPassword, nil, tpm2.AttrOwnerWrite|tpm2.AttrOwnerRead, uint16(len(priv)), ); err != nil { - return fmt.Errorf("NVDefineSpace %v failed: %w", TpmSealedDiskPrivHdl, err) + return fmt.Errorf("NVDefineSpace %v failed: %w", privHdl, err) } // Write the private data - if err := tpm2.NVWrite(rw, tpm2.HandleOwner, TpmSealedDiskPrivHdl, + if err := tpm2.NVWrite(rw, tpm2.HandleOwner, privHdl, EmptyPassword, priv, 0); err != nil { - return fmt.Errorf("NVWrite %v failed: %w", TpmSealedDiskPrivHdl, err) + return fmt.Errorf("NVWrite %v failed: %w", privHdl, err) } // Define space in NV storage if err := tpm2.NVDefineSpace(rw, tpm2.HandleOwner, - TpmSealedDiskPubHdl, + pubHdl, EmptyPassword, EmptyPassword, nil, tpm2.AttrOwnerWrite|tpm2.AttrOwnerRead, uint16(len(public)), ); err != nil { - return fmt.Errorf("NVDefineSpace %v failed: %w", TpmSealedDiskPubHdl, err) + return fmt.Errorf("NVDefineSpace %v failed: %w", pubHdl, err) } // Write the public data - if err := tpm2.NVWrite(rw, tpm2.HandleOwner, TpmSealedDiskPubHdl, + if err := tpm2.NVWrite(rw, tpm2.HandleOwner, pubHdl, EmptyPassword, public, 0); err != nil { - return fmt.Errorf("NVWrite %v failed: %w", TpmSealedDiskPubHdl, err) + return fmt.Errorf("NVWrite %v failed: %w", pubHdl, err) } - // save a snapshot of current PCR values - if err := saveDiskKeySealingPCRs(); err != nil { - log.Warnf("saving snapshot of sealing PCRs failed: %s", err) - } + if saveTpmLogs { + // save a snapshot of current PCR values + if err := saveDiskKeySealingPCRs(); err != nil { + log.Warnf("saving snapshot of sealing PCRs failed: %s", err) + } - // In order to not lose the ability to diff and diagnose the issue, - // first backup the previous pair of logs (if any). This is needed because - // once the failing devices get connected to the controller to fetch the - // backup key, we end up here again and it'll override the MeasurementLogSealSuccess - // file content with current tpm measurement logs (which is same as the - // content of MeasurementLogSealFail). - if err := backupCopiedMeasurementLogs(); err != nil { - log.Warnf("copying previous snapshot of TPM event log failed: %s", err) - } + // In order to not lose the ability to diff and diagnose the issue, + // first backup the previous pair of logs (if any). This is needed because + // once the failing devices get connected to the controller to fetch the + // backup key, we end up here again and it'll override the MeasurementLogSealSuccess + // file content with current tpm measurement logs (which is same as the + // content of MeasurementLogSealFail). + if err := backupCopiedMeasurementLogs(); err != nil { + log.Warnf("copying previous snapshot of TPM event log failed: %s", err) + } - // fresh start, remove old copies of measurement logs. - removeCopiedMeasurementLogs() + // fresh start, remove old copies of measurement logs. + removeCopiedMeasurementLogs() - // save a copy of the current measurement log, this is also called - // if unseal fails to have copy when we fail to unlock the vault. - if err := copyMeasurementLog(measurementLogSealSuccess); err != nil { - log.Warnf("copying current TPM measurement log failed: %s", err) + // save a copy of the current measurement log, this is also called + // if unseal fails to have copy when we fail to unlock the vault. 
+ if err := copyMeasurementLog(measurementLogSealSuccess); err != nil { + log.Warnf("copying current TPM measurement log failed: %s", err) + } } return nil } -func isSealedKeyPresent() bool { +func isSealedDiskKeyPresent() bool { rw, err := tpm2.OpenTPM(TpmDevicePath) if err != nil { return false @@ -832,13 +834,13 @@ func isSealedKeyPresent() bool { return err == nil } -func isLegacyKeyPresent() bool { +func isLegacyDiskKeyPresent() bool { _, err := readDiskKey() return err == nil } -// UnsealDiskKey unseals key from TPM2.0 -func UnsealDiskKey(pcrSel tpm2.PCRSelection) ([]byte, error) { +// TpmUnsealData unseals private data (a key) from TPM2.0 +func TpmUnsealData(pcrSel tpm2.PCRSelection, privHdl, pubHdl tpmutil.Handle, saveTpmLogs bool) ([]byte, error) { rw, err := tpm2.OpenTPM(TpmDevicePath) if err != nil { return nil, err @@ -846,16 +848,16 @@ func UnsealDiskKey(pcrSel tpm2.PCRSelection) ([]byte, error) { defer rw.Close() // Read all of the data with NVReadEx - priv, err := tpm2.NVReadEx(rw, TpmSealedDiskPrivHdl, + priv, err := tpm2.NVReadEx(rw, privHdl, tpm2.HandleOwner, EmptyPassword, 0) if err != nil { - return nil, fmt.Errorf("NVReadEx %v failed: %w", TpmSealedDiskPrivHdl, err) + return nil, fmt.Errorf("NVReadEx %v failed: %w", privHdl, err) } // Read all of the data with NVReadEx - pub, err := tpm2.NVReadEx(rw, TpmSealedDiskPubHdl, + pub, err := tpm2.NVReadEx(rw, pubHdl, tpm2.HandleOwner, EmptyPassword, 0) if err != nil { - return nil, fmt.Errorf("NVReadEx %v failed: %w", TpmSealedDiskPubHdl, err) + return nil, fmt.Errorf("NVReadEx %v failed: %w", pubHdl, err) } sealedObjHandle, _, err := tpm2.Load(rw, TpmSRKHdl, "", pub, priv) @@ -872,13 +874,18 @@ func UnsealDiskKey(pcrSel tpm2.PCRSelection) ([]byte, error) { key, err := tpm2.UnsealWithSession(rw, session, sealedObjHandle, EmptyPassword) if err != nil { - // We get here mostly because of RCPolicyFail error, so first try to save - // a copy of TPM measurement log, it comes handy for diagnosing the issue. - evtLogStat := "copied (failed unseal) TPM measurement log" - if errEvtLog := copyMeasurementLog(measurementLogUnsealFail); errEvtLog != nil { - // just report the failure, still give findMismatchingPCRs a chance so - // we can at least have some partial information about why unseal failed. - evtLogStat = fmt.Sprintf("copying (failed unseal) TPM measurement log failed: %v", errEvtLog) + var evtLogStat string + if !saveTpmLogs { + evtLogStat = "TPM measurement log not copied (saveTpmLogs=false)" + } else { + // We get here mostly because of RCPolicyFail error, so first try to save + // a copy of TPM measurement log, it comes handy for diagnosing the issue. + evtLogStat = "copied (failed unseal) TPM measurement log" + if errEvtLog := copyMeasurementLog(measurementLogUnsealFail); errEvtLog != nil { + // just report the failure, still give findMismatchingPCRs a chance so + // we can at least have some partial information about why unseal failed. + evtLogStat = fmt.Sprintf("copying (failed unseal) TPM measurement log failed: %v", errEvtLog) + } } // try to find out the mismatching PCR index @@ -926,7 +933,7 @@ func PolicyPCRSession(rw io.ReadWriteCloser, pcrSel tpm2.PCRSelection) (tpmutil. 
// CompareLegacyandSealedKey compares legacy and sealed keys // to record if we are using a new key for sealed vault func CompareLegacyandSealedKey() SealedKeyType { - if !isSealedKeyPresent() { + if !isSealedDiskKeyPresent() { return SealedKeyTypeUnprotected } legacyKey, err := readDiskKey() @@ -934,7 +941,7 @@ func CompareLegacyandSealedKey() SealedKeyType { //no cloning case, return SealedKeyTypeNew return SealedKeyTypeNew } - unsealedKey, err := UnsealDiskKey(DiskKeySealingPCRs) + unsealedKey, err := TpmUnsealData(DiskKeySealingPCRs, TpmSealedDiskPrivHdl, TpmSealedDiskPubHdl, true) if err != nil { //key is present but can't unseal it //but legacy key is present diff --git a/pkg/pillar/evetpm/tpm_test.go b/pkg/pillar/evetpm/tpm_test.go index e6e7bcebbc..4b8d2ea5ec 100644 --- a/pkg/pillar/evetpm/tpm_test.go +++ b/pkg/pillar/evetpm/tpm_test.go @@ -50,7 +50,7 @@ func TestMain(m *testing.M) { func waitForTpmReadyState() error { for i := 0; i < 10; i++ { - if err := SealDiskKey(log, []byte("secret"), DiskKeySealingPCRs); err != nil { + if err := TpmSealData(log, []byte("secret"), DiskKeySealingPCRs, TpmSealedDiskPrivHdl, TpmSealedDiskPubHdl, true); err != nil { // this is RCRetry, so retry if strings.Contains(err.Error(), "code 0x22") { time.Sleep(100 * time.Millisecond) @@ -73,11 +73,11 @@ func TestSealUnseal(t *testing.T) { } dataToSeal := []byte("secret") - if err := SealDiskKey(log, dataToSeal, DiskKeySealingPCRs); err != nil { + if err := TpmSealData(log, dataToSeal, DiskKeySealingPCRs, TpmSealedDiskPrivHdl, TpmSealedDiskPubHdl, true); err != nil { t.Fatalf("Seal operation failed with err: %v", err) } - unsealedData, err := UnsealDiskKey(DiskKeySealingPCRs) + unsealedData, err := TpmUnsealData(DiskKeySealingPCRs, TpmSealedDiskPrivHdl, TpmSealedDiskPubHdl, true) if err != nil { t.Fatalf("Unseal operation failed with err: %v", err) } @@ -99,7 +99,7 @@ func TestSealUnsealMismatchReport(t *testing.T) { defer rw.Close() dataToSeal := []byte("secret") - if err := SealDiskKey(log, dataToSeal, DiskKeySealingPCRs); err != nil { + if err := TpmSealData(log, dataToSeal, DiskKeySealingPCRs, TpmSealedDiskPrivHdl, TpmSealedDiskPubHdl, true); err != nil { t.Fatalf("Seal operation failed with err: %v", err) } @@ -111,7 +111,7 @@ func TestSealUnsealMismatchReport(t *testing.T) { } } - _, err = UnsealDiskKey(DiskKeySealingPCRs) + _, err = TpmUnsealData(DiskKeySealingPCRs, TpmSealedDiskPrivHdl, TpmSealedDiskPubHdl, true) if err == nil { t.Fatalf("Expected error from UnsealDiskKey, got nil") } @@ -135,7 +135,7 @@ func TestSealUnsealTpmEventLogCollect(t *testing.T) { // this should write tpm event log to measurementLogSealSuccess file dataToSeal := []byte("secret") - if err := SealDiskKey(log, dataToSeal, DiskKeySealingPCRs); err != nil { + if err := TpmSealData(log, dataToSeal, DiskKeySealingPCRs, TpmSealedDiskPrivHdl, TpmSealedDiskPubHdl, true); err != nil { t.Fatalf("Seal operation failed with err: %v", err) } @@ -146,7 +146,7 @@ func TestSealUnsealTpmEventLogCollect(t *testing.T) { } // this should fail and result in saving creating measurementLogUnsealFail - _, err = UnsealDiskKey(DiskKeySealingPCRs) + _, err = TpmUnsealData(DiskKeySealingPCRs, TpmSealedDiskPrivHdl, TpmSealedDiskPubHdl, true) if err == nil { t.Fatalf("Expected error from UnsealDiskKey, got nil") } @@ -159,7 +159,7 @@ func TestSealUnsealTpmEventLogCollect(t *testing.T) { } // this should trigger backing up previously saved tpm event logs - if err := SealDiskKey(log, dataToSeal, DiskKeySealingPCRs); err != nil { + if err := 
TpmSealData(log, dataToSeal, DiskKeySealingPCRs, TpmSealedDiskPrivHdl, TpmSealedDiskPubHdl, true); err != nil { t.Fatalf("Seal operation failed with err: %v", err) }
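The evetpm changes above generalize the former SealDiskKey/UnsealDiskKey helpers into TpmSealData/TpmUnsealData, which take the NV index handles as parameters and a saveTpmLogs flag controlling whether TPM measurement-log snapshots are copied. A rough usage sketch, assuming the usual pillar import paths and with made-up NV index handles (nothing below is defined by this change), might look like:

```go
// Hypothetical usage sketch of the generalized sealing helpers; the NV index
// handles are placeholders invented for illustration, not values defined in EVE.
// saveTpmLogs is false because measurement-log snapshots only matter for the
// vault disk key.
package example

import (
	"github.com/google/go-tpm/tpmutil"

	"github.com/lf-edge/eve/pkg/pillar/base"
	etpm "github.com/lf-edge/eve/pkg/pillar/evetpm"
)

// Placeholder NV index handles for this sketch only.
const (
	examplePrivHdl = tpmutil.Handle(0x01500020)
	examplePubHdl  = tpmutil.Handle(0x01500021)
)

// sealAndUnseal seals an application secret against the same PCR selection
// used for the disk key and immediately reads it back.
func sealAndUnseal(log *base.LogObject, secret []byte) ([]byte, error) {
	if err := etpm.TpmSealData(log, secret, etpm.DiskKeySealingPCRs,
		examplePrivHdl, examplePubHdl, false); err != nil {
		return nil, err
	}
	return etpm.TpmUnsealData(etpm.DiskKeySealingPCRs, examplePrivHdl, examplePubHdl, false)
}
```

Keeping the handles as parameters lets future callers seal other secrets without touching the disk key's NV indices, while saveTpmLogs=false skips the measurement-log bookkeeping that only makes sense for the vault key.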