From 709b15fb65201085646559914f5ea39cea4a4243 Mon Sep 17 00:00:00 2001 From: Graham Clark Date: Sun, 3 Jan 2021 10:42:04 -0500 Subject: [PATCH] A reworking of the tshark pcap loaders This is an attempt to split the responsibilities of the loader from one giant struct into several structs with mostly distinct roles - there is still some coordination between them, of course. Furthermore, the original loader was re-used from load to load, so it was essentially a collection of global variables. Some of the consequences of this change are: - various UI handlers, invoked on loader events, are now run in the main UI goroutine. Every handler simply used app.Run() to make that happen anyway, so this is a simplification. These handlers are also run serially, not launched in parallel, guaranteeing an execution order. The exceptions are the error handlers - they still manually invoke app.Run() when needed. - UI handlers are also provided a simple enum to show the source of the invocation. This allows e.g. BeforeBegin() handlers to distinguish whether this is being called from the PSML loader, PDML loader, etc etc. This seemed simpler than multiplying the various handler interfaces (BeforeBeginPsml, BeforeBeginPdml, etc etc). - loaders only change state - Loading <-> NotLoading - in the main goroutine. This means I can safely query the loader state from anything else running in the main goroutine - no race condition. - Process lifetimes are managed from a dedicated goroutine per loader - so there is no race between termshark invoking kill on a tshark process and termshark calling Wait() on the same process. --- capinfo/loader.go | 14 +- cmd/termshark/termshark.go | 223 ++- convs/loader.go | 14 +- pcap/handlers.go | 73 +- pcap/loader.go | 3232 ++++++++++++++++++------------------ streams/loader.go | 20 +- ui/capinfoui.go | 36 +- ui/convscallbacks.go | 18 +- ui/convsui.go | 6 +- ui/prochandlers.go | 340 ++++ ui/streamui.go | 48 +- ui/ui.go | 605 ++----- 12 files changed, 2415 insertions(+), 2214 deletions(-) create mode 100644 ui/prochandlers.go diff --git a/capinfo/loader.go b/capinfo/loader.go index 45c32c8..120e1d2 100644 --- a/capinfo/loader.go +++ b/capinfo/loader.go @@ -126,7 +126,7 @@ func (c *Loader) loadCapinfoAsync(pcapf string, app gowid.IApp, cb ICapinfoCallb "command": c.capinfoCmd.String(), "error": err, }) - pcap.HandleError(cerr, cb) + pcap.HandleError(pcap.CapinfoCode, app, cerr, cb) } } @@ -154,7 +154,7 @@ func (c *Loader) loadCapinfoAsync(pcapf string, app gowid.IApp, cb ICapinfoCallb capinfoOut, err := c.capinfoCmd.StdoutReader() if err != nil { - pcap.HandleError(err, cb) + pcap.HandleError(pcap.CapinfoCode, app, err, cb) return } @@ -162,15 +162,19 @@ func (c *Loader) loadCapinfoAsync(pcapf string, app gowid.IApp, cb ICapinfoCallb cb.AfterCapinfoEnd(true) }() - pcap.HandleBegin(cb) + app.Run(gowid.RunFunction(func(app gowid.IApp) { + pcap.HandleBegin(pcap.CapinfoCode, app, cb) + })) defer func() { - pcap.HandleEnd(cb) + app.Run(gowid.RunFunction(func(app gowid.IApp) { + pcap.HandleEnd(pcap.CapinfoCode, app, cb) + })) }() err = c.capinfoCmd.Start() if err != nil { err = fmt.Errorf("Error starting capinfo %v: %v", c.capinfoCmd, err) - pcap.HandleError(err, cb) + pcap.HandleError(pcap.CapinfoCode, app, err, cb) return } diff --git a/cmd/termshark/termshark.go b/cmd/termshark/termshark.go index d23635f..123a83b 100644 --- a/cmd/termshark/termshark.go +++ b/cmd/termshark/termshark.go @@ -691,17 +691,6 @@ func cmain() int { log.Infof("Config specifies pcap-bundle-size as %d - setting to max (%d)", 
bundleSize, maxBundleSize) bundleSize = maxBundleSize } - ui.PcapScheduler = pcap.NewScheduler( - pcap.MakeCommands(opts.DecodeAs, tsharkArgs, pdmlArgs, psmlArgs, ui.PacketColors), - pcap.Options{ - CacheSize: cacheSize, - PacketsPerLoad: bundleSize, - }, - ) - ui.Loader = ui.PcapScheduler.Loader - - // Buffered because I might send something in this goroutine - startUIChan := make(chan struct{}, 1) var ifaceTmpFile string @@ -715,7 +704,9 @@ func cmain() int { fmt.Printf("(The termshark UI will start when packets are detected...)\n") } else { // Start UI right away, reading from a file - startUIChan <- struct{}{} + close(ui.StartUIChan) + } + // Need to figure out possible changes to COLORTERM before creating the // tcell screen. Note that even though apprunner.Start() below will create // a new screen, it will use a terminfo that it constructed the first time @@ -777,6 +768,16 @@ func cmain() int { appRunner := app.Runner() + pcap.PcapCmds = pcap.MakeCommands(opts.DecodeAs, tsharkArgs, pdmlArgs, psmlArgs, ui.PacketColors) + pcap.PcapOpts = pcap.Options{ + CacheSize: cacheSize, + PacketsPerLoad: bundleSize, + } + + // This is a global. The type supports swapping out the real loader by embedding it via + // pointer, but I assume this only happens in the main goroutine. + ui.Loader = &pcap.PacketLoader{ParentLoader: pcap.NewPcapLoader(pcap.PcapCmds, &pcap.Runner{app}, pcap.PcapOpts)} + // Populate the filter widget initially - runs asynchronously go ui.FilterWidget.UpdateCompletions(app) @@ -816,8 +817,6 @@ func cmain() int { } validator.Valid = &filter.ValidateCB{Fn: doit, App: app} validator.Validate(displayFilter) - // no auto-scroll when reading a file - ui.AutoScroll = false } else { // Verifies whether or not we will be able to read from the interface (hopefully) @@ -843,29 +842,22 @@ func cmain() int { ui.FilterWidget.SetValue(displayFilter, app) })) ifacePcapFilename = ifaceTmpFile - ui.PcapScheduler.RequestLoadInterfaces(psrcs, captureFilter, displayFilter, ifaceTmpFile, - pcap.HandlerList{ - &ui.SignalPackets{C: startUIChan}, - ui.MakeSaveRecents("", displayFilter, app), - ui.MakePacketViewUpdater(app), - ui.MakeUpdateCurrentCaptureInTitle(app), - ui.ManageStreamCache{}, - }, - ) + ui.RequestLoadInterfaces(psrcs, captureFilter, displayFilter, ifaceTmpFile, app) } validator.Valid = &filter.ValidateCB{Fn: ifValid, App: app} validator.Validate(displayFilter) } quitIssuedToApp := false - prevstate := ui.Loader.State() - var prev float64 - progTicker := time.NewTicker(time.Duration(200) * time.Millisecond) + wasLoadingPdmlLastTime := ui.Loader.PdmlLoader.IsLoading() + wasLoadingAnythingLastTime := ui.Loader.LoadingAnything() - loaderPsmlFinChan := ui.Loader.PsmlFinishedChan - loaderIfaceFinChan := ui.Loader.IfaceFinishedChan - loaderPdmlFinChan := ui.Loader.Stage2FinishedChan + // Keep track of this across runs of the main loop so we don't go backwards (because + // that looks wrong to the user) + var prevProgPercentage float64 + + progTicker := time.NewTicker(time.Duration(200) * time.Millisecond) ctrlzLineDisc := tty.TerminalSignals{} @@ -877,25 +869,21 @@ func cmain() int { if ui.StreamLoader != nil { ui.StreamLoader.SuppressErrors = true } - ui.Loader.SuppressErrors = true - ui.Loader.Close() + ui.Loader.CloseMain() } inactiveDuration := 30 * time.Second inactivityTimer := time.NewTimer(inactiveDuration) + var progCancelTimer *time.Timer + Loop: for { var finChan <-chan time.Time - var opsChan <-chan pcap.RunFn var tickChan <-chan time.Time var inactivityChan <-chan time.Time - var 
emptyStructViewChan <-chan time.Time - var emptyHexViewChan <-chan time.Time - var psmlFinChan <-chan struct{} - var ifaceFinChan <-chan struct{} - var pdmlFinChan <-chan struct{} var tcellEvents <-chan tcell.Event + var opsChan <-chan gowid.RunFunction var afterRenderEvents <-chan gowid.IAfterRenderEvent // For setting struct views empty. This isn't done as soon as a load is initiated because // in the case we are loading from an interface and following new packets, we get an ugly @@ -905,17 +893,14 @@ Loop: // beginning to get new packets). Waiting 500ms to display loading gives enough time, in // practice, - if ui.EmptyStructViewTimer != nil { - emptyStructViewChan = ui.EmptyStructViewTimer.C - } - // For setting hex views empty - if ui.EmptyHexViewTimer != nil { - emptyHexViewChan = ui.EmptyHexViewTimer.C + // On change of state - check for new pdml requests + if ui.Loader.PdmlLoader.IsLoading() != wasLoadingPdmlLastTime { + ui.CacheRequestsChan <- struct{}{} } // This should really be moved to a handler... - if ui.Loader.State() == 0 { - if prevstate != 0 { + if !ui.Loader.LoadingAnything() { + if wasLoadingAnythingLastTime { // If the state has just switched to 0, it means no interface-reading process is // running. That means we will no longer be reading from an interface or a fifo, so // we point the loader at the file we wrote to the cache, and redirect all @@ -925,13 +910,17 @@ Loop: ui.ClearProgressWidget(app) ui.SetProgressDeterminate(app) // always switch back - for pdml (partial) loads of later data. })) + // When the progress bar is enabled, track the previous percentage reached. This is // so that I don't go "backwards" if I generate a progress value less than the last // one, using the current algorithm (because it would be confusing to see it go // backwards) - prev = 0.0 + prevProgPercentage = 0.0 } + // EnableOpsVar will be enabled when all the handlers have run, which happen in the main goroutine. + // I need them to run because the loader channel is closed in one, and the ticker goroutines + // don't terminate until these goroutines stop if ui.QuitRequested { if ui.Running { if !quitIssuedToApp { @@ -945,29 +934,20 @@ Loop: } } - if ui.Loader.State()&(pcap.LoadingPdml|pcap.LoadingPsml) != 0 { + // Only display the progress bar if PSML is loading or if PDML is loading that is needed + // by the UI. If the PDML is an optimistic load out of the display, then no need for + // progress. + if ui.Loader.PsmlLoader.IsLoading() || (ui.Loader.PdmlLoader.IsLoading() && ui.Loader.PdmlLoader.LoadIsVisible()) { tickChan = progTicker.C // progress is only enabled when a pcap may be loading + } else { + // Reset for the next load + prevProgPercentage = 0.0 } - if ui.Loader.State()&pcap.LoadingPdml != 0 { - pdmlFinChan = loaderPdmlFinChan - } - - if ui.Loader.State()&pcap.LoadingPsml != 0 { - psmlFinChan = loaderPsmlFinChan - } - - if ui.Loader.State()&pcap.LoadingIface != 0 { - ifaceFinChan = loaderIfaceFinChan + if ui.Loader.InterfaceLoader.IsLoading() { inactivityChan = inactivityTimer.C } - // (User) operations are enabled by default (the test predicate is nil), or if the predicate returns true - // meaning the operation has reached its desired state. Only one operation can be in progress at a time. - if ui.PcapScheduler.IsEnabled() { - opsChan = ui.PcapScheduler.OperationsChan - } - // Only process tcell and gowid events if the UI is running. 
if ui.Running { tcellEvents = app.TCellEvents @@ -977,9 +957,19 @@ Loop: finChan = ui.Fin.C() } + // For operations like ClearPcap - need previous loads to be fully finished first. The operations + // channel is enabled until an operation starts, then disabled until the operation re-enables it + // via a handler. + // + // Make sure state doesn't change until all handlers have been run + if !ui.Loader.PdmlLoader.IsLoading() && !ui.Loader.PsmlLoader.IsLoading() { + opsChan = pcap.OpsChan + } + afterRenderEvents = app.AfterRenderEvents - prevstate = ui.Loader.State() + wasLoadingPdmlLastTime = ui.Loader.PdmlLoader.IsLoading() + wasLoadingAnythingLastTime = ui.Loader.LoadingAnything() select { @@ -991,7 +981,7 @@ Loop: ui.Fin.Advance() app.Redraw() - case <-startUIChan: + case <-ui.StartUIChan: log.Infof("Launching termshark UI") // Go to termshark UI view @@ -1035,7 +1025,7 @@ Loop: ui.Running = true startedSuccessfully = true - startUIChan = nil // make sure it's not triggered again + ui.StartUIChan = nil // make sure it's not triggered again defer func() { // Do this to make sure the program quits quickly if quit is invoked @@ -1052,9 +1042,13 @@ Loop: ui.Running = false }() + case fn := <-opsChan: + app.Run(fn) + case <-ui.QuitRequestedChan: ui.QuitRequested = true - if ui.Loader.State() != 0 { + // Without this, a quit during a pcap load won't happen until the load is finished + if ui.Loader.LoadingAnything() { // We know we're not idle, so stop any load so the quit op happens quickly for the user. Quit // will happen next time round because the quitRequested flag is checked. stopLoaders() @@ -1121,79 +1115,56 @@ Loop: ui.RequestQuit() } - case fn := <-opsChan: - // We run the requested operation - because operations are now enabled, since this channel - // is listening - and the result tells us when operations can be re-enabled (i.e. the target - // state of the operation just started, for example). This means we can let an operation - // "complete", moving through a sequence of states to the final state, before accepting - // another request. - fn() - case <-ui.CacheRequestsChan: - ui.CacheRequests = pcap.ProcessPdmlRequests(ui.CacheRequests, ui.Loader, - struct { - ui.SetNewPdmlRequests - ui.SetStructWidgets - }{ - ui.SetNewPdmlRequests{ui.PcapScheduler}, - ui.SetStructWidgets{ui.Loader, app}, - }) + ui.CacheRequests = pcap.ProcessPdmlRequests(ui.CacheRequests, + ui.Loader.ParentLoader, ui.Loader.PdmlLoader, ui.SetStructWidgets{ui.Loader}, app) - case <-ifaceFinChan: - // this state change only happens if the load from the interface is explicitly - // stopped by the user (e.g. the stop button). When the current data has come - // from loading from an interface, when stopped we still want to be able to filter - // on that data. So the load routines should treat it like a regular pcap - // (until the interface is started again). That means the psml reader should read - // from the file and not the fifo. - loaderIfaceFinChan = ui.Loader.IfaceFinishedChan - ui.Loader.SetState(ui.Loader.State() & ^pcap.LoadingIface) - - case <-psmlFinChan: - if ui.Loader.LoadWasCancelled { - // Don't reset cancel state here. If, after stopping an interface load, I - // apply a filter, I need to know if the load was cancelled previously because - // if it was cancelled, I need to load from the temp pcap; if not cancelled, - // (meaning still running), then I just apply a new filter and have the pcap - // reader read from the fifo. 
Only do this if the user isn't quitting the app, - // otherwise it looks clumsy. - if !ui.QuitRequested { + case <-tickChan: + // We already know that we are LoadingPdml|LoadingPsml + ui.SetProgressWidget(app) + if progCancelTimer != nil { + progCancelTimer.Reset(time.Duration(500) * time.Millisecond) + } else { + progCancelTimer = time.AfterFunc(time.Duration(500)*time.Millisecond, func() { app.Run(gowid.RunFunction(func(app gowid.IApp) { - ui.OpenError("Loading was cancelled.", app) + ui.ClearProgressWidget(app) })) - } + }) } - // Reset - loaderPsmlFinChan = ui.Loader.PsmlFinishedChan - ui.Loader.SetState(ui.Loader.State() & ^pcap.LoadingPsml) - case <-pdmlFinChan: - loaderPdmlFinChan = ui.Loader.Stage2FinishedChan - ui.Loader.SetState(ui.Loader.State() & ^pcap.LoadingPdml) + // Rule: + // - prefer progress if we can apply it to psml only (not pdml) + // - otherwise use a spinner if interface load or fifo load in operation + // - otherwise use progress for pdml + doprog := false + if system.HaveFdinfo { + // Prefer progress, if the OS supports it. + doprog = true + if ui.Loader.ReadingFromFifo() { + // But if we are have an interface load (or a pipe load), then we can't + // predict when the data will run out, so use a spinner. That's because we + // feed the data to tshark -T psml with a tail command which reads from + // the tmp file being created by the pipe/interface source. + doprog = false + if !ui.Loader.InterfaceLoader.IsLoading() && !ui.Loader.PsmlLoader.IsLoading() { + // Unless those loads are finished, and the only loading activity is now + // PDML/pcap, which is loaded on demand in blocks of 1000. Then we can + // use the progress bar. + doprog = true + } + } + } - case <-tickChan: - if system.HaveFdinfo && (ui.Loader.State() == pcap.LoadingPdml || !ui.Loader.ReadingFromFifo()) { + if doprog { app.Run(gowid.RunFunction(func(app gowid.IApp) { - prev = ui.UpdateProgressBarForFile(ui.Loader, prev, app) + prevProgPercentage = ui.UpdateProgressBarForFile(ui.Loader, prevProgPercentage, app) })) } else { app.Run(gowid.RunFunction(func(app gowid.IApp) { - ui.UpdateProgressBarForInterface(ui.Loader, app) + ui.UpdateProgressBarForInterface(ui.Loader.InterfaceLoader, app) })) } - case <-emptyStructViewChan: - app.Run(gowid.RunFunction(func(app gowid.IApp) { - ui.SetStructViewMissing(app) - ui.StopEmptyStructViewTimer() - })) - - case <-emptyHexViewChan: - app.Run(gowid.RunFunction(func(app gowid.IApp) { - ui.SetHexViewMissing(app) - ui.StopEmptyHexViewTimer() - })) - case ev := <-tcellEvents: app.HandleTCellEvent(ev, gowid.IgnoreUnhandledInput) inactivityTimer.Reset(inactiveDuration) diff --git a/convs/loader.go b/convs/loader.go index 8fcd865..02d2214 100644 --- a/convs/loader.go +++ b/convs/loader.go @@ -138,7 +138,7 @@ func (c *Loader) loadConvAsync(pcapf string, convs []string, filter string, abs "command": c.convsCmd.String(), "error": err, }) - pcap.HandleError(cerr, cb) + pcap.HandleError(pcap.ConvCode, app, cerr, cb) } } @@ -166,7 +166,7 @@ func (c *Loader) loadConvAsync(pcapf string, convs []string, filter string, abs convsOut, err := c.convsCmd.StdoutReader() if err != nil { - pcap.HandleError(err, cb) + pcap.HandleError(pcap.ConvCode, app, err, cb) return } @@ -174,15 +174,19 @@ func (c *Loader) loadConvAsync(pcapf string, convs []string, filter string, abs cb.AfterDataEnd(true) }() - pcap.HandleBegin(cb) + app.Run(gowid.RunFunction(func(app gowid.IApp) { + pcap.HandleBegin(pcap.ConvCode, app, cb) + })) defer func() { - pcap.HandleEnd(cb) + 
app.Run(gowid.RunFunction(func(app gowid.IApp) { + pcap.HandleEnd(pcap.ConvCode, app, cb) + })) }() err = c.convsCmd.Start() if err != nil { err = fmt.Errorf("Error starting %v: %v", c.convsCmd, err) - pcap.HandleError(err, cb) + pcap.HandleError(pcap.ConvCode, app, err, cb) return } diff --git a/pcap/handlers.go b/pcap/handlers.go index e113dc5..fc50bbf 100644 --- a/pcap/handlers.go +++ b/pcap/handlers.go @@ -4,30 +4,45 @@ package pcap +import "github.com/gcla/gowid" + //====================================================================== +type HandlerCode int + +const ( + NoneCode HandlerCode = 1 << iota + PdmlCode + PsmlCode + TailCode + IfaceCode + ConvCode + StreamCode + CapinfoCode +) + type IClear interface { - OnClear() + OnClear(code HandlerCode, app gowid.IApp) } type INewSource interface { - OnNewSource() + OnNewSource(code HandlerCode, app gowid.IApp) } type IOnError interface { - OnError(err error) + OnError(code HandlerCode, app gowid.IApp, err error) } type IBeforeBegin interface { - BeforeBegin() + BeforeBegin(code HandlerCode, app gowid.IApp) } type IAfterEnd interface { - AfterEnd() + AfterEnd(code HandlerCode, app gowid.IApp) } type IPsmlHeader interface { - OnPsmlHeader() + OnPsmlHeader(code HandlerCode, app gowid.IApp) } type IUnpack interface { @@ -40,81 +55,81 @@ func (h HandlerList) Unpack() []interface{} { return h } -type unpackedHandlerFunc func(interface{}) bool +type unpackedHandlerFunc func(HandlerCode, gowid.IApp, interface{}) bool -func HandleUnpack(cb interface{}, handler unpackedHandlerFunc) bool { +func HandleUnpack(code HandlerCode, cb interface{}, handler unpackedHandlerFunc, app gowid.IApp) bool { if c, ok := cb.(IUnpack); ok { handlers := c.Unpack() for _, cb := range handlers { - handler(cb) // will wait on channel if it has to, doesn't matter if not + handler(code, app, cb) // will wait on channel if it has to, doesn't matter if not } return true } return false } -func HandleBegin(cb interface{}) bool { +func HandleBegin(code HandlerCode, app gowid.IApp, cb interface{}) bool { res := false - if !HandleUnpack(cb, HandleBegin) { + if !HandleUnpack(code, cb, HandleBegin, app) { if c, ok := cb.(IBeforeBegin); ok { - c.BeforeBegin() + c.BeforeBegin(code, app) res = true } } return res } -func HandleEnd(cb interface{}) bool { +func HandleEnd(code HandlerCode, app gowid.IApp, cb interface{}) bool { res := false - if !HandleUnpack(cb, HandleEnd) { + if !HandleUnpack(code, cb, HandleEnd, app) { if c, ok := cb.(IAfterEnd); ok { - c.AfterEnd() + c.AfterEnd(code, app) res = true } } return res } -func HandleError(err error, cb interface{}) bool { +func HandleError(code HandlerCode, app gowid.IApp, err error, cb interface{}) bool { res := false - if !HandleUnpack(cb, func(cb2 interface{}) bool { - return HandleError(err, cb2) - }) { + if !HandleUnpack(code, cb, func(code HandlerCode, app gowid.IApp, cb2 interface{}) bool { + return HandleError(code, app, err, cb2) + }, app) { if ec, ok := cb.(IOnError); ok { - ec.OnError(err) + ec.OnError(code, app, err) res = true } } return res } -func handlePsmlHeader(cb interface{}) bool { +func handlePsmlHeader(code HandlerCode, app gowid.IApp, cb interface{}) bool { res := false - if !HandleUnpack(cb, handlePsmlHeader) { + if !HandleUnpack(code, cb, handlePsmlHeader, app) { if c, ok := cb.(IPsmlHeader); ok { - c.OnPsmlHeader() + c.OnPsmlHeader(code, app) res = true } } return res } -func handleClear(cb interface{}) bool { +func handleClear(code HandlerCode, app gowid.IApp, cb interface{}) bool { res := false - if 
!HandleUnpack(cb, handleClear) { + if !HandleUnpack(code, cb, handleClear, app) { if c, ok := cb.(IClear); ok { - c.OnClear() + c.OnClear(code, app) res = true } } return res } -func handleNewSource(cb interface{}) bool { +func handleNewSource(code HandlerCode, app gowid.IApp, cb interface{}) bool { res := false - if !HandleUnpack(cb, handleNewSource) { + if !HandleUnpack(code, cb, handleNewSource, app) { if c, ok := cb.(INewSource); ok { - c.OnNewSource() + c.OnNewSource(code, app) res = true } } diff --git a/pcap/loader.go b/pcap/loader.go index 7ce1060..8cb24cc 100644 --- a/pcap/loader.go +++ b/pcap/loader.go @@ -18,9 +18,7 @@ import ( "strings" "sync" "sync/atomic" - "time" - "github.com/gcla/deep" "github.com/gcla/gowid" "github.com/gcla/gowid/gwutil" "github.com/gcla/termshark/v2" @@ -32,49 +30,41 @@ import ( //====================================================================== -var Goroutinewg *sync.WaitGroup +var PcapCmds ILoaderCmds +var PcapOpts Options -type RunFn func() -type whenFn func() bool +var OpsChan chan gowid.RunFunction -type runFnInState struct { - when whenFn - doit RunFn +func init() { + OpsChan = make(chan gowid.RunFunction, 100) } //====================================================================== -type LoaderState int +var Goroutinewg *sync.WaitGroup + +type RunFn func() + +//====================================================================== + +type LoaderState bool const ( - LoadingPsml LoaderState = 1 << iota // pcap+pdml might be finished, but this is what was initiated - LoadingPdml // from a cache request - LoadingIface // copying from iface to temp pcap + NotLoading LoaderState = false + Loading LoaderState = true ) -func (c *Loader) State() LoaderState { - return c.state +func (t LoaderState) String() string { + if t { + return "loading" + } else { + return "not-loading" + } } -type ProcessState int +//====================================================================== -// Repeatedly go back to the start if anything is triggered. -// Call only on the main thread (running the select loop) -func (c *Loader) SetState(st LoaderState) { - c.state = st -Outer: - for { - Inner: - for i, sc := range c.onStateChange { - if sc.when() { - c.onStateChange = append(c.onStateChange[:i], c.onStateChange[i+1:]...) 
- sc.doit() - break Inner - } - } - break Outer - } -} +type ProcessState int const ( NotStarted ProcessState = 0 @@ -95,24 +85,6 @@ func (p ProcessState) String() string { } } -func (t LoaderState) String() string { - s := make([]string, 0, 3) - if t&LoadingPsml != 0 { - s = append(s, "psml") - } - if t&LoadingPdml != 0 { - s = append(s, "pdml") - } - if t&LoadingIface != 0 { - s = append(s, "iface") - } - if len(s) == 0 { - return fmt.Sprintf("idle") - } else { - return strings.Join(s, "+") - } -} - //====================================================================== type IBasicCommand interface { @@ -142,77 +114,121 @@ type ILoaderCmds interface { Pdml(pcap string, displayFilter string) IPcapCommand } -type Loader struct { - cmds ILoaderCmds +//====================================================================== - state LoaderState // which pieces are currently loading +// PacketLoader supports swapping out loaders +type PacketLoader struct { + *ParentLoader +} + +func (c *PacketLoader) Renew() { + if c.ParentLoader != nil { + c.ParentLoader.CloseMain() + } + c.ParentLoader = NewPcapLoader(c.ParentLoader.cmds, c.runner, c.ParentLoader.opt) +} + +type ParentLoader struct { + // Note that a nil InterfaceLoader implies this loader is not handling a "live" packet source + *InterfaceLoader // these are only replaced from the main goroutine, so no lock needed + *PsmlLoader + *PdmlLoader + + cmds ILoaderCmds - SuppressErrors bool // true if loader is in a transient state due to a user operation e.g. stop, reload, etc + tailStoppedDeliberately bool // true if tail is stopped because its packet feed has run out psrcs []IPacketSource // The canonical struct for the loader's current packet source. - ifaceFile string // The temp pcap file that is created by reading from the interface displayFilter string captureFilter string + ifaceFile string // shared between InterfaceLoader and PsmlLoader - to preserve and feed packets + + mainCtx context.Context // cancelling this cancels the dependent contexts - used to close whole loader. + mainCancelFn context.CancelFunc + + loadWasCancelled bool // True if the last load (iface or file) was halted by the stop button or ctrl-c + + runner IMainRunner + opt Options // held only to pass to the PDML and PSML loaders when renewed +} + +type InterfaceLoader struct { + state LoaderState + + ifaceCtx context.Context // cancels the iface reader process + ifaceCancelFn context.CancelFunc + + ifaceCmd IBasicCommand + + sync.Mutex + // set by the iface procedure when it has finished e.g. the pipe to the fifo has finished, the + // iface process has been killed, etc. This tells the psml-reading procedure when it should stop i.e. + // when this many bytes have passed through. + totalFifoBytesWritten gwutil.Int64Option + totalFifoBytesRead gwutil.Int64Option + fifoError error +} + +type PsmlLoader struct { + state LoaderState // which pieces are currently loading + PcapPsml interface{} // Pcap file source for the psml reader - fifo if iface+!stopped; tmpfile if iface+stopped; pcap otherwise - PcapPdml string // Pcap file source for the pdml reader - tmpfile if iface; pcap otherwise - PcapPcap string // Pcap file source for the pcap reader - tmpfile if iface; pcap otherwise - - mainCtx context.Context // cancelling this cancels the dependent contexts - used to close whole loader. - mainCancelFn context.CancelFunc - thisSrcCtx context.Context // cancelling this cancels the dependent contexts - used to stop current load. 
- thisSrcCancelFn context.CancelFunc - psmlCtx context.Context // cancels the psml loading process - psmlCancelFn context.CancelFunc - stage2Ctx context.Context // cancels the pcap/pdml loading process - stage2CancelFn context.CancelFunc - ifaceCtx context.Context // cancels the iface reader process - ifaceCancelFn context.CancelFunc - - //stage1Wg sync.WaitGroup - stage2Wg sync.WaitGroup + + psmlStoppedDeliberately_ bool // true if loader is in a transient state due to a user operation e.g. stop, reload, etc + + psmlCtx context.Context // cancels the psml loading process + psmlCancelFn context.CancelFunc + tailCtx context.Context // cancels the tail reader process (if iface in operation) + tailCancelFn context.CancelFunc // Signalled when the psml is fully loaded (or already loaded) - to tell // the pdml and pcap reader goroutines to start - they can then map table // row -> frame number - StartStage2Chan chan struct{} - // Signalled to start the pdml reader. Will start concurrent with psml if - // psml loaded already or if filter == "" (then table row == frame number) - startPdmlChan chan struct{} - startPcapChan chan struct{} + startStage2Chan chan struct{} - PsmlFinishedChan chan struct{} // closed when entire psml load process is done - Stage2FinishedChan chan struct{} // closed when entire pdml+pcap load process is done - IfaceFinishedChan chan struct{} // closed when interface reader process has shut down (e.g. stopped) + PsmlFinishedChan chan struct{} // closed when entire psml load process is done - ifaceCmd IBasicCommand - tailCmd ITailCommand - PsmlCmd IPcapCommand - PdmlPid int // 0 if process not started - PcapPid int // 0 if process not started + tailCmd ITailCommand + PsmlCmd IPcapCommand // gcla later todo - change to pid like PdmlPid sync.Mutex - PacketPsmlData [][]string - PacketPsmlColors []PacketColors - PacketPsmlHeaders []string + packetPsmlData [][]string + packetPsmlColors []PacketColors + packetPsmlHeaders []string PacketNumberMap map[int]int // map from actual packet row
12
to pos in unsorted table // This would be affected by a display filter e.g. packet 12 might be the 1st packet in the table. // I need this so that if the user jumps to a mark stored as "packet 12", I can find the right table row. - PacketCache *lru.Cache // i -> [pdml(i * 1000)..pdml(i+1*1000)] + PacketCache *lru.Cache // i -> [pdml(i * 1000)..pdml(i+1*1000)] - accessed from any goroutine + + opt Options +} - onStateChange []runFnInState +type PdmlLoader struct { + state LoaderState // which pieces are currently loading - LoadWasCancelled bool // True if the last load (iface or file) was halted by the stop button - RowCurrentlyLoading int // set by the pdml loading stage - highestCachedRow int - KillAfterReadingThisMany int // A shortcut - tell pcap/pdml to read one + PcapPdml string // Pcap file source for the pdml reader - tmpfile if iface; pcap otherwise + PcapPcap string // Pcap file source for the pcap reader - tmpfile if iface; pcap otherwise - // set by the iface procedure when it has finished e.g. the pipe to the fifo has finished, the - // iface process has been killed, etc. This tells the psml-reading procedure when it should stop i.e. - // when this many bytes have passed through. - totalFifoBytesWritten gwutil.Int64Option - totalFifoBytesRead gwutil.Int64Option - fifoError error + pdmlStoppedDeliberately_ bool // true if loader is in a transient state due to a user operation e.g. stop, reload, etc + + stage2Ctx context.Context // cancels the pcap/pdml loading process + stage2CancelFn context.CancelFunc + + stage2Wg sync.WaitGroup + + startChan chan struct{} + + Stage2FinishedChan chan struct{} // closed when entire pdml+pcap load process is done + + PdmlPid int // 0 if process not started + PcapPid int // 0 if process not started + + sync.Mutex + visible bool // true if this pdml load is needed right now by the UI + rowCurrentlyLoading int // set by the pdml loading stage - main goroutine only + highestCachedRow int // main goroutine only + KillAfterReadingThisMany int // A shortcut - tell pcap/pdml to read one - no lock worked out yet opt Options } @@ -227,7 +243,42 @@ type Options struct { PacketsPerLoad int } -func NewPcapLoader(cmds ILoaderCmds, opts ...Options) *Loader { +type iLoaderEnv interface { + Commands() ILoaderCmds + MainRun(fn gowid.RunFunction) + Context() context.Context +} + +type iPsmlLoaderEnv interface { + iLoaderEnv + iTailCommand + PsmlStoppedDeliberately() bool + TailStoppedDeliberately() bool + LoadWasCancelled() bool + DisplayFilter() string + InterfaceFile() string + PacketSources() []IPacketSource +} + +// IMainRunner is implemented by a type that runs a closure on termshark's main loop +// (via gowid's App.Run) +type IMainRunner interface { + Run(fn gowid.RunFunction) +} + +type Runner struct { + gowid.IApp +} + +var _ IMainRunner = (*Runner)(nil) + +func (a *Runner) Run(fn gowid.RunFunction) { + a.IApp.Run(fn) +} + +//====================================================================== + +func NewPcapLoader(cmds ILoaderCmds, runner IMainRunner, opts ...Options) *ParentLoader { var opt Options if len(opts) > 0 { opt = opts[0] @@ -242,30 +293,37 @@ func NewPcapLoader(cmds ILoaderCmds, opts ...Options) *Loader { opt.PacketsPerLoad = 100 // minimum } - res := &Loader{ - cmds: cmds, - IfaceFinishedChan: make(chan struct{}), - PsmlFinishedChan: make(chan struct{}), - Stage2FinishedChan: make(chan struct{}), - onStateChange: make([]runFnInState, 0), - RowCurrentlyLoading: -1, - highestCachedRow: -1, - opt: opt, + res := &ParentLoader{ + PsmlLoader: 
&PsmlLoader{}, // so default fields are set and XmlLoader is not nil + PdmlLoader: &PdmlLoader{ + opt: opt, + }, + cmds: cmds, + runner: runner, + opt: opt, } - res.resetData() res.mainCtx, res.mainCancelFn = context.WithCancel(context.Background()) + res.RenewPsmlLoader() + res.RenewPdmlLoader() + return res } -func (c *Loader) resetData() { - c.Lock() - defer c.Unlock() - c.PacketPsmlData = make([][]string, 0) - c.PacketPsmlColors = make([]PacketColors, 0) - c.PacketPsmlHeaders = make([]string, 0, 10) - c.PcapPdml = "" +func (c *ParentLoader) RenewPsmlLoader() { + c.PsmlLoader = &PsmlLoader{ + PcapPsml: c.PsmlLoader.PcapPsml, + tailCmd: c.PsmlLoader.tailCmd, + PsmlCmd: c.PsmlLoader.PsmlCmd, + packetPsmlData: make([][]string, 0), + packetPsmlColors: make([]PacketColors, 0), + packetPsmlHeaders: make([]string, 0, 10), + PacketNumberMap: make(map[int]int), + startStage2Chan: make(chan struct{}), // do this before signalling start + PsmlFinishedChan: make(chan struct{}), + opt: c.opt, + } packetCache, err := lru.New(c.opt.CacheSize) if err != nil { log.Fatal(err) @@ -273,419 +331,244 @@ func (c *Loader) resetData() { c.PacketCache = packetCache } -func (c *Loader) PacketsPerLoad() int { - c.Lock() - defer c.Unlock() - return c.opt.PacketsPerLoad -} - -// Close shuts down the whole loader, including progress monitoring goroutines. Use this only -// when about to load a new pcap (use a new loader) -func (c *Loader) Close() error { - if c.mainCancelFn != nil { - c.mainCancelFn() +func (c *ParentLoader) RenewPdmlLoader() { + c.PdmlLoader = &PdmlLoader{ + PcapPdml: c.PcapPdml, + PcapPcap: c.PcapPcap, + rowCurrentlyLoading: -1, + highestCachedRow: -1, + opt: c.opt, } - return nil } -func (c *Loader) MainContext() context.Context { - return c.mainCtx +func (c *ParentLoader) RenewIfaceLoader() { + c.InterfaceLoader = &InterfaceLoader{} } -func (c *Loader) SourceContext() context.Context { - return c.thisSrcCtx +func (p *ParentLoader) LoadingAnything() bool { + return p.PsmlLoader.IsLoading() || p.PdmlLoader.IsLoading() || p.InterfaceLoader.IsLoading() } -func (c *Loader) stopLoadIface() { - if c.ifaceCancelFn != nil { - c.ifaceCancelFn() - } +func (p *ParentLoader) InterfaceFile() string { + return p.ifaceFile } -func (c *Loader) stopLoadPsml() { - if c.psmlCancelFn != nil { - c.psmlCancelFn() - } +func (p *ParentLoader) DisplayFilter() string { + return p.displayFilter } -func (c *Loader) stopLoadPdml() { - if c.stage2CancelFn != nil { - c.stage2CancelFn() - } +func (p *ParentLoader) CaptureFilter() string { + return p.captureFilter } -func (c *Loader) stopLoadCurrentSource() { - if c.thisSrcCancelFn != nil { - c.thisSrcCancelFn() +func (p *ParentLoader) TurnOffPipe() { + // Switch over to the temp pcap file. If a new filter is applied + // after stopping, we should read from the temp file and not the fifo + // because nothing will be feeding the fifo. 
+ if p.PsmlLoader.PcapPsml != p.PdmlLoader.PcapPdml { + log.Infof("Switching from interface/fifo mode to file mode") + p.PsmlLoader.PcapPsml = p.PdmlLoader.PcapPdml } } -func (c *Loader) PsmlData() [][]string { - return c.PacketPsmlData -} - -func (c *Loader) PsmlHeaders() []string { - return c.PacketPsmlHeaders -} - -func (c *Loader) PsmlColors() []PacketColors { - return c.PacketPsmlColors -} - -//====================================================================== +var _ iPsmlLoaderEnv = (*ParentLoader)(nil) -type Scheduler struct { - *Loader - OperationsChan chan RunFn - disabled bool +func (p *ParentLoader) PacketSources() []IPacketSource { + return p.psrcs } -func NewScheduler(cmds ILoaderCmds, opts ...Options) *Scheduler { - return &Scheduler{ - OperationsChan: make(chan RunFn, 1000), - Loader: NewPcapLoader(cmds, opts...), - } +func (p *ParentLoader) PsmlStoppedDeliberately() bool { + return p.psmlStoppedDeliberately_ } -func (c *Scheduler) IsEnabled() bool { - return !c.disabled +func (p *ParentLoader) TailStoppedDeliberately() bool { + return p.tailStoppedDeliberately } -func (c *Scheduler) Enable() { - c.disabled = false - c.SuppressErrors = false +func (p *ParentLoader) LoadWasCancelled() bool { + return p.loadWasCancelled } -func (c *Scheduler) Disable() { - c.disabled = true - c.SuppressErrors = true +func (p *ParentLoader) Commands() ILoaderCmds { + return p.cmds } -func (c *Scheduler) RequestClearPcap(cb interface{}) { - log.Infof("Scheduler requested clear pcap") - c.OperationsChan <- func() { - c.Disable() - c.doClearPcapOperation(cb, func() { - c.Enable() - }) - - } +func (p *ParentLoader) Context() context.Context { + return p.mainCtx } -func (c *Scheduler) RequestStopLoadStage1(cb interface{}) { - log.Infof("Scheduler requested stop psml + iface") - c.OperationsChan <- func() { - c.Disable() - c.doStopLoadStage1Operation(cb, func() { - c.Enable() - }) - } +func (p *ParentLoader) MainRun(fn gowid.RunFunction) { + p.runner.Run(fn) } -func (c *Scheduler) RequestStopLoad(cb interface{}) { - log.Infof("Scheduler requested stop pcap load") - c.OperationsChan <- func() { - c.Disable() - c.doStopLoadOperation(cb, func() { - c.Enable() - }) +// CloseMain shuts down the whole loader, including progress monitoring goroutines. 
Use this only +// when about to load a new pcap (use a new loader) +func (c *ParentLoader) CloseMain() { + c.psmlStoppedDeliberately_ = true + c.pdmlStoppedDeliberately_ = true + if c.mainCancelFn != nil { + c.mainCancelFn() + c.mainCancelFn = nil } } -func (c *Scheduler) RequestNewFilter(newfilt string, cb interface{}) { - log.Infof("Scheduler requested application of display filter '%v'", newfilt) - c.OperationsChan <- func() { - c.Disable() - c.doNewFilterOperation(newfilt, cb, c.Enable) - } -} +func (c *ParentLoader) StopLoadPsmlAndIface(cb interface{}) { + log.Infof("Requested stop psml + iface") -func (c *Scheduler) RequestLoadInterfaces(psrcs []IPacketSource, captureFilter string, displayFilter string, tmpfile string, cb interface{}) { - log.Infof("Scheduler requested interface/fifo load for '%v'", SourcesString(psrcs)) - c.OperationsChan <- func() { - c.Disable() - c.doLoadInterfacesOperation(psrcs, captureFilter, displayFilter, tmpfile, cb, func() { - c.Enable() - }) - } -} + c.psmlStoppedDeliberately_ = true + c.loadWasCancelled = true -func (c *Scheduler) RequestLoadPcap(pcap string, displayFilter string, cb interface{}) { - log.Infof("Scheduler requested pcap file load for '%v'", pcap) - c.OperationsChan <- func() { - c.Disable() - c.doLoadPcapOperation(pcap, displayFilter, cb, func() { - c.Enable() - }) - } + c.stopTail() + c.stopLoadPsml() + c.stopLoadIface() } //====================================================================== -// Clears the currently loaded data. If the loader is currently reading from an -// interface, the loading continues after the current data has been discarded. If -// the loader is currently reading from a file, the loading *stops*. -func (c *Loader) doClearPcapOperation(cb interface{}, fn RunFn) { - if c.State() == 0 { - c.clearSource() - c.resetData() - - handleClear(cb) +// NewFilter is essentially a completely new load - psml + pdml. But not iface, if that's running +func (c *PacketLoader) NewFilter(newfilt string, cb interface{}, app gowid.IApp) { - fn() - } else { - // If we are reading from an interface when the clear operation is issued, we should - // continue again afterwards. If we're reading from a file, the clear stops the read. - // Track this state. - startIfaceAgain := false + log.Infof("Requested application of display filter '%v'", newfilt) - if c.State()&LoadingIface != 0 { - startIfaceAgain = true - for _, psrc := range c.psrcs { - startIfaceAgain = startIfaceAgain && CanRestart(psrc) // Only try to restart if the packet source allows - } + if c.DisplayFilter() == newfilt { + log.Infof("No operation - same filter applied ('%s').", newfilt) + // This is a hack to avoid displaying an error in the following situation. The user loads + // a pcap and applies a filter. Then they hit clear-packets. The filter is still shown. + // Then they run clear-filter. That will attempt to apply a new filter, but the loader's + // record of the filter is cleared, and the new filter to apply is also clear, so they + // are the same, so this error is shown. 
+ if newfilt != "" { + HandleError(NoneCode, app, fmt.Errorf("Same filter - nothing to do."), cb) } + } else { + c.stopTail() + c.stopLoadPsml() + c.stopLoadPdml() - c.stopLoadCurrentSource() - - c.When(c.IdleState, func() { - // Document why this needs to be delayed again, since runWhenReadyFn - // will run in app goroutine - c.doClearPcapOperation(cb, func() { - if startIfaceAgain { - c.doLoadInterfacesOperation( - c.psrcs, c.CaptureFilter(), - c.DisplayFilter(), c.InterfaceFile(), cb, fn, - ) - } else { - fn() - } - }) - }) - - } -} - -func (c *Loader) IdleState() bool { - return c.State() == 0 -} - -func (c *Loader) When(pred whenFn, doit RunFn) { - c.onStateChange = append(c.onStateChange, runFnInState{pred, doit}) -} + OpsChan <- gowid.RunFunction(func(app gowid.IApp) { + c.RenewPsmlLoader() + c.RenewPdmlLoader() -func (c *Loader) doStopLoadOperation(cb interface{}, fn RunFn) { - c.LoadWasCancelled = true + // This is not ideal. I'm clearing the views, but I'm about to + // restart. It's not really a new source, so called the new source + // handler is an untify way of updating the current capture in the + // title bar again + handleClear(NoneCode, app, cb) - HandleBegin(cb) + c.displayFilter = newfilt - if c.State() != 0 { - c.stopLoadCurrentSource() + log.Infof("Applying new display filter '%s'", newfilt) - c.When(c.IdleState, func() { - c.doStopLoadOperation(cb, fn) + c.loadPsmlSync(c.InterfaceLoader, c, cb, app) }) - } else { - fn() - HandleEnd(cb) } } -func (c *Loader) doStopLoadStage1Operation(cb interface{}, fn RunFn) { - c.LoadWasCancelled = true +func (c *PacketLoader) LoadPcap(pcap string, displayFilter string, cb interface{}, app gowid.IApp) { + log.Infof("Requested pcap file load for '%v'", pcap) - HandleBegin(cb) - - if c.State()&(LoadingPsml|LoadingIface) != 0 { - - if c.State()&LoadingPsml != 0 { - c.stopLoadPsml() - } - if c.State()&LoadingIface != 0 { - c.stopLoadIface() - } + curDisplayFilter := displayFilter + // The channel is unbuffered, and monitored from the same goroutine, so this would block + // unless we start a new goroutine - c.When(func() bool { - return c.State()&(LoadingIface|LoadingPsml) == 0 - }, func() { - c.doStopLoadStage1Operation(cb, fn) - }) + if c.Pcap() == pcap && c.DisplayFilter() == curDisplayFilter { + log.Infof("No operation - same pcap and filter.") + HandleError(NoneCode, app, fmt.Errorf("Same pcap and filter - nothing to do."), cb) } else { - fn() - HandleEnd(cb) - } -} -// Issued e.g. if a new filter is applied while loading from an interface. We need -// to stop the psml (and fifo) and pdml reading processes, but keep alive the spooling -// process from iface -> temp file. When the current state is simply Loadingiface then -// the next operation can commence (e.g. 
applying the new filter value) -func (c *Loader) doStopLoadToIfaceOperation(fn RunFn) { - c.stopLoadPsml() - c.stopLoadPdml() - - c.When(func() bool { - return c.State() == LoadingIface - }, fn) -} + c.stopTail() + c.stopLoadPsml() + c.stopLoadPdml() + c.stopLoadIface() -// Called when state is appropriate -func (c *Loader) doNewFilterOperation(newfilt string, cb interface{}, fn RunFn) { - //var res EnableOperationsWhen + OpsChan <- gowid.RunFunction(func(app gowid.IApp) { + // This will enable the operation when clear completes + handleClear(NoneCode, app, cb) - if c.DisplayFilter() == newfilt { - log.Infof("No operation - same filter applied.") - fn() - } else if c.State() == 0 || c.State() == LoadingIface { - handleClear(cb) - - c.startLoadNewFilter(newfilt, cb) + c.Renew() - c.When(func() bool { - return c.State()&LoadingPsml == LoadingPsml - }, fn) + c.psrcs = []IPacketSource{FileSource{Filename: pcap}} + c.ifaceFile = "" - c.SetState(c.State() | LoadingPsml) + c.PcapPsml = pcap + c.PcapPdml = pcap + c.PcapPcap = pcap + c.displayFilter = displayFilter - } else { - if c.State()&LoadingPsml != 0 { - c.stopLoadPsml() - } - if c.State()&LoadingPdml != 0 { - c.stopLoadPdml() - } + // call from main goroutine - when new filename is established + handleNewSource(NoneCode, app, cb) - c.When(func() bool { - return c.State() == 0 || c.State() == LoadingIface - }, func() { - c.doNewFilterOperation(newfilt, cb, fn) + log.Infof("Starting new pcap file load '%s'", pcap) + c.loadPsmlSync(nil, c.ParentLoader, cb, app) }) } } -func (c *Loader) doLoadInterfacesOperation(psrcs []IPacketSource, captureFilter string, displayFilter string, tmpfile string, cb interface{}, fn RunFn) { - // The channel is unbuffered, and monitored from the same goroutine, so this would block - // unless we start a new goroutine - - //var res EnableOperationsWhen - - // If we're already loading, but the request is for the same, then ignore. If we were stopped, then - // process the request, because it implicitly means start reading from the interface again (and we - // are stopped) - names := SourcesString(psrcs) - if c.State()&LoadingPsml != 0 && deep.Equal(c.Interfaces(), names) == nil && c.DisplayFilter() == displayFilter && c.CaptureFilter() == captureFilter { - log.Infof("No operation - same interface and filters.") - fn() - } else if c.State() == 0 { - handleClear(cb) - handleNewSource(cb) - - if err := c.startLoadInterfacesNew(psrcs, captureFilter, displayFilter, tmpfile, cb); err == nil { - c.When(func() bool { - return c.State()&(LoadingIface|LoadingPsml) == LoadingIface|LoadingPsml - }, fn) - - c.SetState(c.State() | LoadingIface | LoadingPsml) - } else { - HandleError(err, cb) - } - } else if c.State() == LoadingIface && deep.Equal(c.Interfaces(), names) == nil { - //if iface == c.Interface() { // same interface, so just start it back up - iface spooler still running - handleClear(cb) - c.startLoadNewFilter(displayFilter, cb) +// Clears the currently loaded data. If the loader is currently reading from an +// interface, the loading continues after the current data has been discarded. If +// the loader is currently reading from a file, the loading *stops*. - c.When(func() bool { - return c.State()&(LoadingIface|LoadingPsml) == LoadingIface|LoadingPsml - }, fn) +// Intended to restart iface loader - since a clear will discard all data up to here. 
+func (c *PacketLoader) ClearPcap(cb interface{}) { + startIfaceAgain := false - c.SetState(c.State() | LoadingPsml) - } else { - // State contains Loadingpdml and/or Loadingpdml. Need to stop those first. OR state contains - // Loadingiface but the interface requested is different. - if c.State()&LoadingIface != 0 && deep.Equal(c.Interfaces(), names) != nil { - c.doStopLoadOperation(cb, func() { - c.doLoadInterfacesOperation(psrcs, captureFilter, displayFilter, tmpfile, cb, fn) - }) // returns an enable function when idle - } else { - c.doStopLoadToIfaceOperation(func() { - c.doLoadInterfacesOperation(psrcs, captureFilter, displayFilter, tmpfile, cb, fn) - }) + if c.InterfaceLoader != nil { + // Don't restart if the previous interface load was deliberately cancelled + if !c.loadWasCancelled { + startIfaceAgain = true + for _, psrc := range c.psrcs { + startIfaceAgain = startIfaceAgain && CanRestart(psrc) // Only try to restart if the packet source allows + } } + c.stopLoadIface() } -} - -// Call from app goroutine context -func (c *Loader) doLoadPcapOperation(pcap string, displayFilter string, cb interface{}, fn RunFn) { - curDisplayFilter := displayFilter - // The channel is unbuffered, and monitored from the same goroutine, so this would block - // unless we start a new goroutine - - if c.Pcap() == pcap && c.DisplayFilter() == curDisplayFilter { - log.Infof("No operation - same pcap and filter.") - fn() - } else if c.State() == 0 { - handleClear(cb) - handleNewSource(cb) - - c.startLoadNewFile(pcap, curDisplayFilter, cb) - - c.When(func() bool { - return c.State()&LoadingPsml == LoadingPsml - }, fn) - - c.SetState(c.State() | LoadingPsml) - } else { - - // First, wait until everything is stopped - c.doStopLoadOperation(cb, func() { - c.doLoadPcapOperation(pcap, displayFilter, cb, fn) - }) - } -} - -func (c *Loader) ReadingFromFifo() bool { - // If it's a string it means that it's a filename, so it's not a fifo. Other values - // in practise are the empty interface, or the read end of a fifo - _, ok := c.PcapPsml.(string) - return !ok -} -// https://stackoverflow.com/a/28005931/784226 -func TempPcapFile(tokens ...string) string { - tokensClean := make([]string, 0, len(tokens)) - for _, token := range tokens { - re := regexp.MustCompile(`[^a-zA-Z0-9.-]`) - tokensClean = append(tokensClean, re.ReplaceAllString(token, "_")) + // Don't close main context, it's used by interface process. 
+ // We may not have anything running, but it's ok - then the op channel + // will be enabled + if !startIfaceAgain { + c.loadWasCancelled = true } + c.stopTail() + c.stopLoadPsml() + c.stopLoadPdml() - tokenClean := strings.Join(tokensClean, "-") - - return filepath.Join(termshark.PcapDir(), fmt.Sprintf("%s--%s.pcap", - tokenClean, - termshark.DateStringForFilename(), - )) -} + // When stop is done, launch the clear and restart + OpsChan <- gowid.RunFunction(func(app gowid.IApp) { + handleClear(NoneCode, app, cb) + + // Don't CloseMain - that will stop the interface process too + c.loadWasCancelled = false + c.RenewPsmlLoader() + c.RenewPdmlLoader() + + if !startIfaceAgain { + c.psrcs = c.psrcs[:0] + c.ifaceFile = "" + c.PcapPsml = "" + c.PcapPdml = "" + c.PcapPcap = "" + c.displayFilter = "" + } else { + c.RenewIfaceLoader() -func (c *Loader) makeNewSourceContext() { - c.thisSrcCtx, c.thisSrcCancelFn = context.WithCancel(c.mainCtx) + if err := c.loadInterfaces(c.psrcs, c.CaptureFilter(), c.DisplayFilter(), c.InterfaceFile(), cb, app); err != nil { + HandleError(NoneCode, app, err, cb) + } + } + }) } -// Save the file first // Always called from app goroutine context - so don't need to protect for race on cancelfn // Assumes gstate is ready // iface can be a number, or a fifo, or a pipe... -func (c *Loader) startLoadInterfacesNew(psrcs []IPacketSource, captureFilter string, displayFilter string, tmpfile string, cb interface{}) error { - c.PcapPsml = nil - c.PcapPdml = tmpfile - c.PcapPcap = tmpfile - - c.psrcs = psrcs // dpm't know if it's fifo (tfifo), pipe (/dev/fd/3) or iface (eth0). Treated same way - c.ifaceFile = tmpfile - c.displayFilter = displayFilter - c.captureFilter = captureFilter +func (c *PacketLoader) LoadInterfaces(psrcs []IPacketSource, captureFilter string, displayFilter string, tmpfile string, cb interface{}, app gowid.IApp) error { + c.RenewIfaceLoader() - c.makeNewSourceContext() + return c.loadInterfaces(psrcs, captureFilter, displayFilter, tmpfile, cb, app) +} +func (c *ParentLoader) loadPsmlForInterfaces(psrcs []IPacketSource, captureFilter string, displayFilter string, tmpfile string, cb interface{}, app gowid.IApp) error { // It's a temporary unique file, and no processes are started yet, so either // (a) it doesn't exist, OR // (b) it does exist in which case this load is a result of a restart. @@ -696,133 +579,39 @@ func (c *Loader) startLoadInterfacesNew(psrcs []IPacketSource, captureFilter str return err } - log.Infof("Starting new interface/fifo load '%v'", SourcesString(psrcs)) - c.startLoadPsml(cb) - termshark.TrackedGo(func() { - c.loadIfacesAsync(cb) - }, Goroutinewg) + c.PcapPsml = nil + c.PcapPdml = tmpfile + c.PcapPcap = tmpfile - return nil -} - -func (c *Loader) startLoadNewFilter(displayFilter string, cb interface{}) { - c.displayFilter = displayFilter - - c.makeNewSourceContext() - - log.Infof("Applying new display filter '%s'", displayFilter) - c.startLoadPsml(cb) -} - -func (c *Loader) clearSource() { - c.psrcs = make([]IPacketSource, 0) - c.ifaceFile = "" - c.PcapPsml = "" - c.PcapPdml = "" - c.PcapPcap = "" -} - -func (c *Loader) startLoadNewFile(pcap string, displayFilter string, cb interface{}) { - c.psrcs = []IPacketSource{FileSource{Filename: pcap}} - c.ifaceFile = "" + c.psrcs = psrcs // dpm't know if it's fifo (tfifo), pipe (/dev/fd/3) or iface (eth0). 
Treated same way - c.PcapPsml = pcap - c.PcapPdml = pcap - c.PcapPcap = pcap + c.ifaceFile = tmpfile c.displayFilter = displayFilter + c.captureFilter = captureFilter - c.makeNewSourceContext() - - log.Infof("Starting new pcap file load '%s'", pcap) - c.startLoadPsml(cb) -} - -func (c *Loader) startLoadPsml(cb interface{}) { - c.Lock() - c.PacketCache.Purge() - c.Unlock() - - termshark.TrackedGo(func() { - c.loadPsmlAsync(cb) - }, Goroutinewg) -} - -// assumes no pcap is being loaded -func (c *Loader) startLoadPdml(row int, cb interface{}) { - c.Lock() - c.RowCurrentlyLoading = row - c.Unlock() - - termshark.TrackedGo(func() { - c.loadPcapAsync(row, cb) - }, Goroutinewg) -} + handleNewSource(NoneCode, app, cb) -// if done==true, then this cache entry is complete -func (c *Loader) updateCacheEntryWithPdml(row int, pdml []IPdmlPacket, done bool) { - var ce CacheEntry - c.Lock() - defer c.Unlock() - if ce2, ok := c.PacketCache.Get(row); ok { - ce = ce2.(CacheEntry) - } - ce.Pdml = pdml - ce.PdmlComplete = done - c.PacketCache.Add(row, ce) -} + log.Infof("Starting new interface/fifo load '%v'", SourcesString(psrcs)) + c.PsmlLoader.loadPsmlSync(c.InterfaceLoader, c, cb, app) -func (c *Loader) updateCacheEntryWithPcap(row int, pcap [][]byte, done bool) { - var ce CacheEntry - c.Lock() - defer c.Unlock() - if ce2, ok := c.PacketCache.Get(row); ok { - ce = ce2.(CacheEntry) - } - ce.Pcap = pcap - ce.PcapComplete = done - c.PacketCache.Add(row, ce) + return nil } -func (c *Loader) LengthOfPdmlCacheEntry(row int) (int, error) { - c.Lock() - defer c.Unlock() - if ce, ok := c.PacketCache.Get(row); ok { - ce2 := ce.(CacheEntry) - return len(ce2.Pdml), nil - } - return -1, fmt.Errorf("No cache entry found for row %d", row) -} +// intended for internal use +func (c *ParentLoader) loadInterfaces(psrcs []IPacketSource, captureFilter string, displayFilter string, tmpfile string, cb interface{}, app gowid.IApp) error { -func (c *Loader) LengthOfPcapCacheEntry(row int) (int, error) { - c.Lock() - defer c.Unlock() - if ce, ok := c.PacketCache.Get(row); ok { - ce2 := ce.(CacheEntry) - return len(ce2.Pcap), nil + if err := c.loadPsmlForInterfaces(psrcs, captureFilter, displayFilter, tmpfile, cb, app); err != nil { + return err } - return -1, fmt.Errorf("No cache entry found for row %d", row) -} - -type ISimpleCache interface { - Complete() bool -} -var _ ISimpleCache = CacheEntry{} + // Deliberately use only HandleEnd handler once, in the PSML load - when it finishes, + // we'll reenable ops + c.InterfaceLoader.loadIfacesSync(c, cb, app) -type iPcapLoader interface { - Interfaces() []string - InterfaceFile() string - DisplayFilter() string - CaptureFilter() string - NumLoaded() int - CacheAt(int) (ISimpleCache, bool) - LoadingRow() int + return nil } -var _ iPcapLoader = (*Loader)(nil) -var _ fmt.Stringer = (*Loader)(nil) - -func (c *Loader) String() string { +func (c *ParentLoader) String() string { names := make([]string, 0, len(c.psrcs)) for _, psrc := range c.psrcs { switch { @@ -839,7 +628,11 @@ func (c *Loader) String() string { return strings.Join(names, " + ") } -func (c *Loader) Pcap() string { +func (c *ParentLoader) Empty() bool { + return len(c.psrcs) == 0 +} + +func (c *ParentLoader) Pcap() string { for _, psrc := range c.psrcs { if psrc != nil && psrc.IsFile() { return psrc.Name() @@ -848,7 +641,7 @@ func (c *Loader) Pcap() string { return "" } -func (c *Loader) Interfaces() []string { +func (c *ParentLoader) Interfaces() []string { names := make([]string, 0, len(c.psrcs)) for _, psrc := range 
c.psrcs { if psrc != nil && !psrc.IsFile() { @@ -858,38 +651,7 @@ func (c *Loader) Interfaces() []string { return names } -func (c *Loader) InterfaceFile() string { - return c.ifaceFile -} - -func (c *Loader) DisplayFilter() string { - return c.displayFilter -} - -func (c *Loader) CaptureFilter() string { - return c.captureFilter -} - -func (c *Loader) NumLoaded() int { - c.Lock() - defer c.Unlock() - return len(c.PacketPsmlData) -} - -func (c *Loader) LoadingRow() int { - c.Lock() - defer c.Unlock() - return c.RowCurrentlyLoading -} - -func (c *Loader) CacheAt(row int) (ISimpleCache, bool) { - if ce, ok := c.PacketCache.Get(row); ok { - return ce.(CacheEntry), ok - } - return CacheEntry{}, false -} - -func (c *Loader) loadIsNecessary(ev LoadPcapSlice) bool { +func (c *ParentLoader) loadIsNecessary(ev LoadPcapSlice) bool { res := true if ev.Row > c.NumLoaded() { res = false @@ -897,61 +659,121 @@ func (c *Loader) loadIsNecessary(ev LoadPcapSlice) bool { // Might be less because a cache load might've been interrupted - if it's not truncated then // we're set res = false + + // I can't conclude that a load based at row 0 is sufficient to ignore this one. + // The previous load might've started when only 10 packets were available (via the + // the PSML data), so the PDML end idx would be frame.number < 10. This load might + // be for a rocus position of 20, which would map via rounding to row 0. But we + // don't have the data. + + // Hang on - this is for a load that has finished. If it was a live load, the cache + // will not be marked complete for this batch of data - so a live load that is loading + // this batch, but started earlier in the load (so frame.number < X where X < row) + // will not be marked complete in the cache, so the load will be redone if needed. If + // we get here, the load is still underway, so let it complete. } else if c.LoadingRow() == ev.Row { res = false } return res } -func (c *Loader) signalStage2Done(cb interface{}) { - c.Lock() - ch := c.Stage2FinishedChan - c.Stage2FinishedChan = make(chan struct{}) - c.Unlock() - HandleEnd(cb) - close(ch) -} - -func (c *Loader) signalStage2Starting(cb interface{}) { - HandleBegin(cb) -} - -// Call from any goroutine - avoid calling in render, don't block it -// Procedure: -// - caller passes context, keeps cancel function -// - create a derived context for pcap reading processes -// - run them in goroutines -// - for each pcap process, -// - defer signal pcapchan when done -// - check for ctx2.Err to break -// - if err, then break -// - run goroutine to update UI with latest data on ticker -// - if ctxt2 done, then break -// - run controller watching for -// - if original ctxt done, then break (ctxt2 automatically cancelled) -// - if both processes done, then -// - cancel ticker with ctxt2 -// - wait for all to shut down -// - final UI update -func (c *Loader) loadPcapAsync(row int, cb interface{}) { +//====================================================================== + +// Holds a reference to the loader, and wraps Read() around the tail process's +// Read(). Count the bytes, and when they are equal to the final total of bytes +// written by the tshark -i process (spooling to a tmp file), a function is called +// which stops the PSML process. 
+type tailReadTracker struct { + tailReader io.Reader + loader *InterfaceLoader + tail iTailCommand + callback interface{} + app gowid.IApp +} + +func (r *tailReadTracker) Read(p []byte) (int, error) { + n, err := r.tailReader.Read(p) + + r.loader.Lock() + if r.loader.totalFifoBytesRead.IsNone() { + r.loader.totalFifoBytesRead = gwutil.SomeInt64(int64(n)) + } else { + r.loader.totalFifoBytesRead = gwutil.SomeInt64(int64(n) + r.loader.totalFifoBytesRead.Val()) + } + // err == ErrClosed if the pipe (tailReader) that is wrapped in this tracker is closed. + // This can happen because this call to Read() and the deferred closepipe() function run + // at the same time. + if err != nil && r.loader.fifoError == nil && err != io.EOF && !errIsAlreadyClosed(err) { + r.loader.fifoError = err + } + r.loader.Unlock() + + r.loader.checkAllBytesRead(r.tail, r.callback, r.app) + + return n, err +} + +func errIsAlreadyClosed(err error) bool { + if err == os.ErrClosed { + return true + } else if err, ok := err.(*os.PathError); ok { + return errIsAlreadyClosed(err.Err) + } else { + return false + } +} + +//====================================================================== + +type iPdmlLoaderEnv interface { + iLoaderEnv + DisplayFilter() string + ReadingFromFifo() bool + StartStage2ChanFn() chan struct{} + PacketCacheFn() *lru.Cache // i -> [pdml(i * 1000)..pdml(i+1*1000)] + updateCacheEntryWithPdml(row int, pdml []IPdmlPacket, done bool) + updateCacheEntryWithPcap(row int, pcap [][]byte, done bool) + LengthOfPdmlCacheEntry(row int) (int, error) + LengthOfPcapCacheEntry(row int) (int, error) + CacheAt(row int) (CacheEntry, bool) + DoWithPsmlData(func([][]string)) +} + +func (c *PdmlLoader) loadPcapSync(row int, visible bool, ps iPdmlLoaderEnv, cb interface{}, app gowid.IApp) { // Used to cancel the tickers below which update list widgets with the latest data and // update the progress meter. Note that if ctx is cancelled, then this context is cancelled // too. When the 2/3 data loading processes are done, a goroutine will then run uiCtxCancel() // to stop the UI updates. - c.Lock() - c.stage2Ctx, c.stage2CancelFn = context.WithCancel(c.thisSrcCtx) - c.Unlock() + c.stage2Ctx, c.stage2CancelFn = context.WithCancel(ps.Context()) + + c.state = Loading + c.rowCurrentlyLoading = row + c.visible = visible // Set to true by a goroutine started within here if ctxCancel() is called i.e. 
the outer context - var stageIsCancelled int32 - c.startPdmlChan = make(chan struct{}) - c.startPcapChan = make(chan struct{}) + var pdmlCancelled int32 + var pcapCancelled int32 + c.startChan = make(chan struct{}) + + c.Stage2FinishedChan = make(chan struct{}) // gcla later todo - suspect // Returns true if it's an error we should bring to user's attention - unexpectedError := func(err error) bool { - cancelled := atomic.LoadInt32(&stageIsCancelled) + unexpectedPdmlError := func(err error) bool { + cancelled := atomic.LoadInt32(&pdmlCancelled) + if cancelled == 0 { + if err != io.EOF { + if err, ok := err.(*xml.SyntaxError); !ok || err.Msg != "unexpected EOF" { + return true + } + } + } + return false + } + + unexpectedPcapError := func(err error) bool { + cancelled := atomic.LoadInt32(&pcapCancelled) if cancelled == 0 { if err != io.EOF { if err, ok := err.(*xml.SyntaxError); !ok || err.Msg != "unexpected EOF" { @@ -962,8 +784,12 @@ func (c *Loader) loadPcapAsync(row int, cb interface{}) { return false } - setCancelled := func() { - atomic.CompareAndSwapInt32(&stageIsCancelled, 0, 1) + setPcapCancelled := func() { + atomic.CompareAndSwapInt32(&pcapCancelled, 0, 1) + } + + setPdmlCancelled := func() { + atomic.CompareAndSwapInt32(&pdmlCancelled, 0, 1) } //====================================================================== @@ -973,585 +799,518 @@ func (c *Loader) loadPcapAsync(row int, cb interface{}) { sidx := -1 eidx := -1 - // signal to updater that we're about to start. This will block until cb completes - c.signalStage2Starting(cb) - - // This should correctly wait for all resources, no matter where in the process of creating them - // an interruption or error occurs - defer func() { - // Wait for all other goroutines to complete - c.stage2Wg.Wait() - - c.Lock() - c.RowCurrentlyLoading = -1 - c.Unlock() - - // The process Wait() goroutine will always expect a stage2 cancel at some point. It can - // come early, if the user interrupts the load. If not, then we send it now, to let - // that goroutine terminate. - c.stage2CancelFn() - - c.signalStage2Done(cb) - }() + // Determine this in main goroutine + termshark.TrackedGo(func() { - // Set these before starting the pcap and pdml process goroutines so that - // at the beginning, PdmlCmd and PcapCmd are definitely not nil. These - // values are saved by the goroutine, and used to access the pid of these - // processes, if they are started. - var pdmlCmd IPcapCommand - var pcapCmd IPcapCommand + ps.MainRun(gowid.RunFunction(func(app gowid.IApp) { + HandleBegin(PdmlCode, app, cb) + })) + + // This should correctly wait for all resources, no matter where in the process of creating them + // an interruption or error occurs + defer func(p *PdmlLoader) { + // Wait for all other goroutines to complete + p.stage2Wg.Wait() + + // The process Wait() goroutine will always expect a stage2 cancel at some point. It can + // come early, if the user interrupts the load. If not, then we send it now, to let + // that goroutine terminate. + p.stage2CancelFn() + + ps.MainRun(gowid.RunFunction(func(app gowid.IApp) { + close(p.Stage2FinishedChan) + HandleEnd(PdmlCode, app, cb) + + p.state = NotLoading + p.rowCurrentlyLoading = -1 + p.stage2CancelFn = nil + })) + }(c) + + // Set these before starting the pcap and pdml process goroutines so that + // at the beginning, PdmlCmd and PcapCmd are definitely not nil. These + // values are saved by the goroutine, and used to access the pid of these + // processes, if they are started. 
+ var pdmlCmd IPcapCommand + var pcapCmd IPcapCommand - // - // Goroutine to set mapping between table rows and frame numbers - // - termshark.TrackedGo(func() { - select { - case <-c.StartStage2Chan: - break - case <-c.stage2Ctx.Done(): - setCancelled() - return - } + // + // Goroutine to set mapping between table rows and frame numbers + // + termshark.TrackedGo(func() { + select { + case <-ps.StartStage2ChanFn(): + break + case <-c.stage2Ctx.Done(): + return + } - // Do this - but if we're cancelled first (stage2Ctx.Done), then they - // don't need to be signalled because the other selects waiting on these - // channels will be cancelled too. - defer func() { - // Signal the pdml and pcap reader to start. - for _, ch := range []chan struct{}{c.startPdmlChan, c.startPcapChan} { + // Do this - but if we're cancelled first (stage2Ctx.Done), then they + // don't need to be signalled because the other selects waiting on these + // channels will be cancelled too. + // This has to wait until the PsmlCmd and PcapCmd are set - because next stages depend + // on those + defer func() { + // Signal the pdml and pcap reader to start. select { - case <-ch: // it will be closed if the psml has loaded already, and this e.g. a cached load + case <-c.startChan: // it will be closed if the psml has loaded already, and this e.g. a cached load default: - close(ch) + close(c.startChan) } - } - }() + }() - // If there's no filter, psml, pdml and pcap run concurrently for speed. Therefore the pdml and pcap - // don't know how large the psml will be. So we set numToRead to 1000. This might be too high, but - // we only use this to determine when we can kill the reading processes early. The result will be - // correct if we don't kill the processes, it just might load for longer. - c.KillAfterReadingThisMany = c.opt.PacketsPerLoad - var err error - if c.displayFilter == "" { - sidx = row + 1 - // +1 for frame.number being 1-based; +1 to read past the end so that - // the XML decoder doesn't stall and I can kill after abcdex - eidx = row + c.opt.PacketsPerLoad + 1 + 1 - } else { - c.Lock() - if len(c.PacketPsmlData) > row { - sidx, err = strconv.Atoi(c.PacketPsmlData[row][0]) - if err != nil { - log.Fatal(err) - } - if len(c.PacketPsmlData) > row+c.opt.PacketsPerLoad+1 { - // If we have enough packets to request one more than the amount to - // cache, then requesting one more will mean the XML decoder won't - // block at packet 999 waiting for - so this is a hack to - // let me promptly kill tshark when I've read enough. - eidx, err = strconv.Atoi(c.PacketPsmlData[row+c.opt.PacketsPerLoad+1][0]) - if err != nil { - log.Fatal(err) - } - } else { - eidx, err = strconv.Atoi(c.PacketPsmlData[len(c.PacketPsmlData)-1][0]) - if err != nil { - log.Fatal(err) + // If there's no filter, psml, pdml and pcap run concurrently for speed. Therefore the pdml and pcap + // don't know how large the psml will be. So we set numToRead to 1000. This might be too high, but + // we only use this to determine when we can kill the reading processes early. The result will be + // correct if we don't kill the processes, it just might load for longer. 
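// Editor's aside, not part of the patch: a worked example of the frame.number
// window computed just below for an unfiltered load, assuming a hypothetical
// block size (PacketsPerLoad) of 1000 and a load starting at row 2000.
package main

import "fmt"

func main() {
	row := 2000            // first row of the block being loaded (0-based)
	packetsPerLoad := 1000 // assumed block size

	// frame.number is 1-based (+1), and one extra frame is requested (+1) so
	// the XML decoder doesn't stall before the reader can be stopped.
	sidx := row + 1
	eidx := row + packetsPerLoad + 1 + 1

	fmt.Printf("(frame.number >= %d) and (frame.number < %d)\n", sidx, eidx)
	// (frame.number >= 2001) and (frame.number < 3002)
}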
+ c.KillAfterReadingThisMany = c.opt.PacketsPerLoad + var err error + if ps.DisplayFilter() == "" { + sidx = row + 1 + // +1 for frame.number being 1-based; +1 to read past the end so that + // the XML decoder doesn't stall and I can kill after abcdex + eidx = row + c.opt.PacketsPerLoad + 1 + 1 + } else { + ps.DoWithPsmlData(func(psmlData [][]string) { + if len(psmlData) > row { + sidx, err = strconv.Atoi(psmlData[row][0]) + if err != nil { + log.Fatal(err) + } + if len(psmlData) > row+c.opt.PacketsPerLoad+1 { + // If we have enough packets to request one more than the amount to + // cache, then requesting one more will mean the XML decoder won't + // block at packet 999 waiting for - so this is a hack to + // let me promptly kill tshark when I've read enough. + eidx, err = strconv.Atoi(psmlData[row+c.opt.PacketsPerLoad+1][0]) + if err != nil { + log.Fatal(err) + } + } else { + eidx, err = strconv.Atoi(psmlData[len(psmlData)-1][0]) + if err != nil { + log.Fatal(err) + } + eidx += 1 // beyond end of last frame + c.KillAfterReadingThisMany = len(psmlData) - row + } } - eidx += 1 // beyond end of last frame - c.KillAfterReadingThisMany = len(c.PacketPsmlData) - row - } + }) } - c.Unlock() - } - if c.displayFilter != "" { - displayFilterStr = fmt.Sprintf("(%s) and (frame.number >= %d) and (frame.number < %d)", c.displayFilter, sidx, eidx) - } else { - displayFilterStr = fmt.Sprintf("(frame.number >= %d) and (frame.number < %d)", sidx, eidx) - } + if ps.DisplayFilter() != "" { + displayFilterStr = fmt.Sprintf("(%s) and (frame.number >= %d) and (frame.number < %d)", ps.DisplayFilter(), sidx, eidx) + } else { + displayFilterStr = fmt.Sprintf("(frame.number >= %d) and (frame.number < %d)", sidx, eidx) + } - // These need to be set after displayFilterStr is set but before stage 2 is started - pdmlCmd = c.cmds.Pdml(c.PcapPdml, displayFilterStr) - pcapCmd = c.cmds.Pcap(c.PcapPcap, displayFilterStr) + // These need to be set after displayFilterStr is set but before stage 2 is started + pdmlCmd = ps.Commands().Pdml(c.PcapPdml, displayFilterStr) + pcapCmd = ps.Commands().Pcap(c.PcapPcap, displayFilterStr) - }, &c.stage2Wg, Goroutinewg) + }, &c.stage2Wg, Goroutinewg) - //====================================================================== + //====================================================================== - pdmlPidChan := make(chan int) - pcapPidChan := make(chan int) + pdmlPidChan := make(chan int) + pcapPidChan := make(chan int) - termshark.TrackedGo(func() { - select { - case <-c.startPdmlChan: - case <-c.stage2Ctx.Done(): - return - } + pdmlTermChan := make(chan error) + pcapTermChan := make(chan error) - var err error - stage2CtxChan := c.stage2Ctx.Done() - pdmlPidChan := pdmlPidChan - pcapPidChan := pcapPidChan + pdmlCtx, pdmlCancelFn := context.WithCancel(c.stage2Ctx) + pcapCtx, pcapCancelFn := context.WithCancel(c.stage2Ctx) - var pdmlCmd IPcapCommand - var pcapCmd IPcapCommand + // + // Goroutine to track pdml and pcap process lifetimes + // + termshark.TrackedGo(func() { + select { + case <-c.startChan: + case <-c.stage2Ctx.Done(): + return + } - origPdmlCmd := pdmlCmd - origPcapCmd := pcapCmd + var err error + stage2CtxChan := c.stage2Ctx.Done() + pdmlPidChan := pdmlPidChan + pcapPidChan := pcapPidChan - killPcap := func() { - err := termshark.KillIfPossible(pcapCmd) - if err != nil { - log.Infof("Did not kill pcap process: %v", err) + pdmlCancelledChan := pdmlCtx.Done() + pcapCancelledChan := pcapCtx.Done() + + pdmlState := NotStarted + pcapState := NotStarted + + killPcap := func() 
{ + err := termshark.KillIfPossible(pcapCmd) + if err != nil { + log.Infof("Did not kill pcap process: %v", err) + } } - } - killPdml := func() { - err = termshark.KillIfPossible(pdmlCmd) - if err != nil { - log.Infof("Did not kill pdml process: %v", err) + killPdml := func() { + err = termshark.KillIfPossible(pdmlCmd) + if err != nil { + log.Infof("Did not kill pdml process: %v", err) + } } - } - loop: - for { - select { + loop: + for { + select { - case pid := <-pdmlPidChan: - // this channel can be closed on a stage2 cancel, before the - // pdml process has been started, meaning we get nil for the - // pid. If that's the case, don't save the cmd, so we know not - // to try to kill anything later. - pdmlPidChan = nil - if pid != 0 { - pdmlCmd = origPdmlCmd - c.PdmlPid = pid - if stage2CtxChan == nil { - // means that stage2 has been cancelled (so stop the load), and - // pdmlCmd != nil => for sure a process was started. So kill it. - // It won't have been cleaned up anywhere else because Wait() is - // only called below, in this goroutine. + case err = <-pdmlTermChan: + pdmlState = Terminated + + case err = <-pcapTermChan: + pcapState = Terminated + + case pid := <-pdmlPidChan: + // this channel can be closed on a stage2 cancel, before the + // pdml process has been started, meaning we get nil for the + // pid. If that's the case, don't save the cmd, so we know not + // to try to kill anything later. + pdmlPidChan = nil // don't select on this channel again + if pid != 0 { + pdmlState = Started + // gcla later todo - use lock? + c.PdmlPid = pid + if stage2CtxChan == nil || pdmlCancelledChan == nil { + // means that stage2 has been cancelled (so stop the load), and + // pdmlCmd != nil => for sure a process was started. So kill it. + // It won't have been cleaned up anywhere else because Wait() is + // only called below, in this goroutine. + killPdml() + } + } + + case pid := <-pcapPidChan: + pcapPidChan = nil // don't select on this channel again + if pid != 0 { + pcapState = Started + c.PcapPid = pid + if stage2CtxChan == nil || pcapCancelledChan == nil { + killPcap() + } + } + + case <-pdmlCancelledChan: + pdmlCancelledChan = nil // don't select on this channel again + setPdmlCancelled() + if pdmlState == Started { killPdml() } - } - case pid := <-pcapPidChan: - pcapPidChan = nil - if pid != 0 { - pcapCmd = origPcapCmd - c.PcapPid = pid - if stage2CtxChan == nil { + case <-pcapCancelledChan: + pcapCancelledChan = nil // don't select on this channel again + setPcapCancelled() + if pcapState == Started { + // means that for sure, a process was started killPcap() } - } - case <-stage2CtxChan: - // Once the pcap/pdml load is initiated, we guarantee we get a stage2 cancel - // once all the stage2 goroutines are finished. So we don't quit the select loop - // until this channel (as well as the others) has received a signal - stage2CtxChan = nil - setCancelled() - if pcapCmd != nil { - // means that for sure, a process was started - killPcap() + case <-stage2CtxChan: + // This will automatically signal pdmlCtx.Done and pcapCtx.Done() + + // Once the pcap/pdml load is initiated, we guarantee we get a stage2 cancel + // once all the stage2 goroutines are finished. 
So we don't quit the select loop + // until this channel (as well as the others) has received a signal + stage2CtxChan = nil } - if pdmlCmd != nil { - killPdml() + + // if pdmlpidchan is nil, it means the the channel has been closed or we've received a message + // a message means the proc has started + // closed means it won't be started + // if closed, then pdmlCmd == nil + if (pdmlState == Terminated || (pdmlCancelledChan == nil && pdmlState == NotStarted)) && + (pcapState == Terminated || (pcapCancelledChan == nil && pcapState == NotStarted)) { + // nothing to select on so break + break loop } } + }, Goroutinewg) - // if pdmlpidchan is nil, it means the the channel has been closed or we've received a message - // a message means the proc has started - // closed means it won't be started - // if closed, then pdmlCmd == nil - if pdmlPidChan == nil && pcapPidChan == nil && stage2CtxChan == nil { - // nothing to select on so break - break loop - } - } + //====================================================================== - if pcapCmd != nil { - pcapCmd.Wait() - c.PcapPid = 0 - } - if pdmlCmd != nil { - pdmlCmd.Wait() - c.PdmlPid = 0 - } - - }, Goroutinewg) - - //====================================================================== - - // - // Goroutine to run pdml process - // - termshark.TrackedGo(func() { - // Wait for stage 2 to be kicked off (potentially by psml load, then mapping table row to frame num); or - // quit if that happens first - select { - case <-c.startPdmlChan: - case <-c.stage2Ctx.Done(): - setCancelled() - close(pdmlPidChan) - return - } - - // We didn't get a stage2 cancel yet. We could now, but for now we've been told to continue - // now we'll guarantee either: - // - we'll send the pdml pid on pdmlPidChan if it starts - // - we'll close the channel if it doesn't start - - pid := 0 - - defer func() { - // Guarantee that at the end of this goroutine, if we didn't start a process (pid == 0) - // we will close the channel to signal the Wait() goroutine above. - if pid == 0 { + // + // Goroutine to run pdml process + // + termshark.TrackedGo(func() { + // Wait for stage 2 to be kicked off (potentially by psml load, then mapping table row to frame num); or + // quit if that happens first + select { + case <-c.startChan: + case <-c.stage2Ctx.Done(): close(pdmlPidChan) + return } - }() - pdmlOut, err := pdmlCmd.StdoutReader() - if err != nil { - HandleError(err, cb) - return - } + // We didn't get a stage2 cancel yet. We could now, but for now we've been told to continue + // now we'll guarantee either: + // - we'll send the pdml pid on pdmlPidChan if it starts + // - we'll close the channel if it doesn't start - err = pdmlCmd.Start() - if err != nil { - err = fmt.Errorf("Error starting PDML process %v: %v", pdmlCmd, err) - HandleError(err, cb) - return - } + pid := 0 - log.Infof("Started PDML command %v with pid %d", pdmlCmd, pdmlCmd.Pid()) + defer func() { + // Guarantee that at the end of this goroutine, if we didn't start a process (pid == 0) + // we will close the channel to signal the Wait() goroutine above. 
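// Editor's aside, not part of the patch: the pid-channel/cancellation pattern
// used by the lifetime goroutines above, reduced to its core. A starter
// goroutine sends the child's pid once it launches (or closes the channel if
// it never will), and this loop is the only place kill/wait decisions are
// made, so they cannot race. All names here are invented for the example.
package main

import (
	"context"
	"fmt"
)

func trackLifetime(ctx context.Context, pidChan <-chan int, termChan <-chan error, kill func()) {
	started := false
	cancelled := ctx.Done()
	for {
		select {
		case pid, ok := <-pidChan:
			pidChan = nil // never select on this channel again
			if !ok || pid == 0 {
				return // the process never started and never will
			}
			started = true
			if cancelled == nil {
				kill() // cancellation arrived before the pid did
			}
		case <-cancelled:
			cancelled = nil
			if started {
				kill()
			}
		case <-termChan:
			return // Wait() has returned; nothing left to manage
		}
	}
}

func main() {
	pidChan := make(chan int, 1)
	termChan := make(chan error, 1)
	pidChan <- 1234
	termChan <- nil
	trackLifetime(context.Background(), pidChan, termChan, func() {})
	fmt.Println("lifetime tracking finished")
}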
+ if pid == 0 { + close(pdmlPidChan) + } + }() - pid = pdmlCmd.Pid() - pdmlPidChan <- pid + pdmlOut, err := pdmlCmd.StdoutReader() + if err != nil { + HandleError(PdmlCode, app, err, cb) + return + } - d := xml.NewDecoder(pdmlOut) - packets := make([]IPdmlPacket, 0, c.opt.PacketsPerLoad) - issuedKill := false - var packet PdmlPacket - var cpacket IPdmlPacket - Loop: - for { - tok, err := d.Token() + err = pdmlCmd.Start() if err != nil { - if unexpectedError(err) { - err = fmt.Errorf("Could not read PDML data: %v", err) - HandleError(err, cb) - } - break + err = fmt.Errorf("Error starting PDML process %v: %v", pdmlCmd, err) + HandleError(PdmlCode, app, err, cb) + return } - switch tok := tok.(type) { - case xml.StartElement: - switch tok.Name.Local { - case "packet": - err := d.DecodeElement(&packet, &tok) - if err != nil { - if !issuedKill && unexpectedError(err) { - err = fmt.Errorf("Could not decode PDML data: %v", err) - HandleError(err, cb) - } - break Loop + + log.Infof("Started PDML command %v with pid %d", pdmlCmd, pdmlCmd.Pid()) + + pid = pdmlCmd.Pid() + pdmlPidChan <- pid + + d := xml.NewDecoder(pdmlOut) + packets := make([]IPdmlPacket, 0, c.opt.PacketsPerLoad) + issuedKill := false + readAllRequiredPdml := false + var packet PdmlPacket + var cpacket IPdmlPacket + Loop: + for { + tok, err := d.Token() + if err != nil { + if !issuedKill && unexpectedPdmlError(err) { + err = fmt.Errorf("Could not read PDML data: %v", err) + HandleError(PdmlCode, app, err, cb) } - // Enabled for now - do something more subtle perhaps in the future - if true { - cpacket = SnappyPdmlPacket(packet) - } else { - cpacket = packet + if err == io.EOF { + readAllRequiredPdml = true } - packets = append(packets, cpacket) - c.updateCacheEntryWithPdml(row, packets, false) - if len(packets) == c.KillAfterReadingThisMany { - // Shortcut - we never take more than abcdex - so just kill here - issuedKill = true - err = termshark.KillIfPossible(pdmlCmd) + break + } + switch tok := tok.(type) { + case xml.StartElement: + switch tok.Name.Local { + case "packet": + err := d.DecodeElement(&packet, &tok) if err != nil { - log.Infof("Did not kill pdml process: %v", err) + if !issuedKill && unexpectedPdmlError(err) { + err = fmt.Errorf("Could not decode PDML data: %v", err) + HandleError(PdmlCode, app, err, cb) + } + break Loop + } + // Enabled for now - do something more subtle perhaps in the future + if true { + cpacket = SnappyPdmlPacket(packet) + } else { + cpacket = packet + } + packets = append(packets, cpacket) + ps.updateCacheEntryWithPdml(row, packets, false) + if len(packets) == c.KillAfterReadingThisMany { + // Shortcut - we never take more than abcdex - so just kill here + issuedKill = true + readAllRequiredPdml = true + c.pdmlStoppedDeliberately_ = true + pdmlCancelFn() } } + } } - } + // The Wait has to come after the last read, which is above + pdmlTermChan <- pdmlCmd.Wait() - // Want to preserve invariant - for simplicity - that we only add full loads - // to the cache - cancelled := atomic.LoadInt32(&stageIsCancelled) - if cancelled == 0 { - // never evict row 0 - c.PacketCache.Get(0) - c.Lock() - if c.highestCachedRow != -1 { - // try not to evict "end" - c.PacketCache.Get(c.highestCachedRow) - } - c.Unlock() - - // the cache entry is marked complete if we are not reading from a fifo, which implies - // the source of packets will not grow larger. 
If it could grow larger, we want to ensure - // that termshark doesn't think that there are only 900 packets, because that's what's - // in the cache from a previous request - now there might be 950 packets. - c.updateCacheEntryWithPdml(row, packets, !c.ReadingFromFifo()) - if row > c.highestCachedRow { - c.Lock() - c.highestCachedRow = row - c.Unlock() - } - } - }, &c.stage2Wg, Goroutinewg) + // Want to preserve invariant - for simplicity - that we only add full loads + // to the cache - //====================================================================== + ps.MainRun(gowid.RunFunction(func(gowid.IApp) { + // never evict row 0 + ps.PacketCacheFn().Get(0) + if c.highestCachedRow != -1 { + // try not to evict "end" + ps.PacketCacheFn().Get(c.highestCachedRow) + } - // - // Goroutine to run pcap process - // - termshark.TrackedGo(func() { - // Wait for stage 2 to be kicked off (potentially by psml load, then mapping table row to frame num); or - // quit if that happens first - select { - case <-c.startPcapChan: - case <-c.stage2Ctx.Done(): - setCancelled() - close(pcapPidChan) - return - } + // the cache entry is marked complete if we are not reading from a fifo, which implies + // the source of packets will not grow larger. If it could grow larger, we want to ensure + // that termshark doesn't think that there are only 900 packets, because that's what's + // in the cache from a previous request - now there might be 950 packets. + // + // If the PDML routine was stopped programmatically, that implies the load was not complete + // so we don't mark the cache as complete then either. + markComplete := false + if !ps.ReadingFromFifo() && readAllRequiredPdml { + markComplete = true + } + ps.updateCacheEntryWithPdml(row, packets, markComplete) + if row > c.highestCachedRow { + c.highestCachedRow = row + } + })) + }, &c.stage2Wg, Goroutinewg) - pid := 0 + //====================================================================== - defer func() { - if pid == 0 { + // + // Goroutine to run pcap process + // + termshark.TrackedGo(func() { + // Wait for stage 2 to be kicked off (potentially by psml load, then mapping table row to frame num); or + // quit if that happens first + select { + case <-c.startChan: + case <-c.stage2Ctx.Done(): close(pcapPidChan) + return } - }() - - pcapOut, err := pcapCmd.StdoutReader() - if err != nil { - HandleError(err, cb) - return - } - err = pcapCmd.Start() - if err != nil { - // e.g. on the pi - err = fmt.Errorf("Error starting PCAP process %v: %v", pcapCmd, err) - HandleError(err, cb) - return - } + pid := 0 - log.Infof("Started pcap command %v with pid %d", pcapCmd, pcapCmd.Pid()) - - pid = pcapCmd.Pid() - pcapPidChan <- pid + defer func() { + if pid == 0 { + close(pcapPidChan) + } + }() - packets := make([][]byte, 0, c.opt.PacketsPerLoad) - issuedKill := false - re := regexp.MustCompile(`([0-9a-f][0-9a-f] )`) - rd := bufio.NewReader(pcapOut) - packet := make([]byte, 0) + pcapOut, err := pcapCmd.StdoutReader() + if err != nil { + HandleError(PdmlCode, app, err, cb) + return + } - for { - line, err := rd.ReadString('\n') + err = pcapCmd.Start() if err != nil { - if !issuedKill && unexpectedError(err) { - err = fmt.Errorf("Could not read PCAP packet: %v", err) - HandleError(err, cb) - } - break + // e.g. 
on the pi + err = fmt.Errorf("Error starting PCAP process %v: %v", pcapCmd, err) + HandleError(PdmlCode, app, err, cb) + return } - parseResults := re.FindAllStringSubmatch(string(line), -1) + log.Infof("Started pcap command %v with pid %d", pcapCmd, pcapCmd.Pid()) - if len(parseResults) < 1 { - packets = append(packets, packet) - packet = make([]byte, 0) + pid = pcapCmd.Pid() + pcapPidChan <- pid - readEnough := (len(packets) >= c.KillAfterReadingThisMany) - c.updateCacheEntryWithPcap(row, packets, false) + packets := make([][]byte, 0, c.opt.PacketsPerLoad) + issuedKill := false + readAllRequiredPcap := false + re := regexp.MustCompile(`([0-9a-f][0-9a-f] )`) + rd := bufio.NewReader(pcapOut) + packet := make([]byte, 0) - if readEnough { - // Shortcut - we never take more than abcdex - so just kill here - issuedKill = true - err = termshark.KillIfPossible(pcapCmd) - if err != nil { - log.Infof("Did not kill pdml process: %v", err) + for { + line, err := rd.ReadString('\n') + if err != nil { + if !issuedKill && unexpectedPcapError(err) { + err = fmt.Errorf("Could not read PCAP packet: %v", err) + HandleError(PdmlCode, app, err, cb) + } + if err == io.EOF { + readAllRequiredPcap = true } + break } - } else { - // Ignore line number - for _, parsedByte := range parseResults[1:] { - b, err := strconv.ParseUint(string(parsedByte[0][0:2]), 16, 8) - if err != nil { - err = fmt.Errorf("Could not read PCAP packet: %v", err) - HandleError(err, cb) - break + + parseResults := re.FindAllStringSubmatch(string(line), -1) + + if len(parseResults) < 1 { + packets = append(packets, packet) + packet = make([]byte, 0) + + readEnough := (len(packets) >= c.KillAfterReadingThisMany) + ps.updateCacheEntryWithPcap(row, packets, false) + + if readEnough && !issuedKill { + // Shortcut - we never take more than abcdex - so just kill here + issuedKill = true + readAllRequiredPcap = true + pcapCancelFn() + } + } else { + // Ignore line number + for _, parsedByte := range parseResults[1:] { + b, err := strconv.ParseUint(string(parsedByte[0][0:2]), 16, 8) + if err != nil { + err = fmt.Errorf("Could not read PCAP packet: %v", err) + if !issuedKill { + HandleError(PdmlCode, app, err, cb) + } + break + } + packet = append(packet, byte(b)) } - packet = append(packet, byte(b)) } } - } - // I just want to ensure I read it from ram, obviously this is racey - cancelled := atomic.LoadInt32(&stageIsCancelled) - if cancelled == 0 { + // The Wait has to come after the last read, which is above + pcapTermChan <- pcapCmd.Wait() + + // I just want to ensure I read it from ram, obviously this is racey // never evict row 0 - c.PacketCache.Get(0) + ps.PacketCacheFn().Get(0) if c.highestCachedRow != -1 { // try not to evict "end" - c.PacketCache.Get(c.highestCachedRow) + ps.PacketCacheFn().Get(c.highestCachedRow) } - c.updateCacheEntryWithPcap(row, packets, !c.ReadingFromFifo()) - } - - }, &c.stage2Wg, Goroutinewg) -} - -func (c *Loader) TurnOffPipe() { - // Switch over to the temp pcap file. If a new filter is applied - // after stopping, we should read from the temp file and not the fifo - // because nothing will be feeding the fifo. 
- if c.PcapPsml != c.PcapPdml { - log.Infof("Switching from interface/fifo mode to file mode") - c.PcapPsml = c.PcapPdml - } -} - -func (c *Loader) signalPsmlStarting(cb interface{}) { - HandleBegin(cb) -} - -func (c *Loader) signalPsmlDone(cb interface{}) { - ch := c.PsmlFinishedChan - c.PsmlFinishedChan = make(chan struct{}) - HandleEnd(cb) - close(ch) -} - -// Holds a reference to the loader, and wraps Read() around the tail process's -// Read(). Count the bytes, and when they are equal to the final total of bytes -// written by the tshark -i process (spooling to a tmp file), a function is called -// which stops the PSML process. -type tailReadTracker struct { - tailReader io.Reader - loader *Loader - callback interface{} -} - -func (r *tailReadTracker) Read(p []byte) (int, error) { - n, err := r.tailReader.Read(p) - if r.loader.totalFifoBytesRead.IsNone() { - r.loader.totalFifoBytesRead = gwutil.SomeInt64(int64(n)) - } else { - r.loader.totalFifoBytesRead = gwutil.SomeInt64(int64(n) + r.loader.totalFifoBytesRead.Val()) - } - // err == ErrClosed if the pipe (tailReader) that is wrapped in this tracker is closed. - // This can happen because this call to Read() and the deferred closepipe() function run - // at the same time. - if err != nil && r.loader.fifoError == nil && err != io.EOF && !errIsAlreadyClosed(err) { - r.loader.fifoError = err - } - - r.loader.checkAllBytesRead(r.callback) - - return n, err -} - -func errIsAlreadyClosed(err error) bool { - if err == os.ErrClosed { - return true - } else if err, ok := err.(*os.PathError); ok { - return errIsAlreadyClosed(err.Err) - } else { - return false - } -} - -// checkAllBytesRead is called (a) when the tshark -i process is finished -// writing to the tmp file and (b) every time the tmpfile tail process reads -// bytes. totalFifoBytesWrite is set to non-nil only when the tail process -// completes. totalFifoBytesRead is updated every read. If they are every -// found to be equal, it means that (1) the tail process has finished, meaning -// killed or has reached EOF with its packet source (e.g. stdin, fifo) and (2) -// the tail process has read all those bytes - so no packets will be -// missed. In that case, the tail process is killed and its stdout closed, -// which will trigger the psml reading process to shut down, and termshark -// will turn off its loading UI. -func (c *Loader) checkAllBytesRead(cb interface{}) { - cancel := false - if !c.totalFifoBytesWritten.IsNone() && !c.totalFifoBytesRead.IsNone() { - if c.totalFifoBytesRead.Val() == c.totalFifoBytesWritten.Val() { - cancel = true - } - } - if c.fifoError != nil { - cancel = true - } + markComplete := false + if !ps.ReadingFromFifo() && readAllRequiredPcap { + markComplete = true + } + ps.updateCacheEntryWithPcap(row, packets, markComplete) - // if there was a fifo error, OR we have read all the bytes that were written, then - // we need to stop the tail command - if cancel { - if c.fifoError != nil { - err := fmt.Errorf("Fifo error: %v", c.fifoError) - HandleError(err, cb) - } - if c.tailCmd != nil { - c.totalFifoBytesWritten = gwutil.NoneInt64() - c.totalFifoBytesRead = gwutil.NoneInt64() + }, &c.stage2Wg, Goroutinewg) - err := termshark.KillIfPossible(c.tailCmd) - if err != nil { - log.Infof("Did not kill tail process: %v", err) - } else { - c.tailCmd.Wait() // this will block the exit of this function until the command is killed + }, Goroutinewg) - // We need to explicitly close the write side of the pipe. 
Without this, - // the PSML process Wait() function won't complete, because golang won't - // complete termination until IO has finished, and io.Copy() will be stuck - // in a loop. - c.tailCmd.Close() - } - } - } } -func (c *Loader) loadPsmlAsync(cb interface{}) { +// loadPsmlSync starts tshark processes, and other processes, to generate PSML +// data. There is coordination with the PDML loader via a channel, +// startStage2Chan. If a filter is set, then we might need to read far more +// than a block of 1000 PDML packets (via frame.number <= 4000, for example), +// and we don't know how many to read until the PSML is loaded. We don't want +// to only load one PDML packet at a time, and reload as the user hits arrow +// down through the PSML (in the case the packets selected by the filter are +// very spaced out). +// +// The flow is as follows: +// - if the source of packets is a fifo/interface then +// - create a pipe +// - set PcapPsml to a Reader object that tracks bytes read from the pipe +// - start the PSML tshark command and get its stdout +// - if the source of packets is a fifo/interface then +// - use inotify to wait for the tmp pcap file to appear +// - start the tail command to read the tmp file created by the interface loader +// - read the PSML and add to data structures +// +// Goroutines are started to track the process lifetimes of both processes. +// +func (p *PsmlLoader) loadPsmlSync(iloader *InterfaceLoader, e iPsmlLoaderEnv, cb interface{}, app gowid.IApp) { // Used to cancel the tickers below which update list widgets with the latest data and // update the progress meter. Note that if ctx is cancelled, then this context is cancelled // too. When the 2/3 data loading processes are done, a goroutine will then run uiCtxCancel() // to stop the UI updates. - c.psmlCtx, c.psmlCancelFn = context.WithCancel(c.thisSrcCtx) + p.psmlCtx, p.psmlCancelFn = context.WithCancel(e.Context()) + p.tailCtx, p.tailCancelFn = context.WithCancel(e.Context()) intPsmlCtx, intPsmlCancelFn := context.WithCancel(context.Background()) - // signalling psml done to the goroutine that started - - //====================================================================== - - // Make sure data is cleared before we signal we're starting. This gives callbacks a clean - // view, not the old view of a loader with old data. - c.Lock() - c.PacketPsmlData = make([][]string, 0) - c.PacketPsmlColors = make([]PacketColors, 0) - c.PacketPsmlHeaders = make([]string, 0, 10) - c.PacketNumberMap = make(map[int]int) - c.PacketCache.Purge() - c.LoadWasCancelled = false - c.StartStage2Chan = make(chan struct{}) // do this before signalling start - c.Unlock() - - // signal to updater that we're about to start. 
This will block until cb completes - c.signalPsmlStarting(cb) - - defer func() { - c.signalPsmlDone(cb) - }() + p.state = Loading //====================================================================== @@ -1567,543 +1326,846 @@ func (c *Loader) loadPsmlAsync(cb interface{}) { // Make sure we start the goroutine that monitors for shutdown early - so if/when // a shutdown happens, and we get blocked in the XML parser, this will be able to // respond - psmlProcChan := make(chan int) - tailProcChan := make(chan int) + psmlPidChan := make(chan int) + tailPidChan := make(chan int) + psmlTermChan := make(chan error) + tailTermChan := make(chan error) psmlPid := 0 // 0 means not running tailPid := 0 //====================================================================== - // Set to true by a goroutine started within here if ctxCancel() is called i.e. the outer context - if c.displayFilter == "" || c.ReadingFromFifo() { - // don't hold up pdml and pcap generators. If the filter is "", then the frame numbers - // equal the row numbers, so we don't need the psml to map from row -> frame. - // - // And, if we are in interface mode, we won't reach the end of the psml anyway. - // - close(c.StartStage2Chan) - } - - //====================================================================== + termshark.TrackedGo(func() { - closeFifoPipe := func() { - if fifoPipeWriter != nil { - fifoPipeWriter.Close() - fifoPipeWriter = nil + e.MainRun(gowid.RunFunction(func(app gowid.IApp) { + HandleBegin(PsmlCode, app, cb) + })) + + defer func(ch chan struct{}) { + // This will signal goroutines using select on this channel to terminate - like + // ticker routines that update the packet list UI with new data every second. + close(p.PsmlFinishedChan) + + e.MainRun(gowid.RunFunction(func(gowid.IApp) { + HandleEnd(PsmlCode, app, cb) + p.state = NotLoading + p.psmlCancelFn = nil + })) + }(p.PsmlFinishedChan) + + //====================================================================== + + // Set to true by a goroutine started within here if ctxCancel() is called i.e. the outer context + if e.DisplayFilter() == "" || p.ReadingFromFifo() { + // don't hold up pdml and pcap generators. If the filter is "", then the frame numbers + // equal the row numbers, so we don't need the psml to map from row -> frame. + // + // And, if we are in interface mode, we won't reach the end of the psml anyway. + // + close(p.startStage2Chan) } - if fifoPipeReader != nil { - fifoPipeReader.Close() - fifoPipeReader = nil + + //====================================================================== + + if p.ReadingFromFifo() { + // PcapPsml will be nil if here + + // Build a pipe - the side to be read from will be given to the PSML process + // and the side to be written to is given to the tail process, which feeds in + // data from the pcap source. + // + fifoPipeReader, fifoPipeWriter, err = os.Pipe() + if err != nil { + err = fmt.Errorf("Could not create pipe: %v", err) + HandleError(PsmlCode, app, err, cb) + intPsmlCancelFn() + return + } + // pw is used as Stdout for the tail command, which unwinds in this + // goroutine - so we can close at this point in the unwinding. pr + // is used as stdin for the psml command, which also runs in this + // goroutine. + defer func() { + fifoPipeWriter.Close() + fifoPipeReader.Close() + }() + + // wrap the read end of the pipe with a Read() function that counts + // bytes. 
If they are equal to the total bytes written to the tmpfile by + // the tshark -i process, then that means the source is exhausted, and + // the tail + psml processes are stopped. + p.PcapPsml = &tailReadTracker{ + tailReader: fifoPipeReader, + loader: iloader, + tail: e, + callback: cb, + app: app, + } } - } - if c.ReadingFromFifo() { - // PcapPsml will be nil if here + // Set c.PsmlCmd before it's referenced in the goroutine below. We want to be + // sure that if if psmlCmd is nil then that means the process has finished (not + // has not yet started) + p.PsmlCmd = e.Commands().Psml(p.PcapPsml, e.DisplayFilter()) - // Build a pipe - the side to be read from will be given to the PSML process - // and the side to be written to is given to the tail process, which feeds in - // data from the pcap source. - // - fifoPipeReader, fifoPipeWriter, err = os.Pipe() + // this channel always needs to be signalled or else the goroutine below won't terminate. + // Closing it will pass a zero-value int (pid) to the goroutine which will understand that + // means the psml process is NOT running, so it won't call cmd.Wait() on it. + defer func() { + if psmlPid == 0 { + close(psmlPidChan) + } + }() + + //====================================================================== + // Goroutine to track process state changes + termshark.TrackedGo(func() { + cancelledChan := p.psmlCtx.Done() + intCancelledChan := intPsmlCtx.Done() + + var err error + psmlCmd := p.PsmlCmd + pidChan := psmlPidChan + state := NotStarted + + kill := func() { + err := termshark.KillIfPossible(psmlCmd) + if err != nil { + log.Infof("Did not kill tshark psml process: %v", err) + } + } + + loop: + for { + select { + case err = <-psmlTermChan: + state = Terminated + if !p.psmlStoppedDeliberately_ { + if err != nil { + if _, ok := err.(*exec.ExitError); ok { + cerr := gowid.WithKVs(termshark.BadCommand, map[string]interface{}{ + "command": psmlCmd.String(), + "error": err, + }) + HandleError(PsmlCode, app, cerr, cb) + } + } + } + + case <-cancelledChan: + intPsmlCancelFn() // start internal shutdown + cancelledChan = nil + + case <-intCancelledChan: + intCancelledChan = nil + if state == Started { + kill() + } + + case pid := <-pidChan: + pidChan = nil + if pid != 0 { + state = Started + if intCancelledChan == nil { + kill() + } + } + } + + if state == Terminated || (intCancelledChan == nil && state == NotStarted) { + break loop + } + } + + }, Goroutinewg) + + //====================================================================== + + psmlOut, err = p.PsmlCmd.StdoutReader() if err != nil { - err = fmt.Errorf("Could not create pipe: %v", err) - HandleError(err, cb) + err = fmt.Errorf("Could not access pipe output: %v", err) + HandleError(PsmlCode, app, err, cb) intPsmlCancelFn() return } - // pw is used as Stdout for the tail command, which unwinds in this - // goroutine - so we can close at this point in the unwinding. pr - // is used as stdin for the psml command, which also runs in this - // goroutine. - defer closeFifoPipe() - - // wrap the read end of the pipe with a Read() function that counts - // bytes. If they are equal to the total bytes written to the tmpfile by - // the tshark -i process, then that means the source is exhausted, and - // the tail + psml processes are stopped. - c.PcapPsml = &tailReadTracker{ - tailReader: fifoPipeReader, - loader: c, - callback: cb, - } - } - c.Lock() - // Set c.PsmlCmd before it's referenced in the goroutine below. 
We want to be - // sure that if if psmlCmd is nil then that means the process has finished (not - // has not yet started) - c.PsmlCmd = c.cmds.Psml(c.PcapPsml, c.displayFilter) - c.Unlock() - - // this channel always needs to be signalled or else the goroutine below won't terminate. - // Closing it will pass a zero-value int (pid) to the goroutine which will understand that - // means the psml process is NOT running, so it won't call cmd.Wait() on it. - defer func() { - if psmlPid == 0 { - close(psmlProcChan) + err = p.PsmlCmd.Start() + if err != nil { + err = fmt.Errorf("Error starting PSML command %v: %v", p.PsmlCmd, err) + HandleError(PsmlCode, app, err, cb) + intPsmlCancelFn() + return } - }() - termshark.TrackedGo(func() { - psmlCtxChan := c.psmlCtx.Done() - intPsmlCtxChan := intPsmlCtx.Done() + log.Infof("Started PSML command %v with pid %d", p.PsmlCmd, p.PsmlCmd.Pid()) - var err error - var cmd IPcapCommand - origCmd := c.PsmlCmd - psmlProcChan := psmlProcChan + // Do this here because code later can return early - e.g. the watcher fails to be + // set up - and then we'll never issue a Wait + waitedForPsml := false - kill := func() { - err := termshark.KillIfPossible(cmd) - if err != nil { - log.Infof("Did not kill tshark psml process: %v", err) + // Prefer a defer rather than a goroutine here. That's because otherwise, this goroutine + // and the XML processing routine reading the process's StdoutPipe are running in parallel, + // and the XML routine should not issue a Read() (which it does behind the scenes) after + // Wait() has been called. + waitForPsml := func() { + if !waitedForPsml { + psmlTermChan <- p.PsmlCmd.Wait() + waitedForPsml = true } } - loop: - for { - select { - case <-psmlCtxChan: - intPsmlCancelFn() // start internal shutdown - psmlCtxChan = nil - - case <-intPsmlCtxChan: - intPsmlCtxChan = nil - if psmlProcChan != nil { - kill() + defer waitForPsml() + + psmlPid = p.PsmlCmd.Pid() + psmlPidChan <- psmlPid + + //====================================================================== + + // If it was cancelled, then we don't need to start the tail process because + // psml will read from the tmp pcap file generated by the interface reading + // process. 
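// Editor's aside, not part of the patch: roughly how the fifo path below joins
// its two helper processes with a pipe. One process plays the role of the
// "tail" of the temp pcap file and writes into the pipe; the PSML-generating
// tshark reads the pipe as stdin. Commands, flags and the file path here are
// placeholders, not termshark's exact invocations.
package main

import (
	"os"
	"os/exec"
)

func main() {
	pr, pw, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	producer := exec.Command("cat", "/tmp/iface.pcap") // stand-in for the tail helper
	producer.Stdout = pw                               // write side of the pipe

	consumer := exec.Command("tshark", "-r", "-", "-T", "psml") // stand-in for the PSML reader
	consumer.Stdin = pr                                         // read side of the pipe
	consumer.Stdout = os.Stdout

	if err := producer.Start(); err != nil {
		panic(err)
	}
	if err := consumer.Start(); err != nil {
		panic(err)
	}

	// The parent must drop its copies of the pipe ends, otherwise the consumer
	// never sees EOF even after the producer exits - the same reason the code
	// below closes fifoPipeWriter when the tail process terminates.
	pw.Close()
	pr.Close()

	producer.Wait()
	consumer.Wait()
}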
+ + p.tailCmd = nil + + // Need to run dumpcap -i eth0 -w + if p.ReadingFromFifo() { + p.tailCmd = e.Commands().Tail(e.InterfaceFile()) + + defer func() { + if tailPid == 0 { + close(tailPidChan) } - if psmlOut != nil { - psmlOut.Close() // explicitly close else this goroutine can block + }() + + //====================================================================== + // process lifetime goroutine for the tail process: + // tshark -i > tmp + // tail -f tmp | tshark -i - -t psml + // ^^^^^^^^^^^ + termshark.TrackedGo(func() { + cancelledChan := p.tailCtx.Done() + + var err error + tailCmd := p.tailCmd + pidChan := tailPidChan + state := NotStarted + + kill := func() { + err := termshark.KillIfPossible(tailCmd) + if err != nil { + log.Infof("Did not kill tshark tail process: %v", err) + } } - case pid := <-psmlProcChan: - psmlProcChan = nil - if pid != 0 { - cmd = origCmd - if intPsmlCtxChan == nil { - kill() + loop: + for { + select { + case err = <-tailTermChan: + state = Terminated + // Don't close the pipe - the psml might not have finished reading yet + // gcla later todo - is this right or wrong + + // Close the pipe so that the psml reader gets EOF and will also terminate; + // otherwise the PSML reader will block waiting for more data from the pipe + fifoPipeWriter.Close() + if !p.psmlStoppedDeliberately_ && !e.TailStoppedDeliberately() { + if err != nil { + if _, ok := err.(*exec.ExitError); ok { + cerr := gowid.WithKVs(termshark.BadCommand, map[string]interface{}{ + "command": tailCmd.String(), + "error": err, + }) + HandleError(PsmlCode, app, cerr, cb) + } + } + } + + case <-cancelledChan: + cancelledChan = nil + if state == Started { + kill() + } + + case pid := <-pidChan: + pidChan = nil + if pid != 0 { + state = Started + if cancelledChan == nil { + kill() + } + } + } + + // successfully started then died/kill, OR + // was never started, won't be started, and cancelled + if state == Terminated || (cancelledChan == nil && state == NotStarted) { + break loop } } + }, Goroutinewg) + + //====================================================================== + + p.tailCmd.SetStdout(fifoPipeWriter) + + // this set up is so that I can detect when there are actually packets to read (e.g + // maybe there's no traffic on the interface). When there's something to read, the + // rest of the procedure can spring into action. Why not spring into action right away? + // Because the tail command needs a file to exist to watch it with -f. Can I rely on + // tail -F across all supported platforms? (e.g. 
Windows) + watcher, err := fsnotify.NewWatcher() + if err != nil { + err = fmt.Errorf("Could not create FS watch: %v", err) + HandleError(PsmlCode, app, err, cb) + intPsmlCancelFn() + p.tailCancelFn() // needed to end the goroutine, end if tailcmd has not started + return } + defer watcher.Close() - if psmlProcChan == nil && intPsmlCtxChan == nil { - break loop + file, err := os.OpenFile(e.InterfaceFile(), os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + err = fmt.Errorf("Could not touch temporary pcap file %s: %v", e.InterfaceFile(), err) + HandleError(PsmlCode, app, err, cb) + intPsmlCancelFn() + p.tailCancelFn() // needed to end the goroutine, end if tailcmd has not started } - } + file.Close() - if cmd != nil { - err = cmd.Wait() - if !c.SuppressErrors { - if err != nil { - if _, ok := err.(*exec.ExitError); ok { - cerr := gowid.WithKVs(termshark.BadCommand, map[string]interface{}{ - "command": cmd.String(), - "error": err, - }) - HandleError(cerr, cb) + if err := watcher.Add(e.InterfaceFile()); err != nil { + err = fmt.Errorf("Could not set up watcher for %s: %v", e.InterfaceFile(), err) + HandleError(PsmlCode, app, err, cb) + intPsmlCancelFn() + p.tailCancelFn() // needed to end the goroutine, end if tailcmd has not started + return + } + + removeWatcher := func(file string) { + if watcher != nil { + watcher.Remove(file) + watcher = nil + } + } + + // Make sure that no matter what happens from here on, the watcher is not leaked. But we'll remove + // it earlier under normal operation so that setting and removing watches with new loaders do not + // race. + defer removeWatcher(e.InterfaceFile()) + + Loop: + for { + select { + case fe := <-watcher.Events: + if fe.Name == e.InterfaceFile() { + break Loop } + case err := <-watcher.Errors: + err = fmt.Errorf("Unexpected watcher error for %s: %v", e.InterfaceFile(), err) + HandleError(PsmlCode, app, err, cb) + intPsmlCancelFn() + p.tailCancelFn() // needed to end the goroutine, end if tailcmd has not started + return + case <-intPsmlCtx.Done(): + return } } - c.stopLoadIface() - } - }, Goroutinewg) + // Remove early if possible - because then if we clear the pcap and restart, we won't + // race the termination of this function with the starting of a new instance of it, meaning + // the new call adds the same watcher (idempotent) but then the terminating instance removes + // it + removeWatcher(e.InterfaceFile()) - psmlOut, err = c.PsmlCmd.StdoutReader() - if err != nil { - err = fmt.Errorf("Could not access pipe output: %v", err) - HandleError(err, cb) - intPsmlCancelFn() - return - } + log.Infof("Starting Tail command: %v", p.tailCmd) - err = c.PsmlCmd.Start() - if err != nil { - err = fmt.Errorf("Error starting PSML command %v: %v", c.PsmlCmd, err) - HandleError(err, cb) - intPsmlCancelFn() - return - } + err = p.tailCmd.Start() + if err != nil { + err = fmt.Errorf("Could not start tail command %v: %v", p.tailCmd, err) + HandleError(PsmlCode, app, err, cb) + intPsmlCancelFn() + p.tailCancelFn() // needed to end the goroutine, end if tailcmd has not started + return + } - log.Infof("Started PSML command %v with pid %d", c.PsmlCmd, c.PsmlCmd.Pid()) + termshark.TrackedGo(func() { + tailTermChan <- p.tailCmd.Wait() + }, Goroutinewg) + + tailPid = p.tailCmd.Pid() + tailPidChan <- tailPid + } // end of reading from fifo + + //====================================================================== + + // + // Goroutine to read psml xml and update data structures + // + defer func(ch chan struct{}) { + select { + case <-ch: + // already 
done/closed, do nothing + default: + close(ch) + } + + // This will kill the tail process if there is one + intPsmlCancelFn() // stop the ticker + }(p.startStage2Chan) + + d := xml.NewDecoder(psmlOut) + + // + //
<section>1</section>
+ //<section>0.000000</section>
+ //<section>192.168.44.123</section>
+ //<section>192.168.44.213</section>
+ //<section>TFTP</section>
+ //<section>77</section>
+ //<section>Read Request, File: C:\IBMTCPIP\lccm.1, Transfer type: octet</section>
+ //</packet>
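// Editor's aside, not part of the patch: a stripped-down version of the decode
// loop that follows, showing how the <section> text of a PSML document like the
// sample above is gathered into rows. It ignores colors, locking, packet-number
// tracking and UI callbacks, and the sample input is invented for the example.
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

const psml = `<psml>
<structure><section>No.</section><section>Time</section><section>Protocol</section></structure>
<packet><section>1</section><section>0.000000</section><section>TFTP</section></packet>
</psml>`

func main() {
	d := xml.NewDecoder(strings.NewReader(psml))
	var rows [][]string
	var cur []string
	inSection := false
	for {
		tok, err := d.Token()
		if err != nil {
			break // io.EOF when the document is exhausted
		}
		switch tok := tok.(type) {
		case xml.StartElement:
			switch tok.Name.Local {
			case "packet":
				cur = nil // start a fresh row for each <packet>
			case "section":
				inSection = true
			}
		case xml.EndElement:
			switch tok.Name.Local {
			case "packet":
				rows = append(rows, cur)
			case "section":
				inSection = false
			}
		case xml.CharData:
			if inSection {
				cur = append(cur, string(tok))
			}
		}
	}
	fmt.Println(rows) // [[1 0.000000 TFTP]]
}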
+ + var curPsml []string + var fg string + var bg string + var pidx int + ready := false + empty := true + structure := false + for { + if intPsmlCtx.Err() != nil { + break + } + tok, err := d.Token() + if err != nil { + // gcla later todo - LoadWasCancelled is checked outside of the main goroutine here + if err != io.EOF && !e.LoadWasCancelled() { + err = fmt.Errorf("Could not read PSML data: %v", err) + HandleError(PsmlCode, app, err, cb) + } + break + } + switch tok := tok.(type) { + case xml.EndElement: + switch tok.Name.Local { + case "structure": + structure = false + case "packet": + p.Lock() + p.packetPsmlData = append(p.packetPsmlData, curPsml) + + // Track the mapping of packet number
<section>12</section>
to position + // in the table e.g. 5th element. This is so that I can jump to the correct + // row with marks even if a filter is currently applied. + pidx, err = strconv.Atoi(curPsml[0]) + if err != nil { + log.Fatal(err) + } + p.PacketNumberMap[pidx] = len(p.packetPsmlData) - 1 + + p.packetPsmlColors = append(p.packetPsmlColors, PacketColors{ + FG: psmlColorToIColor(fg), + BG: psmlColorToIColor(bg), + }) + p.Unlock() + + case "section": + ready = false + // Means we got without any char data i.e. empty
+ if empty { + curPsml = append(curPsml, "") + } + } + case xml.StartElement: + switch tok.Name.Local { + case "structure": + structure = true + case "packet": + curPsml = make([]string, 0, 10) + fg = "" + bg = "" + for _, attr := range tok.Attr { + switch attr.Name.Local { + case "foreground": + fg = attr.Value + case "background": + bg = attr.Value + } + } + case "section": + ready = true + empty = true + } + case xml.CharData: + if ready { + if structure { + p.Lock() + p.packetPsmlHeaders = append(p.packetPsmlHeaders, string(tok)) + p.Unlock() + e.MainRun(gowid.RunFunction(func(app gowid.IApp) { + handlePsmlHeader(PsmlCode, app, cb) + })) + } else { + curPsml = append(curPsml, string(format.TranslateHexCodes(tok))) + empty = false + } + } + } + } - psmlPid = c.PsmlCmd.Pid() - psmlProcChan <- psmlPid + }, Goroutinewg) - defer func() { - // These need to close so the tailreader Read() terminates so that the - // PsmlCmd.Wait() below completes. - closeFifoPipe() - }() +} - //====================================================================== +func (c *PsmlLoader) DoWithPsmlData(fn func([][]string)) { + c.Lock() + defer c.Unlock() + fn(c.packetPsmlData) +} - // If it was cancelled, then we don't need to start the tail process because - // psml will read from the tmp pcap file generated by the interface reading - // process. +func (c *PsmlLoader) ReadingFromFifo() bool { + // If it's a string it means that it's a filename, so it's not a fifo. Other values + // in practise are the empty interface, or the read end of a fifo + _, ok := c.PcapPsml.(string) + return !ok +} - c.tailCmd = nil +func (c *PsmlLoader) IsLoading() bool { + return c.state == Loading +} - if c.ReadingFromFifo() { - c.tailCmd = c.cmds.Tail(c.ifaceFile) +func (c *PsmlLoader) StartStage2ChanFn() chan struct{} { + return c.startStage2Chan +} - defer func() { - if tailPid == 0 { - close(tailProcChan) - } - }() +func (c *PsmlLoader) PacketCacheFn() *lru.Cache { // i -> [pdml(i * 1000)..pdml(i+1*1000)] + return c.PacketCache +} - termshark.TrackedGo(func() { - psmlCtxChan := c.psmlCtx.Done() - intPsmlCtxChan := intPsmlCtx.Done() +func (c *PsmlLoader) packetPsmlDataFn() [][]string { + return c.packetPsmlData +} - var err error - var tailCmd ITailCommand - origTailCmd := c.tailCmd - tailProcChan := tailProcChan +// Assumes this is a clean stop, not an error +func (p *ParentLoader) stopTail() { + p.tailStoppedDeliberately = true + if p.tailCancelFn != nil { + p.tailCancelFn() + } +} - kill := func() { - err := termshark.KillIfPossible(tailCmd) - if err != nil { - log.Infof("Did not kill tshark tail process: %v", err) - } - } +func (p *PsmlLoader) PacketsPerLoad() int { + p.Lock() + defer p.Unlock() + return p.opt.PacketsPerLoad +} - loop: - for { - select { - case <-psmlCtxChan: - intPsmlCancelFn() // start internal shutdown - psmlCtxChan = nil +func (p *PsmlLoader) stopLoadPsml() { + p.psmlStoppedDeliberately_ = true + if p.psmlCancelFn != nil { + p.psmlCancelFn() + } +} - case <-intPsmlCtxChan: - intPsmlCtxChan = nil - if tailProcChan != nil { - kill() - } +func (p *PsmlLoader) PsmlData() [][]string { + return p.packetPsmlData +} - case pid := <-tailProcChan: - tailProcChan = nil - if pid != 0 { - tailCmd = origTailCmd - if intPsmlCtxChan == nil { - kill() - } - } - } +func (p *PsmlLoader) PsmlHeaders() []string { + return p.packetPsmlHeaders +} - if tailProcChan == nil && intPsmlCtxChan == nil { - break loop - } - } +func (p *PsmlLoader) PsmlColors() []PacketColors { + return p.packetPsmlColors +} - if tailCmd != nil { - err 
= tailCmd.Wait() - if !c.SuppressErrors { - if err != nil { - if _, ok := err.(*exec.ExitError); ok { - cerr := gowid.WithKVs(termshark.BadCommand, map[string]interface{}{ - "command": tailCmd.String(), - "error": err, - }) - HandleError(cerr, cb) - } - } - } - fifoPipeWriter.Close() - } +// if done==true, then this cache entry is complete +func (p *PsmlLoader) updateCacheEntryWithPdml(row int, pdml []IPdmlPacket, done bool) { + var ce CacheEntry + p.Lock() + defer p.Unlock() + if ce2, ok := p.PacketCache.Get(row); ok { + ce = ce2.(CacheEntry) + } + ce.Pdml = pdml + ce.PdmlComplete = done + p.PacketCache.Add(row, ce) +} - }, Goroutinewg) +func (p *PsmlLoader) updateCacheEntryWithPcap(row int, pcap [][]byte, done bool) { + var ce CacheEntry + p.Lock() + defer p.Unlock() + if ce2, ok := p.PacketCache.Get(row); ok { + ce = ce2.(CacheEntry) + } + ce.Pcap = pcap + ce.PcapComplete = done + p.PacketCache.Add(row, ce) +} - c.tailCmd.SetStdout(fifoPipeWriter) +func (p *PsmlLoader) LengthOfPdmlCacheEntry(row int) (int, error) { + p.Lock() + defer p.Unlock() + if ce, ok := p.PacketCache.Get(row); ok { + ce2 := ce.(CacheEntry) + return len(ce2.Pdml), nil + } + return -1, fmt.Errorf("No cache entry found for row %d", row) +} - // this set up is so that I can detect when there are actually packets to read (e.g - // maybe there's no traffic on the interface). When there's something to read, the - // rest of the procedure can spring into action. - watcher, err := fsnotify.NewWatcher() - if err != nil { - err = fmt.Errorf("Could not create FS watch: %v", err) - HandleError(err, cb) - intPsmlCancelFn() - return - } - defer watcher.Close() +func (p *PsmlLoader) LengthOfPcapCacheEntry(row int) (int, error) { + p.Lock() + defer p.Unlock() + if ce, ok := p.PacketCache.Get(row); ok { + ce2 := ce.(CacheEntry) + return len(ce2.Pcap), nil + } + return -1, fmt.Errorf("No cache entry found for row %d", row) +} - if err := watcher.Add(filepath.Dir(c.ifaceFile)); err != nil { - err = fmt.Errorf("Could not set up watcher for %s: %v", c.ifaceFile, err) - HandleError(err, cb) - intPsmlCancelFn() - return - } else { - // If it's there, touch it so watcher below is notified that everything is in order - if _, err := os.Stat(c.ifaceFile); err == nil { - if err = os.Chtimes(c.ifaceFile, time.Now(), time.Now()); err != nil { - HandleError(err, cb) - intPsmlCancelFn() - return - } - } +func (c *PsmlLoader) CacheAt(row int) (CacheEntry, bool) { + if ce, ok := c.PacketCache.Get(row); ok { + return ce.(CacheEntry), ok + } + return CacheEntry{}, false +} - } +func (c *PsmlLoader) NumLoaded() int { + c.Lock() + defer c.Unlock() + return len(c.packetPsmlData) +} - defer func() { - watcher.Remove(filepath.Dir(c.ifaceFile)) - }() +//====================================================================== - Loop: - for { - select { - case fe := <-watcher.Events: - if fe.Name == c.ifaceFile { - break Loop - } - case err := <-watcher.Errors: - err = fmt.Errorf("Unexpected watcher error for %s: %v", c.ifaceFile, err) - HandleError(err, cb) - intPsmlCancelFn() - return - case <-intPsmlCtx.Done(): - return - } - } +func (c *PdmlLoader) IsLoading() bool { + return c.state == Loading +} - log.Infof("Starting Tail command: %v", c.tailCmd) +func (c *PdmlLoader) LoadIsVisible() bool { + return c.visible +} - err = c.tailCmd.Start() - if err != nil { - err = fmt.Errorf("Could not start tail command %v: %v", c.tailCmd, err) - HandleError(err, cb) - intPsmlCancelFn() - return - } +// Only call from main goroutine +func (c *PdmlLoader) LoadingRow() int 
{ + return c.rowCurrentlyLoading +} - tailPid = c.tailCmd.Pid() - tailProcChan <- tailPid +func (p *PdmlLoader) stopLoadPdml() { + p.pdmlStoppedDeliberately_ = true + if p.stage2CancelFn != nil { + p.stage2CancelFn() } +} - //====================================================================== - - // - // Goroutine to read psml xml and update data structures - // - defer func() { - select { - case <-c.StartStage2Chan: - // already done/closed, do nothing - default: - close(c.StartStage2Chan) - } - - // This will kill the tail process if there is one - intPsmlCancelFn() // stop the ticker - }() - - d := xml.NewDecoder(psmlOut) - - // - //
- // <packet>
- // <section>1</section>
- // <section>0.000000</section>
- // <section>192.168.44.123</section>
- // <section>192.168.44.213</section>
- // <section>TFTP</section>
- // <section>77</section>
- // <section>Read Request, File: C:\IBMTCPIP\lccm.1, Transfer type: octet</section>
- // </packet>
- - var curPsml []string - var fg string - var bg string - var pidx int - ready := false - empty := true - structure := false - for { - if intPsmlCtx.Err() != nil { - break - } - tok, err := d.Token() - if err != nil { - if err != io.EOF && !c.LoadWasCancelled { - err = fmt.Errorf("Could not read PSML data: %v", err) - HandleError(err, cb) - } - break - } - switch tok := tok.(type) { - case xml.EndElement: - switch tok.Name.Local { - case "structure": - structure = false - case "packet": - c.Lock() - c.PacketPsmlData = append(c.PacketPsmlData, curPsml) - - // Track the mapping of packet number
<section>12</section>
to position - // in the table e.g. 5th element. This is so that I can jump to the correct - // row with marks even if a filter is currently applied. - pidx, err = strconv.Atoi(curPsml[0]) - if err != nil { - log.Fatal(err) - } - c.PacketNumberMap[pidx] = len(c.PacketPsmlData) - 1 +//====================================================================== - c.PacketPsmlColors = append(c.PacketPsmlColors, PacketColors{ - FG: psmlColorToIColor(fg), - BG: psmlColorToIColor(bg), - }) - c.Unlock() +type iTailCommand interface { + stopTail() +} - case "section": - ready = false - // Means we got
</section> without any char data i.e. empty <section>
- if empty { - curPsml = append(curPsml, "") - } - } - case xml.StartElement: - switch tok.Name.Local { - case "structure": - structure = true - case "packet": - curPsml = make([]string, 0, 10) - fg = "" - bg = "" - for _, attr := range tok.Attr { - switch attr.Name.Local { - case "foreground": - fg = attr.Value - case "background": - bg = attr.Value - } - } - case "section": - ready = true - empty = true - } - case xml.CharData: - if ready { - if structure { - c.Lock() - c.PacketPsmlHeaders = append(c.PacketPsmlHeaders, string(tok)) - c.Unlock() - handlePsmlHeader(cb) - } else { - curPsml = append(curPsml, string(format.TranslateHexCodes(tok))) - empty = false - } - } - } - } +type iIfaceLoaderEnv interface { + iLoaderEnv + iTailCommand + PsmlStoppedDeliberately() bool + InterfaceFile() string + PacketSources() []IPacketSource + CaptureFilter() string } // dumpcap -i eth0 -w /tmp/foo.pcap // dumpcap -i /dev/fd/3 -w /tmp/foo.pcap -func (c *Loader) loadIfacesAsync(cb interface{}) { - c.totalFifoBytesWritten = gwutil.NoneInt64() - c.ifaceCtx, c.ifaceCancelFn = context.WithCancel(c.thisSrcCtx) +func (i *InterfaceLoader) loadIfacesSync(e iIfaceLoaderEnv, cb interface{}, app gowid.IApp) { + i.totalFifoBytesWritten = gwutil.NoneInt64() + + i.ifaceCtx, i.ifaceCancelFn = context.WithCancel(e.Context()) + + log.Infof("Starting Iface command: %v", i.ifaceCmd) + + pid := 0 + ifacePidChan := make(chan int) defer func() { - ch := c.IfaceFinishedChan - c.IfaceFinishedChan = make(chan struct{}) - close(ch) - c.ifaceCtx = nil - c.ifaceCancelFn = nil + if pid == 0 { + close(ifacePidChan) + } }() - c.ifaceCmd = c.cmds.Iface(SourcesNames(c.psrcs), c.captureFilter, c.ifaceFile) + // tshark -i eth0 -w foo.pcap + i.ifaceCmd = e.Commands().Iface(SourcesNames(e.PacketSources()), e.CaptureFilter(), e.InterfaceFile()) - log.Infof("Starting Iface command: %v", c.ifaceCmd) - - err := c.ifaceCmd.Start() + err := i.ifaceCmd.Start() if err != nil { - err = fmt.Errorf("Error starting interface reader %v: %v", c.ifaceCmd, err) - HandleError(err, cb) + err = fmt.Errorf("Error starting interface reader %v: %v", i.ifaceCmd, err) + HandleError(IfaceCode, app, err, cb) return } - procWaitChan := make(chan error, 1) + ifaceTermChan := make(chan error) + + i.state = Loading + log.Infof("Started Iface command %v with pid %d", i.ifaceCmd, i.ifaceCmd.Pid()) + + // Do this in a goroutine because the function is expected to return quickly termshark.TrackedGo(func() { - procWaitChan <- c.ifaceCmd.Wait() + ifaceTermChan <- i.ifaceCmd.Wait() }, Goroutinewg) - proceedChan := make(chan struct{}) + //====================================================================== + // Process goroutine termshark.TrackedGo(func() { + defer func() { + // if psrc is a PipeSource, then we open /dev/fd/3 in termshark, and reroute descriptor + // stdin to number 3 when termshark starts. So to kill the process writing in, we need + // to close our side of the pipe. 
+ for _, psrc := range e.PacketSources() { + if cl, ok := psrc.(io.Closer); ok { + cl.Close() + } + } + + e.MainRun(gowid.RunFunction(func(gowid.IApp) { + i.state = NotLoading + i.ifaceCancelFn = nil + })) + + }() + + cancelledChan := i.ifaceCtx.Done() + state := NotStarted + var err error - cancelled := c.ifaceCtx.Done() + pidChan := ifacePidChan + + ifaceCmd := i.ifaceCmd + + killIface := func() { + err = termshark.KillIfPossible(i.ifaceCmd) + if err != nil { + log.Infof("Did not kill iface process: %v", err) + } + } + loop: for { select { - case <-cancelled: - err := termshark.KillIfPossible(c.ifaceCmd) - if err != nil { - log.Infof("Did not kill iface reader process: %v", err) - } - cancelled = nil - case err = <-procWaitChan: - if !c.SuppressErrors && err != nil { + case err = <-ifaceTermChan: + state = Terminated + if !e.PsmlStoppedDeliberately() && err != nil { if _, ok := err.(*exec.ExitError); ok { // This could be if termshark is started like this: cat nosuchfile.pcap | termshark -i - // Then dumpcap will be started with /dev/fd/3 as its stdin, but will fail with EOF and // exit status 1. cerr := gowid.WithKVs(termshark.BadCommand, map[string]interface{}{ - "command": c.ifaceCmd.String(), + "command": ifaceCmd.String(), "error": err, }) - HandleError(cerr, cb) + HandleError(IfaceCode, app, cerr, cb) } } - proceedChan <- struct{}{} + + case pid := <-pidChan: + // this channel can be closed on a stage2 cancel, before the + // pdml process has been started, meaning we get nil for the + // pid. If that's the case, don't save the cmd, so we know not + // to try to kill anything later. + pidChan = nil + if pid != 0 { + state = Started + if cancelledChan == nil { + killIface() + } + } + + case <-cancelledChan: + cancelledChan = nil + if state == Started { + killIface() + } + } + + // if pdmlpidchan is nil, it means the the channel has been closed or we've received a message + // a message means the proc has started + // closed means it won't be started + // if closed, then pdmlCmd == nil + if state == Terminated || (cancelledChan == nil && state == NotStarted) { + // nothing to select on so break break loop } } - }, Goroutinewg) - defer func() { - // if psrc is a PipeSource, then we open /dev/fd/3 in termshark, and reroute descriptor - // stdin to number 3 when termshark starts. So to kill the process writing in, we need - // to close our side of the pipe. - for _, psrc := range c.psrcs { - if cl, ok := psrc.(io.Closer); ok { - cl.Close() + // Calculate the final size of the tmp file we wrote with packets read from the + // interface/pipe. This runs after the dumpcap command finishes. + fi, err := os.Stat(e.InterfaceFile()) + i.Lock() + if err != nil { + log.Warn(err) + // Deliberately not a fatal error - it can happen if the source of packets to tshark -i + // is corrupt, resulting in a tshark error. Setting zero here will line up with the + // reading end which will read zero, and so terminate the tshark -T psml procedure. + + if i.fifoError == nil && !os.IsNotExist(err) { + // Ignore ENOENT because it means there was an error before dumpcap even wrote + // anything to disk + i.fifoError = err } + } else { + i.totalFifoBytesWritten = gwutil.SomeInt64(fi.Size()) } - }() + i.Unlock() - <-proceedChan - - // If something killed it, then start the internal shutdown procedure anyway to clean up - // goroutines waiting on the context. This could also happen if tshark -i is reading from - // a fifo and the write has stopped e.g. 
- // - // cat foo.pcap > myfifo - // termshark -i myfifo - // - // termshark will get EOF when the cat terminates (if there are no more writers). - // - - // Calculate the final size of the tmp file we wrote with packets read from the - // interface/pipe. This runs after the dumpcap command finishes. - fi, err := os.Stat(c.ifaceFile) - if err != nil { - log.Warn(err) - // Deliberately not a fatal error - it can happen if the source of packets to tshark -i - // is corrupt, resulting in a tshark error. Setting zero here will line up with the - // reading end which will read zero, and so terminate the tshark -T psml procedure. - - if c.fifoError == nil && !os.IsNotExist(err) { - // Ignore ENOENT because it means there was an error before dumpcap even wrote - // anything to disk - c.fifoError = err + i.checkAllBytesRead(e, cb, app) + }, Goroutinewg) + + //====================================================================== + + pid = i.ifaceCmd.Pid() + ifacePidChan <- pid +} + +// checkAllBytesRead is called (a) when the tshark -i process is finished +// writing to the tmp file and (b) every time the tmpfile tail process reads +// bytes. totalFifoBytesWrite is set to non-nil only when the tail process +// completes. totalFifoBytesRead is updated every read. If they are every +// found to be equal, it means that (1) the tail process has finished, meaning +// killed or has reached EOF with its packet source (e.g. stdin, fifo) and (2) +// the tail process has read all those bytes - so no packets will be +// missed. In that case, the tail process is killed and its stdout closed, +// which will trigger the psml reading process to shut down, and termshark +// will turn off its loading UI. +func (i *InterfaceLoader) checkAllBytesRead(e iTailCommand, cb interface{}, app gowid.IApp) { + cancel := false + if !i.totalFifoBytesWritten.IsNone() && !i.totalFifoBytesRead.IsNone() { + if i.totalFifoBytesRead.Val() == i.totalFifoBytesWritten.Val() { + cancel = true } - } else { - c.totalFifoBytesWritten = gwutil.SomeInt64(fi.Size()) } + if i.fifoError != nil { + cancel = true + } + + // if there was a fifo error, OR we have read all the bytes that were written, then + // we need to stop the tail command + if cancel { + if i.fifoError != nil { + err := fmt.Errorf("Fifo error: %v", i.fifoError) + HandleError(IfaceCode, app, err, cb) + } + + e.stopTail() + } +} - c.checkAllBytesRead(cb) +func (i *InterfaceLoader) stopLoadIface() { + if i != nil && i.ifaceCancelFn != nil { + i.ifaceCancelFn() + } +} - c.ifaceCancelFn() +func (c *InterfaceLoader) IsLoading() bool { + return c != nil && c.state == Loading } //====================================================================== @@ -2122,34 +2184,27 @@ func (c CacheEntry) Complete() bool { //====================================================================== type LoadPcapSlice struct { - Row int - Cancel bool + Row int + CancelCurrent bool + Jump int // 0 means no jump } -func (m *LoadPcapSlice) String() string { - if m.Cancel { - return fmt.Sprintf("[loadslice: %d, cancel: %v]", m.Row, m.Cancel) - } else { - return fmt.Sprintf("[loadslice: %d]", m.Row) +func (m LoadPcapSlice) String() string { + pieces := make([]string, 0, 3) + pieces = append(pieces, fmt.Sprintf("loadslice: %d", m.Row)) + if m.CancelCurrent { + pieces = append(pieces, fmt.Sprintf("cancelcurrent: %v", m.CancelCurrent)) + } + if m.Jump != 0 { + pieces = append(pieces, fmt.Sprintf("jumpto: %d", m.Jump)) } + return fmt.Sprintf("[%s]", strings.Join(pieces, ", ")) } 
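The byte-accounting handshake that checkAllBytesRead implements above is easier to see in isolation. Here is a minimal, self-contained sketch of the same idea; byteAccount and its fields are illustrative stand-ins only, not the real loader types (the real code tracks these counts as gwutil optional int64s and also accounts for a fifo error):

package main

import "fmt"

// byteAccount stands in for the interface-loader bookkeeping: written is only
// set once the capture process has finished writing the temp file, and read is
// updated after every chunk the tail side consumes.
type byteAccount struct {
	written *int64 // nil until the writer has exited
	read    int64
	stop    func() // stands in for stopping the tail command
}

// checkAllBytesRead mirrors the rule described above: only when the writer has
// finished AND the reader has consumed exactly that many bytes is the tail
// process stopped, which in turn shuts down the psml reader.
func (a *byteAccount) checkAllBytesRead() {
	if a.written != nil && a.read == *a.written {
		a.stop()
	}
}

func main() {
	stopped := false
	acct := &byteAccount{stop: func() { stopped = true }}

	acct.read = 1000
	acct.checkAllBytesRead() // writer still running: nothing to do yet

	total := int64(1500)
	acct.written = &total
	acct.checkAllBytesRead() // 1000 != 1500: the tail still has bytes to read

	acct.read = 1500
	acct.checkAllBytesRead() // all bytes accounted for: stop the tail
	fmt.Println("tail stopped:", stopped)
}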
//====================================================================== -type ICacheUpdater interface { - WhenLoadingPdml() - WhenNotLoadingPdml() -} - -type ICacheLoader interface { - State() LoaderState - SetState(LoaderState) - loadIsNecessary(ev LoadPcapSlice) bool - stopLoadPdml() - startLoadPdml(int, interface{}) -} - -func ProcessPdmlRequests(requests []LoadPcapSlice, loader ICacheLoader, updater ICacheUpdater) []LoadPcapSlice { +func ProcessPdmlRequests(requests []LoadPcapSlice, mloader *ParentLoader, + loader *PdmlLoader, cb interface{}, app gowid.IApp) []LoadPcapSlice { Loop: for { if len(requests) == 0 { @@ -2157,21 +2212,20 @@ Loop: } else { ev := requests[0] - if loader.loadIsNecessary(ev) { - if loader.State()&LoadingPdml != 0 { - // we are loading a piece. Do we need to cancel? If not, reschedule for when idle - if ev.Cancel { + if !mloader.loadIsNecessary(ev) { + requests = requests[1:] + } else { + if loader.state == Loading { + if ev.CancelCurrent { loader.stopLoadPdml() } - updater.WhenNotLoadingPdml() } else { - loader.startLoadPdml(ev.Row, updater) - loader.SetState(loader.State() | LoadingPdml) - updater.WhenLoadingPdml() + mloader.RenewPdmlLoader() + // ops? + mloader.loadPcapSync(ev.Row, ev.CancelCurrent, mloader, cb, app) + requests = requests[1:] } break Loop - } else { - requests = requests[1:] } } } @@ -2188,6 +2242,22 @@ func psmlColorToIColor(col string) gowid.IColor { } } +// https://stackoverflow.com/a/28005931/784226 +func TempPcapFile(tokens ...string) string { + tokensClean := make([]string, 0, len(tokens)) + for _, token := range tokens { + re := regexp.MustCompile(`[^a-zA-Z0-9.-]`) + tokensClean = append(tokensClean, re.ReplaceAllString(token, "_")) + } + + tokenClean := strings.Join(tokensClean, "-") + + return filepath.Join(termshark.PcapDir(), fmt.Sprintf("%s--%s.pcap", + tokenClean, + termshark.DateStringForFilename(), + )) +} + //====================================================================== // Local Variables: // mode: Go diff --git a/streams/loader.go b/streams/loader.go index 0b8457b..59169b8 100644 --- a/streams/loader.go +++ b/streams/loader.go @@ -159,7 +159,7 @@ func (c *Loader) loadStreamReassemblyAsync(pcapf string, proto string, idx int, "command": c.streamCmd.String(), "error": err, }) - pcap.HandleError(cerr, cb) + pcap.HandleError(pcap.StreamCode, app, cerr, cb) } } @@ -187,19 +187,23 @@ func (c *Loader) loadStreamReassemblyAsync(pcapf string, proto string, idx int, streamOut, err := c.streamCmd.StdoutReader() if err != nil { - pcap.HandleError(err, cb) + pcap.HandleError(pcap.StreamCode, app, err, cb) return } - pcap.HandleBegin(cb) + app.Run(gowid.RunFunction(func(app gowid.IApp) { + pcap.HandleBegin(pcap.StreamCode, app, cb) + })) defer func() { - pcap.HandleEnd(cb) + app.Run(gowid.RunFunction(func(app gowid.IApp) { + pcap.HandleEnd(pcap.StreamCode, app, cb) + })) }() err = c.streamCmd.Start() if err != nil { err = fmt.Errorf("Error starting stream reassembly %v: %v", c.streamCmd, err) - pcap.HandleError(err, cb) + pcap.HandleError(pcap.StreamCode, app, err, cb) return } @@ -244,7 +248,7 @@ func (c *Loader) startStreamIndexerAsync(pcapf string, proto string, idx int, ap streamOut, err := c.indexerCmd.StdoutReader() if err != nil { - pcap.HandleError(err, cb) + pcap.HandleError(pcap.StreamCode, app, err, cb) return } @@ -274,7 +278,7 @@ func (c *Loader) startStreamIndexerAsync(pcapf string, proto string, idx int, ap "command": c.indexerCmd.String(), "error": err, }) - pcap.HandleError(cerr, cb) + 
pcap.HandleError(pcap.StreamCode, app, cerr, cb) } } streamOut.Close() @@ -310,7 +314,7 @@ func (c *Loader) startStreamIndexerAsync(pcapf string, proto string, idx int, ap err = c.indexerCmd.Start() if err != nil { err = fmt.Errorf("Error starting stream indexer %v: %v", c.indexerCmd, err) - pcap.HandleError(err, cb) + pcap.HandleError(pcap.StreamCode, app, err, cb) return } diff --git a/ui/capinfoui.go b/ui/capinfoui.go index 7020944..074bde2 100644 --- a/ui/capinfoui.go +++ b/ui/capinfoui.go @@ -32,11 +32,9 @@ func startCapinfo(app gowid.IApp) { fi, err := os.Stat(Loader.PcapPdml) if err != nil || CapinfoTime.Before(fi.ModTime()) { - CapinfoLoader = capinfo.NewLoader(capinfo.MakeCommands(), Loader.SourceContext()) + CapinfoLoader = capinfo.NewLoader(capinfo.MakeCommands(), Loader.Context()) - handler := capinfoParseHandler{ - app: app, - } + handler := capinfoParseHandler{} CapinfoLoader.StartLoad( Loader.PcapPdml, @@ -51,13 +49,14 @@ func startCapinfo(app gowid.IApp) { //====================================================================== type capinfoParseHandler struct { - app gowid.IApp tick *time.Ticker // for updating the spinner stop chan struct{} pleaseWaitClosed bool } var _ capinfo.ICapinfoCallbacks = (*capinfoParseHandler)(nil) +var _ pcap.IBeforeBegin = (*capinfoParseHandler)(nil) +var _ pcap.IAfterEnd = (*capinfoParseHandler)(nil) func (t *capinfoParseHandler) OnCapinfoData(data string) { CapinfoData = strings.Replace(data, "\r\n", "\n", -1) // For windows... @@ -72,9 +71,12 @@ func (t *capinfoParseHandler) OnCapinfoData(data string) { func (t *capinfoParseHandler) AfterCapinfoEnd(success bool) { } -func (t *capinfoParseHandler) BeforeBegin() { - t.app.Run(gowid.RunFunction(func(app gowid.IApp) { - OpenPleaseWait(appView, t.app) +func (t *capinfoParseHandler) BeforeBegin(code pcap.HandlerCode, app gowid.IApp) { + if code&pcap.CapinfoCode == 0 { + return + } + app.Run(gowid.RunFunction(func(app gowid.IApp) { + OpenPleaseWait(appView, app) })) t.tick = time.NewTicker(time.Duration(200) * time.Millisecond) @@ -85,7 +87,7 @@ func (t *capinfoParseHandler) BeforeBegin() { for { select { case <-t.tick.C: - t.app.Run(gowid.RunFunction(func(app gowid.IApp) { + app.Run(gowid.RunFunction(func(app gowid.IApp) { pleaseWaitSpinner.Update() })) case <-t.stop: @@ -95,11 +97,14 @@ func (t *capinfoParseHandler) BeforeBegin() { }, Goroutinewg) } -func (t *capinfoParseHandler) AfterEnd() { - t.app.Run(gowid.RunFunction(func(app gowid.IApp) { +func (t *capinfoParseHandler) AfterEnd(code pcap.HandlerCode, app gowid.IApp) { + if code&pcap.CapinfoCode == 0 { + return + } + app.Run(gowid.RunFunction(func(app gowid.IApp) { if !t.pleaseWaitClosed { t.pleaseWaitClosed = true - ClosePleaseWait(t.app) + ClosePleaseWait(app) } OpenMessageForCopy(CapinfoData, appView, app) @@ -118,9 +123,14 @@ func clearCapinfoState() { type ManageCapinfoCache struct{} var _ pcap.INewSource = ManageCapinfoCache{} +var _ pcap.IClear = ManageCapinfoCache{} // Make sure that existing stream widgets are discarded if the user loads a new pcap. 
-func (t ManageCapinfoCache) OnNewSource() { +func (t ManageCapinfoCache) OnNewSource(pcap.HandlerCode, gowid.IApp) { + clearCapinfoState() +} + +func (t ManageCapinfoCache) OnClear(pcap.HandlerCode, gowid.IApp) { clearCapinfoState() } diff --git a/ui/convscallbacks.go b/ui/convscallbacks.go index 9c08d8c..f0bc64a 100644 --- a/ui/convscallbacks.go +++ b/ui/convscallbacks.go @@ -11,6 +11,7 @@ import ( "github.com/gcla/gowid" "github.com/gcla/termshark/v2" + "github.com/gcla/termshark/v2/pcap" ) //====================================================================== @@ -28,6 +29,9 @@ type convsParseHandler struct { pleaseWaitClosed bool } +var _ pcap.IBeforeBegin = (*convsParseHandler)(nil) +var _ pcap.IAfterEnd = (*convsParseHandler)(nil) + func (t *convsParseHandler) OnData(data string) { data = strings.Replace(data, "\r\n", "\n", -1) // For windows... @@ -46,8 +50,11 @@ func (t *convsParseHandler) AfterDataEnd(success bool) { } } -func (t *convsParseHandler) BeforeBegin() { - t.app.Run(gowid.RunFunction(func(app gowid.IApp) { +func (t *convsParseHandler) BeforeBegin(code pcap.HandlerCode, app gowid.IApp) { + if code&pcap.ConvCode == 0 { + return + } + app.Run(gowid.RunFunction(func(app gowid.IApp) { OpenPleaseWait(appView, t.app) })) @@ -59,7 +66,7 @@ func (t *convsParseHandler) BeforeBegin() { for { select { case <-t.tick.C: - t.app.Run(gowid.RunFunction(func(app gowid.IApp) { + app.Run(gowid.RunFunction(func(app gowid.IApp) { pleaseWaitSpinner.Update() })) case <-t.stop: @@ -69,7 +76,10 @@ func (t *convsParseHandler) BeforeBegin() { }, Goroutinewg) } -func (t *convsParseHandler) AfterEnd() { +func (t *convsParseHandler) AfterEnd(code pcap.HandlerCode, app gowid.IApp) { + if code&pcap.ConvCode == 0 { + return + } t.app.Run(gowid.RunFunction(func(app gowid.IApp) { if !t.pleaseWaitClosed { t.pleaseWaitClosed = true diff --git a/ui/convsui.go b/ui/convsui.go index 37a4627..d96fe01 100644 --- a/ui/convsui.go +++ b/ui/convsui.go @@ -121,7 +121,7 @@ type ManageConvsCache struct{} var _ pcap.INewSource = ManageConvsCache{} // Make sure that existing data is discarded if the user loads a new pcap. -func (t ManageConvsCache) OnNewSource() { +func (t ManageConvsCache) OnNewSource(pcap.HandlerCode, gowid.IApp) { convsView = nil // which then deletes all refs to loaded data convsPcapSize = 0 } @@ -205,7 +205,7 @@ func (p pleaseWait) ClosePleaseWait(app gowid.IApp) { func openConvsUi(app gowid.IApp) { var convCtx context.Context - convCtx, convCancel = context.WithCancel(context.TODO()) + convCtx, convCancel = context.WithCancel(Loader.Context()) newSize, reset := termshark.FileSizeDifferentTo(Loader.PcapPdml, convsPcapSize) if reset { @@ -659,7 +659,7 @@ func (w *ConvsUiWidget) doFilterMenuOp(dirOp FilterMask, app gowid.IApp) { w.focusOnFilter = true OpenMessage("Display filter prepared.", appView, app) } else { - PcapScheduler.RequestNewFilter(filter, MakePacketViewUpdater(app)) + RequestNewFilter(filter, app) w.displayFilter = filter OpenMessage("Display filter applied.", appView, app) w.ReloadNeeded() diff --git a/ui/prochandlers.go b/ui/prochandlers.go new file mode 100644 index 0000000..ff3f2e2 --- /dev/null +++ b/ui/prochandlers.go @@ -0,0 +1,340 @@ +// Copyright 2019-2021 Graham Clark. All rights reserved. Use of this source +// code is governed by the MIT license that can be found in the LICENSE +// file. + +// Package ui contains user-interface functions and helpers for termshark. 
+package ui + +import ( + "fmt" + "os" + "strings" + "time" + + "github.com/gcla/gowid" + "github.com/gcla/gowid/widgets/table" + "github.com/gcla/termshark/v2" + "github.com/gcla/termshark/v2/pcap" + log "github.com/sirupsen/logrus" +) + +//====================================================================== + +type NoHandlers struct{} + +//====================================================================== + +type updateCurrentCaptureInTitle struct { + Ld *pcap.PacketLoader +} + +var _ pcap.IBeforeBegin = updateCurrentCaptureInTitle{} +var _ pcap.IClear = updateCurrentCaptureInTitle{} + +func MakeUpdateCurrentCaptureInTitle() updateCurrentCaptureInTitle { + return updateCurrentCaptureInTitle{ + Ld: Loader, + } +} + +func (t updateCurrentCaptureInTitle) BeforeBegin(code pcap.HandlerCode, app gowid.IApp) { + if code&pcap.PsmlCode != 0 { + currentCapture.SetText(t.Ld.String(), app) + currentCaptureWidgetHolder.SetSubWidget(currentCaptureWidget, app) + } +} + +func (t updateCurrentCaptureInTitle) OnClear(code pcap.HandlerCode, app gowid.IApp) { + currentCaptureWidgetHolder.SetSubWidget(nullw, app) +} + +//====================================================================== + +type updatePacketViews struct { + Ld *pcap.PacketLoader +} + +var _ pcap.IOnError = updatePacketViews{} +var _ pcap.IClear = updatePacketViews{} +var _ pcap.IBeforeBegin = updatePacketViews{} +var _ pcap.IAfterEnd = updatePacketViews{} + +func MakePacketViewUpdater() updatePacketViews { + res := updatePacketViews{} + res.Ld = Loader + return res +} + +func (t updatePacketViews) OnClear(code pcap.HandlerCode, app gowid.IApp) { + clearPacketViews(app) +} + +func (t updatePacketViews) BeforeBegin(code pcap.HandlerCode, app gowid.IApp) { + if code&pcap.PsmlCode == 0 { + return + } + ch2 := Loader.PsmlFinishedChan + clearPacketViews(app) + t.Ld.PsmlLoader.Lock() + defer t.Ld.PsmlLoader.Unlock() + setPacketListWidgets(t.Ld, app) + + // Start this after widgets have been cleared, to get focus change + termshark.TrackedGo(func() { + fn2 := func() { + app.Run(gowid.RunFunction(func(app gowid.IApp) { + updatePacketListWithData(Loader, app) + })) + } + + termshark.RunOnDoubleTicker(ch2, fn2, + time.Duration(100)*time.Millisecond, + time.Duration(2000)*time.Millisecond, + 10) + }, Goroutinewg) +} + +func (t updatePacketViews) AfterEnd(code pcap.HandlerCode, app gowid.IApp) { + if code&pcap.PsmlCode == 0 { + return + } + updatePacketListWithData(t.Ld, app) + StopEmptyStructViewTimer() + StopEmptyHexViewTimer() + log.Infof("Load operation complete") +} + +func (t updatePacketViews) OnError(code pcap.HandlerCode, app gowid.IApp, err error) { + if code&pcap.PsmlCode == 0 { + return + } + log.Error(err) + if !Running { + fmt.Fprintf(os.Stderr, "%v\n", err) + RequestQuit() + } else { + + var errstr string + if kverr, ok := err.(gowid.KeyValueError); ok { + errstr = fmt.Sprintf("%v\n\n", kverr.Cause()) + kvs := make([]string, 0, len(kverr.KeyVals)) + for k, v := range kverr.KeyVals { + kvs = append(kvs, fmt.Sprintf("%v: %v", k, v)) + } + errstr = errstr + strings.Join(kvs, "\n") + } else { + errstr = fmt.Sprintf("%v", err) + } + + OpenLongError(errstr, app) + StopEmptyStructViewTimer() + StopEmptyHexViewTimer() + } +} + +//====================================================================== + +type SimpleErrors struct{} + +var _ pcap.IOnError = SimpleErrors{} + +func (t SimpleErrors) OnError(code pcap.HandlerCode, app gowid.IApp, err error) { + if code&pcap.NoneCode == 0 { + return + } + log.Error(err) + // Hack to avoid picking 
up errors at other parts of the load + // cycle. There should be specific handlers for specific errors. + app.Run(gowid.RunFunction(func(app gowid.IApp) { + OpenError(fmt.Sprintf("%v", err), app) + })) +} + +//====================================================================== + +type SaveRecents struct { + Pcap string + Filter string +} + +var _ pcap.IBeforeBegin = SaveRecents{} + +func MakeSaveRecents(pcap string, filter string) SaveRecents { + return SaveRecents{ + Pcap: pcap, + Filter: filter, + } +} + +func (t SaveRecents) BeforeBegin(code pcap.HandlerCode, app gowid.IApp) { + if code&pcap.PsmlCode == 0 { + return + } + // Run on main goroutine to avoid problems flagged by -race + if t.Pcap != "" { + termshark.AddToRecentFiles(t.Pcap) + } + if t.Filter != "" { + // Run on main goroutine to avoid problems flagged by -race + termshark.AddToRecentFilters(t.Filter) + } +} + +//====================================================================== + +type CancelledMessage struct{} + +var _ pcap.IAfterEnd = CancelledMessage{} + +func (t CancelledMessage) AfterEnd(code pcap.HandlerCode, app gowid.IApp) { + if code&pcap.PsmlCode == 0 { + return + } + // Run on main goroutine to avoid problems flagged by -race + if Loader.LoadWasCancelled() { + // Only do this if the user isn't quitting the app, + // otherwise it looks clumsy. + if !QuitRequested { + OpenError("Loading was cancelled.", app) + } + } +} + +//====================================================================== + +type StartUIWhenThereArePackets struct{} + +var _ pcap.IPsmlHeader = StartUIWhenThereArePackets{} + +func (t StartUIWhenThereArePackets) OnPsmlHeader(code pcap.HandlerCode, app gowid.IApp) { + StartUIOnce.Do(func() { + close(StartUIChan) + }) +} + +//====================================================================== + +type ClearMarksHandler struct{} + +var _ pcap.IClear = checkGlobalJumpAfterPsml{} +var _ pcap.INewSource = checkGlobalJumpAfterPsml{} + +func clearMarks() { + for k := range marksMap { + delete(marksMap, k) + } + lastJumpPos = -1 +} + +func (t checkGlobalJumpAfterPsml) OnNewSource(code pcap.HandlerCode, app gowid.IApp) { + clearMarks() +} + +func (t checkGlobalJumpAfterPsml) OnClear(code pcap.HandlerCode, app gowid.IApp) { + clearMarks() +} + +//====================================================================== + +type checkGlobalJumpAfterPsml struct { + Jump termshark.GlobalJumpPos +} + +var _ pcap.IAfterEnd = checkGlobalJumpAfterPsml{} + +func MakeCheckGlobalJumpAfterPsml(jmp termshark.GlobalJumpPos) checkGlobalJumpAfterPsml { + return checkGlobalJumpAfterPsml{ + Jump: jmp, + } +} + +func (t checkGlobalJumpAfterPsml) AfterEnd(code pcap.HandlerCode, app gowid.IApp) { + // Run on main goroutine to avoid problems flagged by -race + if code&pcap.PsmlCode == 0 { + return + } + if QuitRequested { + return + } + if t.Jump.Filename == Loader.Pcap() { + if packetListView != nil { + tableRow, err := tableRowFromPacketNumber(t.Jump.Pos) + if err != nil { + OpenError(err.Error(), app) + } else { + + tableCol := 0 + curTablePos, err := packetListView.FocusXY() + if err == nil { + tableCol = curTablePos.Column + } + + packetListView.SetFocusXY(app, table.Coords{Column: tableCol, Row: tableRow}) + } + } + } +} + +//====================================================================== + +// used for the pdml loader +type SetStructWidgets struct { + Ld *pcap.PacketLoader +} + +var _ pcap.IOnError = SetStructWidgets{} + +var _ pcap.IBeforeBegin = SetStructWidgets{} +var _ pcap.IAfterEnd = SetStructWidgets{} 
+ +func (s SetStructWidgets) BeforeBegin(code pcap.HandlerCode, app gowid.IApp) { + if code&pcap.PdmlCode == 0 { + return + } + s2ch := s.Ld.Stage2FinishedChan + + termshark.TrackedGo(func() { + fn2 := func() { + app.Run(gowid.RunFunction(func(app gowid.IApp) { + setLowerWidgets(app) + })) + } + + termshark.RunOnDoubleTicker(s2ch, fn2, + time.Duration(100)*time.Millisecond, + time.Duration(2000)*time.Millisecond, + 10) + }, Goroutinewg) +} + +// Close the channel before the callback. When the global loader state is idle, +// app.Quit() will stop accepting app callbacks, so the goroutine that waits +// for ch to be closed will never terminate. +func (s SetStructWidgets) AfterEnd(code pcap.HandlerCode, app gowid.IApp) { + if code&pcap.PdmlCode == 0 { + return + } + setLowerWidgets(app) + StopEmptyHexViewTimer() + StopEmptyStructViewTimer() +} + +func (s SetStructWidgets) OnError(code pcap.HandlerCode, app gowid.IApp, err error) { + if code&pcap.PdmlCode == 0 { + return + } + log.Error(err) + // Hack to avoid picking up errors at other parts of the load + // cycle. There should be specific handlers for specific errors. + if s.Ld.PdmlLoader.IsLoading() { + app.Run(gowid.RunFunction(func(app gowid.IApp) { + OpenLongError(fmt.Sprintf("%v", err), app) + })) + } +} + +//====================================================================== +// Local Variables: +// mode: Go +// fill-column: 110 +// End: diff --git a/ui/streamui.go b/ui/streamui.go index db7765e..6151f78 100644 --- a/ui/streamui.go +++ b/ui/streamui.go @@ -54,9 +54,14 @@ type streamKey struct { type ManageStreamCache struct{} var _ pcap.INewSource = ManageStreamCache{} +var _ pcap.IClear = ManageStreamCache{} // Make sure that existing stream widgets are discarded if the user loads a new pcap. -func (t ManageStreamCache) OnNewSource() { +func (t ManageStreamCache) OnNewSource(pcap.HandlerCode, gowid.IApp) { + clearStreamState() +} + +func (t ManageStreamCache) OnClear(pcap.HandlerCode, gowid.IApp) { clearStreamState() } @@ -104,7 +109,7 @@ func startStreamReassembly(app gowid.IApp) { previousFilterValue := FilterWidget.Value() FilterWidget.SetValue(filter, app) - PcapScheduler.RequestNewFilter(filter, MakePacketViewUpdater(app)) + RequestNewFilter(filter, app) currentStreamKey = &streamKey{proto: proto, idx: streamIndex.Val()} @@ -136,7 +141,7 @@ func startStreamReassembly(app gowid.IApp) { // Use the source context. At app shutdown, canceling main will cancel src which will cancel the stream // loader. And changing source should also cancel the stream loader on all occasions. 
- StreamLoader = streams.NewLoader(streams.MakeCommands(), Loader.SourceContext()) + StreamLoader = streams.NewLoader(streams.MakeCommands(), Loader.Context()) sh := &streamParseHandler{ app: app, @@ -176,6 +181,9 @@ type streamParseHandler struct { var _ streams.IOnStreamChunk = (*streamParseHandler)(nil) var _ streams.IOnStreamHeader = (*streamParseHandler)(nil) +var _ pcap.IBeforeBegin = (*streamParseHandler)(nil) +var _ pcap.IAfterEnd = (*streamParseHandler)(nil) +var _ pcap.IOnError = (*streamParseHandler)(nil) // Run from the app goroutine func (t *streamParseHandler) drainChunks() int { @@ -202,9 +210,12 @@ func (t *streamParseHandler) drainPacketIndices() int { return curLen } -func (t *streamParseHandler) BeforeBegin() { - t.app.Run(gowid.RunFunction(func(app gowid.IApp) { - OpenPleaseWait(appView, t.app) +func (t *streamParseHandler) BeforeBegin(code pcap.HandlerCode, app gowid.IApp) { + if code&pcap.StreamCode == 0 { + return + } + app.Run(gowid.RunFunction(func(app gowid.IApp) { + OpenPleaseWait(appView, app) })) t.tick = time.NewTicker(time.Duration(200) * time.Millisecond) @@ -216,7 +227,7 @@ func (t *streamParseHandler) BeforeBegin() { // Start this after widgets have been cleared, to get focus change termshark.TrackedGo(func() { fn := func() { - t.app.Run(gowid.RunFunction(func(app gowid.IApp) { + app.Run(gowid.RunFunction(func(app gowid.IApp) { t.drainChunks() if !t.openedStreams { @@ -235,7 +246,7 @@ func (t *streamParseHandler) BeforeBegin() { termshark.TrackedGo(func() { fn := func() { - t.app.Run(gowid.RunFunction(func(app gowid.IApp) { + app.Run(gowid.RunFunction(func(app gowid.IApp) { t.drainPacketIndices() })) } @@ -251,7 +262,7 @@ func (t *streamParseHandler) BeforeBegin() { for { select { case <-t.tick.C: - t.app.Run(gowid.RunFunction(func(app gowid.IApp) { + app.Run(gowid.RunFunction(func(app gowid.IApp) { pleaseWaitSpinner.Update() })) case <-t.stopChunks: @@ -272,11 +283,14 @@ func (t *streamParseHandler) AfterIndexEnd(success bool) { } } -func (t *streamParseHandler) AfterEnd() { - t.app.Run(gowid.RunFunction(func(app gowid.IApp) { +func (t *streamParseHandler) AfterEnd(code pcap.HandlerCode, app gowid.IApp) { + if code&pcap.StreamCode == 0 { + return + } + app.Run(gowid.RunFunction(func(app gowid.IApp) { if !t.pleaseWaitClosed { t.pleaseWaitClosed = true - ClosePleaseWait(t.app) + ClosePleaseWait(app) } if !t.openedStreams { openStreamUi(t.wid, app) @@ -317,7 +331,10 @@ func (t *streamParseHandler) OnStreamChunk(chunk streams.IChunk) { t.chunks <- chunk } -func (t *streamParseHandler) OnError(err error) { +func (t *streamParseHandler) OnError(code pcap.HandlerCode, app gowid.IApp, err error) { + if code&pcap.StreamCode == 0 { + return + } log.Error(err) if !Running { fmt.Fprintf(os.Stderr, "%v\n", err) @@ -336,7 +353,7 @@ func (t *streamParseHandler) OnError(err error) { errstr = fmt.Sprintf("%v", err) } - t.app.Run(gowid.RunFunction(func(app gowid.IApp) { + app.Run(gowid.RunFunction(func(app gowid.IApp) { OpenError(errstr, app) })) } @@ -445,7 +462,8 @@ func makeStreamWidget(previousFilter string, filter string, cap string, proto st } FilterWidget.SetValue(newFilter, app) - PcapScheduler.RequestNewFilter(newFilter, MakePacketViewUpdater(app)) + //Loader.NewFilter(newFilter, MakePacketViewUpdater(), app) + RequestNewFilter(newFilter, app) }, CopyModeWidget: CopyModeWidget, diff --git a/ui/ui.go b/ui/ui.go index 8e5f07a..ba9deae 100644 --- a/ui/ui.go +++ b/ui/ui.go @@ -8,6 +8,7 @@ package ui import ( "encoding/xml" "fmt" + "math" "os" "reflect" "runtime" @@ 
-147,8 +148,8 @@ var packetListView *psmlTableRowWidget var Loadingw gowid.IWidget // "loading..." var MissingMsgw gowid.IWidget // centered, holding singlePacketViewMsgHolder -var EmptyStructViewTimer *time.Ticker -var EmptyHexViewTimer *time.Ticker +var EmptyStructViewTimer *time.Timer +var EmptyHexViewTimer *time.Timer var curExpandedStructNodes pdmltree.ExpandedPaths // a path to each expanded node in the packet, preserved while navigating var curStructPosition tree.IPos // e.g. [0, 2, 1] -> the indices of the expanded nodes @@ -159,6 +160,8 @@ var CacheRequests []pcap.LoadPcapSlice var CacheRequestsChan chan struct{} // false means started, true means finished var QuitRequestedChan chan struct{} +var StartUIChan chan struct{} +var StartUIOnce sync.Once // Store this for vim-like keypresses that are a sequence e.g. "ZZ" var keyState termshark.KeyState @@ -166,17 +169,18 @@ var marksMap map[rune]termshark.JumpPos var globalMarksMap map[rune]termshark.GlobalJumpPos var lastJumpPos int -var Loader *pcap.Loader -var PcapScheduler *pcap.Scheduler var NoGlobalJump termshark.GlobalJumpPos // leave as default, like a placeholder -var DarkMode bool // global state in app -var PacketColors bool // global state in app -var PacketColorsSupported bool // global state in app - true if it's even possible -var AutoScroll bool // true if the packet list should auto-scroll when listening on an interface. -var newPacketsArrived bool // true if current updates are due to new packets when listening on an interface. -var reenableAutoScroll bool // set to true by keypress processing widgets - used with newPacketsArrived -var Running bool // true if gowid/tcell is controlling the terminal -var QuitRequested bool // true if a quit has been issued, but not yet processed. Stops some handlers displaying errors. + +var Loader *pcap.PacketLoader + +var DarkMode bool // global state in app +var PacketColors bool // global state in app +var PacketColorsSupported bool // global state in app - true if it's even possible +var AutoScroll bool // true if the packet list should auto-scroll when listening on an interface. +var newPacketsArrived bool // true if current updates are due to new packets when listening on an interface. +var reenableAutoScroll bool // set to true by keypress processing widgets - used with newPacketsArrived +var Running bool // true if gowid/tcell is controlling the terminal +var QuitRequested bool // true if a quit has been issued, but not yet processed. Stops some handlers displaying errors. 
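The StartUIChan/StartUIOnce pair declared above is the usual close-a-channel-exactly-once signalling pattern: the first PSML header event closes the channel to tell the UI to start, and the sync.Once guard makes any later signal a no-op instead of a double-close panic. A minimal standalone sketch of the pattern (illustrative only, not termshark's actual wiring):

package main

import (
	"fmt"
	"sync"
)

var (
	startUIChan = make(chan struct{})
	startUIOnce sync.Once
)

// signalStartUI may be called any number of times (for example, once per PSML
// header callback); the Once guarantees the channel is closed exactly once.
func signalStartUI() {
	startUIOnce.Do(func() {
		close(startUIChan)
	})
}

func main() {
	signalStartUI()
	signalStartUI() // safe: still only one close

	<-startUIChan // already closed, so this returns immediately
	fmt.Println("UI would start now")
}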
//====================================================================== @@ -185,6 +189,8 @@ func init() { QuitRequestedChan = make(chan struct{}, 1) // buffered because send happens from ui goroutine, which runs global select CacheRequestsChan = make(chan struct{}, 1000) CacheRequests = make([]pcap.LoadPcapSlice, 0) + // Buffered because I might send something in this goroutine + StartUIChan = make(chan struct{}, 1) keyState.NumberPrefix = -1 // 0 might be meaningful marksMap = make(map[rune]termshark.JumpPos) globalMarksMap = make(map[rune]termshark.GlobalJumpPos) @@ -223,22 +229,16 @@ func RequestQuit() { } // Runs in app goroutine -func UpdateProgressBarForInterface(c *pcap.Loader, app gowid.IApp) { +func UpdateProgressBarForInterface(c *pcap.InterfaceLoader, app gowid.IApp) { SetProgressIndeterminate(app) - switch Loader.State() { - case 0: - ClearProgressWidget(app) - default: - loadSpinner.Update() - setProgressWidget(app) - } + loadSpinner.Update() } // Runs in app goroutine -func UpdateProgressBarForFile(c *pcap.Loader, prevRatio float64, app gowid.IApp) float64 { +func UpdateProgressBarForFile(c *pcap.PacketLoader, prevRatio float64, app gowid.IApp) float64 { SetProgressDeterminate(app) - psmlProg := Prog{100, 100} + psmlProg := Prog{0, 100} pdmlPacketProg := Prog{0, 100} pdmlIdxProg := Prog{0, 100} pcapPacketProg := Prog{0, 100} @@ -264,16 +264,16 @@ func UpdateProgressBarForFile(c *pcap.Loader, prevRatio float64, app gowid.IApp) currentRow = int(foo) currentRowMod = int64(currentRow % pktsPerLoad) currentRowDiv = (currentRow / pktsPerLoad) * pktsPerLoad - c.Lock() - curRowProg.cur, curRowProg.max = int64(currentRow), int64(len(c.PacketPsmlData)) - c.Unlock() + c.PsmlLoader.Lock() + curRowProg.cur, curRowProg.max = int64(currentRow), int64(len(c.PsmlData())) + c.PsmlLoader.Unlock() } } } // Progress determined by how many of the (up to) pktsPerLoad pdml packets are read // If it's not the same chunk of rows, assume it won't affect our view, so no progress needed - if c.State()&pcap.LoadingPdml != 0 { + if c.PdmlLoader.IsLoading() { if c.LoadingRow() == currentRowDiv { if x, err = c.LengthOfPdmlCacheEntry(c.LoadingRow()); err == nil { pdmlPacketProg.cur = int64(x) @@ -284,9 +284,9 @@ func UpdateProgressBarForFile(c *pcap.Loader, prevRatio float64, app gowid.IApp) } // Progress determined by how far through the pcap the pdml reader is. - c.Lock() + c.PdmlLoader.Lock() c2, m, err = system.ProcessProgress(c.PdmlPid, c.PcapPdml) - c.Unlock() + c.PdmlLoader.Unlock() if err == nil { pdmlIdxProg.cur, pdmlIdxProg.max = c2, m if currentRow != -1 { @@ -305,9 +305,9 @@ func UpdateProgressBarForFile(c *pcap.Loader, prevRatio float64, app gowid.IApp) } // Progress determined by how far through the pcap the pcap reader is. 
- c.Lock() + c.PdmlLoader.Lock() c2, m, err = system.ProcessProgress(c.PcapPid, c.PcapPcap) - c.Unlock() + c.PdmlLoader.Unlock() if err == nil { pcapIdxProg.cur, pcapIdxProg.max = c2, m if currentRow != -1 { @@ -318,10 +318,10 @@ func UpdateProgressBarForFile(c *pcap.Loader, prevRatio float64, app gowid.IApp) } } - if psml, ok := c.PcapPsml.(string); ok && c.State()&pcap.LoadingPsml != 0 { - c.Lock() + if psml, ok := c.PcapPsml.(string); ok && c.PsmlLoader.IsLoading() { + c.PsmlLoader.Lock() c2, m, err = system.ProcessProgress(termshark.SafePid(c.PsmlCmd), psml) - c.Unlock() + c.PsmlLoader.Unlock() if err == nil { psmlProg.cur, psmlProg.max = c2, m } @@ -330,48 +330,35 @@ func UpdateProgressBarForFile(c *pcap.Loader, prevRatio float64, app gowid.IApp) var prog Prog // state is guaranteed not to include pcap.Loadingiface if we showing a determinate progress bar - switch c.State() { - case pcap.LoadingPsml: - prog = psmlProg + switch { + case c.PsmlLoader.IsLoading() && c.PdmlLoader.IsLoading() && c.PdmlLoader.LoadIsVisible(): select { - case <-c.StartStage2Chan: - default: - prog.cur = prog.cur / 2 // temporarily divide in 2. Leave original for case above - so that the 50% - } - case pcap.LoadingPdml: - prog = progMin( - progMax(pcapPacketProg, pcapIdxProg), // max because the fastest will win and cancel the other - progMax(pdmlPacketProg, pdmlIdxProg), - ) - case pcap.LoadingPsml | pcap.LoadingPdml: - select { - case <-c.StartStage2Chan: - prog = progMin( // min because all of these have to complete, so the slowest determines progress - psmlProg, - progMin( - progMax(pcapPacketProg, pcapIdxProg), // max because the fastest will win and cancel the other + case <-c.StartStage2ChanFn(): + prog = psmlProg.Add( + progMax(pcapPacketProg, pcapIdxProg).Add( progMax(pdmlPacketProg, pdmlIdxProg), ), ) default: - prog = psmlProg - prog.cur = prog.cur / 2 // temporarily divide in 2. Leave original for case above - so that the 50% + prog = psmlProg.Div(2) // temporarily divide in 2. 
Leave original for case above - so that the 50% } + case c.PsmlLoader.IsLoading(): + prog = psmlProg + case c.PdmlLoader.IsLoading() && c.PdmlLoader.LoadIsVisible(): + prog = progMax(pcapPacketProg, pcapIdxProg).Add( + progMax(pdmlPacketProg, pdmlIdxProg), + ) } curRatio := float64(prog.cur) / float64(prog.max) - if prog.Complete() { - if prevRatio < 1.0 { - ClearProgressWidget(app) - } - } else { + + if !prog.Complete() { if prevRatio < curRatio { loadProgress.SetTarget(app, int(prog.max)) loadProgress.SetProgress(app, int(prog.cur)) - setProgressWidget(app) } } - return curRatio + return math.Max(prevRatio, curRatio) } //====================================================================== @@ -1060,7 +1047,7 @@ func lastLineMode(app gowid.IApp) { MiniBuffer.Register("clear-filter", minibufferFn(func(gowid.IApp, ...string) error { FilterWidget.SetValue("", app) - ApplyCurrentFilter(app) + RequestNewFilter(FilterWidget.Value(), app) return nil })) @@ -1102,8 +1089,8 @@ func getCurrentStructModel(row int) *pdmltree.Model { pktsPerLoad := Loader.PacketsPerLoad() row2 := (row / pktsPerLoad) * pktsPerLoad - Loader.Lock() - defer Loader.Unlock() + Loader.PsmlLoader.Lock() + defer Loader.PsmlLoader.Unlock() if ws, ok := Loader.PacketCache.Get(row2); ok { srca := ws.(pcap.CacheEntry).Pdml if len(srca) > row%pktsPerLoad { @@ -1121,130 +1108,6 @@ func getCurrentStructModel(row int) *pdmltree.Model { //====================================================================== -type NoHandlers struct{} - -//====================================================================== - -type updateCurrentCaptureInTitle struct { - Ld *pcap.Scheduler - App gowid.IApp -} - -var _ pcap.INewSource = updateCurrentCaptureInTitle{} -var _ pcap.IClear = updateCurrentCaptureInTitle{} - -func MakeUpdateCurrentCaptureInTitle(app gowid.IApp) updateCurrentCaptureInTitle { - return updateCurrentCaptureInTitle{ - Ld: PcapScheduler, - App: app, - } -} - -func (t updateCurrentCaptureInTitle) OnNewSource() { - t.App.Run(gowid.RunFunction(func(app gowid.IApp) { - currentCapture.SetText(t.Ld.String(), app) - currentCaptureWidgetHolder.SetSubWidget(currentCaptureWidget, app) - })) -} - -func (t updateCurrentCaptureInTitle) OnClear() { - t.App.Run(gowid.RunFunction(func(app gowid.IApp) { - currentCaptureWidgetHolder.SetSubWidget(nullw, app) - })) -} - -//====================================================================== - -type updatePacketViews struct { - Ld *pcap.Scheduler - App gowid.IApp -} - -var _ pcap.IOnError = updatePacketViews{} -var _ pcap.IClear = updatePacketViews{} -var _ pcap.IBeforeBegin = updatePacketViews{} -var _ pcap.IAfterEnd = updatePacketViews{} - -func MakePacketViewUpdater(app gowid.IApp) updatePacketViews { - res := updatePacketViews{} - res.App = app - res.Ld = PcapScheduler - return res -} - -func (t updatePacketViews) EnableOperations() { - t.Ld.Enable() -} - -func (t updatePacketViews) OnClear() { - t.App.Run(gowid.RunFunction(func(app gowid.IApp) { - clearPacketViews(app) - })) -} - -func (t updatePacketViews) BeforeBegin() { - ch2 := Loader.PsmlFinishedChan - t.App.Run(gowid.RunFunction(func(app gowid.IApp) { - clearPacketViews(app) - t.Ld.Lock() - defer t.Ld.Unlock() - setPacketListWidgets(t.Ld, app) - setProgressWidget(app) - - // Start this after widgets have been cleared, to get focus change - termshark.TrackedGo(func() { - fn2 := func() { - app.Run(gowid.RunFunction(func(app gowid.IApp) { - updatePacketListWithData(Loader, app) - })) - } - - termshark.RunOnDoubleTicker(ch2, fn2, - 
time.Duration(100)*time.Millisecond, - time.Duration(2000)*time.Millisecond, - 10) - }, Goroutinewg) - })) -} - -func (t updatePacketViews) AfterEnd() { - t.App.Run(gowid.RunFunction(func(app gowid.IApp) { - updatePacketListWithData(t.Ld, app) - StopEmptyStructViewTimer() - StopEmptyHexViewTimer() - log.Infof("Load operation complete") - })) -} - -func (t updatePacketViews) OnError(err error) { - log.Error(err) - if !Running { - fmt.Fprintf(os.Stderr, "%v\n", err) - RequestQuit() - } else { - - var errstr string - if kverr, ok := err.(gowid.KeyValueError); ok { - errstr = fmt.Sprintf("%v\n\n", kverr.Cause()) - kvs := make([]string, 0, len(kverr.KeyVals)) - for k, v := range kverr.KeyVals { - kvs = append(kvs, fmt.Sprintf("%v: %v", k, v)) - } - errstr = errstr + strings.Join(kvs, "\n") - } else { - errstr = fmt.Sprintf("%v", err) - } - - t.App.Run(gowid.RunFunction(func(app gowid.IApp) { - OpenLongError(errstr, app) - StopEmptyStructViewTimer() - StopEmptyHexViewTimer() - })) - } -} - -//====================================================================== - func reallyClear(app gowid.IApp) { msgt := "Do you want to clear current capture?" msg := text.New(msgt) @@ -1256,12 +1119,16 @@ func reallyClear(app gowid.IApp) { Msg: "Ok", Action: func(app gowid.IApp, w gowid.IWidget) { YesNo.Close(app) - PcapScheduler.RequestClearPcap( + Loader.ClearPcap( pcap.HandlerList{ - MakePacketViewUpdater(app), - MakeUpdateCurrentCaptureInTitle(app), + SimpleErrors{}, + MakePacketViewUpdater(), + MakeUpdateCurrentCaptureInTitle(), ManageStreamCache{}, ManageCapinfoCache{}, + SetStructWidgets{Loader}, // for OnClear + ClearMarksHandler{}, + CancelledMessage{}, }, ) }, @@ -1461,7 +1328,7 @@ func tableRowFromPacketNumber(savedPacket int) (int, error) { // Map e.g. packet number #123 to the index in the PSML array - e.g. index 10 (order of psml load) packetRowId, ok := Loader.PacketNumberMap[savedPacket] if !ok { - return -1, fmt.Errorf("Error mapping packet %v", savedPacket) + return -1, fmt.Errorf("Error finding packet %v", savedPacket) } // This psml order is also the table RowId order. The table might be sorted though, so // map this RowId to the actual table row, so we can change focus to it @@ -1482,11 +1349,11 @@ func packetNumberFromTableRow(tableRow int) (termshark.JumpPos, error) { // e.g. packet #123 var summary string - if len(Loader.PacketPsmlData) > int(packetRowId) { - summary = psmlSummary(Loader.PacketPsmlData[packetRowId]).String() + if len(Loader.PsmlData()) > int(packetRowId) { + summary = psmlSummary(Loader.PsmlData()[packetRowId]).String() } - packetNum, err := strconv.Atoi(Loader.PacketPsmlData[packetRowId][0]) + packetNum, err := strconv.Atoi(Loader.PsmlData()[packetRowId][0]) if err != nil { return termshark.JumpPos{}, fmt.Errorf("Unexpected error determining no. of packet %d: %v.", tableRow, err) } @@ -1610,7 +1477,7 @@ func vimKeysMainView(evk *tcell.EventKey, app gowid.IApp) bool { if savedPacket != -1 { // Map that packet number #123 to the index in the PSML array - e.g. index 10 (order of psml load) if packetRowId, ok := Loader.PacketNumberMap[savedPacket]; !ok { - OpenError(fmt.Sprintf("Error mapping packet %v", savedPacket), app) + OpenError(fmt.Sprintf("Error finding packet %v", savedPacket), app) } else { // This psml order is also the table RowId order. 
The table might be sorted though, so // map this RowId to the actual table row, so we can change focus to it @@ -1692,8 +1559,8 @@ func mainKeyPress(evk *tcell.EventKey, app gowid.IApp) bool { isrune := evk.Key() == tcell.KeyRune - if evk.Key() == tcell.KeyCtrlC && Loader.State()&pcap.LoadingPsml != 0 { - PcapScheduler.RequestStopLoadStage1(NoHandlers{}) // iface and psml + if evk.Key() == tcell.KeyCtrlC && Loader.PsmlLoader.IsLoading() { + Loader.StopLoadPsmlAndIface(NoHandlers{}) // iface and psml } else if evk.Key() == tcell.KeyTAB || evk.Key() == tcell.KeyBacktab { isTab := (evk.Key() == tcell.KeyTab) var tabMap map[gowid.IWidget]gowid.IWidget @@ -1834,12 +1701,12 @@ func ClearProgressWidget(app gowid.IApp) { filterCols.SetDimensions(ds, app) } -func setProgressWidget(app gowid.IApp) { +func SetProgressWidget(app gowid.IApp) { stop := button.New(text.New("Stop")) stop2 := styled.NewExt(stop, gowid.MakePaletteRef("button"), gowid.MakePaletteRef("button-focus")) stop.OnClick(gowid.MakeWidgetCallback("cb", func(app gowid.IApp, w gowid.IWidget) { - PcapScheduler.RequestStopLoadStage1(NoHandlers{}) // psml and iface + Loader.StopLoadPsmlAndIface(NoHandlers{}) // psml and iface })) prog := vpadding.New(progressHolder, gowid.VAlignTop{}, flow) @@ -1884,6 +1751,7 @@ func setLowerWidgets(app gowid.IApp) { ), ) } + str := getStructWidgetToDisplay(row, app) if str != nil { sw2 = enableselected.New(str) @@ -1894,16 +1762,57 @@ func setLowerWidgets(app gowid.IApp) { packetHexViewHolder.SetSubWidget(sw1, app) StopEmptyHexViewTimer() } else { - if EmptyHexViewTimer == nil { - startEmptyHexViewTimer() + // If autoscroll is on, it's annoying to see the constant loading message, so + // suppress and just remain on the last displayed hex + timer := false + if AutoScroll { + // Only displaying loading if the current panel is blank. 
If it's data, leave the data + if packetHexViewHolder.SubWidget() == nullw { + timer = true + } + } else { + if packetHexViewHolder.SubWidget() != MissingMsgw { + timer = true + } + } + + if timer { + if EmptyHexViewTimer == nil { + EmptyHexViewTimer = time.AfterFunc(time.Duration(1000)*time.Millisecond, func() { + app.Run(gowid.RunFunction(func(app gowid.IApp) { + singlePacketViewMsgHolder.SetSubWidget(Loadingw, app) + packetHexViewHolder.SetSubWidget(MissingMsgw, app) + })) + }) + } } } if sw2 != nil { packetStructureViewHolder.SetSubWidget(sw2, app) StopEmptyStructViewTimer() } else { - if EmptyStructViewTimer == nil { - startEmptyStructViewTimer() + timer := false + if AutoScroll { + if packetStructureViewHolder.SubWidget() == nullw { + timer = true + } + } else { + if packetStructureViewHolder.SubWidget() != MissingMsgw { + timer = true + } + } + + // If autoscroll is on, it's annoying to see the constant loading message, so + // suppress and just remain on the last displayed hex + if timer { + if EmptyStructViewTimer == nil { + EmptyStructViewTimer = time.AfterFunc(time.Duration(1000)*time.Millisecond, func() { + app.Run(gowid.RunFunction(func(app gowid.IApp) { + singlePacketViewMsgHolder.SetSubWidget(Loadingw, app) + packetStructureViewHolder.SetSubWidget(MissingMsgw, app) + })) + }) + } } } @@ -2041,14 +1950,16 @@ func setPacketListWidgets(psml psmlInfo, app gowid.IApp) { CacheRequests = CacheRequests[:0] CacheRequests = append(CacheRequests, pcap.LoadPcapSlice{ - Row: (row / pktsPerLoad) * pktsPerLoad, - Cancel: true, + Row: (row / pktsPerLoad) * pktsPerLoad, + CancelCurrent: true, }) if rowm > pktsPerLoad/2 { + // Optimistically load the batch below this one CacheRequests = append(CacheRequests, pcap.LoadPcapSlice{ Row: ((row / pktsPerLoad) + 1) * pktsPerLoad, }) } else { + // Optimistically load the batch above this one row2 := ((row / pktsPerLoad) - 1) * pktsPerLoad if row2 < 0 { row2 = 0 @@ -2059,9 +1970,11 @@ func setPacketListWidgets(psml psmlInfo, app gowid.IApp) { } CacheRequestsChan <- struct{}{} - - setLowerWidgets(app) } + + // When the focus changes, update the hex and struct view. 
If they cannot + // be populated, display a loading message + setLowerWidgets(app) })) withScrollbar := withscrollbar.New(packetListView, withscrollbar.Options{ @@ -2358,112 +2271,23 @@ func (r copyModePalette) AlterWidget(w gowid.IWidget, app gowid.IApp) gowid.IWid //====================================================================== -type SaveRecents struct { - Pcap string - Filter string - App gowid.IApp -} - -var _ pcap.IBeforeBegin = SaveRecents{} - -func MakeSaveRecents(pcap string, filter string, app gowid.IApp) SaveRecents { - return SaveRecents{ - Pcap: pcap, - Filter: filter, - App: app, - } -} - -func (t SaveRecents) BeforeBegin() { - // Run on main goroutine to avoid problems flagged by -race - t.App.Run(gowid.RunFunction(func(gowid.IApp) { - if t.Pcap != "" { - termshark.AddToRecentFiles(t.Pcap) - } - if t.Filter != "" { - // Run on main goroutine to avoid problems flagged by -race - termshark.AddToRecentFilters(t.Filter) - } - })) -} - -//====================================================================== - - -type SignalPackets struct { - done bool - C chan struct{} -} - -var _ pcap.IPsmlHeader = (*SignalPackets)(nil) - -func (t *SignalPackets) OnPsmlHeader() { - if !t.done { - close(t.C) - t.done = true - } -} - -//====================================================================== - -type checkGlobalJumpAfterPsml struct { - App gowid.IApp - Jump termshark.GlobalJumpPos -} - -var _ pcap.IAfterEnd = checkGlobalJumpAfterPsml{} -var _ pcap.IOnError = checkGlobalJumpAfterPsml{} -var _ pcap.INewSource = checkGlobalJumpAfterPsml{} - -func MakeCheckGlobalJumpAfterPsml(app gowid.IApp, jmp termshark.GlobalJumpPos) checkGlobalJumpAfterPsml { - return checkGlobalJumpAfterPsml{ - App: app, - Jump: jmp, - } -} - -func clearMarks() { - for k := range marksMap { - delete(marksMap, k) - } - lastJumpPos = -1 -} - -func (t checkGlobalJumpAfterPsml) OnNewSource() { - clearMarks() -} - -func (t checkGlobalJumpAfterPsml) OnClear() { - clearMarks() -} - -func (t checkGlobalJumpAfterPsml) OnError(err error) { -} - -func (t checkGlobalJumpAfterPsml) AfterEnd() { - // Run on main goroutine to avoid problems flagged by -race - t.App.Run(gowid.RunFunction(func(app gowid.IApp) { - if QuitRequested { - return - } - if t.Jump.Filename == Loader.Pcap() { - if packetListView != nil { - tableRow, err := tableRowFromPacketNumber(t.Jump.Pos) - if err != nil { - OpenError(err.Error(), app) - } else { - - tableCol := 0 - curTablePos, err := packetListView.FocusXY() - if err == nil { - tableCol = curTablePos.Column - } - - packetListView.SetFocusXY(app, table.Coords{Column: tableCol, Row: tableRow}) - } - } - } - })) +func RequestLoadInterfaces(psrcs []pcap.IPacketSource, captureFilter string, displayFilter string, tmpfile string, app gowid.IApp) { + Loader.Renew() + Loader.LoadInterfaces(psrcs, captureFilter, displayFilter, tmpfile, + pcap.HandlerList{ + StartUIWhenThereArePackets{}, + SimpleErrors{}, + MakeSaveRecents("", displayFilter), + MakePacketViewUpdater(), + MakeUpdateCurrentCaptureInTitle(), + ManageStreamCache{}, + ManageCapinfoCache{}, + SetStructWidgets{Loader}, // for OnClear + ClearMarksHandler{}, + CancelledMessage{}, + }, + app, + ) } //====================================================================== @@ -2471,19 +2295,45 @@ func (t checkGlobalJumpAfterPsml) AfterEnd() { // Call from app goroutine context func RequestLoadPcapWithCheck(pcapf string, displayFilter string, jump termshark.GlobalJumpPos, app gowid.IApp) { handlers := pcap.HandlerList{ - MakeSaveRecents(pcapf, 
displayFilter, app), - MakePacketViewUpdater(app), - MakeUpdateCurrentCaptureInTitle(app), + SimpleErrors{}, + MakeSaveRecents(pcapf, displayFilter), + MakePacketViewUpdater(), + MakeUpdateCurrentCaptureInTitle(), ManageStreamCache{}, ManageCapinfoCache{}, - MakeCheckGlobalJumpAfterPsml(app, jump), + SetStructWidgets{Loader}, // for OnClear + MakeCheckGlobalJumpAfterPsml(jump), + ClearMarksHandler{}, + CancelledMessage{}, } if _, err := os.Stat(pcapf); os.IsNotExist(err) { - pcap.HandleError(err, handlers) + pcap.HandleError(pcap.NoneCode, app, err, handlers) } else { - PcapScheduler.RequestLoadPcap(pcapf, displayFilter, handlers) + // no auto-scroll when reading a file + AutoScroll = false + Loader.LoadPcap(pcapf, displayFilter, handlers, app) + } +} + +//====================================================================== + +func RequestNewFilter(displayFilter string, app gowid.IApp) { + handlers := pcap.HandlerList{ + SimpleErrors{}, + MakePacketViewUpdater(), + MakeUpdateCurrentCaptureInTitle(), + SetStructWidgets{Loader}, // for OnClear + ClearMarksHandler{}, + // Don't use this one - we keep the cancelled flag set so that we + // don't restart live captures on clear if ctrl-c has been issued + // so we don't want this handler on a new filter because we don't + // want to be told again after applying the filter that the load + // was cancelled + //MakeCancelledMessage(), } + + Loader.NewFilter(displayFilter, handlers, app) } //====================================================================== @@ -2502,6 +2352,15 @@ func (p Prog) String() string { return fmt.Sprintf("cur=%d max=%d", p.cur, p.max) } +func (p Prog) Div(y int64) Prog { + p.cur /= y + return p +} + +func (p Prog) Add(y Prog) Prog { + return Prog{cur: p.cur + y.cur, max: p.max + y.max} +} + func progMin(x, y Prog) Prog { if float64(x.cur)/float64(x.max) < float64(y.cur)/float64(y.max) { return x @@ -2586,63 +2445,6 @@ func (s savedCompleter) Completions(prefix string, cb termshark.IPrefixCompleter //====================================================================== -type SetStructWidgets struct { - Ld *pcap.Loader - App gowid.IApp -} - -var _ pcap.IOnError = SetStructWidgets{} -var _ pcap.IClear = SetStructWidgets{} -var _ pcap.IBeforeBegin = SetStructWidgets{} -var _ pcap.IAfterEnd = SetStructWidgets{} - -func (s SetStructWidgets) OnClear() { - s.AfterEnd() -} - -func (s SetStructWidgets) BeforeBegin() { - s2ch := s.Ld.Stage2FinishedChan - - termshark.TrackedGo(func() { - fn2 := func() { - s.App.Run(gowid.RunFunction(func(app gowid.IApp) { - setLowerWidgets(app) - })) - } - - termshark.RunOnDoubleTicker(s2ch, fn2, - time.Duration(100)*time.Millisecond, - time.Duration(2000)*time.Millisecond, - 10) - }, Goroutinewg) -} - -// Close the channel before the callback. When the global loader state is idle, -// app.Quit() will stop accepting app callbacks, so the goroutine that waits -// for ch to be closed will never terminate. 
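Prog gains Div and Add so that progress from separate load stages can be scaled and merged before being reduced to a single ratio: Div divides only the current count, Add sums both cur and max, and progMin keeps whichever Prog has the smaller ratio. A rough sketch of turning two stages into one percentage; the local Prog copy and the equal weighting are illustrative assumptions, not necessarily how termshark combines PSML and PDML progress.

package main

import "fmt"

// Minimal copy of Prog plus Add so this sketch compiles on its own; in
// termshark these live in package ui.
type Prog struct {
	cur int64
	max int64
}

func (p Prog) Add(y Prog) Prog {
	return Prog{cur: p.cur + y.cur, max: p.max + y.max}
}

// combinedPercent merges the progress of two stages into one 0-100 value.
// Adding both cur and max means the gauge only reaches 100% when both
// stages are finished.
func combinedPercent(a, b Prog) float64 {
	total := a.Add(b)
	if total.max <= 0 {
		return 0
	}
	return 100 * float64(total.cur) / float64(total.max)
}

func main() {
	psml := Prog{cur: 400, max: 1000} // 40% of the packet list
	pdml := Prog{cur: 90, max: 100}   // 90% of the current detail batch
	fmt.Printf("%.1f%%\n", combinedPercent(psml, pdml)) // 44.5%
}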
-func (s SetStructWidgets) AfterEnd() { - s.App.Run(gowid.RunFunction(func(app gowid.IApp) { - setLowerWidgets(app) - })) -} - -func (s SetStructWidgets) OnError(err error) { - log.Error(err) - s.App.Run(gowid.RunFunction(func(app gowid.IApp) { - OpenLongError(fmt.Sprintf("%v", err), app) - })) -} - -//====================================================================== - -func startEmptyStructViewTimer() { - EmptyStructViewTimer = time.NewTicker(time.Duration(1000) * time.Millisecond) -} - -func startEmptyHexViewTimer() { - EmptyHexViewTimer = time.NewTicker(time.Duration(1000) * time.Millisecond) -} - func StopEmptyStructViewTimer() { if EmptyStructViewTimer != nil { EmptyStructViewTimer.Stop() @@ -2659,40 +2461,6 @@ func StopEmptyHexViewTimer() { //====================================================================== -type SetNewPdmlRequests struct { - *pcap.Scheduler -} - -var _ pcap.ICacheUpdater = SetNewPdmlRequests{} - -func (u SetNewPdmlRequests) WhenLoadingPdml() { - u.When(func() bool { - return u.State()&pcap.LoadingPdml == pcap.LoadingPdml - }, func() { - CacheRequestsChan <- struct{}{} - }) -} - -func (u SetNewPdmlRequests) WhenNotLoadingPdml() { - u.When(func() bool { - return u.State()&pcap.LoadingPdml == 0 - }, func() { - CacheRequestsChan <- struct{}{} - }) -} - -func SetStructViewMissing(app gowid.IApp) { - singlePacketViewMsgHolder.SetSubWidget(Loadingw, app) - packetStructureViewHolder.SetSubWidget(MissingMsgw, app) -} - -func SetHexViewMissing(app gowid.IApp) { - singlePacketViewMsgHolder.SetSubWidget(Loadingw, app) - packetHexViewHolder.SetSubWidget(MissingMsgw, app) -} - -//====================================================================== - func assignTo(wp interface{}, w gowid.IWidget) gowid.IWidget { reflect.ValueOf(wp).Elem().Set(reflect.ValueOf(w)) return w @@ -2745,19 +2513,6 @@ func (w *prefixKeyWidget) UserInput(ev interface{}, size gowid.IRenderSize, focu //====================================================================== -func ApplyCurrentFilter(app gowid.IApp) { - PcapScheduler.RequestNewFilter(FilterWidget.Value(), - pcap.HandlerList{ - MakeSaveRecents("", FilterWidget.Value(), app), - MakePacketViewUpdater(app), - ManageStreamCache{}, - ManageCapinfoCache{}, - }, - ) -} - -//====================================================================== - func Build() (*gowid.App, error) { var err error @@ -3178,7 +2933,7 @@ func Build() (*gowid.App, error) { }) validFilterCb := gowid.MakeWidgetCallback("cb", func(app gowid.IApp, w gowid.IWidget) { - ApplyCurrentFilter(app) + RequestNewFilter(FilterWidget.Value(), app) }) // Will only be enabled to click if filter is valid
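Each of the Request* helpers above builds a pcap.HandlerList and hands it to the loader, which fans events out to whichever optional interfaces a handler happens to implement; the removed types assert conformance to pcap.IBeforeBegin, pcap.IAfterEnd, pcap.IOnError, pcap.INewSource and so on. A self-contained sketch of that optional-interface dispatch pattern follows. The interface and method names here are stand-ins, and the real pcap package passes extra arguments (a handler code and the gowid app, as in pcap.HandleError above).

package main

import "fmt"

// Optional per-event interfaces; a handler implements only the ones it cares about.
type IBeforeBegin interface{ BeforeBegin() }
type IAfterEnd interface{ AfterEnd() }

// HandlerList fans an event out to every handler that implements it.
type HandlerList []interface{}

func (h HandlerList) HandleBegin() {
	for _, v := range h {
		if cb, ok := v.(IBeforeBegin); ok {
			cb.BeforeBegin()
		}
	}
}

func (h HandlerList) HandleEnd() {
	for _, v := range h {
		if cb, ok := v.(IAfterEnd); ok {
			cb.AfterEnd()
		}
	}
}

// saveRecents only cares about the start of a load.
type saveRecents struct{ pcap string }

func (s saveRecents) BeforeBegin() { fmt.Println("remember recent file:", s.pcap) }

// updateTitle only cares about the end of a load.
type updateTitle struct{}

func (u updateTitle) AfterEnd() { fmt.Println("refresh title bar") }

func main() {
	handlers := HandlerList{saveRecents{pcap: "/tmp/example.pcap"}, updateTitle{}}
	handlers.HandleBegin()
	handlers.HandleEnd()
}

Because the checks are plain interface assertions, a handler only pays for the events it implements, and new event kinds can be added without touching existing handlers.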