
Commit

nil pointer fix
rusq committed Jan 22, 2024
1 parent 8883831 commit b764355
Showing 3 changed files with 24 additions and 11 deletions.
15 changes: 11 additions & 4 deletions cmd/slackdump/internal/dump/dump.go
@@ -13,7 +13,6 @@ import (
     "text/template"
     "time"
 
-    "github.com/rusq/dlog"
     "github.com/rusq/fsadapter"
 
     "github.com/rusq/slackdump/v3"
@@ -78,6 +77,8 @@ func RunDump(ctx context.Context, _ *base.Command, args []string) error {
         return ErrNothingToDo
     }
 
+    lg := logger.FromContext(ctx)
+
     // Retrieve the Authentication provider.
     prov, err := auth.FromContext(ctx)
     if err != nil {
@@ -110,7 +111,11 @@ func RunDump(ctx context.Context, _ *base.Command, args []string) error {
         base.SetExitStatus(base.SApplicationError)
         return err
     }
-    defer fsa.Close()
+    defer func() {
+        if err := fsa.Close(); err != nil {
+            lg.Printf("warning: failed to close the filesystem: %v", err)
+        }
+    }()
 
     sess, err := slackdump.New(ctx, prov, slackdump.WithLogger(logger.FromContext(ctx)), slackdump.WithFilesystem(fsa))
     if err != nil {
@@ -138,7 +143,7 @@ func RunDump(ctx context.Context, _ *base.Command, args []string) error {
         base.SetExitStatus(base.SApplicationError)
         return err
     }
-    dlog.FromContext(ctx).Printf("dumped %d conversations in %s", len(p.list.Include), time.Since(start))
+    lg.Printf("dumped %d conversations in %s", len(p.list.Include), time.Since(start))
     return nil
 }
 
@@ -239,7 +244,9 @@ func dump(ctx context.Context, sess *slackdump.Session, fsa fsadapter.FS, p dump
         if sr.Err != nil {
             return sr.Err
         }
-        dlog.Printf("conversation %s dumped", sr)
+        if sr.IsLast {
+            lg.Printf("%s dumped", sr)
+        }
         return nil
     }),
 ).Conversations(ctx, proc, p.list.Generator(ctx)); err != nil {
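Note on the dump.go hunks above: the bare defer fsa.Close() is replaced with a deferred closure that logs a failed close instead of silently dropping the error, using the logger taken from the context. Below is a minimal, self-contained sketch of that pattern, with os.Create and the standard log package standing in for fsadapter and slackdump's logger (the file name and run helper are illustrative, not part of slackdump):

package main

import (
    "log"
    "os"
)

// run creates a file and writes to it; the deferred closure mirrors the
// commit's treatment of fsa.Close: the close error is logged, not dropped.
func run(path string) error {
    f, err := os.Create(path)
    if err != nil {
        return err
    }
    defer func() {
        if cerr := f.Close(); cerr != nil {
            log.Printf("warning: failed to close %s: %v", path, cerr)
        }
    }()

    _, err = f.WriteString("hello\n")
    return err
}

func main() {
    if err := run("out.txt"); err != nil {
        log.Fatal(err)
    }
}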
10 changes: 5 additions & 5 deletions internal/chunk/transform/export_coordinator.go
@@ -11,10 +11,10 @@ import (
     "github.com/slack-go/slack"
 )
 
-// ExportCoordinator is a transformer that takes the chunks produced by the
-// processor and transforms them into a Slack Export format. It is sutable
+// ExportCoordinator is a takes the chunks produced by the
+// processor and transforms them into a Slack Export format. It is suitable
 // for async processing, in which case, OnFinalise function is passed to the
-// processor, and the finalisation requests will be queued (up to a
+// processor, the finalisation requests will be queued (up to a
 // [bufferSz]) and will be processed once Start or StartWithUsers is called.
 //
 // Please note, that transform requires users to be passed either through
@@ -29,8 +29,8 @@ import (
 // all users are fetched, call [ExportCoordinator.StartWithUsers], passing
 // the fetched users slice.
 // 4. In another goroutine, start the ExportCoordinator Conversation
-// processor, passsing the transformer's OnFinalise function as the
-// finaliser option. It will be called by export processor for each
+// processor, passing the transformer's OnFinalise function as the
+// Finaliser option. It will be called by export processor for each
 // channel that was completed.
 type ExportCoordinator struct {
     cvt UserConverter
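The ExportCoordinator doc comment above describes a queue-then-drain contract: OnFinalise requests are buffered (up to bufferSz) and only processed once Start or StartWithUsers runs. Here is a simplified, hypothetical model of that contract; the type, method names, and capacity below are illustrative only and are not the real slackdump API:

package main

import "fmt"

// coordinator is a toy model of the queueing behaviour described in the
// doc comment: finalise requests pile up in a buffered channel until Start
// drains them.
type coordinator struct {
    requests chan string
}

func newCoordinator(bufferSz int) *coordinator {
    return &coordinator{requests: make(chan string, bufferSz)}
}

// OnFinalise queues a channel ID; it does not block while fewer than
// bufferSz requests are pending.
func (c *coordinator) OnFinalise(channelID string) {
    c.requests <- channelID
}

// Start stops accepting new requests and processes everything queued so far.
func (c *coordinator) Start() {
    close(c.requests)
    for id := range c.requests {
        fmt.Println("finalising", id)
    }
}

func main() {
    c := newCoordinator(8)
    c.OnFinalise("C0100000")
    c.OnFinalise("C0200000")
    c.Start()
}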
10 changes: 8 additions & 2 deletions stream.go
@@ -612,16 +612,22 @@ func (cs *Stream) channelInfoWithUsers(ctx context.Context, proc processor.Chann
     eg.Go(func() error {
         defer close(chC)
         ch, err := cs.channelInfo(ctx, proc, channelID, threadTS)
+        if err != nil {
+            return err
+        }
         chC <- *ch
-        return err
+        return nil
     })
 
     var uC = make(chan []string, 1)
     eg.Go(func() error {
         defer close(uC)
         m, err := cs.channelUsers(ctx, proc, channelID, threadTS)
+        if err != nil {
+            return err
+        }
         uC <- m
-        return err
+        return nil
     })
 
     if err := eg.Wait(); err != nil {
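The stream.go hunks are the nil pointer fix itself: both errgroup goroutines used to send their result before checking err, and for the channel-info goroutine that meant dereferencing ch, which is nil when channelInfo fails. The fix returns on error before the send. A standalone sketch of the corrected pattern, with a stub fetch function standing in for cs.channelInfo (names here are illustrative, not slackdump's):

package main

import (
    "context"
    "errors"
    "fmt"

    "golang.org/x/sync/errgroup"
)

type info struct{ name string }

// fetch stands in for cs.channelInfo: on failure it returns (nil, err).
func fetch(ctx context.Context, ok bool) (*info, error) {
    if !ok {
        return nil, errors.New("fetch failed")
    }
    return &info{name: "general"}, nil
}

func get(ctx context.Context, ok bool) (info, error) {
    chC := make(chan info, 1)
    eg, ctx := errgroup.WithContext(ctx)
    eg.Go(func() error {
        defer close(chC)
        ch, err := fetch(ctx, ok)
        if err != nil {
            // Returning before the send avoids dereferencing a nil *info,
            // which is what the commit changes in channelInfoWithUsers.
            return err
        }
        chC <- *ch
        return nil
    })
    if err := eg.Wait(); err != nil {
        return info{}, err
    }
    return <-chC, nil
}

func main() {
    ci, err := get(context.Background(), true)
    fmt.Println(ci, err)
    _, err = get(context.Background(), false)
    fmt.Println(err)
}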

