Implement message store side of message fetching
commit 11f0a7267f
parent 0f8b7a1203

4 changed files with 58 additions and 21 deletions
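For orientation before the diff: the UI-side MessageStore introduced below keeps a pendingHeaders set so repeated draw passes do not re-request the same UIDs, posts one batched FetchMessageHeaders action to the worker, and fires its onUpdate callback as the worker's MessageInfo responses come back. The stand-alone sketch below models only that request/response pattern with made-up stand-in types (demoStore, a post callback); it is an illustration of the flow, not aerc code.

    package main

    import "fmt"

    // demoStore is a simplified stand-in for MessageStore: it remembers which
    // UIDs have already been requested and notifies a callback when one arrives.
    type demoStore struct {
        pendingHeaders map[uint32]interface{}
        headers        map[uint32]string   // uid -> subject, standing in for *MessageInfo
        onUpdate       func()              // standing in for OnUpdate(func(*MessageStore))
        post           func(uids []uint32) // standing in for worker.PostAction(&FetchMessageHeaders{...})
    }

    func (s *demoStore) FetchHeaders(uids []uint32) {
        var toFetch []uint32
        for _, uid := range uids {
            if _, ok := s.pendingHeaders[uid]; !ok {
                s.pendingHeaders[uid] = nil // mark as requested
                toFetch = append(toFetch, uid)
            }
        }
        if len(toFetch) != 0 {
            s.post(toFetch) // one batched request; duplicates already filtered out
        }
    }

    func (s *demoStore) Update(uid uint32, subject string) {
        s.headers[uid] = subject
        delete(s.pendingHeaders, uid) // header arrived, no longer pending
        if s.onUpdate != nil {
            s.onUpdate()
        }
    }

    func main() {
        store := &demoStore{
            pendingHeaders: make(map[uint32]interface{}),
            headers:        make(map[uint32]string),
        }
        store.onUpdate = func() { fmt.Println("redraw:", store.headers) }
        store.post = func(uids []uint32) { fmt.Println("worker asked for", uids) }

        store.FetchHeaders([]uint32{1, 2, 3})
        store.FetchHeaders([]uint32{2, 3, 4}) // only 4 is new, so only 4 is requested
        store.Update(2, "Re: [PATCH] fetch headers")
    }

In the real code the response path runs through AccountView.onMessage, which forwards each worker message into store.Update (first hunks below).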
@@ -58,7 +58,7 @@ func NewAccountView(conf *config.AccountConfig,
 	dirlist := NewDirectoryList(conf, logger, worker)
 	grid.AddChild(ui.NewBordered(dirlist, ui.BORDER_RIGHT)).Span(2, 1)
 
-	msglist := NewMessageList(logger, worker)
+	msglist := NewMessageList(logger)
 	grid.AddChild(msglist).At(0, 1)
 
 	acct := &AccountView{
@@ -183,7 +183,7 @@ func (acct *AccountView) onMessage(msg types.WorkerMessage) {
 		if store, ok := acct.msgStores[msg.Name]; ok {
 			store.Update(msg)
 		} else {
-			acct.msgStores[msg.Name] = NewMessageStore(msg)
+			acct.msgStores[msg.Name] = NewMessageStore(acct.worker, msg)
 		}
 	case *types.DirectoryContents:
 		store := acct.msgStores[acct.dirlist.selected]
@@ -3,6 +3,7 @@ package widgets
 import (
 	"log"
 
+	"github.com/emersion/go-imap"
 	"github.com/gdamore/tcell"
 
 	"git.sr.ht/~sircmpwn/aerc2/config"
@@ -12,20 +13,52 @@ import (
 
 type MessageStore struct {
 	DirInfo  types.DirectoryInfo
-	Messages map[uint64]*types.MessageInfo
+	Messages map[uint32]*types.MessageInfo
+	// Map of uids we've asked the worker to fetch
 	onUpdate       func(store *MessageStore)
+	pendingBodies  map[uint32]interface{}
+	pendingHeaders map[uint32]interface{}
+	worker         *types.Worker
 }
 
-func NewMessageStore(dirInfo *types.DirectoryInfo) *MessageStore {
-	return &MessageStore{DirInfo: *dirInfo}
+func NewMessageStore(worker *types.Worker,
+	dirInfo *types.DirectoryInfo) *MessageStore {
+
+	return &MessageStore{
+		DirInfo: *dirInfo,
+
+		pendingBodies:  make(map[uint32]interface{}),
+		pendingHeaders: make(map[uint32]interface{}),
+		worker:         worker,
+	}
 }
 
+func (store *MessageStore) FetchHeaders(uids []uint32) {
+	// TODO: this could be optimized by pre-allocating toFetch and trimming it
+	// at the end. In practice we expect to get most messages back in one frame.
+	var toFetch imap.SeqSet
+	for _, uid := range uids {
+		if _, ok := store.pendingHeaders[uid]; !ok {
+			toFetch.AddNum(uint32(uid))
+			store.pendingHeaders[uid] = nil
+		}
+	}
+	if !toFetch.Empty() {
+		store.worker.PostAction(&types.FetchMessageHeaders{
+			Uids: toFetch,
+		}, nil)
+	}
+}
+
 func (store *MessageStore) Update(msg types.WorkerMessage) {
 	update := false
 	switch msg := msg.(type) {
 	case *types.DirectoryInfo:
 		store.DirInfo = *msg
 		update = true
 		break
 	case *types.DirectoryContents:
-		newMap := make(map[uint64]*types.MessageInfo)
+		newMap := make(map[uint32]*types.MessageInfo)
 		for _, uid := range msg.Uids {
 			if msg, ok := store.Messages[uid]; ok {
 				newMap[uid] = msg
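A small detail in FetchHeaders above: pendingHeaders is a map[uint32]interface{} used purely as a set, so membership is recorded as store.pendingHeaders[uid] = nil and tested with the comma-ok form. A value comparison would not work, since the stored value really is nil; a stand-alone illustration:

    package main

    import "fmt"

    func main() {
        pending := make(map[uint32]interface{})
        pending[42] = nil // mark uid 42 as requested; the value itself carries no meaning

        _, ok := pending[42]
        fmt.Println(pending[42] == nil, ok) // true true: only the comma-ok form sees the entry
    }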
@@ -34,11 +67,23 @@ func (store *MessageStore) Update(msg types.WorkerMessage) {
 			}
 		}
 		store.Messages = newMap
 		update = true
 		break
+	case *types.MessageInfo:
+		store.Messages[msg.Uid] = msg
+		if _, ok := store.pendingHeaders[msg.Uid]; msg.Envelope != nil && ok {
+			delete(store.pendingHeaders, msg.Uid)
+		}
+		update = true
+		break
 	}
 	if update && store.onUpdate != nil {
 		store.onUpdate(store)
 	}
 }
 
 func (store *MessageStore) OnUpdate(fn func(store *MessageStore)) {
 	store.onUpdate = fn
 }
 
 type MessageList struct {
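The DirectoryContents case ending above rebuilds Messages around the worker's new UID list: anything already known is carried over, and entries whose UIDs are no longer reported are dropped (the handling of brand-new UIDs falls outside the visible hunk). A stand-alone sketch of that rebuild, with strings standing in for *types.MessageInfo:

    package main

    import "fmt"

    func main() {
        // Current store contents: uid -> header summary (standing in for *types.MessageInfo).
        messages := map[uint32]string{1: "old thread", 2: "kept", 5: "also kept"}

        // Fresh directory contents reported by the worker: uid 1 is gone, uid 7 is new.
        uids := []uint32{2, 5, 7}

        // Rebuild the map, carrying over only what is still present.
        newMap := make(map[uint32]string)
        for _, uid := range uids {
            if msg, ok := messages[uid]; ok {
                newMap[uid] = msg
            }
        }
        messages = newMap

        fmt.Println(messages) // map[2:kept 5:also kept]; uid 7 has no header yet
    }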
@@ -47,15 +92,13 @@ type MessageList struct {
 	onInvalidate func(d ui.Drawable)
 	spinner      *Spinner
 	store        *MessageStore
-	worker       *types.Worker
 }
 
 // TODO: fish in config
-func NewMessageList(logger *log.Logger, worker *types.Worker) *MessageList {
+func NewMessageList(logger *log.Logger) *MessageList {
 	ml := &MessageList{
 		logger:  logger,
 		spinner: NewSpinner(),
-		worker:  worker,
 	}
 	ml.spinner.OnInvalidate(func(_ ui.Drawable) {
 		ml.Invalidate()
@@ -84,7 +127,7 @@ func (ml *MessageList) Draw(ctx *ui.Context) {
 	}
 
 	var (
-		needsHeaders []uint64
+		needsHeaders []uint32
 		row          int = 0
 	)
 
@@ -102,12 +145,11 @@ func (ml *MessageList) Draw(ctx *ui.Context) {
 	}
 
 	if len(needsHeaders) != 0 {
+		ml.store.FetchHeaders(needsHeaders)
 		ml.spinner.Start()
 	} else {
 		ml.spinner.Stop()
 	}
-
-	// TODO: Fetch these messages
 }
 
 func (ml *MessageList) SetStore(store *MessageStore) {
@@ -29,7 +29,7 @@ func (imapw *IMAPWorker) handleFetchDirectoryContents(
 	go func() {
 		seqSet := &imap.SeqSet{}
 		seqSet.AddRange(1, imapw.selected.Messages)
-		uid32, err := imapw.client.UidSearch(&imap.SearchCriteria{
+		uids, err := imapw.client.UidSearch(&imap.SearchCriteria{
 			SeqNum: seqSet,
 		})
 		if err != nil {
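For context on the hunk above: the worker maps sequence numbers onto UIDs by issuing a UID SEARCH whose only criterion is a SeqNum set covering the whole mailbox, and go-imap's UidSearch returns the result as []uint32, which is now forwarded to the UI unchanged. A stand-alone sketch of building those criteria (no IMAP connection involved; the message count is made up):

    package main

    import (
        "fmt"

        "github.com/emersion/go-imap"
    )

    func main() {
        messages := uint32(180) // stand-in for imapw.selected.Messages

        seqSet := &imap.SeqSet{}
        seqSet.AddRange(1, messages) // sequence numbers 1 through 180

        criteria := &imap.SearchCriteria{SeqNum: seqSet}
        fmt.Println(criteria.SeqNum) // prints the set, e.g. 1:180
    }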
@@ -38,12 +38,7 @@ func (imapw *IMAPWorker) handleFetchDirectoryContents(
 				Error: err,
 			}, nil)
 		} else {
-			imapw.worker.Logger.Printf("Found %d UIDs", len(uid32))
-			var uids []uint64
-			for _, uid := range uid32 {
-				uids = append(uids,
-					(uint64(imapw.selected.UidValidity)<<32)|uint64(uid))
-			}
+			imapw.worker.Logger.Printf("Found %d UIDs", len(uids))
 			imapw.worker.PostMessage(&types.DirectoryContents{
 				Message: types.RespondTo(msg),
 				Uids:    uids,
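The deleted lines above are the other half of the uint64-to-uint32 change running through this commit: the worker used to widen each 32-bit IMAP UID into a uint64 with the mailbox's UIDVALIDITY packed into the upper half, so UIDs stayed unique across mailboxes. With message stores kept per directory (acct.msgStores in the first hunks), plain uint32 UIDs suffice and the search result is forwarded as-is. For reference, the packing the removed code performed, shown stand-alone with made-up values:

    package main

    import "fmt"

    func main() {
        var uidValidity uint32 = 0x00c0ffee // per-mailbox UIDVALIDITY (made up)
        var uid uint32 = 42                 // a message UID within that mailbox

        // Old scheme: both values packed into one uint64.
        packed := (uint64(uidValidity) << 32) | uint64(uid)

        // Unpacking recovers both halves.
        fmt.Printf("validity=%#x uid=%d\n", uint32(packed>>32), uint32(packed))
    }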
@@ -110,7 +110,7 @@ type DirectoryInfo struct {
 
 type DirectoryContents struct {
 	Message
-	Uids []uint64
+	Uids []uint32
 }
 
 type MessageInfo struct {
@@ -120,5 +120,5 @@ type MessageInfo struct {
 	InternalDate time.Time
 	Mail         *mail.Message
 	Size         uint32
-	Uid          uint64
+	Uid          uint32
 }