aerc/lib/msgstore.go

package lib

import (
	"io"
	"sync"
	"time"

	"github.com/emersion/go-imap"

	"git.sr.ht/~sircmpwn/aerc2/worker/types"
)
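
// MessageStore is an in-memory cache of the messages and metadata of a
// single directory, kept up to date by update messages from a backend worker.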
// Accesses to fields must be guarded by MessageStore.Lock/Unlock
type MessageStore struct {
	sync.Mutex

	Deleted  map[uint32]interface{}
	DirInfo  types.DirectoryInfo
	Messages map[uint32]*types.MessageInfo
	// Ordered list of known UIDs
	Uids []uint32

	bodyCallbacks   map[uint32][]func(io.Reader)
	headerCallbacks map[uint32][]func(*types.MessageInfo)

	onUpdate func(store *MessageStore) // TODO: multiple onUpdate handlers

	// Map of uids we've asked the worker to fetch
	pendingBodies  map[uint32]interface{}
	pendingHeaders map[uint32]interface{}

	worker *types.Worker
}
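
// NewMessageStore creates an empty MessageStore for the given directory,
// backed by the given worker.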
func NewMessageStore(worker *types.Worker,
	dirInfo *types.DirectoryInfo) *MessageStore {

	return &MessageStore{
		Deleted:         make(map[uint32]interface{}),
		DirInfo:         *dirInfo,
		bodyCallbacks:   make(map[uint32][]func(io.Reader)),
		headerCallbacks: make(map[uint32][]func(*types.MessageInfo)),
		pendingBodies:   make(map[uint32]interface{}),
		pendingHeaders:  make(map[uint32]interface{}),
		worker:          worker,
	}
}
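
// FetchHeaders asks the worker for the headers of the given messages,
// skipping any UIDs that already have a pending request. If cb is non-nil,
// it is invoked with each MessageInfo as it arrives.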
func (store *MessageStore) FetchHeaders(uids []uint32,
	cb func(*types.MessageInfo)) {

	store.Lock()
	defer store.Unlock()

	// TODO: this could be optimized by pre-allocating toFetch and trimming it
	// at the end. In practice we expect to get most messages back in one frame.
	var toFetch imap.SeqSet
	for _, uid := range uids {
		if _, ok := store.pendingHeaders[uid]; !ok {
			toFetch.AddNum(uint32(uid))
			store.pendingHeaders[uid] = nil
			if cb != nil {
				if list, ok := store.headerCallbacks[uid]; ok {
					store.headerCallbacks[uid] = append(list, cb)
				} else {
					store.headerCallbacks[uid] = []func(*types.MessageInfo){cb}
				}
			}
		}
	}
	if !toFetch.Empty() {
		store.worker.PostAction(&types.FetchMessageHeaders{Uids: toFetch}, nil)
	}
}
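
// FetchFull asks the worker for the full content of the given messages,
// skipping any UIDs that already have a pending request. If cb is non-nil,
// it is invoked with a reader for each message as it arrives.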
func (store *MessageStore) FetchFull(uids []uint32, cb func(io.Reader)) {
	store.Lock()
	defer store.Unlock()

	// TODO: this could be optimized by pre-allocating toFetch and trimming it
	// at the end. In practice we expect to get most messages back in one frame.
	var toFetch imap.SeqSet
	for _, uid := range uids {
		if _, ok := store.pendingBodies[uid]; !ok {
			toFetch.AddNum(uint32(uid))
			store.pendingBodies[uid] = nil
			if cb != nil {
				if list, ok := store.bodyCallbacks[uid]; ok {
					store.bodyCallbacks[uid] = append(list, cb)
				} else {
					store.bodyCallbacks[uid] = []func(io.Reader){cb}
				}
			}
		}
	}
	if !toFetch.Empty() {
		store.worker.PostAction(&types.FetchFullMessages{Uids: toFetch}, nil)
	}
}
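
// FetchBodyPart asks the worker for a single body part of a message and
// calls cb with a reader for its content.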
func (store *MessageStore) FetchBodyPart(
	uid uint32, part int, cb func(io.Reader)) {

	store.worker.PostAction(&types.FetchMessageBodyPart{
		Uid:  uid,
		Part: part,
	}, func(resp types.WorkerMessage) {
		msg, ok := resp.(*types.MessageBodyPart)
		if !ok {
			return
		}
		cb(msg.Reader)
	})
}
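
// merge copies the non-empty fields of from into to.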
func merge(to *types.MessageInfo, from *types.MessageInfo) {
	if from.BodyStructure != nil {
		to.BodyStructure = from.BodyStructure
	}
	if from.Envelope != nil {
		to.Envelope = from.Envelope
	}
	if len(from.Flags) != 0 {
		to.Flags = from.Flags
	}
	if from.Size != 0 {
		to.Size = from.Size
	}
	var zero time.Time
	if from.InternalDate != zero {
		to.InternalDate = from.InternalDate
	}
}
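
// Update applies a message received from the worker to the store's state and
// notifies the registered update handler if the contents changed.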
func (store *MessageStore) Update(msg types.WorkerMessage) {
	store.Lock()

	update := false
	switch msg := msg.(type) {
	case *types.DirectoryInfo:
		store.DirInfo = *msg
		if store.DirInfo.Exists != len(store.Uids) {
			store.worker.PostAction(&types.FetchDirectoryContents{}, nil)
		}
		update = true
	case *types.DirectoryContents:
		newMap := make(map[uint32]*types.MessageInfo)
		for _, uid := range msg.Uids {
			if msg, ok := store.Messages[uid]; ok {
				newMap[uid] = msg
			} else {
				newMap[uid] = nil
			}
		}
		store.Messages = newMap
		store.Uids = msg.Uids
		update = true
	case *types.MessageInfo:
		if existing, ok := store.Messages[msg.Uid]; ok && existing != nil {
			merge(existing, msg)
		} else {
			store.Messages[msg.Uid] = msg
		}
		if _, ok := store.pendingHeaders[msg.Uid]; msg.Envelope != nil && ok {
			delete(store.pendingHeaders, msg.Uid)
			if cbs, ok := store.headerCallbacks[msg.Uid]; ok {
				for _, cb := range cbs {
					cb(msg)
				}
			}
		}
		update = true
	case *types.FullMessage:
		if _, ok := store.pendingBodies[msg.Uid]; ok {
			delete(store.pendingBodies, msg.Uid)
			if cbs, ok := store.bodyCallbacks[msg.Uid]; ok {
				for _, cb := range cbs {
					cb(msg.Reader)
				}
			}
		}
	case *types.MessagesDeleted:
		toDelete := make(map[uint32]interface{})
		for _, uid := range msg.Uids {
			toDelete[uid] = nil
			delete(store.Messages, uid)
			if _, ok := store.Deleted[uid]; ok {
				delete(store.Deleted, uid)
			}
		}
		uids := make([]uint32, len(store.Uids)-len(msg.Uids))
		j := 0
		for _, uid := range store.Uids {
			if _, deleted := toDelete[uid]; !deleted && j < len(uids) {
				uids[j] = uid
				j += 1
			}
		}
		store.Uids = uids
		update = true
	}
	store.Unlock()
	if update {
		store.update()
	}
}
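
// OnUpdate registers fn to be called whenever the store is updated.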
func (store *MessageStore) OnUpdate(fn func(store *MessageStore)) {
	store.onUpdate = fn
}
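
// update invokes the registered onUpdate handler, if any.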
func (store *MessageStore) update() {
	if store.onUpdate != nil {
		store.onUpdate(store)
	}
}
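
// Delete records the given UIDs as deleted and asks the worker to delete the
// messages from the backend. cb is invoked with the worker's response.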
func (store *MessageStore) Delete(uids []uint32,
	cb func(msg types.WorkerMessage)) {

	store.Lock()
	var set imap.SeqSet
	for _, uid := range uids {
		set.AddNum(uid)
		store.Deleted[uid] = nil
	}
	store.Unlock()

	store.worker.PostAction(&types.DeleteMessages{Uids: set}, cb)
	store.update()
}
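
// Copy asks the worker to copy the given messages to the dest directory.
// cb is invoked with the worker's response.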
func (store *MessageStore) Copy(uids []uint32, dest string,
	cb func(msg types.WorkerMessage)) {

	var set imap.SeqSet
	for _, uid := range uids {
		set.AddNum(uid)
	}
	store.worker.PostAction(&types.CopyMessages{
		Destination: dest,
		Uids:        set,
	}, cb)
}
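
// Move copies the given messages to dest and, if the copy succeeds, deletes
// them from the current directory. cb receives either the copy error or the
// worker's response to the delete.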
func (store *MessageStore) Move(uids []uint32, dest string,
	cb func(msg types.WorkerMessage)) {

	store.Lock()
	var set imap.SeqSet
	for _, uid := range uids {
		set.AddNum(uid)
		store.Deleted[uid] = nil
	}
	store.Unlock()

	store.worker.PostAction(&types.CopyMessages{
		Destination: dest,
		Uids:        set,
	}, func(msg types.WorkerMessage) {
		switch msg.(type) {
		case *types.Error:
			cb(msg)
		case *types.Done:
			store.worker.PostAction(&types.DeleteMessages{Uids: set}, cb)
		}
	})
	store.update()
}