aerc/worker/imap/open.go

package imap
import (
	"sort"

	sortthread "github.com/emersion/go-imap-sortthread"

	"git.sr.ht/~rjarry/aerc/logging"
	"git.sr.ht/~rjarry/aerc/worker/types"
)
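
// handleOpenDirectory selects the requested mailbox on the IMAP connection
// and stores the returned mailbox status, so that other parts of the worker
// (e.g. the header cache, which keys entries on UIDVALIDITY) can refer to
// the currently selected directory.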
func (imapw *IMAPWorker) handleOpenDirectory(msg *types.OpenDirectory) {
logging.Infof("Opening %s", msg.Directory)
	sel, err := imapw.client.Select(msg.Directory, false)
	if err != nil {
		imapw.worker.PostMessage(&types.Error{
			Message: types.RespondTo(msg),
			Error:   err,
		}, nil)
	} else {
		imapw.selected = sel
		imapw.worker.PostMessage(&types.Done{Message: types.RespondTo(msg)}, nil)
	}
}
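
// handleFetchDirectoryContents fetches the UIDs of all messages in the
// selected directory that match the requested filter. When the server
// advertises the SORT extension, sorting is done server side; otherwise the
// UIDs are returned in plain UID SEARCH order.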
func (imapw *IMAPWorker) handleFetchDirectoryContents(
	msg *types.FetchDirectoryContents,
) {
	logging.Infof("Fetching UID list")

	searchCriteria, err := parseSearch(msg.FilterCriteria)
	if err != nil {
		imapw.worker.PostMessage(&types.Error{
			Message: types.RespondTo(msg),
			Error:   err,
		}, nil)
		return
	}
	sortCriteria := translateSortCriterions(msg.SortCriteria)
	var uids []uint32
	// If the server supports the SORT extension, do the sorting server side
	ok, err := imapw.client.sort.SupportSort()
	if err == nil && ok && len(sortCriteria) > 0 {
		uids, err = imapw.client.sort.UidSort(sortCriteria, searchCriteria)
		// copy in reverse as msgList displays backwards
		for i, j := 0, len(uids)-1; i < j; i, j = i+1, j-1 {
			uids[i], uids[j] = uids[j], uids[i]
		}
	} else {
		if err != nil {
			// Non-fatal, but log it to get some debug info
			logging.Errorf("can't check for SORT support: %v", err)
		} else if len(sortCriteria) > 0 {
			logging.Warnf("SORT is not supported but requested: list messages by UID")
		}
		uids, err = imapw.client.UidSearch(searchCriteria)
	}
	if err != nil {
		imapw.worker.PostMessage(&types.Error{
			Message: types.RespondTo(msg),
			Error:   err,
		}, nil)
	} else {
		logging.Infof("Found %d UIDs", len(uids))
		if len(msg.FilterCriteria) == 1 {
			// Only initialize if we are not filtering
			imapw.seqMap.Initialize(uids)
		}
		imapw.worker.PostMessage(&types.DirectoryContents{
			Message: types.RespondTo(msg),
			Uids:    uids,
		}, nil)
		imapw.worker.PostMessage(&types.Done{Message: types.RespondTo(msg)}, nil)
	}
}
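
// sortFieldMapT maps aerc sort fields onto their go-imap-sortthread
// counterparts.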
type sortFieldMapT map[types.SortField]sortthread.SortField

// caution, incomplete mapping
var sortFieldMap sortFieldMapT = sortFieldMapT{
	types.SortArrival: sortthread.SortArrival,
	types.SortCc:      sortthread.SortCc,
	types.SortDate:    sortthread.SortDate,
	types.SortFrom:    sortthread.SortFrom,
	types.SortSize:    sortthread.SortSize,
	types.SortSubject: sortthread.SortSubject,
	types.SortTo:      sortthread.SortTo,
}
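
// translateSortCriterions converts aerc sort criteria into
// go-imap-sortthread sort criteria. Fields that have no entry in
// sortFieldMap are silently dropped.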
func translateSortCriterions(
	cs []*types.SortCriterion,
) []sortthread.SortCriterion {
	result := make([]sortthread.SortCriterion, 0, len(cs))
	for _, c := range cs {
		if f, ok := sortFieldMap[c.Field]; ok {
			result = append(result, sortthread.SortCriterion{Field: f, Reverse: c.Reverse})
		}
	}
	return result
}
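
// handleDirectoryThreaded requests server-side threading (REFERENCES
// algorithm) for the messages matching the filter, converts the result into
// aerc threads sorted by UID and posts them back to the UI.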
func (imapw *IMAPWorker) handleDirectoryThreaded(
	msg *types.FetchDirectoryThreaded,
) {
	logging.Infof("Fetching threaded UID list")

	searchCriteria, err := parseSearch(msg.FilterCriteria)
	if err != nil {
		imapw.worker.PostMessage(&types.Error{
			Message: types.RespondTo(msg),
			Error:   err,
		}, nil)
		return
	}
	threads, err := imapw.client.thread.UidThread(sortthread.References,
		searchCriteria)
	if err != nil {
		imapw.worker.PostMessage(&types.Error{
			Message: types.RespondTo(msg),
			Error:   err,
		}, nil)
	} else {
		aercThreads, count := convertThreads(threads, nil)
		sort.Sort(types.ByUID(aercThreads))
		logging.Infof("Found %d threaded messages", count)
		if len(msg.FilterCriteria) == 1 {
			// Only initialize if we are not filtering
			var uids []uint32
			for i := len(aercThreads) - 1; i >= 0; i-- {
				aercThreads[i].Walk(func(t *types.Thread, level int, currentErr error) error {
					uids = append(uids, t.Uid)
					return nil
				})
			}
			imapw.seqMap.Initialize(uids)
		}
		imapw.worker.PostMessage(&types.DirectoryThreaded{
			Message: types.RespondTo(msg),
			Threads: aercThreads,
		}, nil)
		imapw.worker.PostMessage(&types.Done{Message: types.RespondTo(msg)}, nil)
	}
}
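
// convertThreads recursively converts go-imap-sortthread threads into
// *types.Thread nodes, wiring up parent, first-child and sibling links, and
// returns the converted forest along with the total number of messages.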
func convertThreads(threads []*sortthread.Thread, parent *types.Thread) ([]*types.Thread, int) {
	if threads == nil {
		return nil, 0
	}
	conv := make([]*types.Thread, len(threads))
	count := 0

	for i := 0; i < len(threads); i++ {
		t := threads[i]
		conv[i] = &types.Thread{
			Uid: t.Id,
		}

		// Set the first child node
		children, childCount := convertThreads(t.Children, conv[i])
		if len(children) > 0 {
			conv[i].FirstChild = children[0]
		}

		// Set the parent node
		if parent != nil {
			conv[i].Parent = parent

			// elements of threads are siblings
			if i > 0 {
				conv[i].PrevSibling = conv[i-1]
				conv[i-1].NextSibling = conv[i]
			}
		}

		count += childCount + 1
	}
	return conv, count
}