Mirror of https://github.com/minio/minio
Fix incremental usage accounting (#12871)
Remote caches were not returned correctly, so they would not get updated on save. Furthermore, make some tweaks for more reliable updates, and invalidate the bloom filter to ensure a rescan.
parent 4197870287
commit cc60d66909

5 changed files with 21 additions and 8 deletions
cmd/data-scanner.go
```diff
@@ -525,12 +525,22 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, into *dataUsageEntry) {
 			return
 		}
 		if !into.Compacted {
-			into.addChild(dataUsageHash(folder.name))
+			h := dataUsageHash(folder.name)
+			into.addChild(h)
+			// We scanned a folder, optionally send update.
+			f.updateCache.deleteRecursive(h)
+			f.updateCache.copyWithChildren(&f.newCache, h, folder.parent)
+			f.sendUpdate()
 		}
-		// We scanned a folder, optionally send update.
-		f.sendUpdate()
 	}
 
+	// Transfer existing
+	if !into.Compacted {
+		for _, folder := range existingFolders {
+			h := hashPath(folder.name)
+			f.updateCache.copyWithChildren(&f.oldCache, h, folder.parent)
+		}
+	}
 	// Scan new...
 	for _, folder := range newFolders {
 		h := hashPath(folder.name)
```
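The scanFolder change above swaps a bare addChild for a delete-then-copy of the freshly scanned subtree into f.updateCache, which is what makes the usage accounting incremental: stale children are removed before the new subtree is grafted in. Below is a minimal, self-contained sketch of that pattern with simplified stand-in types (tree, node); it is not MinIO's actual dataUsageCache implementation.

```go
package main

import "fmt"

// node stands in for a usage entry: a payload plus the keys of its children.
type node struct {
	size     int64
	children map[string]struct{}
}

// tree stands in for a usage cache: a flat map from path key to node.
type tree struct {
	m map[string]node
}

// deleteRecursive removes key and every descendant so that a re-scan
// cannot leave stale children behind in the update cache.
func (t *tree) deleteRecursive(key string) {
	n, ok := t.m[key]
	if !ok {
		return
	}
	for child := range n.children {
		t.deleteRecursive(child)
	}
	delete(t.m, key)
}

// copyWithChildren copies key and its whole subtree from src into t
// (shallow copy: fine for this sketch) and registers key as a child of
// parent, mirroring the scanner's delete-then-copy update step.
func (t *tree) copyWithChildren(src *tree, key, parent string) {
	n, ok := src.m[key]
	if !ok {
		return
	}
	t.m[key] = n
	for child := range n.children {
		t.copyWithChildren(src, child, key)
	}
	if p, ok := t.m[parent]; ok {
		if p.children == nil {
			p.children = map[string]struct{}{}
		}
		p.children[key] = struct{}{}
		t.m[parent] = p
	}
}

func main() {
	newScan := &tree{m: map[string]node{
		"bucket/a":   {size: 10, children: map[string]struct{}{"bucket/a/x": {}}},
		"bucket/a/x": {size: 4},
	}}
	update := &tree{m: map[string]node{
		"bucket":         {children: map[string]struct{}{"bucket/a": {}}},
		"bucket/a":       {size: 99, children: map[string]struct{}{"bucket/a/stale": {}}},
		"bucket/a/stale": {size: 1},
	}}
	// Same order as scanFolder above: drop the old subtree, then copy the new one.
	update.deleteRecursive("bucket/a")
	update.copyWithChildren(newScan, "bucket/a", "bucket")
	fmt.Println(len(update.m), "entries") // 3 entries: bucket, bucket/a, bucket/a/x
}
```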
cmd/data-update-tracker.go
```diff
@@ -46,7 +46,7 @@ const (
 	dataUpdateTrackerQueueSize = 0
 
 	dataUpdateTrackerFilename     = dataUsageBucket + SlashSeparator + ".tracker.bin"
-	dataUpdateTrackerVersion      = 5
+	dataUpdateTrackerVersion      = 6
 	dataUpdateTrackerSaveInterval = 5 * time.Minute
 )
 
@@ -397,7 +397,7 @@ func (d *dataUpdateTracker) deserialize(src io.Reader, newerThan time.Time) error {
 		return err
 	}
 	switch tmp[0] {
-	case 1, 2, 3, 4:
+	case 1, 2, 3, 4, 5:
 		if intDataUpdateTracker.debug {
 			console.Debugln(color.Green("dataUpdateTracker: ") + "deprecated data version, updating.")
 		}
```
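These two hunks work together: the persisted tracker format version is bumped from 5 to 6, and version 5 joins the deprecated list in deserialize, so previously saved bloom-filter state is discarded and a rescan rebuilds it. A minimal sketch of that version-gate pattern follows; readTracker and currentVersion are illustrative names, not MinIO's API.

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

const currentVersion = 6 // bump this to invalidate all previously saved state

var errDeprecated = errors.New("deprecated data version, updating")

// readTracker reads a one-byte version header and rejects older formats,
// forcing the caller to start from empty state and rescan.
func readTracker(src io.Reader) ([]byte, error) {
	var ver [1]byte
	if _, err := io.ReadFull(src, ver[:]); err != nil {
		return nil, err
	}
	switch ver[0] {
	case 1, 2, 3, 4, 5:
		// Older on-disk formats: discard so the state is rebuilt from scratch.
		return nil, errDeprecated
	case currentVersion:
		return io.ReadAll(src)
	default:
		return nil, fmt.Errorf("unknown version %d", ver[0])
	}
}

func main() {
	if _, err := readTracker(bytes.NewReader([]byte{5, 0xde, 0xad})); err != nil {
		fmt.Println(err) // deprecated data version, updating
	}
}
```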
cmd/data-usage-cache.go
```diff
@@ -513,7 +513,11 @@ func (d *dataUsageCache) reduceChildrenOf(path dataUsageHash, limit int, compactSelf bool) {
 
 // StringAll returns a detailed string representation of all entries in the cache.
 func (d *dataUsageCache) StringAll() string {
+	// Remove bloom filter from print.
+	bf := d.Info.BloomFilter
+	d.Info.BloomFilter = nil
 	s := fmt.Sprintf("info:%+v\n", d.Info)
+	d.Info.BloomFilter = bf
 	for k, v := range d.Cache {
 		s += fmt.Sprintf("\t%v: %+v\n", k, v)
 	}
```
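StringAll now stashes the bloom filter, nils it for the duration of the fmt.Sprintf, and restores it afterwards, so the %+v dump is not drowned in binary filter data. A minimal sketch of that save/nil/restore trick with an illustrative type:

```go
package main

import "fmt"

type info struct {
	Name        string
	BloomFilter []byte // large binary blob, pure noise in a debug dump
}

// describe hides the blob from the formatted output, then restores it.
func describe(i *info) string {
	bf := i.BloomFilter
	i.BloomFilter = nil
	s := fmt.Sprintf("info:%+v", *i)
	i.BloomFilter = bf
	return s
}

func main() {
	i := &info{Name: "bucket", BloomFilter: make([]byte, 1<<20)}
	fmt.Println(describe(i)) // info:{Name:bucket BloomFilter:[]}
}
```

Like the real method, this briefly mutates the receiver, so it is only safe when no other goroutine reads the struct concurrently.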
cmd/erasure.go
```diff
@@ -482,11 +482,10 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf *bloomFilter, updates chan<- dataUsageCache) error {
 				Entry: update,
 			}
 			if intDataUpdateTracker.debug {
-				console.Debugln("bucket", bucket.Name, "got update", update)
+				console.Debugln("z:", er.poolIndex, "s:", er.setIndex, "bucket", name, "got update", update)
 			}
 		}
 	}(cache.Info.Name)
 
 	// Calc usage
 	before := cache.Info.LastUpdate
-	var err error
```
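Two details in this hunk are easy to miss: the debug line now prefixes pool (z:) and set (s:) indices so updates from different erasure sets can be told apart, and it reads name, the parameter bound at }(cache.Info.Name), rather than a captured variable. Passing the value as a goroutine argument snapshots it at spawn time, a standard Go idiom (essential under pre-1.22 loop-variable semantics). A small sketch:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, bucket := range []string{"alpha", "beta", "gamma"} {
		wg.Add(1)
		// Pass bucket as an argument: the goroutine gets its own copy,
		// just as nsScanner passes cache.Info.Name into its closure.
		go func(name string) {
			defer wg.Done()
			fmt.Println("bucket", name, "got update")
		}(bucket)
	}
	wg.Wait()
}
```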
cmd/storage-rest-client.go
```diff
@@ -250,7 +250,7 @@ func (client *storageRESTClient) NSScanner(ctx context.Context, cache dataUsageCache) (dataUsageCache, error) {
 	if err == io.EOF {
 		err = nil
 	}
-	return cache, err
+	return newCache, err
 }
 
 func (client *storageRESTClient) GetDiskID() (string, error) {
```
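This one-line fix is the headline of the commit: the REST client decoded the remote scanner result into newCache but returned the unmodified input cache, so remote usage updates were silently thrown away before being saved. A reduced illustration of the bug class (the usage type and scan functions are invented for the example):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type usage struct{ Objects int }

// scanBuggy decodes the reply into out but returns the stale input.
func scanBuggy(in usage, reply []byte) (usage, error) {
	var out usage
	if err := json.Unmarshal(reply, &out); err != nil {
		return in, err
	}
	return in, nil // BUG: should be `return out, nil`
}

// scanFixed returns the decoded result, mirroring `return newCache, err`.
func scanFixed(in usage, reply []byte) (usage, error) {
	var out usage
	if err := json.Unmarshal(reply, &out); err != nil {
		return in, err
	}
	return out, nil
}

func main() {
	reply := []byte(`{"Objects": 42}`)
	got, _ := scanBuggy(usage{Objects: 1}, reply)
	fmt.Println(got.Objects) // 1 — the remote update is lost
	got, _ = scanFixed(usage{Objects: 1}, reply)
	fmt.Println(got.Objects) // 42
}
```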