Commit 66cedd5112: Merge 5f34388143 into 3ce11f14ce
@@ -133,17 +133,12 @@ bool BookMetadataCache::buildBookBin(const std::string& epubPath, const BookMeta
     tocFile.close();
     return false;
   }
-  // TODO: For large ZIPs loading the all localHeaderOffsets will crash.
-  // However not having them loaded is extremely slow. Need a better solution here.
-  // Perhaps only a cache of spine items or a better way to speedup lookups?
-  if (!zip.loadAllFileStatSlims()) {
-    Serial.printf("[%lu] [BMC] Could not load zip local header offsets for size calculations\n", millis());
-    bookFile.close();
-    spineFile.close();
-    tocFile.close();
-    zip.close();
-    return false;
-  }
+  // NOTE: We intentionally skip calling loadAllFileStatSlims() here.
+  // For large EPUBs (2000+ chapters), pre-loading all ZIP central directory entries
+  // into memory causes OOM crashes on ESP32-C3's limited ~380KB RAM.
+  // Instead, we let loadFileStatSlim() do individual lookups per spine item.
+  // This is O(n*m) instead of O(n) for lookups, but avoids memory exhaustion.
+  // See: https://github.com/crosspoint-reader/crosspoint-reader/issues/134
   uint32_t cumSize = 0;
   spineFile.seek(0);
   int lastSpineTocIndex = -1;
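
To make the tradeoff described in the new comments concrete, here is a minimal sketch of the two lookup strategies. Only the method names loadAllFileStatSlims() and loadFileStatSlim() come from the diff; the ZipFile wrapper, the FileStatSlim struct, the signatures, and the sumSpineSizes() helper below are hypothetical illustrations, not the project's actual API.

// Sketch only: ZipFile, FileStatSlim, and these signatures are hypothetical.
// The diff confirms only the names loadAllFileStatSlims() and loadFileStatSlim().
#include <cstdint>
#include <string>
#include <vector>

struct FileStatSlim {            // assumed minimal per-entry metadata
  uint32_t uncompressedSize = 0;
  uint32_t localHeaderOffset = 0;
  bool found = false;
};

class ZipFile {                  // assumed thin wrapper over the EPUB's ZIP archive
 public:
  // Old strategy: read the whole central directory once and keep every entry
  // resident in RAM. Later lookups over n spine items are O(n) total, but on a
  // 2000+ chapter EPUB the resident entries exhaust the ESP32-C3's ~380KB heap.
  bool loadAllFileStatSlims() { return true; }                                // stub

  // New strategy: scan the central directory for one path on demand. Each call
  // is O(m) over the directory, so n spine items cost O(n*m), but only a single
  // FileStatSlim is alive at any time.
  FileStatSlim loadFileStatSlim(const std::string& /*path*/) { return {}; }   // stub
};

// Cumulative-size pass over the spine using per-item lookups (no preload).
inline uint32_t sumSpineSizes(ZipFile& zip, const std::vector<std::string>& spinePaths) {
  uint32_t cumSize = 0;
  for (const auto& path : spinePaths) {
    FileStatSlim stat = zip.loadFileStatSlim(path);
    if (stat.found) cumSize += stat.uncompressedSize;
  }
  return cumSize;
}

The patch accepts the slower O(n*m) path because memory, not CPU time, is the binding constraint on this hardware; the removed TODO had flagged exactly that tension between crashing on preload and slow per-item lookups.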