Revert "Implementing PR #458"

This reverts commit 0e0dd5bc9c.
bean 2026-01-26 15:08:21 -05:00
parent fcdb4db2fe
commit 250b542e06
8 changed files with 69 additions and 401 deletions

View File

@@ -226,8 +226,6 @@ bool Epub::load(const bool buildIfMissing) {
   Serial.printf("[%lu] [EBP] Cache not found, building spine/TOC cache\n", millis());
   setupCacheDir();
-  const uint32_t indexingStart = millis();
   // Begin building cache - stream entries to disk immediately
   if (!bookMetadataCache->beginWrite()) {
     Serial.printf("[%lu] [EBP] Could not begin writing cache\n", millis());
@@ -235,7 +233,6 @@ bool Epub::load(const bool buildIfMissing) {
   }
   // OPF Pass
-  const uint32_t opfStart = millis();
   BookMetadataCache::BookMetadata bookMetadata;
   if (!bookMetadataCache->beginContentOpfPass()) {
     Serial.printf("[%lu] [EBP] Could not begin writing content.opf pass\n", millis());
@@ -249,10 +246,8 @@ bool Epub::load(const bool buildIfMissing) {
     Serial.printf("[%lu] [EBP] Could not end writing content.opf pass\n", millis());
     return false;
   }
-  Serial.printf("[%lu] [EBP] OPF pass completed in %lu ms\n", millis(), millis() - opfStart);
   // TOC Pass - try EPUB 3 nav first, fall back to NCX
-  const uint32_t tocStart = millis();
   if (!bookMetadataCache->beginTocPass()) {
     Serial.printf("[%lu] [EBP] Could not begin writing toc pass\n", millis());
     return false;
@@ -281,7 +276,6 @@ bool Epub::load(const bool buildIfMissing) {
     Serial.printf("[%lu] [EBP] Could not end writing toc pass\n", millis());
     return false;
   }
-  Serial.printf("[%lu] [EBP] TOC pass completed in %lu ms\n", millis(), millis() - tocStart);
   // Close the cache files
   if (!bookMetadataCache->endWrite()) {
@@ -290,13 +284,10 @@ bool Epub::load(const bool buildIfMissing) {
   }
   // Build final book.bin
-  const uint32_t buildStart = millis();
   if (!bookMetadataCache->buildBookBin(filepath, bookMetadata)) {
     Serial.printf("[%lu] [EBP] Could not update mappings and sizes\n", millis());
     return false;
   }
-  Serial.printf("[%lu] [EBP] buildBookBin completed in %lu ms\n", millis(), millis() - buildStart);
-  Serial.printf("[%lu] [EBP] Total indexing completed in %lu ms\n", millis(), millis() - indexingStart);
   if (!bookMetadataCache->cleanupTmpFiles()) {
     Serial.printf("[%lu] [EBP] Could not cleanup tmp files - ignoring\n", millis());
@@ -329,11 +320,16 @@ bool Epub::clearCache() const {
 }

 void Epub::setupCacheDir() const {
-  if (SdMan.exists(cachePath.c_str())) {
-    return;
-  }
-  SdMan.mkdir(cachePath.c_str());
+  // Always try to create, just in case.
+  if (!SdMan.mkdir(cachePath.c_str())) {
+    // If mkdir failed, it might already exist. Check if it's a directory.
+    // SdMan doesn't allow checking type easily without opening.
+    // But let's log the detailed failure state.
+    bool exists = SdMan.exists(cachePath.c_str());
+    Serial.printf("[%lu] [EBP] mkdir failed for %s. Exists? %s\n", millis(), cachePath.c_str(), exists ? "YES" : "NO");
+  } else {
+    // Serial.printf("[%lu] [EBP] Created cache directory: %s\n", millis(), cachePath.c_str());
+  }
 }

 const std::string& Epub::getCachePath() const { return cachePath; }
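The timing lines removed above are plain millis() deltas around each indexing phase. The same pattern as a small RAII helper — a sketch only, assuming an Arduino-style environment (millis(), Serial.printf); the PhaseTimer name is hypothetical, not part of this codebase:

#include <Arduino.h>

class PhaseTimer {
 public:
  explicit PhaseTimer(const char* label) : label(label), start(millis()) {}
  ~PhaseTimer() {
    // Log the elapsed time when the phase goes out of scope
    Serial.printf("[%lu] [EBP] %s completed in %lu ms\n", millis(), label, millis() - start);
  }

 private:
  const char* label;
  unsigned long start;
};

// Usage:
// {
//   PhaseTimer t("OPF pass");
//   ... run the pass ...
// }  // destructor prints the duration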

View File

@@ -40,6 +40,7 @@ bool BookMetadataCache::endContentOpfPass() {

 bool BookMetadataCache::beginTocPass() {
   Serial.printf("[%lu] [BMC] Beginning toc pass\n", millis());
+  // Open spine file for reading
   if (!SdMan.openFileForRead("BMC", cachePath + tmpSpineBinFile, spineFile)) {
     return false;
   }
@@ -47,41 +48,12 @@
     spineFile.close();
     return false;
   }
-  if (spineCount >= LARGE_SPINE_THRESHOLD) {
-    spineHrefIndex.clear();
-    spineHrefIndex.reserve(spineCount);
-    spineFile.seek(0);
-    for (int i = 0; i < spineCount; i++) {
-      auto entry = readSpineEntry(spineFile);
-      SpineHrefIndexEntry idx;
-      idx.hrefHash = fnvHash64(entry.href);
-      idx.hrefLen = static_cast<uint16_t>(entry.href.size());
-      idx.spineIndex = static_cast<int16_t>(i);
-      spineHrefIndex.push_back(idx);
-    }
-    std::sort(spineHrefIndex.begin(), spineHrefIndex.end(),
-              [](const SpineHrefIndexEntry& a, const SpineHrefIndexEntry& b) {
-                return a.hrefHash < b.hrefHash || (a.hrefHash == b.hrefHash && a.hrefLen < b.hrefLen);
-              });
-    spineFile.seek(0);
-    useSpineHrefIndex = true;
-    Serial.printf("[%lu] [BMC] Using fast index for %d spine items\n", millis(), spineCount);
-  } else {
-    useSpineHrefIndex = false;
-  }
   return true;
 }

 bool BookMetadataCache::endTocPass() {
   tocFile.close();
   spineFile.close();
-  spineHrefIndex.clear();
-  spineHrefIndex.shrink_to_fit();
-  useSpineHrefIndex = false;
   return true;
 }
@@ -152,18 +124,6 @@ bool BookMetadataCache::buildBookBin(const std::string& epubPath, const BookMeta
   // LUTs complete
   // Loop through spines from spine file matching up TOC indexes, calculating cumulative size and writing to book.bin
-  // Build spineIndex->tocIndex mapping in one pass (O(n) instead of O(n*m))
-  std::vector<int16_t> spineToTocIndex(spineCount, -1);
-  tocFile.seek(0);
-  for (int j = 0; j < tocCount; j++) {
-    auto tocEntry = readTocEntry(tocFile);
-    if (tocEntry.spineIndex >= 0 && tocEntry.spineIndex < spineCount) {
-      if (spineToTocIndex[tocEntry.spineIndex] == -1) {
-        spineToTocIndex[tocEntry.spineIndex] = static_cast<int16_t>(j);
-      }
-    }
-  }
   ZipFile zip(epubPath);
   // Pre-open zip file to speed up size calculations
   if (!zip.open()) {
@@ -173,56 +133,31 @@ bool BookMetadataCache::buildBookBin(const std::string& epubPath, const BookMeta
     tocFile.close();
     return false;
   }
-  // NOTE: We intentionally skip calling loadAllFileStatSlims() here.
-  // For large EPUBs (2000+ chapters), pre-loading all ZIP central directory entries
-  // into memory causes OOM crashes on ESP32-C3's limited ~380KB RAM.
-  // Instead, for large books we use a one-pass batch lookup that scans the ZIP
-  // central directory once and matches against spine targets using hash comparison.
-  // This is O(n*log(m)) instead of O(n*m) while avoiding memory exhaustion.
-  // See: https://github.com/crosspoint-reader/crosspoint-reader/issues/134
-
-  std::vector<uint32_t> spineSizes;
-  bool useBatchSizes = false;
-  if (spineCount >= LARGE_SPINE_THRESHOLD) {
-    Serial.printf("[%lu] [BMC] Using batch size lookup for %d spine items\n", millis(), spineCount);
-    std::vector<ZipFile::SizeTarget> targets;
-    targets.reserve(spineCount);
-    spineFile.seek(0);
-    for (int i = 0; i < spineCount; i++) {
-      auto entry = readSpineEntry(spineFile);
-      std::string path = FsHelpers::normalisePath(entry.href);
-      ZipFile::SizeTarget t;
-      t.hash = ZipFile::fnvHash64(path.c_str(), path.size());
-      t.len = static_cast<uint16_t>(path.size());
-      t.index = static_cast<uint16_t>(i);
-      targets.push_back(t);
-    }
-    std::sort(targets.begin(), targets.end(), [](const ZipFile::SizeTarget& a, const ZipFile::SizeTarget& b) {
-      return a.hash < b.hash || (a.hash == b.hash && a.len < b.len);
-    });
-    spineSizes.resize(spineCount, 0);
-    int matched = zip.fillUncompressedSizes(targets, spineSizes);
-    Serial.printf("[%lu] [BMC] Batch lookup matched %d/%d spine items\n", millis(), matched, spineCount);
-    targets.clear();
-    targets.shrink_to_fit();
-    useBatchSizes = true;
-  }
+  // TODO: For large ZIPs loading the all localHeaderOffsets will crash.
+  // However not having them loaded is extremely slow. Need a better solution here.
+  // Perhaps only a cache of spine items or a better way to speedup lookups?
+  if (!zip.loadAllFileStatSlims()) {
+    Serial.printf("[%lu] [BMC] Could not load zip local header offsets for size calculations\n", millis());
+    bookFile.close();
+    spineFile.close();
+    tocFile.close();
+    zip.close();
+    return false;
+  }
   uint32_t cumSize = 0;
   spineFile.seek(0);
   int lastSpineTocIndex = -1;
   for (int i = 0; i < spineCount; i++) {
     auto spineEntry = readSpineEntry(spineFile);
-    spineEntry.tocIndex = spineToTocIndex[i];
+    tocFile.seek(0);
+    for (int j = 0; j < tocCount; j++) {
+      auto tocEntry = readTocEntry(tocFile);
+      if (tocEntry.spineIndex == i) {
+        spineEntry.tocIndex = j;
+        break;
+      }
+    }
     // Not a huge deal if we don't fine a TOC entry for the spine entry, this is expected behaviour for EPUBs
     // Logging here is for debugging
@@ -234,25 +169,16 @@ bool BookMetadataCache::buildBookBin(const std::string& epubPath, const BookMeta
     }
     lastSpineTocIndex = spineEntry.tocIndex;
+    // Calculate size for cumulative size
     size_t itemSize = 0;
-    if (useBatchSizes) {
-      itemSize = spineSizes[i];
-      if (itemSize == 0) {
-        const std::string path = FsHelpers::normalisePath(spineEntry.href);
-        if (!zip.getInflatedFileSize(path.c_str(), &itemSize)) {
-          Serial.printf("[%lu] [BMC] Warning: Could not get size for spine item: %s\n", millis(), path.c_str());
-        }
-      }
-    } else {
-      const std::string path = FsHelpers::normalisePath(spineEntry.href);
-      if (!zip.getInflatedFileSize(path.c_str(), &itemSize)) {
-        Serial.printf("[%lu] [BMC] Warning: Could not get size for spine item: %s\n", millis(), path.c_str());
-      }
-    }
-    cumSize += itemSize;
-    spineEntry.cumulativeSize = cumSize;
+    const std::string path = FsHelpers::normalisePath(spineEntry.href);
+    if (zip.getInflatedFileSize(path.c_str(), &itemSize)) {
+      cumSize += itemSize;
+      spineEntry.cumulativeSize = cumSize;
+    } else {
+      Serial.printf("[%lu] [BMC] Warning: Could not get size for spine item: %s\n", millis(), path.c_str());
+    }
     // Write out spine data to book.bin
     writeSpineEntry(bookFile, spineEntry);
   }
@@ -322,38 +248,21 @@ void BookMetadataCache::createTocEntry(const std::string& title, const std::stri
     return;
   }
-  int16_t spineIndex = -1;
-  // find spine index
-  if (useSpineHrefIndex) {
-    uint64_t targetHash = fnvHash64(href);
-    uint16_t targetLen = static_cast<uint16_t>(href.size());
-    auto it =
-        std::lower_bound(spineHrefIndex.begin(), spineHrefIndex.end(), SpineHrefIndexEntry{targetHash, targetLen, 0},
-                         [](const SpineHrefIndexEntry& a, const SpineHrefIndexEntry& b) {
-                           return a.hrefHash < b.hrefHash || (a.hrefHash == b.hrefHash && a.hrefLen < b.hrefLen);
-                         });
-    while (it != spineHrefIndex.end() && it->hrefHash == targetHash && it->hrefLen == targetLen) {
-      spineIndex = it->spineIndex;
-      break;
-    }
-    if (spineIndex == -1) {
-      Serial.printf("[%lu] [BMC] createTocEntry: Could not find spine item for TOC href %s\n", millis(), href.c_str());
-    }
-  } else {
-    spineFile.seek(0);
-    for (int i = 0; i < spineCount; i++) {
-      auto spineEntry = readSpineEntry(spineFile);
-      if (spineEntry.href == href) {
-        spineIndex = static_cast<int16_t>(i);
-        break;
-      }
-    }
-    if (spineIndex == -1) {
-      Serial.printf("[%lu] [BMC] createTocEntry: Could not find spine item for TOC href %s\n", millis(), href.c_str());
-    }
-  }
+  int spineIndex = -1;
+  // TODO: This lookup is slow as need to scan through all items each time. We can't hold it all in memory due to size.
+  // But perhaps we can load just the hrefs in a vector/list to do an index lookup?
+  spineFile.seek(0);
+  for (int i = 0; i < spineCount; i++) {
+    auto spineEntry = readSpineEntry(spineFile);
+    if (spineEntry.href == href) {
+      spineIndex = i;
+      break;
+    }
+  }
+  if (spineIndex == -1) {
+    Serial.printf("[%lu] [BMC] addTocEntry: Could not find spine item for TOC href %s\n", millis(), href.c_str());
+  }

   const TocEntry entry(title, href, anchor, level, spineIndex);
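The reverted fast path replaces the linear spine scan with a sorted (hash, length, index) table probed via std::lower_bound. A self-contained sketch of that lookup pattern, with illustrative names rather than this project's exact API:

#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

struct IndexEntry {
  uint64_t hash;   // FNV-1a 64-bit hash of the key
  uint16_t len;    // key length, used as a collision tiebreak
  int16_t index;   // caller's payload (e.g. spine index)
};

static uint64_t fnv1a64(const std::string& s) {
  uint64_t h = 14695981039346656037ull;  // FNV offset basis
  for (char c : s) {
    h ^= static_cast<uint8_t>(c);
    h *= 1099511628211ull;  // FNV prime
  }
  return h;
}

static bool entryLess(const IndexEntry& a, const IndexEntry& b) {
  return a.hash < b.hash || (a.hash == b.hash && a.len < b.len);
}

// sortedIndex must be sorted once with entryLess before querying.
int16_t lookup(const std::vector<IndexEntry>& sortedIndex, const std::string& href) {
  const IndexEntry key{fnv1a64(href), static_cast<uint16_t>(href.size()), 0};
  auto it = std::lower_bound(sortedIndex.begin(), sortedIndex.end(), key, entryLess);
  if (it != sortedIndex.end() && it->hash == key.hash && it->len == key.len) {
    return it->index;  // O(log n) instead of an O(n) file scan
  }
  return -1;
}

As in the removed code, matching hash and length only narrows the candidates; a fully collision-safe variant would re-read the matched record and compare the actual strings.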

View File

@@ -2,9 +2,7 @@

 #include <SDCardManager.h>

-#include <algorithm>
 #include <string>
-#include <vector>

 class BookMetadataCache {
  public:
@@ -55,27 +53,6 @@ class BookMetadataCache {
   FsFile spineFile;
   FsFile tocFile;

-  // Index for fast href→spineIndex lookup (used only for large EPUBs)
-  struct SpineHrefIndexEntry {
-    uint64_t hrefHash;  // FNV-1a 64-bit hash
-    uint16_t hrefLen;   // length for collision reduction
-    int16_t spineIndex;
-  };
-  std::vector<SpineHrefIndexEntry> spineHrefIndex;
-  bool useSpineHrefIndex = false;
-  static constexpr uint16_t LARGE_SPINE_THRESHOLD = 400;
-
-  // FNV-1a 64-bit hash function
-  static uint64_t fnvHash64(const std::string& s) {
-    uint64_t hash = 14695981039346656037ull;
-    for (char c : s) {
-      hash ^= static_cast<uint8_t>(c);
-      hash *= 1099511628211ull;
-    }
-    return hash;
-  }
-
   uint32_t writeSpineEntry(FsFile& file, const SpineEntry& entry) const;
   uint32_t writeTocEntry(FsFile& file, const TocEntry& entry) const;
   SpineEntry readSpineEntry(FsFile& file) const;

View File

@@ -38,9 +38,6 @@ ContentOpfParser::~ContentOpfParser() {
   if (SdMan.exists((cachePath + itemCacheFile).c_str())) {
     SdMan.remove((cachePath + itemCacheFile).c_str());
   }
-  itemIndex.clear();
-  itemIndex.shrink_to_fit();
-  useItemIndex = false;
 }

 size_t ContentOpfParser::write(const uint8_t data) { return write(&data, 1); }
@@ -132,15 +129,6 @@ void XMLCALL ContentOpfParser::startElement(void* userData, const XML_Char* name
           "[%lu] [COF] Couldn't open temp items file for reading. This is probably going to be a fatal error.\n",
           millis());
     }
-    // Sort item index for binary search if we have enough items
-    if (self->itemIndex.size() >= LARGE_SPINE_THRESHOLD) {
-      std::sort(self->itemIndex.begin(), self->itemIndex.end(), [](const ItemIndexEntry& a, const ItemIndexEntry& b) {
-        return a.idHash < b.idHash || (a.idHash == b.idHash && a.idLen < b.idLen);
-      });
-      self->useItemIndex = true;
-      Serial.printf("[%lu] [COF] Using fast index for %zu manifest items\n", millis(), self->itemIndex.size());
-    }
     return;
   }
@@ -192,15 +180,6 @@ void XMLCALL ContentOpfParser::startElement(void* userData, const XML_Char* name
       }
     }

-    // Record index entry for fast lookup later
-    if (self->tempItemStore) {
-      ItemIndexEntry entry;
-      entry.idHash = fnvHash(itemId);
-      entry.idLen = static_cast<uint16_t>(itemId.size());
-      entry.fileOffset = static_cast<uint32_t>(self->tempItemStore.position());
-      self->itemIndex.push_back(entry);
-    }
     // Write items down to SD card
     serialization::writeString(self->tempItemStore, itemId);
     serialization::writeString(self->tempItemStore, href);
@@ -236,50 +215,19 @@ void XMLCALL ContentOpfParser::startElement(void* userData, const XML_Char* name
     for (int i = 0; atts[i]; i += 2) {
       if (strcmp(atts[i], "idref") == 0) {
         const std::string idref = atts[i + 1];
+        // Resolve the idref to href using items map
+        // TODO: This lookup is slow as need to scan through all items each time.
+        // It can take up to 200ms per item when getting to 1500 items.
+        self->tempItemStore.seek(0);
+        std::string itemId;
         std::string href;
-        bool found = false;
-        if (self->useItemIndex) {
-          // Fast path: binary search
-          uint32_t targetHash = fnvHash(idref);
-          uint16_t targetLen = static_cast<uint16_t>(idref.size());
-          auto it = std::lower_bound(self->itemIndex.begin(), self->itemIndex.end(),
-                                     ItemIndexEntry{targetHash, targetLen, 0},
-                                     [](const ItemIndexEntry& a, const ItemIndexEntry& b) {
-                                       return a.idHash < b.idHash || (a.idHash == b.idHash && a.idLen < b.idLen);
-                                     });
-          // Check for match (may need to check a few due to hash collisions)
-          while (it != self->itemIndex.end() && it->idHash == targetHash) {
-            self->tempItemStore.seek(it->fileOffset);
-            std::string itemId;
-            serialization::readString(self->tempItemStore, itemId);
-            if (itemId == idref) {
-              serialization::readString(self->tempItemStore, href);
-              found = true;
-              break;
-            }
-            ++it;
-          }
-        } else {
-          // Slow path: linear scan (for small manifests, keeps original behavior)
-          // TODO: This lookup is slow as need to scan through all items each time.
-          // It can take up to 200ms per item when getting to 1500 items.
-          self->tempItemStore.seek(0);
-          std::string itemId;
-          while (self->tempItemStore.available()) {
-            serialization::readString(self->tempItemStore, itemId);
-            serialization::readString(self->tempItemStore, href);
-            if (itemId == idref) {
-              found = true;
-              break;
-            }
-          }
-        }
-        if (found && self->cache) {
-          self->cache->createSpineEntry(href);
-        }
+        while (self->tempItemStore.available()) {
+          serialization::readString(self->tempItemStore, itemId);
+          serialization::readString(self->tempItemStore, href);
+          if (itemId == idref) {
+            self->cache->createSpineEntry(href);
+            break;
+          }
+        }
       }
     }
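The reverted manifest lookup combined two ideas: record the byte offset of each (id, href) record while streaming it to the temp file, then find a record by hashed id and seek directly to it, re-reading the id to verify. A compact sketch of that offset-index idea, using standard C++ streams in place of FsFile/serialization; a plain map stands in for the sorted table, so colliding ids overwrite each other, whereas the removed code kept all candidates:

#include <cstdint>
#include <fstream>
#include <string>
#include <unordered_map>

static uint32_t fnv1a32(const std::string& s) {
  uint32_t h = 2166136261u;
  for (char c : s) {
    h ^= static_cast<uint8_t>(c);
    h *= 16777619u;
  }
  return h;
}

static void writeRecord(std::ofstream& out, const std::string& s) {
  const uint16_t len = static_cast<uint16_t>(s.size());
  out.write(reinterpret_cast<const char*>(&len), sizeof len);
  out.write(s.data(), len);
}

static std::string readRecord(std::ifstream& in) {
  uint16_t len = 0;
  in.read(reinterpret_cast<char*>(&len), sizeof len);
  std::string s(len, '\0');
  in.read(&s[0], len);
  return s;
}

// idHash -> byte offset of the record in the scratch file
static std::unordered_map<uint32_t, std::streamoff> itemOffsets;

void appendItem(std::ofstream& out, const std::string& id, const std::string& href) {
  itemOffsets[fnv1a32(id)] = out.tellp();  // remember where this record starts
  writeRecord(out, id);
  writeRecord(out, href);
}

bool lookupHref(std::ifstream& in, const std::string& idref, std::string& hrefOut) {
  const auto it = itemOffsets.find(fnv1a32(idref));
  if (it == itemOffsets.end()) return false;
  in.clear();
  in.seekg(it->second);                       // jump straight to the record
  if (readRecord(in) != idref) return false;  // guard against hash collisions
  hrefOut = readRecord(in);
  return true;
}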

View File

@@ -1,9 +1,6 @@
 #pragma once

 #include <Print.h>
-#include <algorithm>
-#include <vector>

 #include "Epub.h"
 #include "expat.h"
@@ -31,27 +28,6 @@ class ContentOpfParser final : public Print {
   FsFile tempItemStore;
   std::string coverItemId;

-  // Index for fast idref→href lookup (used only for large EPUBs)
-  struct ItemIndexEntry {
-    uint32_t idHash;      // FNV-1a hash of itemId
-    uint16_t idLen;       // length for collision reduction
-    uint32_t fileOffset;  // offset in .items.bin
-  };
-  std::vector<ItemIndexEntry> itemIndex;
-  bool useItemIndex = false;
-  static constexpr uint16_t LARGE_SPINE_THRESHOLD = 400;
-
-  // FNV-1a hash function
-  static uint32_t fnvHash(const std::string& s) {
-    uint32_t hash = 2166136261u;
-    for (char c : s) {
-      hash ^= static_cast<uint8_t>(c);
-      hash *= 16777619u;
-    }
-    return hash;
-  }
-
   static void startElement(void* userData, const XML_Char* name, const XML_Char** atts);
   static void characterData(void* userData, const XML_Char* s, int len);
   static void endElement(void* userData, const XML_Char* name);

View File

@@ -4,8 +4,6 @@

 #include <SDCardManager.h>
 #include <miniz.h>

-#include <algorithm>
-
 bool inflateOneShot(const uint8_t* inputBuf, const size_t deflatedSize, uint8_t* outputBuf, const size_t inflatedSize) {
   // Setup inflator
   const auto inflator = static_cast<tinfl_decompressor*>(malloc(sizeof(tinfl_decompressor)));
@@ -76,10 +74,6 @@ bool ZipFile::loadAllFileStatSlims() {
     file.seekCur(m + k);
   }

-  // Set cursor to start of central directory for sequential access
-  lastCentralDirPos = zipDetails.centralDirOffset;
-  lastCentralDirPosValid = true;
-
   if (!wasOpen) {
     close();
   }
@@ -108,35 +102,15 @@ bool ZipFile::loadFileStatSlim(const char* filename, FileStatSlim* fileStat) {
     return false;
   }

-  // Phase 1: Try scanning from cursor position first
-  uint32_t startPos = lastCentralDirPosValid ? lastCentralDirPos : zipDetails.centralDirOffset;
-  uint32_t wrapPos = zipDetails.centralDirOffset;
-  bool wrapped = false;
-  bool found = false;
-  file.seek(startPos);
+  file.seek(zipDetails.centralDirOffset);
   uint32_t sig;
   char itemName[256];
-
-  while (true) {
-    uint32_t entryStart = file.position();
-    if (file.read(&sig, 4) != 4 || sig != 0x02014b50) {
-      // End of central directory
-      if (!wrapped && lastCentralDirPosValid && startPos != zipDetails.centralDirOffset) {
-        // Wrap around to beginning
-        file.seek(zipDetails.centralDirOffset);
-        wrapped = true;
-        continue;
-      }
-      break;
-    }
-    // If we've wrapped and reached our start position, stop
-    if (wrapped && entryStart >= startPos) {
-      break;
-    }
+  bool found = false;
+  while (file.available()) {
+    file.read(&sig, 4);
+    if (sig != 0x02014b50) break;  // End of list
     file.seekCur(6);
     file.read(&fileStat->method, 2);
@@ -149,25 +123,15 @@ bool ZipFile::loadFileStatSlim(const char* filename, FileStatSlim* fileStat) {
     file.read(&k, 2);
     file.seekCur(8);
     file.read(&fileStat->localHeaderOffset, 4);
-    if (nameLen < 256) {
-      file.read(itemName, nameLen);
-      itemName[nameLen] = '\0';
-      if (strcmp(itemName, filename) == 0) {
-        // Found it! Update cursor to next entry
-        file.seekCur(m + k);
-        lastCentralDirPos = file.position();
-        lastCentralDirPosValid = true;
-        found = true;
-        break;
-      }
-    } else {
-      // Name too long, skip it
-      file.seekCur(nameLen);
-    }
-    // Skip extra field + comment
+    file.read(itemName, nameLen);
+    itemName[nameLen] = '\0';
+    if (strcmp(itemName, filename) == 0) {
+      found = true;
+      break;
+    }
+    // Skip the rest of this entry (extra field + comment)
     file.seekCur(m + k);
   }
@@ -289,8 +253,6 @@ bool ZipFile::close() {
   if (file) {
     file.close();
   }
-  lastCentralDirPos = 0;
-  lastCentralDirPosValid = false;
   return true;
 }
@@ -304,80 +266,6 @@ bool ZipFile::getInflatedFileSize(const char* filename, size_t* size) {
   return true;
 }

-int ZipFile::fillUncompressedSizes(std::vector<SizeTarget>& targets, std::vector<uint32_t>& sizes) {
-  if (targets.empty()) {
-    return 0;
-  }
-  const bool wasOpen = isOpen();
-  if (!wasOpen && !open()) {
-    return 0;
-  }
-  if (!loadZipDetails()) {
-    if (!wasOpen) {
-      close();
-    }
-    return 0;
-  }
-
-  file.seek(zipDetails.centralDirOffset);
-  int matched = 0;
-  uint32_t sig;
-  char itemName[256];
-
-  while (file.available()) {
-    file.read(&sig, 4);
-    if (sig != 0x02014b50) break;
-    file.seekCur(6);
-    uint16_t method;
-    file.read(&method, 2);
-    file.seekCur(8);
-    uint32_t compressedSize, uncompressedSize;
-    file.read(&compressedSize, 4);
-    file.read(&uncompressedSize, 4);
-    uint16_t nameLen, m, k;
-    file.read(&nameLen, 2);
-    file.read(&m, 2);
-    file.read(&k, 2);
-    file.seekCur(8);
-    uint32_t localHeaderOffset;
-    file.read(&localHeaderOffset, 4);
-    if (nameLen < 256) {
-      file.read(itemName, nameLen);
-      itemName[nameLen] = '\0';
-      uint64_t hash = fnvHash64(itemName, nameLen);
-      SizeTarget key = {hash, nameLen, 0};
-      auto it = std::lower_bound(targets.begin(), targets.end(), key, [](const SizeTarget& a, const SizeTarget& b) {
-        return a.hash < b.hash || (a.hash == b.hash && a.len < b.len);
-      });
-      while (it != targets.end() && it->hash == hash && it->len == nameLen) {
-        if (it->index < sizes.size()) {
-          sizes[it->index] = uncompressedSize;
-          matched++;
-        }
-        ++it;
-      }
-    } else {
-      file.seekCur(nameLen);
-    }
-    file.seekCur(m + k);
-  }
-
-  if (!wasOpen) {
-    close();
-  }
-  return matched;
-}
-
 uint8_t* ZipFile::readFileToMemory(const char* filename, size_t* size, const bool trailingNullByte) {
   const bool wasOpen = isOpen();
   if (!wasOpen && !open()) {
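Both the removed and the restored scan walk the same on-disk structure: the fixed 46-byte ZIP central directory file header, followed by variable-length name, extra, and comment fields (PKWARE APPNOTE, section 4.3.12). A reference sketch of that layout; the struct name is illustrative and assumes a little-endian packed read:

#include <cstdint>

#pragma pack(push, 1)
struct CentralDirHeader {
  uint32_t signature;          // 0x02014b50, checked before each entry
  uint16_t versionMadeBy;      // \
  uint16_t versionNeeded;      //  > the 6 bytes skipped before reading method
  uint16_t flags;              // /
  uint16_t method;             // 0 = stored, 8 = deflate
  uint16_t modTime;
  uint16_t modDate;
  uint32_t crc32;
  uint32_t compressedSize;
  uint32_t uncompressedSize;
  uint16_t nameLen;            // followed by: name[nameLen]
  uint16_t extraLen;           //              extra[extraLen]  ("m" above)
  uint16_t commentLen;         //              comment[commentLen]  ("k" above)
  uint16_t diskNumberStart;
  uint16_t internalAttrs;
  uint32_t externalAttrs;
  uint32_t localHeaderOffset;  // where the file's local header lives
};
#pragma pack(pop)

static_assert(sizeof(CentralDirHeader) == 46, "central dir header is 46 bytes");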

View File

@@ -3,7 +3,6 @@

 #include <string>
 #include <unordered_map>
-#include <vector>

 class ZipFile {
  public:
@@ -20,33 +19,12 @@ class ZipFile {
     bool isSet;
   };

-  // Target for batch uncompressed size lookup (sorted by hash, then len)
-  struct SizeTarget {
-    uint64_t hash;   // FNV-1a 64-bit hash of normalized path
-    uint16_t len;    // Length of path for collision reduction
-    uint16_t index;  // Caller's index (e.g. spine index)
-  };
-
-  // FNV-1a 64-bit hash computed from char buffer (no std::string allocation)
-  static uint64_t fnvHash64(const char* s, size_t len) {
-    uint64_t hash = 14695981039346656037ull;
-    for (size_t i = 0; i < len; i++) {
-      hash ^= static_cast<uint8_t>(s[i]);
-      hash *= 1099511628211ull;
-    }
-    return hash;
-  }
-
  private:
   const std::string& filePath;
   FsFile file;
   ZipDetails zipDetails = {0, 0, false};
   std::unordered_map<std::string, FileStatSlim> fileStatSlimCache;

-  // Cursor for sequential central-dir scanning optimization
-  uint32_t lastCentralDirPos = 0;
-  bool lastCentralDirPosValid = false;
-
   bool loadFileStatSlim(const char* filename, FileStatSlim* fileStat);
   long getDataOffset(const FileStatSlim& fileStat);
   bool loadZipDetails();
@@ -61,10 +39,6 @@ class ZipFile {
   bool close();
   bool loadAllFileStatSlims();
   bool getInflatedFileSize(const char* filename, size_t* size);
-  // Batch lookup: scan ZIP central dir once and fill sizes for matching targets.
-  // targets must be sorted by (hash, len). sizes[target.index] receives uncompressedSize.
-  // Returns number of targets matched.
-  int fillUncompressedSizes(std::vector<SizeTarget>& targets, std::vector<uint32_t>& sizes);
   // Due to the memory required to run each of these, it is recommended to not preopen the zip file for multiple
   // These functions will open and close the zip as needed
   uint8_t* readFileToMemory(const char* filename, size_t* size = nullptr, bool trailingNullByte = false);

View File

@@ -110,7 +110,7 @@ void MyLibraryActivity::loadFiles() {
   char name[500];
   for (auto file = root.openNextFile(); file; file = root.openNextFile()) {
     file.getName(name, sizeof(name));
-    if (name[0] == '.' || strcmp(name, "System Volume Information") == 0 || strcmp(name, "fonts") == 0) {
+    if (name[0] == '.' || strcmp(name, "System Volume Information") == 0 || strcmp(name, "fonts")) {
       file.close();
       continue;
     }
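One behavioural note on this last hunk: strcmp returns 0 exactly when its arguments are equal, so the restored bare strcmp(name, "fonts") is truthy for every name except "fonts", unlike the == 0 form being removed. A minimal illustration:

#include <cstdio>
#include <cstring>

int main() {
  const char* name = "MyBook.epub";
  // Bare strcmp: nonzero (truthy) because the strings differ, so a
  // skip-guard written this way would skip almost every entry.
  printf("bare strcmp truthy: %d\n", strcmp(name, "fonts") != 0);  // 1
  // Explicit comparison: true only for the literal "fonts" entry.
  printf("equals \"fonts\": %d\n", strcmp(name, "fonts") == 0);    // 0
  return 0;
}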