Artic Base: Fix out of bounds cache reads (#127)

This commit is contained in:
PabloMK7 2024-05-16 00:03:40 +02:00 committed by GitHub
parent 893dad57b2
commit 05cccb585d
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 56 additions and 11 deletions

View file

@@ -88,8 +88,16 @@ public:
         }
     }
 
+    void invalidate(const key_type& key) {
+        auto i = find(key);
+        if (i != m_list.cend()) {
+            m_list.erase(i);
+        }
+    }
+
     void clear() {
         m_list.clear();
+        m_array.fill(value_type{});
     }
 
 private:

View file

@@ -25,13 +25,20 @@ ResultVal<std::size_t> ArticCache::Read(s32 file_handle, std::size_t offset, std
                 auto res =
                     ReadFromArtic(file_handle, reinterpret_cast<u8*>(big_cache_entry.second.data()),
                                   length, offset);
-                if (res.Failed())
+                if (res.Failed()) {
+                    big_cache.invalidate(std::make_pair(offset, length));
                     return res;
-                length = res.Unwrap();
+                }
+                read_progress = res.Unwrap();
             } else {
-                LOG_TRACE(Service_FS, "ArticCache BHIT: offset={}, length={}", offset, length);
+                LOG_TRACE(Service_FS, "ArticCache BHIT: offset={}, length={}", offset,
+                          read_progress);
             }
-            memcpy(buffer, big_cache_entry.second.data(), length);
+            memcpy(buffer, big_cache_entry.second.data(), read_progress);
+            if (read_progress < length) {
+                // Invalidate the entry as it is not fully read
+                big_cache.invalidate(std::make_pair(offset, length));
+            }
         } else {
             if (segments[0].second < very_big_cache_skip) {
                 std::unique_lock very_big_read_guard(very_big_cache_mutex);
@@ -44,28 +51,39 @@ ResultVal<std::size_t> ArticCache::Read(s32 file_handle, std::size_t offset, std
                 auto res = ReadFromArtic(
                     file_handle, reinterpret_cast<u8*>(very_big_cache_entry.second.data()),
                     length, offset);
-                if (res.Failed())
+                if (res.Failed()) {
+                    very_big_cache.invalidate(std::make_pair(offset, length));
                     return res;
-                length = res.Unwrap();
+                }
+                read_progress = res.Unwrap();
             } else {
-                LOG_TRACE(Service_FS, "ArticCache VBHIT: offset={}, length={}", offset, length);
+                LOG_TRACE(Service_FS, "ArticCache VBHIT: offset={}, length={}", offset,
+                          read_progress);
             }
-            memcpy(buffer, very_big_cache_entry.second.data(), length);
+            memcpy(buffer, very_big_cache_entry.second.data(), read_progress);
+            if (read_progress < length) {
+                // Invalidate the entry as it is not fully read
+                very_big_cache.invalidate(std::make_pair(offset, length));
+            }
         } else {
             LOG_TRACE(Service_FS, "ArticCache SKIP: offset={}, length={}", offset, length);
             auto res = ReadFromArtic(file_handle, buffer, length, offset);
             if (res.Failed())
                 return res;
-            length = res.Unwrap();
+            read_progress = res.Unwrap();
         }
     }
-    return length;
+    return read_progress;
 }
 
 // TODO(PabloMK7): Make cache thread safe, read the comment in CacheReady function.
 std::unique_lock read_guard(cache_mutex);
+bool read_past_end = false;
 for (const auto& seg : segments) {
+    if (read_past_end) {
+        break;
+    }
     std::size_t read_size = cache_line_size;
     std::size_t page = OffsetToPage(seg.first);
     // Check if segment is in cache
@@ -73,9 +91,28 @@ ResultVal<std::size_t> ArticCache::Read(s32 file_handle, std::size_t offset, std
         if (!cache_entry.first) {
             // If not found, read from artic and cache the data
             auto res = ReadFromArtic(file_handle, cache_entry.second.data(), read_size, page);
-            if (res.Failed())
+            if (res.Failed()) {
+                // Invalidate the requested entry as it is not populated
+                cache.invalidate(page);
+                // In the very unlikely case the file size is a multiple of the cache size,
+                // and the game request more data than the file size, this will save us from
+                // returning an incorrect out of bounds error caused by reading at just the very end
+                // of the file.
+                constexpr u32 out_of_bounds_read = 714;
+                if (res.Code().description == out_of_bounds_read) {
+                    return read_progress;
+                }
                 return res;
+            }
+
+            size_t expected_read_size = read_size;
             read_size = res.Unwrap();
+
+            if (read_size < expected_read_size) {
+                // Invalidate the requested entry as it is not fully read
+                cache.invalidate(page);
+                read_past_end = true;
+            }
             LOG_TRACE(Service_FS, "ArticCache MISS: page={}, length={}, into={}", page, seg.second,
                       (seg.first - page));
         } else {