first commit

drygrass · 2025-11-18 23:41:04 +08:00
commit 89351183c3
13 changed files with 2376 additions and 0 deletions

KArcCache/KArcCache.h (new file, 92 lines)

@@ -0,0 +1,92 @@
#pragma once
#include "../KICachePolicy.h"
#include "KArcLruPart.h"
#include "KArcLfuPart.h"
#include <memory>
namespace KamaCache
{
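// Adaptive Replacement Cache: composes an LRU part and an LFU part and shifts
// capacity between them when a miss lands in one part's ghost (recently evicted) list.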
template<typename Key, typename Value>
class KArcCache : public KICachePolicy<Key, Value>
{
public:
explicit KArcCache(size_t capacity = 10, size_t transformThreshold = 2)
: capacity_(capacity)
, transformThreshold_(transformThreshold)
, lruPart_(std::make_unique<ArcLruPart<Key, Value>>(capacity, transformThreshold))
, lfuPart_(std::make_unique<ArcLfuPart<Key, Value>>(capacity, transformThreshold))
{}
~KArcCache() override = default;
void put(Key key, Value value) override
{
checkGhostCaches(key);
// Check whether the key already lives in the LFU part
bool inLfu = lfuPart_->contain(key);
// Always refresh the LRU part
lruPart_->put(key, value);
// If the key is also tracked by the LFU part, keep that copy in sync
if (inLfu)
{
lfuPart_->put(key, value);
}
}
bool get(Key key, Value& value) override
{
checkGhostCaches(key);
bool shouldTransform = false;
if (lruPart_->get(key, value, shouldTransform))
{
if (shouldTransform)
{
lfuPart_->put(key, value);
}
return true;
}
return lfuPart_->get(key, value);
}
Value get(Key key) override
{
Value value{};
get(key, value);
return value;
}
private:
bool checkGhostCaches(Key key)
{
bool inGhost = false;
if (lruPart_->checkGhost(key))
{
if (lfuPart_->decreaseCapacity())
{
lruPart_->increaseCapacity();
}
inGhost = true;
}
else if (lfuPart_->checkGhost(key))
{
if (lruPart_->decreaseCapacity())
{
lfuPart_->increaseCapacity();
}
inGhost = true;
}
return inGhost;
}
private:
size_t capacity_;
size_t transformThreshold_;
std::unique_ptr<ArcLruPart<Key, Value>> lruPart_;
std::unique_ptr<ArcLfuPart<Key, Value>> lfuPart_;
};
} // namespace KamaCache
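A minimal usage sketch, assuming KICachePolicy.h declares the virtual put/get interface overridden above and that the headers are included from the KArcCache/ directory added in this commit:

#include "KArcCache/KArcCache.h"
#include <iostream>
#include <string>

int main()
{
    KamaCache::KArcCache<int, std::string> cache(3, 2); // capacity 3, transformThreshold 2

    cache.put(1, "one");
    cache.put(2, "two");

    std::string value;
    if (cache.get(1, value))          // nodes start with an access count of 1, so this
        std::cout << value << '\n';   // first hit reaches the threshold of 2 and the
                                      // entry is copied into the LFU part as well
    cache.put(3, "three");
    cache.put(4, "four");             // LRU part is full: key 2 is evicted into its ghost list

    std::cout << cache.get(2) << '\n';   // miss (prints an empty line), but the ghost hit
                                         // shifts one unit of capacity from LFU to LRU
    return 0;
}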

KArcCache/KArcCacheNode.h (new file, 41 lines)

@@ -0,0 +1,41 @@
#pragma once
#include <memory>
namespace KamaCache
{
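// Doubly linked list node shared by both ARC parts. prev_ is a weak_ptr so the
// prev/next links do not form shared_ptr reference cycles.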
template<typename Key, typename Value>
class ArcNode
{
private:
Key key_;
Value value_;
size_t accessCount_;
std::weak_ptr<ArcNode> prev_;
std::shared_ptr<ArcNode> next_;
public:
ArcNode() : key_(), value_(), accessCount_(1), next_(nullptr) {} // sentinel constructor; value-initialize key and value
ArcNode(Key key, Value value)
: key_(key)
, value_(value)
, accessCount_(1)
, next_(nullptr)
{}
// Getters
Key getKey() const { return key_; }
Value getValue() const { return value_; }
size_t getAccessCount() const { return accessCount_; }
// Setters
void setValue(const Value& value) { value_ = value; }
void incrementAccessCount() { ++accessCount_; }
template<typename K, typename V> friend class ArcLruPart;
template<typename K, typename V> friend class ArcLfuPart;
};
} // namespace KamaCache

KArcCache/KArcLfuPart.h (new file, 231 lines)

@@ -0,0 +1,231 @@
#pragma once
#include "KArcCacheNode.h"
#include <unordered_map>
#include <map>
#include <list>
#include <mutex>
namespace KamaCache
{
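// LFU half of the ARC cache: resident nodes are bucketed by access frequency, and
// evicted keys are remembered in a ghost list so that a later request for one of them
// lets KArcCache grow this part's share of the total capacity.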
template<typename Key, typename Value>
class ArcLfuPart
{
public:
using NodeType = ArcNode<Key, Value>;
using NodePtr = std::shared_ptr<NodeType>;
using NodeMap = std::unordered_map<Key, NodePtr>;
using FreqMap = std::map<size_t, std::list<NodePtr>>;
explicit ArcLfuPart(size_t capacity, size_t transformThreshold)
: capacity_(capacity)
, ghostCapacity_(capacity)
, transformThreshold_(transformThreshold)
, minFreq_(0)
{
initializeLists();
}
bool put(Key key, Value value)
{
std::lock_guard<std::mutex> lock(mutex_); // lock before reading capacity_, which increase/decreaseCapacity may change
if (capacity_ == 0)
return false;
auto it = mainCache_.find(key);
if (it != mainCache_.end())
{
return updateExistingNode(it->second, value);
}
return addNewNode(key, value);
}
bool get(Key key, Value& value)
{
std::lock_guard<std::mutex> lock(mutex_);
auto it = mainCache_.find(key);
if (it != mainCache_.end())
{
updateNodeFrequency(it->second);
value = it->second->getValue();
return true;
}
return false;
}
bool contain(Key key)
{
std::lock_guard<std::mutex> lock(mutex_); // mainCache_ is shared with put/get
return mainCache_.find(key) != mainCache_.end();
}
bool checkGhost(Key key)
{
std::lock_guard<std::mutex> lock(mutex_); // the ghost list is shared state as well
auto it = ghostCache_.find(key);
if (it != ghostCache_.end())
{
removeFromGhost(it->second);
ghostCache_.erase(it);
return true;
}
return false;
}
void increaseCapacity()
{
std::lock_guard<std::mutex> lock(mutex_);
++capacity_;
}
bool decreaseCapacity()
{
std::lock_guard<std::mutex> lock(mutex_);
if (capacity_ == 0) return false;
if (mainCache_.size() == capacity_)
{
evictLeastFrequent();
}
--capacity_;
return true;
}
private:
void initializeLists()
{
ghostHead_ = std::make_shared<NodeType>();
ghostTail_ = std::make_shared<NodeType>();
ghostHead_->next_ = ghostTail_;
ghostTail_->prev_ = ghostHead_;
}
bool updateExistingNode(NodePtr node, const Value& value)
{
node->setValue(value);
updateNodeFrequency(node);
return true;
}
bool addNewNode(const Key& key, const Value& value)
{
if (mainCache_.size() >= capacity_)
{
evictLeastFrequent();
}
NodePtr newNode = std::make_shared<NodeType>(key, value);
mainCache_[key] = newNode;
// A brand-new node always starts in the frequency-1 bucket
// (operator[] creates the list on first use)
freqMap_[1].push_back(newNode);
minFreq_ = 1;
return true;
}
void updateNodeFrequency(NodePtr node)
{
size_t oldFreq = node->getAccessCount();
node->incrementAccessCount();
size_t newFreq = node->getAccessCount();
// Remove the node from its old frequency bucket
auto& oldList = freqMap_[oldFreq];
oldList.remove(node);
if (oldList.empty())
{
freqMap_.erase(oldFreq);
if (oldFreq == minFreq_)
{
minFreq_ = newFreq;
}
}
// Append to the bucket for the new frequency (created on first use)
freqMap_[newFreq].push_back(node);
}
void evictLeastFrequent()
{
if (freqMap_.empty())
return;
// Take the bucket with the lowest frequency
auto& minFreqList = freqMap_[minFreq_];
if (minFreqList.empty())
return;
// Pop the least frequently used node
NodePtr leastNode = minFreqList.front();
minFreqList.pop_front();
// Drop the bucket if it is now empty
if (minFreqList.empty())
{
freqMap_.erase(minFreq_);
// and advance the minimum frequency
if (!freqMap_.empty())
{
minFreq_ = freqMap_.begin()->first;
}
}
// Move the evicted node into the ghost cache
if (ghostCache_.size() >= ghostCapacity_)
{
removeOldestGhost();
}
addToGhost(leastNode);
// Finally erase it from the main cache map
mainCache_.erase(leastNode->getKey());
}
void removeFromGhost(NodePtr node)
{
if (!node->prev_.expired() && node->next_) {
auto prev = node->prev_.lock();
prev->next_ = node->next_;
node->next_->prev_ = node->prev_;
node->next_ = nullptr; // clear the pointer to avoid dangling references
}
}
void addToGhost(NodePtr node)
{
node->next_ = ghostTail_;
node->prev_ = ghostTail_->prev_;
if (!ghostTail_->prev_.expired()) {
ghostTail_->prev_.lock()->next_ = node;
}
ghostTail_->prev_ = node;
ghostCache_[node->getKey()] = node;
}
void removeOldestGhost()
{
NodePtr oldestGhost = ghostHead_->next_;
if (oldestGhost != ghostTail_)
{
removeFromGhost(oldestGhost);
ghostCache_.erase(oldestGhost->getKey());
}
}
private:
size_t capacity_;
size_t ghostCapacity_;
size_t transformThreshold_;
size_t minFreq_;
std::mutex mutex_;
NodeMap mainCache_;
NodeMap ghostCache_;
FreqMap freqMap_;
NodePtr ghostHead_;
NodePtr ghostTail_;
};
} // namespace KamaCache

KArcCache/KArcLruPart.h (new file, 222 lines)

@@ -0,0 +1,222 @@
#pragma once
#include "KArcCacheNode.h"
#include <unordered_map>
#include <mutex>
namespace KamaCache
{
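// LRU half of the ARC cache: resident nodes are kept in recency order, an entry
// accessed transformThreshold_ times signals KArcCache to promote it into the LFU
// part, and evicted keys are remembered in a ghost list.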
template<typename Key, typename Value>
class ArcLruPart
{
public:
using NodeType = ArcNode<Key, Value>;
using NodePtr = std::shared_ptr<NodeType>;
using NodeMap = std::unordered_map<Key, NodePtr>;
explicit ArcLruPart(size_t capacity, size_t transformThreshold)
: capacity_(capacity)
, ghostCapacity_(capacity)
, transformThreshold_(transformThreshold)
{
initializeLists();
}
bool put(Key key, Value value)
{
std::lock_guard<std::mutex> lock(mutex_); // lock before reading capacity_, which increase/decreaseCapacity may change
if (capacity_ == 0) return false;
auto it = mainCache_.find(key);
if (it != mainCache_.end())
{
return updateExistingNode(it->second, value);
}
return addNewNode(key, value);
}
bool get(Key key, Value& value, bool& shouldTransform)
{
std::lock_guard<std::mutex> lock(mutex_);
auto it = mainCache_.find(key);
if (it != mainCache_.end())
{
shouldTransform = updateNodeAccess(it->second);
value = it->second->getValue();
return true;
}
return false;
}
bool checkGhost(Key key)
{
std::lock_guard<std::mutex> lock(mutex_); // the ghost list is shared state as well
auto it = ghostCache_.find(key);
if (it != ghostCache_.end()) {
removeFromGhost(it->second);
ghostCache_.erase(it);
return true;
}
return false;
}
void increaseCapacity()
{
std::lock_guard<std::mutex> lock(mutex_);
++capacity_;
}
bool decreaseCapacity()
{
std::lock_guard<std::mutex> lock(mutex_);
if (capacity_ == 0) return false;
if (mainCache_.size() == capacity_) {
evictLeastRecent();
}
--capacity_;
return true;
}
private:
void initializeLists()
{
mainHead_ = std::make_shared<NodeType>();
mainTail_ = std::make_shared<NodeType>();
mainHead_->next_ = mainTail_;
mainTail_->prev_ = mainHead_;
ghostHead_ = std::make_shared<NodeType>();
ghostTail_ = std::make_shared<NodeType>();
ghostHead_->next_ = ghostTail_;
ghostTail_->prev_ = ghostHead_;
}
bool updateExistingNode(NodePtr node, const Value& value)
{
node->setValue(value);
moveToFront(node);
return true;
}
bool addNewNode(const Key& key, const Value& value)
{
if (mainCache_.size() >= capacity_)
{
evictLeastRecent(); // make room by evicting the least recently used entry
}
NodePtr newNode = std::make_shared<NodeType>(key, value);
mainCache_[key] = newNode;
addToFront(newNode);
return true;
}
bool updateNodeAccess(NodePtr node)
{
moveToFront(node);
node->incrementAccessCount();
return node->getAccessCount() >= transformThreshold_;
}
void moveToFront(NodePtr node)
{
// Unlink the node from its current position first
if (!node->prev_.expired() && node->next_) {
auto prev = node->prev_.lock();
prev->next_ = node->next_;
node->next_->prev_ = node->prev_;
node->next_ = nullptr; // clear the pointer to avoid dangling references
}
// then splice it in at the head
addToFront(node);
}
void addToFront(NodePtr node)
{
node->next_ = mainHead_->next_;
node->prev_ = mainHead_;
mainHead_->next_->prev_ = node;
mainHead_->next_ = node;
}
void evictLeastRecent()
{
NodePtr leastRecent = mainTail_->prev_.lock();
if (!leastRecent || leastRecent == mainHead_)
return;
// Unlink it from the main list
removeFromMain(leastRecent);
// Move it into the ghost cache
if (ghostCache_.size() >= ghostCapacity_)
{
removeOldestGhost();
}
addToGhost(leastRecent);
// Erase it from the main cache map
mainCache_.erase(leastRecent->getKey());
}
void removeFromMain(NodePtr node)
{
if (!node->prev_.expired() && node->next_) {
auto prev = node->prev_.lock();
prev->next_ = node->next_;
node->next_->prev_ = node->prev_;
node->next_ = nullptr; // clear the pointer to avoid dangling references
}
}
void removeFromGhost(NodePtr node)
{
if (!node->prev_.expired() && node->next_) {
auto prev = node->prev_.lock();
prev->next_ = node->next_;
node->next_->prev_ = node->prev_;
node->next_ = nullptr; // clear the pointer to avoid dangling references
}
}
void addToGhost(NodePtr node)
{
// Reset the access count so a re-admitted key starts counting from scratch
node->accessCount_ = 1;
// Insert at the head of the ghost list
node->next_ = ghostHead_->next_;
node->prev_ = ghostHead_;
ghostHead_->next_->prev_ = node;
ghostHead_->next_ = node;
// Record it in the ghost cache map
ghostCache_[node->getKey()] = node;
}
void removeOldestGhost()
{
// lock() the weak_ptr and check the result for null
NodePtr oldestGhost = ghostTail_->prev_.lock();
if (!oldestGhost || oldestGhost == ghostHead_)
return;
removeFromGhost(oldestGhost);
ghostCache_.erase(oldestGhost->getKey());
}
private:
size_t capacity_;
size_t ghostCapacity_;
size_t transformThreshold_; // access count at which an entry is promoted to the LFU part
std::mutex mutex_;
NodeMap mainCache_; // key -> ArcNode
NodeMap ghostCache_;
// Resident (main) LRU list
NodePtr mainHead_;
NodePtr mainTail_;
// Ghost list of recently evicted keys
NodePtr ghostHead_;
NodePtr ghostTail_;
};
} // namespace KamaCache
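ArcLruPart is an internal building block, but its promotion signal and ghost list are easy to observe in isolation. A small sketch, assuming the header is included directly; the capacity of 2 and threshold of 3 are arbitrary values for illustration:

#include "KArcCache/KArcLruPart.h"
#include <cassert>
#include <string>

int main()
{
    KamaCache::ArcLruPart<int, std::string> lru(2, 3); // capacity 2, transformThreshold 3

    lru.put(1, "a");
    std::string v;
    bool promote = false;
    lru.get(1, v, promote);          // access count 1 -> 2, still below the threshold
    assert(v == "a" && !promote);
    lru.get(1, v, promote);          // access count 2 -> 3, now signals promotion
    assert(promote);

    lru.put(2, "b");
    lru.put(3, "c");                 // capacity exceeded: key 1 is now the least recently
                                     // used entry and moves to the ghost list
    assert(lru.checkGhost(1));       // found in (and removed from) the ghost list
    return 0;
}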