最近最少使用 (LRU) 缓存指的是优先丢弃最近最少使用的缓存项。如何设计和实现这样一个缓存类? 设计要求如下:
1) 尽可能快地查找缓存项
2) 一旦缓存未命中且缓存已满,我们需要尽可能快地替换最近最少使用的缓存项。
如何从设计模式和算法设计的角度分析和实现这个问题?
最近最少使用 (LRU) 缓存指的是优先丢弃最近最少使用的缓存项。如何设计和实现这样一个缓存类? 设计要求如下:
1) 尽可能快地查找缓存项
2) 一旦缓存未命中且缓存已满,我们需要尽可能快地替换最近最少使用的缓存项。
如何从设计模式和算法设计的角度分析和实现这个问题?
这是我对LRU缓存的简单示例C++实现,使用了哈希(unordered_map)和列表的组合。列表上的项具有用于访问映射表的键,而映射表上的项具有用于访问列表的迭代器。
#include <list>
#include <unordered_map>
#include <assert.h>
using namespace std;
// Simple LRU cache: a doubly linked list keeps usage order (most recently
// used at the front) while a hash map gives O(1) lookup of list positions.
template <class KEY_T, class VAL_T> class LRUCache{
private:
    // Usage-ordered storage: front = most recently used, back = least.
    std::list< std::pair<KEY_T, VAL_T> > item_list;
    // Maps a key to the list node holding its (key, value) pair.
    std::unordered_map<KEY_T, typename std::list< std::pair<KEY_T, VAL_T> >::iterator> item_map;
    // Maximum number of entries kept in the cache.
    size_t cache_size;
private:
    // Evict least-recently-used entries (list tail) until the cache fits
    // within cache_size again. Called after every insertion.
    void clean(void){
        while(item_map.size() > cache_size){
            auto last_it = std::prev(item_list.end());
            item_map.erase(last_it->first);
            item_list.pop_back();
        }
    }
public:
    // cache_size_: maximum number of entries. A non-positive value is
    // clamped to 0. (Previously a negative int silently converted to a
    // huge size_t, which disabled eviction entirely.)
    explicit LRUCache(int cache_size_)
        : cache_size(cache_size_ > 0 ? static_cast<size_t>(cache_size_) : 0) {
    }
    // Insert or update a key. The entry becomes the most recently used.
    void put(const KEY_T &key, const VAL_T &val){
        auto it = item_map.find(key);
        if(it != item_map.end()){
            // Key already cached: update the value in place and move the
            // node to the front in O(1) — no node reallocation, no rehash
            // (the old code erased and re-inserted both the list node and
            // the map entry).
            it->second->second = val;
            item_list.splice(item_list.begin(), item_list, it->second);
            return;
        }
        item_list.push_front(std::make_pair(key, val));
        item_map.insert(std::make_pair(key, item_list.begin()));
        clean();
    }
    // Return true if key is currently cached (does not refresh usage order).
    bool exist(const KEY_T &key){
        return item_map.count(key) > 0;
    }
    // Return the value for key and mark it most recently used.
    // Precondition: exist(key) — enforced by assert.
    VAL_T get(const KEY_T &key){
        assert(exist(key));
        auto it = item_map.find(key);
        // Move the accessed node to the front in O(1).
        item_list.splice(item_list.begin(), item_list, it->second);
        return it->second->second;
    }
};
我看到这里有几个不必要的复杂实现,所以我决定提供我的实现。缓存只有两种方法:get和set。希望这样更易读、易懂:
#include<unordered_map>
#include<list>
using namespace std;
// LRU cache with only two operations: set and get.
// `items` holds keys in usage order (front = most recently used); the map
// stores each key's value together with its position in `items`.
template<typename K, typename V = K>
class LRUCache
{
private:
    std::list<K> items;   // usage order, most recently used at the front
    std::unordered_map<K, std::pair<V, typename std::list<K>::iterator>> keyValuesMap;
    // Capacity. size_t fixes the old signed/unsigned comparison against
    // keyValuesMap.size().
    size_t csize;
public:
    // s: capacity; non-positive values fall back to a default of 10
    // (preserves the original behavior).
    LRUCache(int s) : csize(s < 1 ? 10 : static_cast<size_t>(s)) {}

    // Insert or overwrite `key`, making it the most recently used entry.
    // Evicts the least recently used entry when the capacity is exceeded.
    void set(const K key, const V value) {
        auto pos = keyValuesMap.find(key);
        if (pos == keyValuesMap.end()) {
            items.push_front(key);
            keyValuesMap[key] = { value, items.begin() };
            if (keyValuesMap.size() > csize) {
                // items.back() is the least recently used key.
                keyValuesMap.erase(items.back());
                items.pop_back();
            }
        }
        else {
            // Key already present: move its node to the front in O(1)
            // (splice) instead of erase + push_front, and update in place.
            items.splice(items.begin(), items, pos->second.second);
            pos->second.first = value;
            pos->second.second = items.begin();
        }
    }

    // Fetch `key` into `value`; returns false on a miss.
    // A hit refreshes the entry to most recently used.
    bool get(const K key, V &value) {
        auto pos = keyValuesMap.find(key);
        if (pos == keyValuesMap.end())
            return false;
        // Refresh usage order without destroying and re-creating the node.
        items.splice(items.begin(), items, pos->second.second);
        pos->second.second = items.begin();
        value = pos->second.first;
        return true;
    }
};
//LRU Cache
#include <cassert>
#include <cstddef>
#include <list>
#include <unordered_map>
#include <utility>
// Memoizing LRU cache: values are computed on demand by a user-supplied
// function and the aCapacity most recently used results are retained.
template <typename K,
          typename V
          >
class LRUCache
{
    // Key access history, most recent at the back.
    typedef std::list<K> List;
    // Key -> (value, iterator into the access-history list).
    // std:: qualification added: this snippet has no `using namespace std;`,
    // so the unqualified `unordered_map` did not compile.
    typedef std::unordered_map< K,
            std::pair<
                V,
                typename List::iterator
            >
            > Cache;
    // Function used to compute a value on a cache miss.
    typedef V (*Fn)(const K&);
public:
    // aCapacity: maximum number of cached entries (must be >= 1, since a
    // zero-capacity cache would call evict() on an empty list).
    // aFn: fallback used to produce the value for an uncached key.
    LRUCache( size_t aCapacity, Fn aFn )
        : mFn( aFn )
        , mCapacity( aCapacity )
    {
        assert( aCapacity > 0 );
    }
    // Get the value for aKey, computing and caching it on a miss.
    V operator()( const K& aKey )
    {
        typename Cache::iterator it = mCache.find( aKey );
        if( it == mCache.end() ) // cache miss: compute, remember, return
        {
            V v = mFn( aKey );
            insert( aKey, v );
            return v;
        }
        // Cache hit: move the accessed key to the back of the history list
        // (most recently used position) in O(1).
        mList.splice( mList.end(), mList, (it)->second.second );
        // Return the retrieved value.
        return (it)->second.first;
    }
private:
    // Insert a new key/value pair; must only be called on a cache miss.
    void insert( const K& aKey, V aValue )
    {
        assert( mCache.find( aKey ) == mCache.end() );
        // Make room first so the size never exceeds mCapacity.
        if( mList.size() == mCapacity )
        {
            evict();
        }
        // Record aKey as the most recently used key.
        typename List::iterator it = mList.insert( mList.end(), aKey );
        // Create the key-value entry, linked to the usage record.
        mCache.insert( std::make_pair( aKey, std::make_pair( aValue, it ) ) );
    }
    // Purge the least recently used entry (front of the history list).
    void evict()
    {
        assert( !mList.empty() );
        // Identify the least recently used key.
        const typename Cache::iterator it = mCache.find( mList.front() );
        // Erase both elements to completely purge the record.
        mCache.erase( it );
        mList.pop_front();
    }
private:
    List mList;        // access history, least recently used at the front
    Cache mCache;      // key -> (value, history position)
    Fn mFn;            // miss handler
    size_t mCapacity;  // maximum number of entries
};
LRU近似算法是否允许?这里有一个可以执行2000万次get/set操作的算法,用于某些图像平滑算法。我不知道它是否是最好的,但肯定比JavaScript等效算法快得多,后者每秒只能执行150万次get/set操作。
使用unordered_map来跟踪循环缓冲区中的项目。循环缓冲区不像其他链表版本那样添加/删除节点。因此,除非缓存大小比L1/L2/L3缓存大得多,否则它应该至少对CPU的L1/L2/L3缓存友好。算法很简单。有一个时钟手指会驱逐受害者槽位,而另一个手指会将其中一些保存下来以免被驱逐,作为“第二次机会”,但是它会滞后50%的相位,以便如果缓存很大,则缓存项有足够的时间来获得第二次机会/免于被驱逐。
由于这是一种近似算法,您不应该期望它总是会驱逐最近未使用的缓存项。但是,它确实可以加速某些网络I/O、磁盘读写等速度比RAM慢的操作。我在一个VRAM虚拟缓冲区类中使用了这个算法,该类使用了系统视频RAM的100%(来自多个显卡)。VRAM比RAM慢,因此在RAM中缓存可以使6GB VRAM对于某些缓存友好的访问模式看起来像RAM一样快。
以下是实现:
#ifndef LRUCLOCKCACHE_H_
#define LRUCLOCKCACHE_H_
#include<vector>
#include<algorithm>
#include<unordered_map>
#include<functional>
#include<mutex>
#include<unordered_map>
/* LRU-CLOCK-second-chance implementation.
 *
 * Approximates LRU with a CLOCK algorithm over fixed circular buffers:
 * no list nodes are allocated or freed after construction, so the hot
 * data stays contiguous. Two "hands" walk the slot ring: one grants a
 * slot a second chance to survive, the other (half the ring behind it)
 * evicts slots whose second chance has expired. Being an approximation,
 * the evicted slot is not guaranteed to be the exact least recently used
 * one. Write-back policy: set() only marks a slot dirty; the backing
 * store is updated when the slot is evicted or on flush(). */
template< typename LruKey, typename LruValue>
class LruClockCache
{
public:
// allocates circular buffers for numElements number of cache slots
// readMiss: cache-miss for read operations. User needs to give this function
// to let the cache automatically get data from backing-store
// example: [&](MyClass key){ return redis.get(key); }
// takes a LruKey as key, returns LruValue as value
// writeMiss: cache-miss for write operations. User needs to give this function
// to let the cache automatically set data to backing-store
// example: [&](MyClass key, MyAnotherClass value){ redis.set(key,value); }
// takes a LruKey as key and LruValue as value
LruClockCache(size_t numElements,
const std::function<LruValue(LruKey)> & readMiss,
const std::function<void(LruKey,LruValue)> & writeMiss):size(numElements)
{
ctr = 0;
// 50% phase difference between eviction and second-chance hands of the "second-chance" CLOCK algorithm
ctrEvict = numElements/2;
loadData=readMiss;
saveData=writeMiss;
// initialize circular buffers
for(size_t i=0;i<numElements;i++)
{
valueBuffer.push_back(LruValue());
chanceToSurviveBuffer.push_back(0);
isEditedBuffer.push_back(0);
keyBuffer.push_back(LruKey());
}
}
// get element from cache
// if cache doesn't find it in circular buffers,
// then cache gets data from backing-store
// then returns the result to user
// then cache is available from RAM on next get/set access with same key
// NOTE(review): marked noexcept although the user-supplied miss handlers
// and the map operations may throw; an exception here would terminate
// the program — confirm that is intended.
inline
const LruValue get(const LruKey & key) noexcept
{
return accessClock2Hand(key,nullptr);
}
// thread-safe but slower version of get()
inline
const LruValue getThreadSafe(const LruKey & key) noexcept
{
std::lock_guard<std::mutex> lg(mut);
return accessClock2Hand(key,nullptr);
}
// set element to cache
// if cache doesn't find it in circular buffers,
// then cache sets data on just cache
// writing to backing-store only happens when
// another access evicts the cache slot containing this key/value
// or when cache is flushed by flush() method
// then returns the given value back
// then cache is available from RAM on next get/set access with same key
inline
void set(const LruKey & key, const LruValue & val) noexcept
{
accessClock2Hand(key,&val,1);
}
// thread-safe but slower version of set()
inline
void setThreadSafe(const LruKey & key, const LruValue & val) noexcept
{
std::lock_guard<std::mutex> lg(mut);
accessClock2Hand(key,&val,1);
}
// Write every dirty (edited) slot back to the backing store.
// NOTE(review): flushed entries are also erased from `mapping`, so a
// flushed key becomes a cache miss on its next access — confirm this
// drop-from-cache side effect is intended, not just a write-back.
void flush()
{
for (auto mp = mapping.cbegin(); mp != mapping.cend() /* not hoisted */; /* no increment */)
{
if (isEditedBuffer[mp->second] == 1)
{
isEditedBuffer[mp->second]=0;
auto oldKey = keyBuffer[mp->second];
auto oldValue = valueBuffer[mp->second];
saveData(oldKey,oldValue);
mapping.erase(mp++); // or "it = m.erase(it)" since C++11
}
else
{
++mp;
}
}
}
// CLOCK algorithm with 2 hand counters (1 for second chance for a cache slot to survive, 1 for eviction of cache slot)
// opType=0: get
// opType=1: set
LruValue const accessClock2Hand(const LruKey & key,const LruValue * value, const bool opType = 0)
{
// Hit path: the key already occupies a slot.
typename std::unordered_map<LruKey,size_t>::iterator it = mapping.find(key);
if(it!=mapping.end())
{
// Grant the slot a second chance so the eviction hand skips it once.
chanceToSurviveBuffer[it->second]=1;
if(opType == 1)
{
// "set": overwrite the slot and mark it dirty for later write-back.
isEditedBuffer[it->second]=1;
valueBuffer[it->second]=*value;
}
return valueBuffer[it->second];
}
else
{
// Miss path: advance both clock hands until the eviction hand lands
// on a victim slot whose second chance has expired.
long long ctrFound = -1;
LruValue oldValue;
LruKey oldKey;
while(ctrFound==-1)
{
// Second-chance hand: revoke this slot's survival chance.
if(chanceToSurviveBuffer[ctr]>0)
{
chanceToSurviveBuffer[ctr]=0;
}
ctr++;
if(ctr>=size)
{
ctr=0;
}
// Eviction hand: a slot with no survival chance is the victim.
if(chanceToSurviveBuffer[ctrEvict]==0)
{
ctrFound=ctrEvict;
oldValue = valueBuffer[ctrFound];
oldKey = keyBuffer[ctrFound];
}
ctrEvict++;
if(ctrEvict>=size)
{
ctrEvict=0;
}
}
// Dirty victim: its old value must be written back before reuse.
if(isEditedBuffer[ctrFound] == 1)
{
// if it is "get"
if(opType==0)
{
// The slot will hold clean (freshly loaded) data afterwards.
isEditedBuffer[ctrFound]=0;
}
// For "set" the flag stays 1: the slot becomes dirty with new data.
saveData(oldKey,oldValue);
// "get"
if(opType==0)
{
// Load the missing value, then repurpose the victim slot for it.
LruValue loadedData = loadData(key);
mapping.erase(keyBuffer[ctrFound]);
valueBuffer[ctrFound]=loadedData;
chanceToSurviveBuffer[ctrFound]=0;
mapping[key]=ctrFound;
keyBuffer[ctrFound]=key;
return loadedData;
}
else /* "set" */
{
// Cache-only write: repurpose the victim slot for the new value.
mapping.erase(keyBuffer[ctrFound]);
valueBuffer[ctrFound]=*value;
chanceToSurviveBuffer[ctrFound]=0;
mapping[key]=ctrFound;
keyBuffer[ctrFound]=key;
return *value;
}
}
else // not edited
{
// "set"
if(opType == 1)
{
isEditedBuffer[ctrFound]=1;
}
// "get"
if(opType == 0)
{
// Load the missing value, then repurpose the victim slot for it.
LruValue loadedData = loadData(key);
mapping.erase(keyBuffer[ctrFound]);
valueBuffer[ctrFound]=loadedData;
chanceToSurviveBuffer[ctrFound]=0;
mapping[key]=ctrFound;
keyBuffer[ctrFound]=key;
return loadedData;
}
else // "set"
{
// Cache-only write: repurpose the victim slot for the new value.
mapping.erase(keyBuffer[ctrFound]);
valueBuffer[ctrFound]=*value;
chanceToSurviveBuffer[ctrFound]=0;
mapping[key]=ctrFound;
keyBuffer[ctrFound]=key;
return *value;
}
}
}
}
private:
size_t size; // number of slots in the circular buffers
std::mutex mut; // guards only the *ThreadSafe entry points
std::unordered_map<LruKey,size_t> mapping; // key -> slot index
std::vector<LruValue> valueBuffer; // slot values
std::vector<unsigned char> chanceToSurviveBuffer; // 1 = slot has a second chance
std::vector<unsigned char> isEditedBuffer; // 1 = slot dirty (pending write-back)
std::vector<LruKey> keyBuffer; // slot keys
std::function<LruValue(LruKey)> loadData; // read-miss handler (backing-store read)
std::function<void(LruKey,LruValue)> saveData; // write-back handler (backing-store write)
size_t ctr; // second-chance hand position
size_t ctrEvict; // eviction hand position (starts at 50% phase offset)
};
#endif /* LRUCLOCKCACHE_H_ */
这里是用法:
// Example usage: a 5120-slot cache of HDD-backed MinecraftChunk objects.
using MyKeyType = std::string;
using MyValueType = MinecraftChunk;
// The two lambdas are the read-miss and write-miss handlers.
LruClockCache<MyKeyType,MyValueType> cache(1024*5,[&](MyKeyType key){
// cache miss (read)
// access data-store (network, hdd, graphics card, anything that is slower than RAM or higher-latency than RAM-latency x2)
return readChunkFromHDD(key);
},[&](MyKeyType key,MyValueType value){
// cache miss (write)
// access data-store
writeChunkToHDD(key,value);
});
// cache handles all cache-miss functions automatically
MinecraftChunk chunk = cache.get("world coordinates 1500 35 2000");
// cache handles all cache-miss functions automatically
cache.set("world coordinates 1502 35 1999",chunk);
cache.flush(); // clears all pending-writes in the cache and writes to backing-store
我在两年前实现了一个线程安全的LRU缓存。
通常情况下,LRU是通过HashMap和LinkedList来实现的。你可以搜索相关实现细节,有很多关于它的资源(Wikipedia也有很好的解释)。
为了保证线程安全,在修改LRU状态时需要加锁。
以下是我参考的C++代码。
这是实现。
/***
A template thread-safe LRU container.
Typically LRU cache is implemented using a doubly linked list and a hash map.
Doubly Linked List is used to store list of pages with most recently used page
at the start of the list. So, as more pages are added to the list,
least recently used pages are moved to the end of the list with page
at tail being the least recently used page in the list.
Additionally, this LRU provides time-to-live feature. Each entry has an expiration
datetime.
***/
#ifndef LRU_CACHE_H
#define LRU_CACHE_H
#include <iostream>
#include <list>
#include <boost/unordered_map.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/thread/mutex.hpp>
template <typename KeyType, typename ValueType>
class LRUCache {
private:
typedef boost::posix_time::ptime DateTime;
// Cache-entry
struct ListItem {
ListItem(const KeyType &key,
const ValueType &value,
const DateTime &expiration_datetime)
: m_key(key), m_value(value), m_expiration_datetime(expiration_datetime){}
KeyType m_key;
ValueType m_value;
DateTime m_expiration_datetime;
};
typedef boost::shared_ptr<ListItem> ListItemPtr;
typedef std::list<ListItemPtr> LruList;
typedef typename std::list<ListItemPtr>::iterator LruListPos;
typedef boost::unordered_map<KeyType, LruListPos> LruMapper;
// A mutext to ensuare thread-safety.
boost::mutex m_cache_mutex;
// Maximum number of entries.
std::size_t m_capacity;
// Stores cache-entries from latest to oldest.
LruList m_list;
// Mapper for key to list-position.
LruMapper m_mapper;
// Default time-to-live being add to entry every time we touch it.
unsigned long m_ttl_in_seconds;
/***
Note : This is a helper function whose function call need to be wrapped
within a lock. It returns true/false whether key exists and
not expires. Delete the expired entry if necessary.
***/
bool containsKeyHelper(const KeyType &key) {
bool has_key(m_mapper.count(key) != 0);
if (has_key) {
LruListPos pos = m_mapper[key];
ListItemPtr & cur_item_ptr = *pos;
// Remove the entry if key expires
if (isDateTimeExpired(cur_item_ptr->m_expiration_datetime)) {
has_key = false;
m_list.erase(pos);
m_mapper.erase(key);
}
}
return has_key;
}
/***
Locate an item in list by key, and move it at the front of the list,
which means make it the latest item.
Note : This is a helper function whose function call need to be wrapped
within a lock.
***/
void makeEntryTheLatest(const KeyType &key) {
if (m_mapper.count(key)) {
// Add original item at the front of the list,
// and update <Key, ListPosition> mapper.
LruListPos original_list_position = m_mapper[key];
const ListItemPtr & cur_item_ptr = *original_list_position;
m_list.push_front(cur_item_ptr);
m_mapper[key] = m_list.begin();
// Don't forget to update its expiration datetime.
m_list.front()->m_expiration_datetime = getExpirationDatetime(m_list.front()->m_expiration_datetime);
// Erase the item at original position.
m_list.erase(original_list_position);
}
}
public:
/***
Cache should have capacity to limit its memory usage.
We also add time-to-live for each cache entry to expire
the stale information. By default, ttl is one hour.
***/
LRUCache(std::size_t capacity, unsigned long ttl_in_seconds = 3600)
: m_capacity(capacity), m_ttl_in_seconds(ttl_in_seconds) {}
/***
Return now + time-to-live
***/
DateTime getExpirationDatetime(const DateTime &now) {
static const boost::posix_time::seconds ttl(m_ttl_in_seconds);
return now + ttl;
}
/***
If input datetime is older than current datetime,
then it is expired.
***/
bool isDateTimeExpired(const DateTime &date_time) {
return date_time < boost::posix_time::second_clock::local_time();
}
/***
Return the number of entries in this cache.
***/
std::size_t size() {
boost::mutex::scoped_lock lock(m_cache_mutex);
return m_mapper.size();
}
/***
Get value by key.
Return true/false whether key exists.
If key exists, input paramter value will get updated.
***/
bool get(const KeyType &key, ValueType &value) {
boost::mutex::scoped_lock lock(m_cache_mutex);
if (!containsKeyHelper(key)) {
return false;
} else {
// Make the entry the latest and update its TTL.
makeEntryTheLatest(key);
// Then get its value.
value = m_list.front()->m_value;
return true;
}
}
/***
Add <key, value> pair if no such key exists.
Otherwise, just update the value of old key.
***/
void put(const KeyType &key, const ValueType &value) {
boost::mutex::scoped_lock lock(m_cache_mutex);
if (containsKeyHelper(key)) {
// Make the entry the latest and update its TTL.
makeEntryTheLatest(key);
// Now we only need to update its value.
m_list.front()->m_value = value;
} else { // Key exists and is not expired.
if (m_list.size() == m_capacity) {
KeyType delete_key = m_list.back()->m_key;
m_list.pop_back();
m_mapper.erase(delete_key);
}
DateTime now = boost::posix_time::second_clock::local_time();
m_list.push_front(boost::make_shared<ListItem>(key, value,
getExpirationDatetime(now)));
m_mapper[key] = m_list.begin();
}
}
};
#endif
这里是单元测试。
#include "cxx_unit.h"
#include "lru_cache.h"
// Unit-test fixture for the TTL-enabled LRUCache, registered with the
// in-house FDS::CxxUnit framework. Each test exercises a capacity-3 cache.
struct LruCacheTest
: public FDS::CxxUnit::TestFixture<LruCacheTest>{
CXXUNIT_TEST_SUITE();
CXXUNIT_TEST(LruCacheTest, testContainsKey);
CXXUNIT_TEST(LruCacheTest, testGet);
CXXUNIT_TEST(LruCacheTest, testPut);
CXXUNIT_TEST_SUITE_END();
// Presence checks and eviction order, observed through get().
void testContainsKey();
// get() refreshing recency and reporting hits/misses.
void testGet();
// put(): insertion, eviction of the LRU entry, and value updates.
void testPut();
};
// Fill a capacity-3 cache past its limit and verify which keys survive.
// The trailing comments track the expected recency order after each call
// (most recently used first).
void LruCacheTest::testContainsKey() {
LRUCache<int,std::string> cache(3);
cache.put(1,"1"); // 1
cache.put(2,"2"); // 2,1
cache.put(3,"3"); // 3,2,1
cache.put(4,"4"); // 4,3,2  (1 evicted)
std::string value_holder("");
CXXUNIT_ASSERT(cache.get(1, value_holder) == false); // 4,3,2
// On a miss the output parameter must be left untouched.
CXXUNIT_ASSERT(value_holder == "");
CXXUNIT_ASSERT(cache.get(2, value_holder) == true); // 2,4,3
CXXUNIT_ASSERT(value_holder == "2");
cache.put(5,"5"); // 5, 2, 4  (3 evicted)
CXXUNIT_ASSERT(cache.get(3, value_holder) == false); // 5, 2, 4
CXXUNIT_ASSERT(value_holder == "2"); // value_holder is still "2"
CXXUNIT_ASSERT(cache.get(4, value_holder) == true); // 4, 5, 2
CXXUNIT_ASSERT(value_holder == "4");
cache.put(2,"II"); // {2, "II"}, 4, 5  (update, no eviction)
CXXUNIT_ASSERT(cache.get(2, value_holder) == true); // 2, 4, 5
CXXUNIT_ASSERT(value_holder == "II");
// Cache-entries : {2, "II"}, {4, "4"}, {5, "5"}
CXXUNIT_ASSERT(cache.size() == 3);
CXXUNIT_ASSERT(cache.get(2, value_holder) == true);
CXXUNIT_ASSERT(cache.get(4, value_holder) == true);
CXXUNIT_ASSERT(cache.get(5, value_holder) == true);
}
// Verify that get() reports hits/misses correctly and that a hit moves
// the entry to the most-recently-used position. Trailing comments track
// the expected recency order (most recently used first).
void LruCacheTest::testGet() {
LRUCache<int,std::string> cache(3);
cache.put(1,"1"); // 1
cache.put(2,"2"); // 2,1
cache.put(3,"3"); // 3,2,1
cache.put(4,"4"); // 4,3,2  (1 evicted)
std::string value_holder("");
CXXUNIT_ASSERT(cache.get(1, value_holder) == false); // 4,3,2
// On a miss the output parameter must be left untouched.
CXXUNIT_ASSERT(value_holder == "");
CXXUNIT_ASSERT(cache.get(2, value_holder) == true); // 2,4,3
CXXUNIT_ASSERT(value_holder == "2");
cache.put(5,"5"); // 5,2,4  (3 evicted)
CXXUNIT_ASSERT(cache.get(5, value_holder) == true); // 5,2,4
CXXUNIT_ASSERT(value_holder == "5");
CXXUNIT_ASSERT(cache.get(4, value_holder) == true); // 4, 5, 2
CXXUNIT_ASSERT(value_holder == "4");
cache.put(2,"II"); // update of an existing key
CXXUNIT_ASSERT(cache.get(2, value_holder) == true); // {2 : "II"}, 4, 5
CXXUNIT_ASSERT(value_holder == "II");
// Cache-entries : {2, "II"}, {4, "4"}, {5, "5"}
CXXUNIT_ASSERT(cache.size() == 3);
CXXUNIT_ASSERT(cache.get(2, value_holder) == true);
CXXUNIT_ASSERT(cache.get(4, value_holder) == true);
CXXUNIT_ASSERT(cache.get(5, value_holder) == true);
}
// Verify put(): inserts past capacity evict the least recently used
// entry, and a put() of an existing key updates in place. Trailing
// comments track the expected recency order (most recently used first).
void LruCacheTest::testPut() {
LRUCache<int,std::string> cache(3);
cache.put(1,"1"); // 1
cache.put(2,"2"); // 2,1
cache.put(3,"3"); // 3,2,1
cache.put(4,"4"); // 4,3,2  (1 evicted)
cache.put(5,"5"); // 5,4,3  (2 evicted)
std::string value_holder("");
CXXUNIT_ASSERT(cache.get(2, value_holder) == false); // 5,4,3
// On a miss the output parameter must be left untouched.
CXXUNIT_ASSERT(value_holder == "");
CXXUNIT_ASSERT(cache.get(4, value_holder) == true); // 4,5,3
CXXUNIT_ASSERT(value_holder == "4");
cache.put(2,"II"); // II,4,5  (3 evicted)
CXXUNIT_ASSERT(cache.get(2, value_holder) == true); // II,4,5
CXXUNIT_ASSERT(value_holder == "II");
// Cache-entries : {2, "II"}, {4, "4"}, {5, "5"}
CXXUNIT_ASSERT(cache.size() == 3);
CXXUNIT_ASSERT(cache.get(2, value_holder) == true);
CXXUNIT_ASSERT(cache.get(4, value_holder) == true);
CXXUNIT_ASSERT(cache.get(5, value_holder) == true);
}
// Register the fixture with the test framework.
CXXUNIT_REGISTER_TEST(LruCacheTest);
// Example usage of the sweet::Cache container: key type, value type and
// capacity (48) are template parameters.
sweet::Cache<std::string,std::vector<int>, 48> c1;
c1.insert("key1", std::vector<int>());
c1.insert("key2", std::vector<int>());
// Both entries fit within the capacity, so "key1" must still be present.
assert(c1.contains("key1"));
我们需要创建一个数据结构,使我们能够同时优化所有三个主要操作。
对比各候选数据结构在查找、插入、删除三种操作上的时间复杂度,我们可以推断出:
对于一般情况,使用树是最好的选择。
如果我们知道缓存的大小足够大(并且插入新元素不频繁),以至于很少需要删除最旧的条目,则哈希表将是最佳选择。
如果删除旧条目比存储条目或查找缓存元素更重要,则链表可能是一个有效的选项:但在这种情况下,缓存基本上是无用的,添加它将不会提供任何好处。
在所有情况下,存储n个条目所需的内存为O(n)。
现在我最喜欢的问题是,我们能做得更好吗?
单一的数据结构可能不足以构建最有效的问题解决方案。一方面,我们有数据结构特别适合快速存储和检索条目。哈希表几乎是无法被打败的。另一方面,哈希表在维护事物排序方面很差,当检索它们包含的最小(或最大)元素时也不太好,但我们有其他可以很好处理这些问题的结构。根据我们想要保持的排序类型,我们可能需要树,或者只需要列表就可以了。LRU页面置换技术:
当引用一个页面时,所需的页面可能已经在缓存中。
如果在缓存中
:我们需要将其移到缓存队列的前面。
如果不在缓存中
:我们将其加入到缓存中。简单来说,我们将新页面添加到缓存队列的前面。如果缓存已满,即所有帧都已满,我们将从缓存队列的后面删除一个页面,并将新页面添加到缓存队列的前面。
def count_page_faults(pages, csize):
    """Simulate an LRU page cache of capacity ``csize``.

    ``cache`` keeps pages in usage order with the most recently used page
    at the END of the list, so the least recently used page sits at index
    0 and is the one evicted on a full-cache miss.  (The original inline
    comments claimed new pages were added at the *front*, contradicting
    the code; the logic itself was correct.)

    :param pages: iterable of page references (the reference string)
    :param csize: maximum number of pages the cache can hold
    :return: number of page faults
    """
    cache = []
    fault = 0
    for page in pages:
        if page in cache:
            # Hit: move the page to the most-recent end of the list.
            cache.remove(page)
            cache.append(page)
        else:
            # Miss: evict the LRU page (front of the list) if full.
            # len(cache) replaces the manually maintained counter `n`.
            if len(cache) == csize:
                cache.pop(0)
            # Page not in cache => page fault.
            fault += 1
            cache.append(page)
    return fault


if __name__ == "__main__":
    # Cache size
    csize = int(input())
    # Sequence of page references
    pages = list(map(int, input().split()))
    print("Page Fault:", count_page_faults(pages, csize))
输入/输出
Input:
3
1 2 3 4 1 2 5 1 2 3 4 5
Output:
Page Fault: 10
Java 代码:
package DataStructures;
import java.util.HashMap;
/**
 * Doubly-linked-list node used by LRUCache.
 * The pre/next links are managed exclusively by LRUCache.
 */
class Node2 {
    int key;
    int value;
    Node2 pre;
    Node2 next;

    Node2(int key, int value) {
        this.key = key;
        this.value = value;
    }
}

/**
 * LRU cache backed by a HashMap for O(1) lookup and a doubly linked list
 * for usage order (head = most recently used, tail = least recently used).
 */
class LRUCache {
    private HashMap<Integer, Node2> lrumap;
    private int capacity;
    private Node2 head, tail;

    LRUCache(int capacity) {
        this.capacity = capacity;
        lrumap = new HashMap<Integer, Node2>();
        head = null;
        tail = null;
    }

    /**
     * Unlink node from the list.
     * BUGFIX: the original dereferenced head.next (resp. tail.pre) without
     * a null check, throwing NullPointerException whenever the list held a
     * single node (e.g. get() right after the first set()).
     * Also clears the node's own links so a re-inserted node starts clean
     * instead of carrying stale pre/next pointers.
     */
    public void deleteNode(Node2 node) {
        if (node == head && node == tail) {
            // Only element in the list.
            head = null;
            tail = null;
        } else if (node == head) {
            head.next.pre = null;
            head = head.next;
        } else if (node == tail) {
            tail.pre.next = null;
            tail = tail.pre;
        } else {
            node.pre.next = node.next;
            node.next.pre = node.pre;
        }
        node.pre = null;
        node.next = null;
    }

    /** Link node in as the new head (most recently used). */
    public void addToHead(Node2 node) {
        if (head == null && tail == null) {
            head = node;
            tail = node;
        } else {
            node.next = head;
            head.pre = node;
            head = node;
        }
    }

    /**
     * Return the value for key, or -1 if absent.
     * A hit moves the node to the head of the list.
     */
    public int get(int key) {
        if (lrumap.containsKey(key)) {
            Node2 gnode = lrumap.get(key);
            int result = gnode.value;
            deleteNode(gnode);
            addToHead(gnode);
            return result;
        }
        return -1;
    }

    /**
     * Insert or update key. On an insert at capacity, the tail (least
     * recently used) entry is evicted first. The stray debug println
     * ("remove=...") has been removed.
     */
    public void set(int key, int value) {
        if (lrumap.containsKey(key)) {
            Node2 snode = lrumap.get(key);
            snode.value = value;
            deleteNode(snode);
            addToHead(snode);
        } else {
            Node2 node = new Node2(key, value);
            if (lrumap.size() >= capacity) {
                // Evict the least recently used entry.
                lrumap.remove(tail.key);
                deleteNode(tail);
            }
            lrumap.put(key, node);
            addToHead(node);
        }
    }

    /** Print the list from most to least recently used. */
    public void show() {
        Node2 node = head;
        while (node.next != null) {
            System.out.print("[" + node.key + "," + node.value + "]--");
            node = node.next;
        }
        System.out.print("[" + node.key + "," + node.value + "]--");
        System.out.println();
    }
}
// Demo driver: exercises a capacity-4 LRUCache and prints its state
// after each mutation (head of the printed list = most recently used).
public class LRUCacheDS{
public static void main(String[] args) {
LRUCache lr= new LRUCache(4);
lr.set(4,8);
lr.set(2,28);
lr.set(6,38);
lr.show(); // [6,38]--[2,28]--[4,8]--
lr.set(14,48);
lr.show(); // [14,48]--[6,38]--[2,28]--[4,8]--
lr.set(84,58);
lr.show(); // cache full: key 4 (least recently used) evicted
lr.set(84,34);
lr.show(); // existing key 84 updated in place and moved to head
lr.get(6); // touching key 6 moves it to the head
System.out.println("---------------------------------------------------------");
lr.show();
}
}