LeetCode || 146. LRU Cache


problem:

Design and implement a data structure for Least Recently Used (LRU) cache. It should support the following operations: get and set.

get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.
set(key, value) - Set or insert the value if the key is not already present. When the cache reaches its capacity, it should invalidate the least recently used item before inserting a new item.

Tags: Data Structure
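
To make the eviction rule concrete, here is a short illustrative trace (the capacity and the key/value numbers are made up for the example):

LRUCache cache(2);      // capacity 2
cache.set(1, 10);       // cache holds {1=10}
cache.set(2, 20);       // cache holds {2=20, 1=10}
cache.get(1);           // returns 10; key 1 becomes the most recently used
cache.set(3, 30);       // full: key 2 is the least recently used, so it is evicted
cache.get(2);           // returns -1
cache.get(3);           // returns 30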
The task: implement an LRU cache. The statement does not spell out how "least recently used" is to be measured, which is the main difficulty.

thinking:

(1) I first implemented a version with unordered_map and list. It passed the small tests, but on the large tests (long runs of set and get) it timed out: picking the eviction victim scans the whole list, and after each eviction the map is rebuilt, so every operation is O(n).

The most efficient approach is to embed the list links directly in the node structure (a hand-rolled doubly linked list), so that moving a node to the front and evicting the tail are both O(1).

My idea: build a struct holding the key and a visit counter visited. Each access to a node increments its counter. While the node count is below the capacity, new nodes are inserted at the head of the list. Once the capacity is reached, the node with the smallest visit count is moved to the tail, the tail node is removed, and the new node is inserted at the head. An unordered_map is used to speed up lookups.
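
For comparison with the approach above, here is a minimal sketch (my own, not the submitted solution) of the standard O(1) layout: an std::list keeps the usage order, and the unordered_map maps each key to its list node, so every access is a single splice to the front and eviction just pops the back.

#include <list>
#include <unordered_map>
using namespace std;

// Sketch only: std::list keeps usage order (front = most recently used),
// the map points directly into the list.
class LRUCacheSketch {
    int cap;
    list<pair<int,int>> items;                               // (key, value) pairs in usage order
    unordered_map<int, list<pair<int,int>>::iterator> pos;   // key -> node in the list
public:
    LRUCacheSketch(int capacity) : cap(capacity) {}
    int get(int key) {
        auto it = pos.find(key);
        if (it == pos.end()) return -1;
        items.splice(items.begin(), items, it->second);      // move the touched node to the front in O(1)
        return it->second->second;
    }
    void set(int key, int value) {
        auto it = pos.find(key);
        if (it != pos.end()) {
            it->second->second = value;
            items.splice(items.begin(), items, it->second);
            return;
        }
        if ((int)items.size() == cap) {
            pos.erase(items.back().first);                    // evict the least recently used entry
            items.pop_back();
        }
        items.push_front({key, value});
        pos[key] = items.begin();
    }
};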

code:

#include <list>
#include <unordered_map>
#include <algorithm>
using namespace std;

// Original attempt: every get/set walks the list (and eviction rebuilds the map),
// so each operation is O(n) and the large tests time out.
class node{
public:
    int _key;       // holds the cached value in this attempt (set writes the value here)
    int _visited;   // access counter used to pick the eviction victim
    node(int key, int visited):_key(key),_visited(visited){}
};

class LRUCache{
private:
    unordered_map<int,list<node *>::iterator> map_record;
    list<node *> list_record;
    int size;
public:
    LRUCache(int capacity) {
        size=capacity;
    }
    int get(int key) {
        if(map_record.count(key)!=0)
        {
            unordered_map<int,list<node *>::iterator>::iterator it_a=map_record.find(key);
            list<node *>::iterator it_b=it_a->second;
            node *node_tmp=*it_b;
            node_tmp->_visited+=1;
            adjust();
            return node_tmp->_key;
        }
        else
            return -1;
    }
    void set(int key, int value) {
        if(map_record.count(key)!=0)
        {
            unordered_map<int,list<node *>::iterator>::iterator it_a=map_record.find(key);
            list<node *>::iterator it_b=it_a->second;
            node *node_tmp=*it_b;
            node_tmp->_key=value;
            node_tmp->_visited+=1;
        }
        else
        {
            if((int)list_record.size()<size)
            {
                node *tmp_node=new node(value,1);
                list_record.push_front(tmp_node);
                list<node *>::iterator it=find(list_record.begin(),list_record.end(),tmp_node);
                map_record.insert(make_pair(value,it));
            }
            else
            {
                adjust();
                node *tmp_node=new node(value,1);
                list_record.push_front(tmp_node);
                // rebuild the whole map after eviction: another O(n) pass
                map_record.clear();
                for(list<node *>::iterator it=list_record.begin();it!=list_record.end();++it)
                {
                    node *tmp=*it;
                    map_record.insert(make_pair(tmp->_key,it));
                }
            }
        }
    }
protected:
    // move the least-visited node to the back of the list and drop it: O(n) scan
    void adjust()
    {
        if(list_record.empty())
            return;
        node *index=list_record.back();
        list<node *>::iterator min_it=--list_record.end();
        int min_visited=index->_visited;
        for(list<node *>::iterator it=list_record.begin();it!=list_record.end();++it)
        {
            node *tmp_node = *it;
            if(tmp_node->_visited<min_visited)
            {
                min_visited=tmp_node->_visited;
                min_it=it;
            }
        }
        list<node *>::iterator last=list_record.end();
        iter_swap(min_it,--last);
        list_record.pop_back();
    }
};



An efficient version that passes all the tests:

#include <climits>
#include <unordered_map>
using namespace std;

struct CacheNode {
    int key;
    int value;
    CacheNode* next;
    CacheNode* prev;
    CacheNode(int _key, int _value) {
        key = _key;
        value = _value;
        next = nullptr;
        prev = nullptr;
    }
};

class LRUCache{
public:
    LRUCache(int capacity) {
        _capacity = capacity;
        head = new CacheNode(INT_MIN,-1);   // dummy head; the most recently used node sits right after it
        tail = head->next;                  // tail tracks the least recently used node
        len = 0;
    }

    int get(int key) {
        auto found = cache.find(key);
        if (found != cache.end()) {
            if (found->second == tail) {
                tail = tail->prev;
            }
            insertToHead(found->second);    // touching a key moves it to the head
            if (tail == head) tail = head->next;
            return found->second->value;
        }
        return -1;
    }

    void set(int key, int value) {
        auto found = cache.find(key);
        if (found != cache.end()) {
            found->second->value = value;
            if (found->second == tail) {
                tail = tail->prev;
            }
            insertToHead(found->second);
            if (tail == head) tail = head->next;
            return;
        }
        if (len == _capacity) {
            // full: evict the tail (least recently used), then insert the new node
            CacheNode* tmp = tail->prev;
            cache.erase(tail->key);
            deleteNodeLink(tail);
            delete tail;
            tail = tmp;
            insertToHead(new CacheNode(key, value));
            return;
        }
        insertToHead(new CacheNode(key, value));
        if (tail == nullptr) tail = head->next;
        len++;
    }

private:
    CacheNode* head;
    CacheNode* tail;
    int _capacity;
    int len;
    unordered_map<int, CacheNode*> cache;

    // unlink a node from the doubly linked list (O(1))
    void deleteNodeLink(CacheNode* node) {
        CacheNode* prev = nullptr;
        CacheNode* next = nullptr;
        if (node->prev) prev = node->prev;
        if (prev) {
            prev->next = node->next;
        }
        if (node->next) next = node->next;
        if (next) {
            next->prev = prev;
        }
    }

    // splice a node in right after the dummy head and record it in the map (O(1))
    void insertToHead(CacheNode* node) {
        deleteNodeLink(node);
        node->prev = head;
        node->next = head->next;
        if (head->next) head->next->prev = node;
        head->next = node;
        cache[node->key] = node;
    }
};
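
A small driver like the following (my own test harness, assuming the LRUCache class above is in scope) exercises both the hit path and the eviction path:

#include <iostream>

int main() {
    LRUCache cache(2);
    cache.set(1, 1);
    cache.set(2, 2);
    std::cout << cache.get(1) << "\n";   // expected 1
    cache.set(3, 3);                     // evicts key 2
    std::cout << cache.get(2) << "\n";   // expected -1
    cache.set(4, 4);                     // evicts key 1
    std::cout << cache.get(1) << "\n";   // expected -1
    std::cout << cache.get(3) << "\n";   // expected 3
    std::cout << cache.get(4) << "\n";   // expected 4
    return 0;
}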

