I. Redis Memory Management
Generally speaking, any software of a certain scale rolls its own memory management layer, for a simple reason: managing memory centrally lets you tailor it to your own workload. According to the experts, though, this is not necessarily the best choice, because it underestimates the people who write allocator libraries for a living; that said, nobody needs permission to roll their own, and many projects simply do. Redis follows the experts' advice and builds on better low-level allocation libraries instead, choosing among tcmalloc, jemalloc, and glibc's malloc (ptmalloc) according to the situation.
Generally speaking, memory management covers a few areas:
sizing (whether to use pages and how large a page should be); allocation (how blocks are handed out and in what sizes); reclamation (when to reclaim, what to do with reclaimed memory, and how to handle fragmentation); the reclamation policy itself; and read/write handling plus exception control. With those points in mind, here is how Redis addresses each of them:
Maximum memory management: the default is 0, i.e. unlimited, bounded only by physical memory; in practice, to keep the server healthy and leave some headroom, a limit is usually set according to the actual workload.
Reclamation management: memory is reclaimed using LRU, approximated LRU and LFU policies; fragmentation is handled by active defragmentation, which migrates allocations so that free space can be coalesced.
Read/write and exception control: for speed, reads and writes operate on fixed-size units, in other words allocations are forcibly aligned; when memory usage exceeds the configured limit, out-of-memory handling deals with the overflow, and this churn of rewriting data is also a source of memory fragmentation.
As the analysis above suggests, design is really about balance: trade-offs made according to the actual situation.
II. Source Code Analysis
Let's read the source code along the lines of the analysis above.
1. Basic memory management
The zmalloc.h header in Redis defines:
#ifndef __ZMALLOC_H
#define __ZMALLOC_H
/* Double expansion needed for stringification of macro values. */
#define __xstr(s) __str(s)
#define __str(s) #s
#if defined(USE_TCMALLOC)
#define ZMALLOC_LIB ("tcmalloc-" __xstr(TC_VERSION_MAJOR) "." __xstr(TC_VERSION_MINOR))
#include <google/tcmalloc.h>
#if (TC_VERSION_MAJOR == 1 && TC_VERSION_MINOR >= 6) || (TC_VERSION_MAJOR > 1)
#define HAVE_MALLOC_SIZE 1
#define zmalloc_size(p) tc_malloc_size(p)
#else
#error "Newer version of tcmalloc required"
#endif
#elif defined(USE_JEMALLOC)
#define ZMALLOC_LIB ("jemalloc-" __xstr(JEMALLOC_VERSION_MAJOR) "." __xstr(JEMALLOC_VERSION_MINOR) "." __xstr(JEMALLOC_VERSION_BUGFIX))
#include <jemalloc/jemalloc.h>
#if (JEMALLOC_VERSION_MAJOR == 2 && JEMALLOC_VERSION_MINOR >= 1) || (JEMALLOC_VERSION_MAJOR > 2)
#define HAVE_MALLOC_SIZE 1
#define zmalloc_size(p) je_malloc_usable_size(p)
#else
#error "Newer version of jemalloc required"
#endif
#elif defined(__APPLE__)
#include <malloc/malloc.h>
#define HAVE_MALLOC_SIZE 1
#define zmalloc_size(p) malloc_size(p)
#endif
#ifndef ZMALLOC_LIB
#define ZMALLOC_LIB "libc"
#ifdef __GLIBC__
#include <malloc.h>
#define HAVE_MALLOC_SIZE 1
#define zmalloc_size(p) malloc_usable_size(p)
#endif
#endif
These macros detect which allocator libraries are available on the build machine; the first two branches handle tcmalloc and jemalloc. If either of those superior allocators is present it is used preferentially; if neither is, glibc's malloc is used. The main interface is also very simple:
/* Allocate memory, dispatching to whichever allocator library was selected. */
void *zmalloc(size_t size);
/* Allocate zero-initialized memory (wraps calloc). */
void *zcalloc(size_t size);
/* Resize an existing allocation. */
void *zrealloc(void *ptr, size_t size);
void zfree(void *ptr);
/* Duplicate a C string. */
char *zstrdup(const char *s);
/* Total memory currently tracked as used. */
size_t zmalloc_used_memory(void);
/* Install the out-of-memory handler. */
void zmalloc_set_oom_handler(void (*oom_handler)(size_t));
/* Resident set size (RSS) of the current process, i.e. memory not swapped out. */
size_t zmalloc_get_rss(void);
int zmalloc_get_allocator_info(size_t *allocated, size_t *active, size_t *resident);
void set_jemalloc_bg_thread(int enable);
int jemalloc_purge(); /* ask jemalloc to purge dirty pages (defragmentation support) */
size_t zmalloc_get_private_dirty(long pid); /* private dirty memory of the process */
size_t zmalloc_get_smap_bytes_by_field(char *field, long pid); /* read a field from /proc/self/smaps */
size_t zmalloc_get_memory_size(void); /* total physical memory */
void zlibc_free(void *ptr);
#ifdef HAVE_DEFRAG
void zfree_no_tcache(void *ptr);
void *zmalloc_no_tcache(size_t size);
#endif
#ifndef HAVE_MALLOC_SIZE
/* Total size of the underlying allocation behind a pointer. */
size_t zmalloc_size(void *ptr);
size_t zmalloc_usable(void *ptr);
#else
#define zmalloc_usable(p) zmalloc_size(p)
#endif
Now the basic allocation path:
#if defined(USE_TCMALLOC)
#define malloc(size) tc_malloc(size)
#define calloc(count,size) tc_calloc(count,size)
#define realloc(ptr,size) tc_realloc(ptr,size)
#define free(ptr) tc_free(ptr)
#elif defined(USE_JEMALLOC)
#define malloc(size) je_malloc(size)
#define calloc(count,size) je_calloc(count,size)
#define realloc(ptr,size) je_realloc(ptr,size)
#define free(ptr) je_free(ptr)
#define mallocx(size,flags) je_mallocx(size,flags)
#define dallocx(ptr,flags) je_dallocx(ptr,flags)
#endif
/* In essence, a mutex that lets the used_memory counter be updated safely from
 * multiple threads. The trailing backslash splices the macro body onto the next
 * line, and the "##" operator pastes tokens together, so "var ## _mutex"
 * expands to e.g. used_memory_mutex. */
#define atomicIncr(var,count) do { \
pthread_mutex_lock(&var ## _mutex); \
var += (count); \
pthread_mutex_unlock(&var ## _mutex); \
} while(0)
#define atomicGetIncr(var,oldvalue_var,count) do { \
pthread_mutex_lock(&var ## _mutex); \
oldvalue_var = var; \
var += (count); \
pthread_mutex_unlock(&var ## _mutex); \
} while(0)
/* The do { ... } while(0) wrapper is the usual trick to make a multi-statement
 * macro behave like a single statement; it is not a real loop. The body below
 * rounds the requested size up to a multiple of sizeof(long) (8-byte alignment
 * on 64-bit platforms) before adding it to used_memory. */
#define update_zmalloc_stat_alloc(__n) do { \
size_t _n = (__n); \
if (_n&(sizeof(long)-1)) _n += sizeof(long)-(_n&(sizeof(long)-1)); \
atomicIncr(used_memory,__n); \
} while(0)
#define update_zmalloc_stat_free(__n) do { \
size_t _n = (__n); \
if (_n&(sizeof(long)-1)) _n += sizeof(long)-(_n&(sizeof(long)-1)); \
atomicDecr(used_memory,__n); \
} while(0)
/* Default OOM handler: print the error and abort. */
static void zmalloc_default_oom(size_t size) {
fprintf(stderr, "zmalloc: Out of memory trying to allocate %zu bytes\n",
size);
fflush(stderr);
abort();
}
static void (*zmalloc_oom_handler)(size_t) = zmalloc_default_oom;
void *zmalloc(size_t size) {
void *ptr = malloc(size+PREFIX_SIZE); /* allocate PREFIX_SIZE extra bytes to store the size */
if (!ptr) zmalloc_oom_handler(size);
#ifdef HAVE_MALLOC_SIZE
update_zmalloc_stat_alloc(zmalloc_size(ptr));
return ptr;
#else
*((size_t*)ptr) = size;
update_zmalloc_stat_alloc(size+PREFIX_SIZE);
return (char*)ptr+PREFIX_SIZE;
#endif
}
Depending on the macro configuration the corresponding malloc is invoked; if it fails, the registered OOM handler is called. The rounding in update_zmalloc_stat_alloc pads the size to an 8-byte boundary so that used_memory is measured accurately, since every underlying malloc aligns its allocations that way anyway. The atomicIncr underneath is simply a thread-safe increment of that counter.
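To make the rounding concrete, the standalone sketch below reproduces just that arithmetic outside Redis (an illustration only, not Redis code):
#include <stdio.h>

/* Same rounding rule as update_zmalloc_stat_alloc: round n up to a multiple of sizeof(long). */
static size_t round_up(size_t n) {
    if (n & (sizeof(long)-1))
        n += sizeof(long) - (n & (sizeof(long)-1));
    return n;
}

int main(void) {
    size_t sizes[] = {1, 8, 13, 24, 100};
    for (int i = 0; i < 5; i++)
        printf("%zu -> %zu\n", sizes[i], round_up(sizes[i]));
    /* On a 64-bit system this prints 1->8, 8->8, 13->16, 24->24, 100->104. */
    return 0;
}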
Next, block allocation and resizing:
void *zcalloc(size_t size) {
/* still delegates to calloc */
void *ptr = calloc(1, size+PREFIX_SIZE);
if (!ptr) zmalloc_oom_handler(size);
#ifdef HAVE_MALLOC_SIZE
update_zmalloc_stat_alloc(zmalloc_size(ptr));
return ptr;
#else
*((size_t*)ptr) = size;
update_zmalloc_stat_alloc(size+PREFIX_SIZE);
return (char*)ptr+PREFIX_SIZE;
#endif
}
/* Also built on realloc; behaves essentially like the standard realloc. */
void *zrealloc(void *ptr, size_t size) {
#ifndef HAVE_MALLOC_SIZE
void *realptr;
#endif
size_t oldsize;
void *newptr;
if (size == 0 && ptr != NULL) {
zfree(ptr);
return NULL;
}
if (ptr == NULL) return zmalloc(size);
#ifdef HAVE_MALLOC_SIZE
oldsize = zmalloc_size(ptr);
newptr = realloc(ptr,size);
if (!newptr) zmalloc_oom_handler(size);
update_zmalloc_stat_free(oldsize);
update_zmalloc_stat_alloc(zmalloc_size(newptr));
return newptr;
#else
realptr = (char*)ptr-PREFIX_SIZE;
oldsize = *((size_t*)realptr);
newptr = realloc(realptr,size+PREFIX_SIZE);
if (!newptr) zmalloc_oom_handler(size);
*((size_t*)newptr) = size;
update_zmalloc_stat_free(oldsize+PREFIX_SIZE);
update_zmalloc_stat_alloc(size+PREFIX_SIZE);
return (char*)newptr+PREFIX_SIZE;
#endif
}
Now the release path:
void zfree(void *ptr) {
#ifndef HAVE_MALLOC_SIZE
void *realptr;
size_t oldsize;
#endif
if (ptr == NULL) return;
#ifdef HAVE_MALLOC_SIZE
update_zmalloc_stat_free(zmalloc_size(ptr));
free(ptr);
#else
realptr = (char*)ptr-PREFIX_SIZE;
oldsize = *((size_t*)realptr);
update_zmalloc_stat_free(oldsize+PREFIX_SIZE);
free(realptr);
#endif
}
The free routine works much like C's: it hands the memory back to the allocator. The one thing to watch is that, when a size prefix is in use, the pointer must first be rewound to the real start of the allocation before freeing; get that wrong and you corrupt the heap. The hard part of this code is the macro machinery: if you are not comfortable with C's predefined macros, conditional compilation and function-like macros, the cascade can be dizzying.
The remaining functions, including the ones that report on the current process, are essentially reads of files under /proc on Linux; if you are used to inspecting memory and other resources on Linux, they will look very familiar.
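As an illustration of that style (not Redis's actual implementation; zmalloc_get_rss on Linux parses /proc/<pid>/stat), a sketch that pulls the resident set size out of /proc/self/status could look like this:
#include <stdio.h>
#include <string.h>

/* Illustrative only: return VmRSS in kB as reported by /proc/self/status, or 0 on failure. */
static long rss_kb_from_proc(void) {
    FILE *fp = fopen("/proc/self/status", "r");
    if (!fp) return 0;
    char line[256];
    long kb = 0;
    while (fgets(line, sizeof(line), fp)) {
        if (strncmp(line, "VmRSS:", 6) == 0) {
            sscanf(line + 6, "%ld", &kb);
            break;
        }
    }
    fclose(fp);
    return kb;
}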
2. Memory size management
Memory size control has come up repeatedly; here is the definition:
static size_t used_memory = 0;
pthread_mutex_t used_memory_mutex = PTHREAD_MUTEX_INITIALIZER;
size_t zmalloc_used_memory(void) {
size_t um;
atomicGet(used_memory,um);
return um;
}
This function exposes used_memory, and it is consulted whenever memory may need to be freed:
int freeMemoryIfNeeded(void) {
int keys_freed = 0;
.....
/* Finally remove the selected key. */
if (bestkey) {
...... /* first two uses below */
delta = (long long) zmalloc_used_memory();
......
delta -= (long long) zmalloc_used_memory();
......
}
latencyEndMonitor(latency);
latencyAddSampleIfNeeded("eviction-cycle",latency);
return C_OK;
cant_free:
while(bioPendingJobsOfType(BIO_LAZY_FREE)) {
/* and one more use here */
if (((mem_reported - zmalloc_used_memory()) + mem_freed) >= mem_tofree)
break;
usleep(1000);
}
return C_ERR;
}
When memory runs short, the configured policy is applied to reclaim it. Reclamation boils down to two things: removing objects whose expiry time has passed, and, once used_memory crosses the trigger point, forcibly evicting objects.
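In simplified form the trigger is just a comparison of the tracked counter against the configured ceiling. The sketch below is a stripped-down illustration, not the real getMaxmemoryState(), which also discounts replica output buffers and the AOF buffer before comparing:
/* Simplified over-limit check (illustration only). */
static int over_maxmemory(size_t maxmemory, size_t *to_free) {
    size_t used = zmalloc_used_memory();
    if (maxmemory == 0 || used <= maxmemory) return 0; /* no limit set, or still within it */
    *to_free = used - maxmemory; /* how much the eviction loop must reclaim */
    return 1;
}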
3. Reclamation policies: LRU and LFU
Expired keys are removed actively, passively on access, and via expiry commands propagated to replicas; on top of that sits the over-limit eviction policy. The first three are easy to understand, so the focus here is on the last one.
First, the data structures:
struct redisServer {
/* General */
pid_t pid; /* Main process pid. */
char *configfile; /* Absolute config file path, or NULL */
...
_Atomic unsigned int lruclock; /* Clock for LRU eviction */
...
}
typedef struct redisObject {
unsigned type:4;
unsigned encoding:4;
unsigned lru:LRU_BITS; /* LRU time (relative to global lru_clock) or
* LFU data (least significant 8 bits frequency
* and most significant 16 bits access time). */
int refcount;
void * ptr;
} robj;
LRU is the classic least-recently-used policy from operating systems; if you have studied OS scheduling and page replacement you know how it works. Its weakness is that a key touched only once recently, and never again afterwards, still looks valuable to LRU; LFU adds a recent-usage-frequency dimension precisely to rule out that case.
int freeMemoryIfNeeded(void) {
int keys_freed = 0;
/* By default replicas should ignore maxmemory
* and just be masters exact copies. */
if (server.masterhost && server.repl_slave_ignore_maxmemory) return C_OK;
size_t mem_reported, mem_tofree, mem_freed;
mstime_t latency, eviction_latency;
long long delta;
int slaves = listLength(server.slaves);
/* When clients are paused the dataset should be static not just from the
* POV of clients not being able to write, but also from the POV of
* expires and evictions of keys not being performed. */
if (clientsArePaused()) return C_OK;
if (getMaxmemoryState(&mem_reported,NULL,&mem_tofree,NULL) == C_OK)
return C_OK;
mem_freed = 0;
if (server.maxmemory_policy == MAXMEMORY_NO_EVICTION)
goto cant_free; /* We need to free memory, but policy forbids. */
latencyStartMonitor(latency);
/* Keep evicting in a loop until enough memory has been freed. */
while (mem_freed < mem_tofree) {
int j, k, i;
static unsigned int next_db = 0;
sds bestkey = NULL;
int bestdbid;
redisDb *db;
dict *dict;
dictEntry *de;
/* Eviction pool shared by the LRU/LFU and volatile-ttl policies. */
if (server.maxmemory_policy & (MAXMEMORY_FLAG_LRU|MAXMEMORY_FLAG_LFU) ||
server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL)
{
struct evictionPoolEntry *pool = EvictionPoolLRU;
while(bestkey == NULL) {
unsigned long total_keys = 0, keys;
/* We don't want to make local-db choices when expiring keys,
* so to start populate the eviction pool sampling keys from
* every DB. */
/* Loop over every DB, sampling keys according to the policy and filling the pool. */
for (i = 0; i < server.dbnum; i++) {
db = server.db+i;
dict = (server.maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) ?
db->dict : db->expires;
if ((keys = dictSize(dict)) != 0) {
evictionPoolPopulate(i, dict, db->dict, pool);
total_keys += keys;
}
}
if (!total_keys) break; /* No keys to evict. */
/* Go backward from best to worst element to evict. */
/* Walk the pool from the best eviction candidate (highest score) downwards. */
for (k = EVPOOL_SIZE-1; k >= 0; k--) {
if (pool[k].key == NULL) continue;
bestdbid = pool[k].dbid;
if (server.maxmemory_policy & MAXMEMORY_FLAG_ALLKEYS) {
de = dictFind(server.db[pool[k].dbid].dict,
pool[k].key);
} else {
de = dictFind(server.db[pool[k].dbid].expires,
pool[k].key);
}
/* Remove the entry from the pool. */
if (pool[k].key != pool[k].cached)
sdsfree(pool[k].key);
pool[k].key = NULL;
pool[k].idle = 0;
/* If the key exists, is our pick. Otherwise it is
* a ghost and we need to try the next element. */
if (de) {
bestkey = dictGetKey(de);
break;
} else {
/* Ghost... Iterate again. */
}
}
}
}
/* volatile-random and allkeys-random policy */
/* Random policies: just pick a random key. */
else if (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM ||
server.maxmemory_policy == MAXMEMORY_VOLATILE_RANDOM)
{
/* When evicting a random key, we try to evict a key for
* each DB, so we use the static 'next_db' variable to
* incrementally visit all DBs. */
for (i = 0; i < server.dbnum; i++) {
j = (++next_db) % server.dbnum;
db = server.db+j;
dict = (server.maxmemory_policy == MAXMEMORY_ALLKEYS_RANDOM) ?
db->dict : db->expires;
if (dictSize(dict) != 0) {
de = dictGetRandomKey(dict);
bestkey = dictGetKey(de);
bestdbid = j;
break;
}
}
}
/* Finally remove the selected key. */
if (bestkey) {
db = server.db+bestdbid;
robj *keyobj = createStringObject(bestkey,sdslen(bestkey));
propagateExpire(db,keyobj,server.lazyfree_lazy_eviction);
/* We compute the amount of memory freed by db*Delete() alone.
* It is possible that actually the memory needed to propagate
* the DEL in AOF and replication link is greater than the one
* we are freeing removing the key, but we can't account for
* that otherwise we would never exit the loop.
*
* AOF and Output buffer memory will be freed eventually so
* we only care about memory used by the key space. */
delta = (long long) zmalloc_used_memory();
latencyStartMonitor(eviction_latency);
if (server.lazyfree_lazy_eviction)
dbAsyncDelete(db,keyobj);
else
dbSyncDelete(db,keyobj);
latencyEndMonitor(eviction_latency);
latencyAddSampleIfNeeded("eviction-del",eviction_latency);
latencyRemoveNestedEvent(latency,eviction_latency);
delta -= (long long) zmalloc_used_memory();
mem_freed += delta;
server.stat_evictedkeys++;
notifyKeyspaceEvent(NOTIFY_EVICTED, "evicted",
keyobj, db->id);
decrRefCount(keyobj);
keys_freed++;
/* When the memory to free starts to be big enough, we may
* start spending so much time here that is impossible to
* deliver data to the slaves fast enough, so we force the
* transmission here inside the loop. */
if (slaves) flushSlavesOutputBuffers();
/* Normally our stop condition is the ability to release
* a fixed, pre-computed amount of memory. However when we
* are deleting objects in another thread, it's better to
* check, from time to time, if we already reached our target
* memory, since the "mem_freed" amount is computed only
* across the dbAsyncDelete() call, while the thread can
* release the memory all the time. */
if (server.lazyfree_lazy_eviction && !(keys_freed % 16)) {
if (getMaxmemoryState(NULL,NULL,NULL,NULL) == C_OK) {
/* Let's satisfy our stop condition. */
mem_freed = mem_tofree;
}
}
} else {
latencyEndMonitor(latency);
latencyAddSampleIfNeeded("eviction-cycle",latency);
goto cant_free; /* nothing to free... */
}
}
latencyEndMonitor(latency);
latencyAddSampleIfNeeded("eviction-cycle",latency);
return C_OK;
cant_free:
/* We are here if we are not able to reclaim memory. There is only one
* last thing we can try: check if the lazyfree thread has jobs in queue
* and wait... */
while(bioPendingJobsOfType(BIO_LAZY_FREE)) {
if (((mem_reported - zmalloc_used_memory()) + mem_freed) >= mem_tofree)
break;
usleep(1000);
}
return C_ERR;
}
int freeMemoryIfNeededAndSafe(void) {
if (server.lua_timedout || server.loading) return C_OK;
return freeMemoryIfNeeded();
}
Redis keeps an eviction pool ordered by ascending idle score; note that it is only used by the non-random policies.
#define EVPOOL_SIZE 16
#define EVPOOL_CACHED_SDS_SIZE 255
struct evictionPoolEntry {
unsigned long long idle; /* Object idle time (inverse frequency for LFU) */
sds key; /* Key name. */
sds cached; /* Cached SDS object for key name. */
int dbid; /* Key DB number. */
};
static struct evictionPoolEntry *EvictionPoolLRU;
/* Return the LRU clock, based on the clock resolution. This is a time
* in a reduced-bits format that can be used to set and check the
* object->lru field of redisObject structures. */
unsigned int getLRUClock(void) {
return (mstime()/LRU_CLOCK_RESOLUTION) & LRU_CLOCK_MAX;
}
/* This function is used to obtain the current LRU clock.
* If the current resolution is lower than the frequency we refresh the
* LRU clock (as it should be in production servers) we return the
* precomputed value, otherwise we need to resort to a system call. */
/* How the LRU clock is obtained. */
unsigned int LRU_CLOCK(void) {
unsigned int lruclock;
if (1000/server.hz <= LRU_CLOCK_RESOLUTION) {
lruclock = server.lruclock;
} else {
lruclock = getLRUClock();
}
return lruclock;
}
/* Given an object returns the min number of milliseconds the object was never
* requested, using an approximated LRU algorithm. */
/* Approximate LRU: estimate how long the object has been idle. */
unsigned long long estimateObjectIdleTime(robj *o) {
unsigned long long lruclock = LRU_CLOCK();
if (lruclock >= o->lru) {
return (lruclock - o->lru) * LRU_CLOCK_RESOLUTION;
} else {
return (lruclock + (LRU_CLOCK_MAX - o->lru)) *
LRU_CLOCK_RESOLUTION;
}
}
/* Sample random entries from sampledict, score them, and insert them into the pool in ascending order. */
void evictionPoolPopulate(int dbid, dict *sampledict, dict *keydict, struct evictionPoolEntry *pool) {
int j, k, count;
dictEntry *samples[server.maxmemory_samples];
count = dictGetSomeKeys(sampledict,samples,server.maxmemory_samples);
for (j = 0; j < count; j++) {
unsigned long long idle;
sds key;
robj *o;
dictEntry *de;
de = samples[j];
key = dictGetKey(de);
/* If the dictionary we are sampling from is not the main
* dictionary (but the expires one) we need to lookup the key
* again in the key dictionary to obtain the value object. */
if (server.maxmemory_policy != MAXMEMORY_VOLATILE_TTL) {
if (sampledict != keydict) de = dictFind(keydict, key);
o = dictGetVal(de);
}
/* Calculate the idle time according to the policy. This is called
* idle just because the code initially handled LRU, but is in fact
* just a score where an higher score means better candidate. */
if (server.maxmemory_policy & MAXMEMORY_FLAG_LRU) {
idle = estimateObjectIdleTime(o);
} else if (server.maxmemory_policy & MAXMEMORY_FLAG_LFU) {
/* When we use an LRU policy, we sort the keys by idle time
* so that we expire keys starting from greater idle time.
* However when the policy is an LFU one, we have a frequency
* estimation, and we want to evict keys with lower frequency
* first. So inside the pool we put objects using the inverted
* frequency subtracting the actual frequency to the maximum
* frequency of 255. */
idle = 255-LFUDecrAndReturn(o);
} else if (server.maxmemory_policy == MAXMEMORY_VOLATILE_TTL) {
/* In this case the sooner the expire the better. */
idle = ULLONG_MAX - (long)dictGetVal(de);
} else {
serverPanic("Unknown eviction policy in evictionPoolPopulate()");
}
/* Insert the element inside the pool.
* First, find the first empty bucket or the first populated
* bucket that has an idle time smaller than our idle time. */
k = 0;
while (k < EVPOOL_SIZE &&
pool[k].key &&
pool[k].idle < idle) k++;
if (k == 0 && pool[EVPOOL_SIZE-1].key != NULL) {
/* Can't insert if the element is < the worst element we have
* and there are no empty buckets. */
continue;
} else if (k < EVPOOL_SIZE && pool[k].key == NULL) {
/* Inserting into empty position. No setup needed before insert. */
} else {
/* Inserting in the middle. Now k points to the first element
* greater than the element to insert. */
if (pool[EVPOOL_SIZE-1].key == NULL) {
/* Free space on the right? Insert at k shifting
* all the elements from k to end to the right. */
/* Save SDS before overwriting. */
sds cached = pool[EVPOOL_SIZE-1].cached;
memmove(pool+k+1,pool+k,
sizeof(pool[0])*(EVPOOL_SIZE-k-1));
pool[k].cached = cached;
} else {
/* No free space on right? Insert at k-1 */
k--;
/* Shift all elements on the left of k (included) to the
* left, so we discard the element with smaller idle time. */
sds cached = pool[0].cached; /* Save SDS before overwriting. */
if (pool[0].key != pool[0].cached) sdsfree(pool[0].key);
memmove(pool,pool+1,sizeof(pool[0])*k);
pool[k].cached = cached;
}
}
/* Try to reuse the cached SDS string allocated in the pool entry,
* because allocating and deallocating this object is costly
* (according to the profiler, not my fantasy. Remember:
* premature optimizbla bla bla bla. */
int klen = sdslen(key);
if (klen > EVPOOL_CACHED_SDS_SIZE) {
pool[k].key = sdsdup(key);
} else {
memcpy(pool[k].cached,key,klen+1);
sdssetlen(pool[k].cached,klen);
pool[k].key = pool[k].cached;
}
pool[k].idle = idle;
pool[k].dbid = dbid;
}
}
The reason a true LRU is not used is that a hash table plus doubly linked list costs too much memory; an approximated LRU is good enough. Each key's object header carries a timestamp; eviction samples the configured number of keys at random, evicts the best candidate, and repeats until the memory target is met.
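The sample width per round comes from the maxmemory-samples directive; a typical redis.conf fragment (5 is the default, and larger values approximate true LRU more closely at extra CPU cost):
# Keys sampled per eviction round for the approximated LRU/LFU/TTL policies.
maxmemory-samples 5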
Now LFU:
unsigned long LFUGetTimeInMinutes(void) {
return (server.unixtime/60) & 65535;
}
/* Given an object last access time, compute the minimum number of minutes
* that elapsed since the last access. Handle overflow (ldt greater than
* the current 16 bits minutes time) considering the time as wrapping
* exactly once. */
unsigned long LFUTimeElapsed(unsigned long ldt) {
unsigned long now = LFUGetTimeInMinutes();
if (now >= ldt) return now-ldt;
return 65535-ldt+now;
}
/* Logarithmically increment a counter. The greater is the current counter value
* the less likely is that it gets really implemented. Saturate it at 255. */
uint8_t LFULogIncr(uint8_t counter) {
if (counter == 255) return 255;
double r = (double)rand()/RAND_MAX;
double baseval = counter - LFU_INIT_VAL;
if (baseval < 0) baseval = 0;
double p = 1.0/(baseval*server.lfu_log_factor+1);
if (r < p) counter++;
return counter;
}
/* If the object decrement time is reached decrement the LFU counter but
* do not update LFU fields of the object, we update the access time
* and counter in an explicit way when the object is really accessed.
* And we will times halve the counter according to the times of
* elapsed time than server.lfu_decay_time.
* Return the object frequency counter.
*
* This function is used in order to scan the dataset for the best object
* to fit: as we check for the candidate, we incrementally decrement the
* counter of the scanned objects if needed. */
unsigned long LFUDecrAndReturn(robj *o) {
unsigned long ldt = o->lru >> 8;
unsigned long counter = o->lru & 255;
unsigned long num_periods = server.lfu_decay_time ? LFUTimeElapsed(ldt) / server.lfu_decay_time : 0;
if (num_periods)
counter = (num_periods > counter) ? 0 : counter - num_periods;
return counter;
}
LFU splits the 24-bit internal clock of the key object into two parts: the high 16 bits hold a timestamp and the low 8 bits hold a counter. The 16-bit clock is in minutes (see LFUGetTimeInMinutes above). The 8-bit counter reflects how frequently the key is accessed and saturates at 255; Redis does not increase it linearly, but uses a probabilistic formula, tunable through configuration, to control how quickly the value rises and decays.
lfu-log-factor adjusts how fast the counter grows: the larger the factor, the more slowly the counter increases.
lfu-decay-time is a value in minutes that controls how quickly the counter decays.
In addition, to keep newly created keys from being evicted right away, a new key's counter starts at 5 (LFU_INIT_VAL) instead of 0.
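To get a feel for how lfu-log-factor slows the counter down, the standalone sketch below re-implements the same probabilistic increment outside Redis (LFU_INIT_VAL is taken as 5, its default) and counts how far the counter climbs after one million hits:
#include <stdio.h>
#include <stdlib.h>

#define LFU_INIT_VAL 5

/* Same probabilistic rule as LFULogIncr, with the log factor passed explicitly. */
static unsigned char lfu_log_incr(unsigned char counter, int log_factor) {
    if (counter == 255) return 255;
    double r = (double)rand() / RAND_MAX;
    double baseval = counter - LFU_INIT_VAL;
    if (baseval < 0) baseval = 0;
    double p = 1.0 / (baseval * log_factor + 1);
    if (r < p) counter++;
    return counter;
}

int main(void) {
    int factors[] = {1, 10, 100};
    for (int f = 0; f < 3; f++) {
        unsigned char c = LFU_INIT_VAL;
        for (long hits = 0; hits < 1000000; hits++)
            c = lfu_log_incr(c, factors[f]);
        printf("lfu-log-factor=%d -> counter=%u after 1M hits\n",
               factors[f], (unsigned)c);
    }
    return 0;
}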
4. Memory operations
This low-level memory layer underpins every higher-level data structure; look at how a list is created:
list *listCreate(void)
{
struct list * list;
if ((list = zmalloc(sizeof(*list))) == NULL)
return NULL;
list->head = list->tail = NULL;
list->len = 0;
list->dup = NULL;
list->free = NULL;
list->match = NULL;
return list;
}
In other words, every data-structure operation is built on top of this layer, as the earlier data-structure analyses show. As mentioned before, though, Redis allocations come in fixed size classes: ask for 8 bytes and you get 8, but ask for 12 and you get 16.
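On glibc you can observe this rounding directly with malloc_usable_size; the exact sizes granted depend on the allocator (jemalloc uses its own size classes), so treat the output as illustrative:
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h> /* malloc_usable_size, glibc-specific */

int main(void) {
    size_t requests[] = {8, 12, 100, 1000};
    for (int i = 0; i < 4; i++) {
        void *p = malloc(requests[i]);
        printf("requested %4zu bytes, usable %4zu bytes\n",
               requests[i], malloc_usable_size(p));
        free(p);
    }
    return 0;
}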
Likewise, an update can grow or shrink a value, for example changing a value from "ISOK" to "ISFALSE" or "NIL". That is how memory fragmentation appears, and deletions cause the same problem. Fixing it requires shuffling data around: nearby fragments are migrated bit by bit so that the free space coalesces into large usable blocks.
As you have probably guessed, moving memory costs CPU and some extra memory of its own, so it only runs under certain conditions:
active-defrag-ignore-bytes 100mb: do not start cleanup until at least 100 MB is wasted to fragmentation.
active-defrag-threshold-lower 10: start cleanup when fragmentation exceeds 10%.
active-defrag-threshold-upper 100: at 100% fragmentation, apply maximum effort.
The CPU effort spent on it is also bounded (a combined config sketch follows this list):
active-defrag-cycle-min 5: minimum percentage of CPU time to spend on defragmentation.
active-defrag-cycle-max 75: maximum percentage of CPU time to spend; beyond this it backs off automatically so normal traffic is not affected.
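Put together, a redis.conf fragment enabling active defragmentation with the thresholds above might look like this (activedefrag itself defaults to off, and the feature requires a Redis build that uses jemalloc):
activedefrag yes
active-defrag-ignore-bytes 100mb
active-defrag-threshold-lower 10
active-defrag-threshold-upper 100
active-defrag-cycle-min 5
active-defrag-cycle-max 75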
void computeDefragCycles() {
size_t frag_bytes;
float frag_pct = getAllocatorFragmentation(&frag_bytes);
/* If we're not already running, and below the threshold, exit. */
if (!server.active_defrag_running) {
if(frag_pct < server.active_defrag_threshold_lower || frag_bytes < server.active_defrag_ignore_bytes)
return;
}
/* Note the configuration variables discussed above appearing below. */
/* Calculate the adaptive aggressiveness of the defrag */
/* Interpolate the CPU percentage between cycle-min and cycle-max according to
 * the fragmentation level -- a pattern worth borrowing in your own code. */
int cpu_pct = INTERPOLATE(frag_pct,
server.active_defrag_threshold_lower,
server.active_defrag_threshold_upper,
server.active_defrag_cycle_min,
server.active_defrag_cycle_max);
cpu_pct = LIMIT(cpu_pct,
server.active_defrag_cycle_min,
server.active_defrag_cycle_max);
/* We allow increasing the aggressiveness during a scan, but don't
* reduce it. */
if (!server.active_defrag_running ||
cpu_pct > server.active_defrag_running)
{
server.active_defrag_running = cpu_pct;
serverLog(LL_VERBOSE,
"Starting active defrag, frag=%.0f%%, frag_bytes=%zu, cpu=%d%%",
frag_pct, frag_bytes, cpu_pct);
}
}
void activeDefragCycle(void) {
static int current_db = -1;
static unsigned long cursor = 0;
static redisDb *db = NULL;
static long long start_scan, start_stat;
unsigned int iterations = 0;
unsigned long long prev_defragged = server.stat_active_defrag_hits;
unsigned long long prev_scanned = server.stat_active_defrag_scanned;
long long start, timelimit, endtime;
mstime_t latency;
int quit = 0;
if (!server.active_defrag_enabled) {
if (server.active_defrag_running) {
/* if active defrag was disabled mid-run, start from fresh next time. */
server.active_defrag_running = 0;
if (db)
listEmpty(db->defrag_later);
defrag_later_current_key = NULL;
defrag_later_cursor = 0;
current_db = -1;
cursor = 0;
db = NULL;
}
return;
}
if (hasActiveChildProcess())
return; /* Defragging memory while there's a fork will just do damage. */
/* Once a second, check if the fragmentation justifies starting a scan
* or making it more aggressive. */
run_with_period(1000) {
computeDefragCycles();
}
if (!server.active_defrag_running)
return;
/* See activeExpireCycle for how timelimit is handled. */
start = ustime();
timelimit = 1000000*server.active_defrag_running/server.hz/100;
if (timelimit <= 0) timelimit = 1;
endtime = start + timelimit;
latencyStartMonitor(latency);
do {
/* if we're not continuing a scan from the last call or loop, start a new one */
if (!cursor) {
/* finish any leftovers from previous db before moving to the next one */
if (db && defragLaterStep(db, endtime)) {
quit = 1; /* time is up, we didn't finish all the work */
break; /* this will exit the function and we'll continue on the next cycle */
}
/* Move on to next database, and stop if we reached the last one. */
if (++current_db >= server.dbnum) {
/* defrag other items not part of the db / keys */
defragOtherGlobals();
long long now = ustime();
size_t frag_bytes;
float frag_pct = getAllocatorFragmentation(&frag_bytes);
serverLog(LL_VERBOSE,
"Active defrag done in %dms, reallocated=%d, frag=%.0f%%, frag_bytes=%zu",
(int)((now - start_scan)/1000), (int)(server.stat_active_defrag_hits - start_stat), frag_pct, frag_bytes);
start_scan = now;
current_db = -1;
cursor = 0;
db = NULL;
server.active_defrag_running = 0;
computeDefragCycles(); /* if another scan is needed, start it right away */
if (server.active_defrag_running != 0 && ustime() < endtime)
continue;
break;
}
else if (current_db==0) {
/* Start a scan from the first database. */
start_scan = ustime();
start_stat = server.stat_active_defrag_hits;
}
db = &server.db[current_db];
cursor = 0;
}
do {
/* before scanning the next bucket, see if we have big keys left from the previous bucket to scan */
if (defragLaterStep(db, endtime)) {
quit = 1; /* time is up, we didn't finish all the work */
break; /* this will exit the function and we'll continue on the next cycle */
}
cursor = dictScan(db->dict, cursor, defragScanCallback, defragDictBucketCallback, db);
/* Once in 16 scan iterations, 512 pointer reallocations. or 64 keys
* (if we have a lot of pointers in one hash bucket or rehasing),
* check if we reached the time limit.
* But regardless, don't start a new db in this loop, this is because after
* the last db we call defragOtherGlobals, which must be done in once cycle */
if (!cursor || (++iterations > 16 ||
server.stat_active_defrag_hits - prev_defragged > 512 ||
server.stat_active_defrag_scanned - prev_scanned > 64)) {
if (!cursor || ustime() > endtime) {
quit = 1;
break;
}
iterations = 0;
prev_defragged = server.stat_active_defrag_hits;
prev_scanned = server.stat_active_defrag_scanned;
}
} while(cursor && !quit);
} while(!quit);
latencyEndMonitor(latency);
latencyAddSampleIfNeeded("active-defrag-cycle",latency);
}
This cleanup is driven from serverCron, which calls databasesCron, which in turn invokes activeDefragCycle.
5. Out-of-memory handling
Memory overflow in Redis is closely tied to the reclamation described above. Once memory reaches the configured ceiling, Redis can respond in one of several ways:
noeviction: the default; nothing is deleted, but writes are rejected with (error) OOM command not allowed when used memory > 'maxmemory', leaving Redis read-only.
volatile-lru: evict keys that have an expire set, chosen by the LRU algorithm, until enough space is freed; if nothing is eligible, fall back to noeviction behaviour.
allkeys-lru: evict any key, chosen by the LRU algorithm, until enough space is freed.
allkeys-random: evict random keys until enough space is freed.
volatile-random: evict random keys among those with an expire set until enough space is freed.
volatile-ttl: among keys with an expire set, evict the ones closest to expiring (smallest TTL); if none exist, fall back to noeviction.
volatile-lfu: evict the least frequently used keys among those with an expire set.
allkeys-lfu: evict the least frequently used keys among all keys.
The policy is configurable through the config. Be aware, though, that if maxmemory is set and memory keeps overflowing while a policy other than noeviction is in force, evictions will run constantly and can seriously hurt Redis performance.
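A typical pairing of a memory ceiling with an eviction policy in redis.conf (the values are examples only; both settings can also be changed at runtime with CONFIG SET):
maxmemory 4gb
maxmemory-policy allkeys-lru
# For a cache where access frequency matters more than recency:
# maxmemory-policy allkeys-lfu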
III. Summary
In practice, memory management usually means building a memory pool (simple or elaborate, the name is generic) on top of the native library so that memory can be managed and controlled dynamically. Redis evidently decided that borrowing the experts' work is more convenient and safer, so it simply selects among different allocation libraries according to the situation, which is a clever move for a database whose main resource is memory. "Stones from other hills may serve to polish jade": do your own job well and let others' strengths shine through. That is arguably one reason for Redis's success, and a lesson worth borrowing for developers here.