最近,我想通过redis的源码来学习redis。虽然平时工作中用得不多,不过对redis还是比较感兴趣的,毕竟它的性能是不错的。redis是一个开源的项目,我们可以通过源代码去了解redis。我后面会通过自己的学习,写一些关于redis源码的帖子。帖子的主要内容是分析代码设计,而不会对源码进行详细解说。如果有不对的地方,请指正。源码是redis 3.0.3版本。
dict 字典
一、数据结构
//字典条目
typedef struct dictEntry {
void *key;
union {
void *val;
uint64_t u64;
int64_t s64;
double d;
} v;
struct dictEntry *next;
} dictEntry;
typedef struct dictType {
unsigned int (*hashFunction)(const void *key); //计算key的哈希值
void *(*keyDup)(void *privdata, const void *key); //复制key的函数
void *(*valDup)(void *privdata, const void *obj); //复制value的函数
int (*keyCompare)(void *privdata, const void *key1, const void *key2);//比较key相等的函数
void (*keyDestructor)(void *privdata, void *key);//销毁key的函数
void (*valDestructor)(void *privdata, void *obj);//销毁value的函数
} dictType;
//用于存储字典条件的哈希表
/* This is our hash table structure. Every dictionary has two of this as we
* implement incremental rehashing, for the old to the new table. */
typedef struct dictht {
dictEntry **table;
unsigned long size;
unsigned long sizemask;
unsigned long used;
} dictht;
//字典
typedef struct dict {
dictType *type;
void *privdata;
dictht ht[2]; //两个哈希表,在rehash时会使用两个哈希表,否则只会使用一个表
long rehashidx; /* rehashing not in progress if rehashidx == -1 */
int iterators; /* number of iterators currently running */
} dict;
/* If safe is set to 1 this is a safe iterator, that means, you can call
* dictAdd, dictFind, and other functions against the dictionary even while
* iterating. Otherwise it is a non safe iterator, and only dictNext()
* should be called while iterating. */
typedef struct dictIterator {
dict *d;
long index;
int table, safe;
dictEntry *entry, *nextEntry;
/* unsafe iterator fingerprint for misuse detection. */
long long fingerprint;
} dictIterator;
二、宏实现的简单函数
举三个例子:
#define dictFreeVal(d, entry) \
if ((d)->type->valDestructor) \
(d)->type->valDestructor((d)->privdata, (entry)->v.val)
#define dictSetVal(d, entry, _val_) do { \
if ((d)->type->valDup) \
entry->v.val = (d)->type->valDup((d)->privdata, _val_); \
else \
entry->v.val = (_val_); \
} while(0)
#define dictSetSignedIntegerVal(entry, _val_) \
do { entry->v.s64 = _val_; } while(0)
dictFreeVal,在释放字典条目的value时使用。实现中没有使用 do{}while(0),我没有想明白为何不使用,但我觉得应该加上,不然使用不当时会出问题(例如宏被用在没有大括号的 if/else 中时,展开出的 if 会吞掉后面的 else),具体可见我的另一个帖子:http://chhquan.blog.51cto.com/1346841/1358254
dictSetSignedIntegerVal 中加上do{}while(0),应该是为了阻止以表达式的形式使用宏,强制它只能作为一条语句使用。
三、部分代码解析
由于dict的行为特点比较多,本帖子只打算详解部分代码。
1. dict_can_resize
/* Using dictEnableResize() / dictDisableResize() we make possible to
* enable/disable resizing of the hash table as needed. This is very important
* for Redis, as we use copy-on-write and don't want to move too much memory
* around when there is a child performing saving operations.
*
* Note that even when dict_can_resize is set to 0, not all resizes are
* prevented: a hash table is still allowed to grow if the ratio between
* the number of elements and the buckets > dict_force_resize_ratio. */
static int dict_can_resize = 1;
static unsigned int dict_force_resize_ratio = 5;
dict_can_resize,可控制dict是否可以进行rehash,1 时允许rehash,0 - 通常情况不允许rehash,但如果满足 条目数/桶 > dict_force_resize_ratio时,仍可进行rehash。通过 dictEnableResize() 或 dictDisableResize() 可以设置 dict_can_resize。这样设置的目的在于:当redis需要对dict进行保存操作时(写文件),是要把dict的当前快照作保存,要保持dict不变,但这样会使字典不能接收写入操作或是进行rehash,为了确保dict能正常处理请求,redis采用copy-on-write的策略,即当dict有修改操作时,需要把dict进行复制,以同时支持保存操作和修改操作。由于rehash也是对dict进行修改,也可能会使正在保存的dict进行复制,所以将 dict_can_resize 置 0 可阻止rehash,从而一定程度上避免复制。但如果 条目数/桶 > dict_force_resize_ratio 时,redis认为这时dict的条目数相对于桶来说已经太多了,有些桶上所挂的元素个数可能比较多,对dict的效率产生严重的影响。所以此时宁可复制dict也要允许rehash以恢复dict的性能。当然具体 dict_force_resize_ratio 是多少,应该由实验得出吧。又或者如何度量复制与保持dict高效的转折点也是要进行实验的,不一定是 条目数/桶,具体也就由实验得出吧。由于没有实验,我也不能多说了。
2. hash计算
计算hash值的函数,具体算法我也并不熟悉,跳过。
3. 重置哈希表
//重置哈希表
/* Reset a hash table already initialized with ht_init().
* NOTE: This function should only be called by ht_destroy(). */
static void _dictReset(dictht *ht)
{
//下面直接覆盖table的值,调用方需确保table要么不指向一块动态内存,
//要么动态内存已被释放,要么还有别的指针保留table所指向的动态内存空间
ht->table = NULL;
ht->size = 0;
ht->sizemask = 0;
ht->used = 0;
}
4. 创建dict
//创建dict
/* Create a new hash table */
dict *dictCreate(dictType *type,
void *privDataPtr)
{
dict *d = zmalloc(sizeof(*d));
_dictInit(d,type,privDataPtr);
return d;
}
5. 初始化dict
//初始化dict
/* Initialize the hash table */
int _dictInit(dict *d, dictType *type,
void *privDataPtr)
{
_dictReset(&d->ht[0]);
_dictReset(&d->ht[1]);
d->type = type;
d->privdata = privDataPtr;
d->rehashidx = -1; // -1为不在rehash状态,>= 0 为rehash中
d->iterators = 0;
return DICT_OK;
}
6. 调整大小
//resize,根据dict中已存储的条目数进行resize,可扩展哈希表空间也可缩小。
/* Resize the table to the minimal size that contains all the elements,
* but with the invariant of a USED/BUCKETS ratio near to <= 1 */
int dictResize(dict *d)
{
int minimal;
if (!dict_can_resize || dictIsRehashing(d)) return DICT_ERR;
minimal = d->ht[0].used; //按已存储的条目数进行resize
if (minimal < DICT_HT_INITIAL_SIZE) //最小resize大小为 DICT_HT_INITIAL_SIZE
minimal = DICT_HT_INITIAL_SIZE;
return dictExpand(d, minimal);
}
/* Expand or create the hash table */
int dictExpand(dict *d, unsigned long size)
{
dictht n; /* the new hash table */
unsigned long realsize = _dictNextPower(size); //取大于size的最小的2的幂作为实际size
/* the size is invalid if it is smaller than the number of
* elements already inside the hash table */
if (dictIsRehashing(d) || d->ht[0].used > size)
return DICT_ERR;
/* Rehashing to the same table size is not useful. */
if (realsize == d->ht[0].size) return DICT_ERR;
/* Allocate the new hash table and initialize all pointers to NULL */
n.size = realsize;
n.sizemask = realsize-1; //size-1,bit为1的都在低位,用于对哈希值取size的模作为哈希表的桶号
n.table = zcalloc(realsize*sizeof(dictEntry*));
n.used = 0;
/* Is this the first initialization? If so it's not really a rehashing
* we just set the first hash table so that it can accept keys. */
if (d->ht[0].table == NULL) {
d->ht[0] = n;
return DICT_OK;
}
/* Prepare a second hash table for incremental rehashing */
d->ht[1] = n;
d->rehashidx = 0;//>=0,正在rehash中
return DICT_OK;
}
7. rehash
//rehash函数,逐步对dict进行rehash
//redis并没有一次性完成对dict的rehash,而是把整个rehash过程分成许多小的rehash操作去完成,
//每一次rehash都会处理至多一定数量的桶,由参数n指定。由于部分桶是空的,为防止rehash一直都访问
//到空的桶使rehash过程耗时过多,函数里面设定最多访问 n*10 个桶。
//redis为保持性能的稳定,会把一些有机会耗时较比多的操作,分成放多小的操作,rehash便是其中一个例子。
/* Performs N steps of incremental rehashing. Returns 1 if there are still
* keys to move from the old to the new hash table, otherwise 0 is returned.
*
* Note that a rehashing step consists in moving a bucket (that may have more
* than one key as we use chaining) from the old to the new hash table, however
* since part of the hash table may be composed of empty spaces, it is not
* guaranteed that this function will rehash even a single bucket, since it
* will visit at max N*10 empty buckets in total, otherwise the amount of
* work it does would be unbound and the function may block for a long time. */
int dictRehash(dict *d, int n) {
int empty_visits = n*10; /* Max number of empty buckets to visit. */
if (!dictIsRehashing(d)) return 0;
//访问ht[0]中的桶,如果桶非空,把桶中的元素放进ht[1]里。
while(n-- && d->ht[0].used != 0) {
dictEntry *de, *nextde;
/* Note that rehashidx can't overflow as we are sure there are more
* elements because ht[0].used != 0 */
//从rehashidx开始,rehashidx便是用来记录rehash过程状态的变量
assert(d->ht[0].size > (unsigned long)d->rehashidx);
//找出一个非空桶,总的访问次数受到 empty_visits 的限制
while(d->ht[0].table[d->rehashidx] == NULL) {
d->rehashidx++;
if (--empty_visits == 0) return 1; //返回1表示rehash还没完成,需要进行进行
}
de = d->ht[0].table[d->rehashidx];
//移动桶中所有条目到ht[1]中
/* Move all the keys in this bucket from the old to the new hash HT */
while(de) {
unsigned int h;
nextde = de->next;
/* Get the index in the new hash table */
h = dictHashKey(d, de->key) & d->ht[1].sizemask; //对桶号
de->next = d->ht[1].table[h];
d->ht[1].table[h] = de;
d->ht[0].used--;
d->ht[1].used++;
de = nextde;
}
d->ht[0].table[d->rehashidx] = NULL;
d->rehashidx++; //已经处理了rehashidx 号桶,下一个桶
}
//如果ht[0]已经没有条目了,可以把ht[1]切换到ht[0],并重置ht[1]。
/* Check if we already rehashed the whole table... */
if (d->ht[0].used == 0) {
zfree(d->ht[0].table); //释放ht[0]的桶空间
d->ht[0] = d->ht[1];
_dictReset(&d->ht[1]);
d->rehashidx = -1;
return 0;
}
/* More to rehash... */
return 1;
}
由rehash过程可以看出,在rehash过程中,ht[0]和ht[1]同时具有条目,即字典中的所有条目分布在ht[0]和ht[1]中,
这时麻烦也就出来了。主要有以下问题:(现在暂不解答是如何解决的)
1.如何查找key。
2.如何插入新的key。
3.如何删除一个key。
4.如何遍历dict所有条目,如何确保遍历顺序。
5.如何确保rehash过程不断插入、删除条目,而rehash没有出错。
6.如何确保迭代器有效,且正确。
转载于:https://blog.51cto.com/chhquan/1788910