mc里面用了大量的锁,简直是随处可见,在确保不死锁的同时保证性能!里面精髓之处值得琢磨学习
mc用了哪些锁:conn_lock、slabs_lock、slabs_rebalance_lock、hold_lock、item_locks
本篇针对item_locks的粒度变化做了研究:
static pthread_mutex_t *item_locks;
item_lock_count = hashsize(power); 默认4个threads,threads与item_locks成正比!
item_locks = calloc(item_lock_count, sizeof(pthread_mutex_t));
item_locks[(hv & hashmask(hashpower)) % item_lock_count] 桶对应的锁
锁有两种粒度:
enum item_lock_types {
ITEM_LOCK_GRANULAR = 0, 细粒度的锁
ITEM_LOCK_GLOBAL //在扩展的时候会被切换到全局锁
};
memcached在扩容操作时,加的都是全局锁,就是所有item(所有hash桶中的)都是一把锁,在扩容结束时,item锁被重新切换回hash桶上的锁,这里锁是分段加锁的(几个桶一个锁,这个具体数值取决于初始的worker的数量,worker数量越多,锁越细,越少hash桶共用一个锁)。
::switch_item_lock_type
/* Broadcast a lock-granularity change to every worker thread over its notify
 * pipe ('l' = fine-grained bucket locks, 'g' = single global lock), then block
 * until all workers have acknowledged the switch. */
void switch_item_lock_type(enum item_lock_types type) {
    char cmd;
    int idx;

    if (type == ITEM_LOCK_GRANULAR) {
        cmd = 'l';
    } else if (type == ITEM_LOCK_GLOBAL) {
        cmd = 'g';
    } else {
        fprintf(stderr, "Unknown lock type: %d\n", type);
        assert(1 == 0);
    }

    pthread_mutex_lock(&init_lock);
    init_count = 0;
    /* Write the one-byte command into each worker's notify pipe; the workers'
     * libevent read callback (thread_libevent_process) flips their
     * thread-local item_lock_type accordingly. */
    for (idx = 0; idx < settings.num_threads; idx++) {
        if (write(threads[idx].notify_send_fd, &cmd, 1) != 1) {
            perror("Failed writing to notify pipe");
            /* TODO: This is a fatal problem. Can it ever happen temporarily? */
        }
    }
    /* Sleep on init_cond until every worker has called
     * register_thread_initialized(). */
    wait_for_thread_registration(settings.num_threads);
    pthread_mutex_unlock(&init_lock);
}
::setup_thread (LIBEVENT_THREAD *me) 就是在这里注册了管道读回调事件thread_libevent_process
/* Listen for notifications from other threads */
event_set(&me->notify_event, me->notify_receive_fd, EV_READ | EV_PERSIST, thread_libevent_process,
me);
::thread_libevent_process
case 'g':
me->item_lock_type = ITEM_LOCK_GLOBAL;
register_thread_initialized(); //pthread_cond_signal(&init_cond);锁类型更新后发出条件信号告诉[标签1]类型已经改变 [标签2]
break;
再来看下item和item_lock_types 的关系,这些明白了,在wait_for_thread_registration完成后lock_type变了,如果遇到hash表扩容的话,变成全局锁,否则使用小粒度的局部锁来管理被划定的几个桶!
/* Acquire the lock protecting the hash chain for hash value hv.  Which mutex
 * that is depends on this thread's thread-local lock mode: in GRANULAR mode
 * it is one of the striped bucket locks, otherwise the single global lock
 * used while the hash table is being expanded. */
void item_lock(uint32_t hv) {
    uint8_t *mode = pthread_getspecific(item_lock_type_key);
    if (likely(*mode == ITEM_LOCK_GRANULAR)) {
        /* Stripe selection: the bucket index is folded onto the (smaller)
         * lock array, so several buckets share one mutex. */
        mutex_lock(&item_locks[(hv & hashmask(hashpower)) % item_lock_count]);
        return;
    }
    mutex_lock(&item_global_lock);
}
上面完成了switch_item_lock_type的意图:锁粒度的升降级
/* Shared counters bumped by the workers: count[0] for index 0, count[1] for
 * index 1 (both advance together while in GLOBAL mode). */
int count[2] = {0, 0};

/* Lock granularity modes, mirroring memcached's item_lock_types. */
enum LOCK_TYPE {
    ITEM_LOCK_GRANULAR,  /* one mutex per index (striped) */
    ITEM_LOCK_GLOBAL     /* single mutex for everything */
};

/* Current mode, flipped at runtime by main().
 * NOTE(review): read/written from several threads without atomics — tolerable
 * for a demo, but real code should use _Atomic or per-thread state as
 * memcached does. */
int lock_type = ITEM_LOCK_GRANULAR;

/* Per-worker argument: which striped lock / counter slot this thread uses. */
typedef struct {
    int index;
} TEST_THREAD;

static pthread_mutex_t *item_locks;                                  /* striped locks, allocated in main() */
static pthread_mutex_t item_lock_global = PTHREAD_MUTEX_INITIALIZER; /* coarse lock */

/* Remembers, per thread, which mutex item_lock() actually acquired.  This
 * fixes a real bug in the original demo: if main() flips lock_type while a
 * worker holds a lock, item_unlock() consulted the NEW mode and unlocked a
 * mutex the thread did not hold — undefined behavior per POSIX, and a
 * potential permanent deadlock. */
static _Thread_local pthread_mutex_t *held_lock;

/* Acquire either the striped lock for `index` or the global lock, depending
 * on the current lock_type, and record which one was taken. */
void item_lock(int index) {
    pthread_mutex_t *m = (lock_type == ITEM_LOCK_GRANULAR)
                             ? &item_locks[index]
                             : &item_lock_global;
    pthread_mutex_lock(m);
    held_lock = m;
}

/* Release exactly the mutex the matching item_lock() call acquired,
 * regardless of any lock_type change in between. */
void item_unlock(int index) {
    (void)index; /* kept for interface compatibility with the original */
    if (held_lock != NULL) {
        pthread_mutex_unlock(held_lock);
        held_lock = NULL;
    }
}
/* Worker loop: forever take the lock for this thread's slot, bump the
 * counter(s), print progress, release, and sleep one second.  In GRANULAR
 * mode each thread bumps only its own counter; in GLOBAL mode both counters
 * advance together under the single lock. */
void *thfn(void *arg) {
    TEST_THREAD *self = (TEST_THREAD *)arg;
    int *slot = &self->index;

    printf("thread[%d] start thfn \n", *slot);
    for (;;) {
        item_lock(*slot);
        /* NOTE(review): lock_type may change between item_lock() and this
         * read — racy, but tolerated in this demo. */
        if (lock_type != ITEM_LOCK_GRANULAR) {
            count[0]++;
            count[1]++;
        } else if (*slot == 0) {
            count[0]++;
        } else {
            count[1]++;
        }
        printf("lock_type:%d thread:%d count:%d,%d \n", lock_type, *slot, count[0], count[1]);
        item_unlock(*slot);
        sleep(1);
    }
}
/* Demo driver: start two workers hammering the item locks, then flip the
 * lock mode granular -> global -> granular in 4-second phases (resetting the
 * counters before each phase), and finally cancel/join the workers and free
 * all resources.
 * Fixes over the original: checked calloc results, removed the unused
 * pos[] array and a stray ';;', destroyed the mutexes, and freed tdata
 * (previously leaked). */
int main()
{
    int nthreads = 2;
    int i;

    item_locks = calloc(nthreads, sizeof *item_locks);
    pthread_t *pths = calloc(nthreads, sizeof *pths);
    TEST_THREAD *tdata = calloc(nthreads, sizeof *tdata);
    if (item_locks == NULL || pths == NULL || tdata == NULL) {
        perror("calloc");
        return 1;
    }
    for (i = 0; i < nthreads; ++i)
        pthread_mutex_init(&item_locks[i], NULL);

    for (i = 0; i < nthreads; ++i) {
        tdata[i].index = i;
        pthread_create(&pths[i], NULL, thfn, &tdata[i]);
    }

    sleep(2);
    /* Phase 1: stay granular — each worker bumps only its own counter. */
    printf("------------[change lock type] ITEM_LOCK_GRANULAR ------------\n");
    count[0] = count[1] = 0;
    lock_type = ITEM_LOCK_GRANULAR;
    sleep(4);

    /* Phase 2: single global lock (what memcached does during hash-table
     * expansion) — the counters now advance in lock-step. */
    printf("------------[change lock type] ITEM_LOCK_GLOBAL ------------\n");
    count[0] = count[1] = 0;
    lock_type = ITEM_LOCK_GLOBAL;
    sleep(4);

    /* Phase 3: back to granular. */
    printf("------------[change lock type] ITEM_LOCK_GRANULAR ------------\n");
    count[0] = count[1] = 0;
    lock_type = ITEM_LOCK_GRANULAR;
    sleep(4);

    /* NOTE(review): cancellation may land while a worker holds a mutex
     * (printf/sleep are cancellation points); acceptable here since the
     * process exits immediately afterwards. */
    pthread_cancel(pths[0]);
    pthread_cancel(pths[1]);
    pthread_join(pths[0], NULL);
    pthread_join(pths[1], NULL);
    printf("\ntest finished !\n");

    for (i = 0; i < nthreads; ++i)
        pthread_mutex_destroy(&item_locks[i]);
    free(item_locks);
    free(pths);
    free(tdata); /* was leaked in the original */
    return 0;
}
/* NOTE(review): everything from here through the second "test finished !"
 * run log is an accidental copy-paste duplicate of the demo program (and
 * its output) above.  As C it could not compile alongside the first copy
 * (redefinitions of LOCK_TYPE, lock_type, item_lock, main, ...), and this
 * copy also references count[] without defining it.  Consider deleting the
 * duplicate. */
/* Lock granularity modes, mirroring memcached's item_lock_types. */
enum LOCK_TYPE{
ITEM_LOCK_GRANULAR,  /* one mutex per index (striped) */
ITEM_LOCK_GLOBAL     /* single mutex for everything */
};
/* Current mode; flipped by main() while worker threads are running. */
int lock_type = ITEM_LOCK_GRANULAR;
/* Per-worker argument: which striped lock / counter slot the thread uses. */
typedef struct{
int index;
}TEST_THREAD;
static pthread_mutex_t *item_locks;  /* striped locks, allocated in main() */
static pthread_mutex_t item_lock_global = PTHREAD_MUTEX_INITIALIZER;
/* Take either the striped lock for `index` or the global lock.
 * NOTE(review): if lock_type changes between item_lock() and item_unlock(),
 * the unlock consults the NEW mode and may release a mutex this thread does
 * not hold — undefined behavior per POSIX. */
void item_lock(int index){
if (lock_type == ITEM_LOCK_GRANULAR)
pthread_mutex_lock(&item_locks[index]);
else
pthread_mutex_lock(&item_lock_global);
}
/* Release the lock selected by the CURRENT mode (see note above). */
void item_unlock(int index){
if (lock_type == ITEM_LOCK_GRANULAR)
pthread_mutex_unlock(&item_locks[index]);
else
pthread_mutex_unlock(&item_lock_global);
}
/* Worker loop (duplicate copy): forever take the lock for this thread's
 * slot, bump the counter(s), print progress, release, and sleep one second.
 * In GRANULAR mode each thread bumps only its own counter; in GLOBAL mode
 * both counters advance together under the single lock.
 * NOTE(review): lock_type may change between item_lock() and the check
 * below — racy, but tolerated in this demo. */
void *thfn(void *arg){
TEST_THREAD *pdata = (TEST_THREAD *)arg;
int *data = &pdata->index;
printf("thread[%d] start thfn \n", *data);
while(1)
{
item_lock(*data);
if (lock_type == ITEM_LOCK_GRANULAR)
{
if (*data == 0)
count[0]++;
else
count[1]++;
}
else
{
count[0]++;
count[1]++;
}
printf("lock_type:%d thread:%d count:%d,%d \n", lock_type,*data,count[0],count[1]);
item_unlock(*data);
sleep(1);
}
}
/* Demo driver (duplicate copy): start two workers, flip the lock mode
 * granular -> global -> granular in 4-second phases, resetting the counters
 * before each phase, then cancel/join the workers and clean up. */
int main()
{
int nthreads = 2;
item_locks = calloc(nthreads, sizeof(pthread_mutex_t));
int i = 0;
for(; i<nthreads; ++i)
pthread_mutex_init(&item_locks[i], NULL);
pthread_t *pths = calloc(nthreads, sizeof(pthread_t));; /* NOTE(review): stray second ';' */
TEST_THREAD *tdata = calloc(nthreads, sizeof(TEST_THREAD)); /* NOTE(review): never freed */
int pos[2] = {0, 1}; /* NOTE(review): unused */
i = 0;
for(; i<nthreads; ++i){
tdata[i].index = i;
pthread_create(&pths[i], NULL, thfn, &tdata[i]);
}
sleep(2);
/* Phase 1: stay granular — each worker bumps only its own counter. */
printf("------------[change lock type] ITEM_LOCK_GRANULAR ------------\n");
count[0]=count[1]=0;
lock_type = ITEM_LOCK_GRANULAR;
sleep(4);
/* Phase 2: single global lock (what memcached uses during hash-table
 * expansion) — the counters advance in lock-step. */
printf("------------[change lock type] ITEM_LOCK_GLOBAL ------------\n");
count[0]=count[1]=0;
lock_type = ITEM_LOCK_GLOBAL;
sleep(4);
/* Phase 3: back to granular. */
printf("------------[change lock type] ITEM_LOCK_GRANULAR ------------\n");
count[0]=count[1]=0;
lock_type = ITEM_LOCK_GRANULAR;
sleep(4);
pthread_cancel(pths[0]);
pthread_cancel(pths[1]);
pthread_join(pths[0], NULL);
pthread_join(pths[1], NULL);
printf("\ntest finished !\n");
free(item_locks);
free(pths);
return 0;
}
执行结果:
thread[0] start thfn
lock_type:0 thread:0 count:1,0
thread[1] start thfn
lock_type:0 thread:1 count:1,1
lock_type:0 thread:0 count:2,1
lock_type:0 thread:1 count:2,2
------------[change lock type] ITEM_LOCK_GRANULAR ------------ 各干各的,锁粒度小,减少等待
lock_type:0 thread:0 count:1,0
lock_type:0 thread:1 count:1,1
lock_type:0 thread:0 count:2,1
lock_type:0 thread:1 count:2,2
lock_type:0 thread:0 count:3,2
lock_type:0 thread:1 count:3,3
lock_type:0 thread:1 count:3,4
lock_type:0 thread:0 count:4,4
------------[change lock type] ITEM_LOCK_GLOBAL ------------ mc如果遇到扩张,就升级为全局锁,直到扩张完毕
lock_type:1 thread:0 count:1,1
lock_type:1 thread:1 count:2,2
lock_type:1 thread:1 count:3,3
lock_type:1 thread:0 count:4,4
lock_type:1 thread:1 count:5,5
lock_type:1 thread:0 count:6,6
lock_type:1 thread:1 count:7,7
lock_type:1 thread:0 count:8,8
------------[change lock type] ITEM_LOCK_GRANULAR ------------
lock_type:0 thread:1 count:0,1
lock_type:0 thread:0 count:1,1
lock_type:0 thread:1 count:1,2
lock_type:0 thread:0 count:2,2
lock_type:0 thread:1 count:2,3
lock_type:0 thread:0 count:3,3
lock_type:0 thread:1 count:3,4
lock_type:0 thread:0 count:4,4
test finished !
lock_type:0 thread:0 count:1,0
thread[1] start thfn
lock_type:0 thread:1 count:1,1
lock_type:0 thread:0 count:2,1
lock_type:0 thread:1 count:2,2
------------[change lock type] ITEM_LOCK_GRANULAR ------------ 各干各的,锁粒度小,减少等待
lock_type:0 thread:0 count:1,0
lock_type:0 thread:1 count:1,1
lock_type:0 thread:0 count:2,1
lock_type:0 thread:1 count:2,2
lock_type:0 thread:0 count:3,2
lock_type:0 thread:1 count:3,3
lock_type:0 thread:1 count:3,4
lock_type:0 thread:0 count:4,4
------------[change lock type] ITEM_LOCK_GLOBAL ------------ mc如果遇到扩张,就升级为全局锁,直到扩张完毕
lock_type:1 thread:0 count:1,1
lock_type:1 thread:1 count:2,2
lock_type:1 thread:1 count:3,3
lock_type:1 thread:0 count:4,4
lock_type:1 thread:1 count:5,5
lock_type:1 thread:0 count:6,6
lock_type:1 thread:1 count:7,7
lock_type:1 thread:0 count:8,8
------------[change lock type] ITEM_LOCK_GRANULAR ------------
lock_type:0 thread:1 count:0,1
lock_type:0 thread:0 count:1,1
lock_type:0 thread:1 count:1,2
lock_type:0 thread:0 count:2,2
lock_type:0 thread:1 count:2,3
lock_type:0 thread:0 count:3,3
lock_type:0 thread:1 count:3,4
lock_type:0 thread:0 count:4,4
test finished !