Algorithms, 4th Edition, 5.2: Trie Trees

This post introduces the basics of nested classes in C++ and uses a concrete example to show how a nested class can be used to implement the trie data structure. The trie supports insertion, lookup, and deletion of string keys, as well as prefix, wildcard-match, and longest-prefix queries.
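Before the listing, here is a minimal usage sketch of the interface defined below (the keys are borrowed from the textbook's running example; the values are arbitrary nonzero ints, since this implementation reserves 0 to mean "no value"):

	trie t;
	t.put("she", 5);
	t.put("shells", 3);
	t.put("by", 4);
	t.get("shells");              // 3
	t.keywithprefix("sh");        // queue holding "she", "shells"
	t.keythatmatch("b.");         // queue holding "by" ('.' matches any one character)
	t.longestprefix("shellsort"); // "shells"
	t.del("she");                 // clears the value and prunes childless nodes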
#include <iostream>
#include <string>
#include <queue>
using namespace std;
#define alphabet_size 256
//C++ nested classes
//1. The name of a nested class is visible only inside the enclosing class.
//2. A class's private members are accessible only to its own members and friends, so the enclosing class cannot access the nested class's private members. The nested class can access the enclosing class's members (through an object, pointer, or reference).
//3. Good nested-class design: make the nested class itself private; the nested class's members and methods may be public.
//4. A nested class can directly use the enclosing class's static members, type names (typedefs), and enumerators.
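// A minimal sketch of rules 1-4 (hypothetical names, unrelated to the trie below):
//	class outer {
//		static int count;              // rule 4: 'inner' may use this directly
//		class inner {                  // rule 1: the name 'inner' is visible only inside 'outer'
//		public:                        // rule 3: private nested class, public members
//			void touch(outer& o) { o.count++; }   // rule 2: access outer's members via an object
//		};
//	};
//	// outer::inner i;   // error: 'inner' is private to 'outer'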
class trie {
	static const int R = alphabet_size;
	class node {
	public:
		int val;   // 0 means "no key ends at this node"; stored values must be nonzero
		node* next[R];
		node() {
			for (int i = 0; i < R; i++) next[i] = NULL;
			val = 0;
		}
		}
		node(int v) {
			val = v;
			for (int i = 0; i < R; i++) next[i] = NULL;
		}
	};
	node* root;
public:
	trie() {
		root = NULL;
	}
	void put(string s, int v) {
		root = put(root, s, v, 0);
	}
	node* put(node* x, string s, int v, int d) {
		if (x == NULL) x = new node();
		if (d == s.length()) {
			x->val = v;
			return x;
		}
		unsigned char temp = s.at(d);   // unsigned char: plain char may be negative and must not index next[]
		x->next[temp] = put(x->next[temp], s, v, d + 1);
		return x;
	}
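	// Illustrative trace: put("she", 5) allocates nodes as needed along
	// root -> next['s'] -> next['h'] -> next['e'] and stores val = 5 in the node
	// at depth 3; the intermediate nodes keep val = 0, so they are not keys.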
	int get(string s) {
		node* temp = get(root, s, 0);
		if (temp == NULL || temp->val == 0) {   // miss: node absent, or present but holds no value
			cerr << "Invalid key" << endl;
			return 0;   // 0 doubles as the "not found" result
		}
		return temp->val;
	}
	node* get(node* x, string s, int d) {
		if (x == NULL) return NULL;
		if (d == s.length()) return x;
		unsigned char temp = s.at(d);   // unsigned, so it can safely index next[]
		return get(x->next[temp], s, d + 1);
	}
	queue<string> keywithprefix(string pre) {   //find all keys that start with pre
		queue<string> res;     // collect the matching keys in a queue
		collect(get(root, pre, 0), pre, res);
		return res;
	}
	void collect(node* x, string p, queue<string> &q) {
		if (x == NULL) return;
		if (x->val != 0) {   // a key ends at this node
			q.push(p);
		}
		// plain char spans -128..127 and may be negative, so it must not index
		// next[]; iterate with an int and convert back with (char)
		for (int i = 0; i < R; i++) {
			collect(x->next[i], p + (char)i, q);   // visit every branch, but enqueue only real keys
		}
	}
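	// Example: with keys {"she","shells"}, keywithprefix("sh") first uses get() to
	// reach the "sh" node, then this DFS enqueues "she" and "shells" in byte order.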
	queue<string> keythatmatch(string pat) {   //find all keys that exactly match pat ('.' matches any one character)
		queue<string> res;
		collect(root, "", pat, res);
		return res;
	}
	void collect(node* x, string p, string pat, queue<string> &q) {   //p records the path walked so far
		if (x == NULL) return;
		int d = p.length();   //the path length doubles as the recursion depth / end-of-pattern signal
		if (d == pat.length() && x->val != 0) q.push(p);   //full-length match on a real key (x->val != 0)
		if (d == pat.length()) return;   //pattern exhausted, whether it matched or not
		for (int i = 0; i < R; i++) {
			// single quotes give a char, double quotes a const char* ending in '\0';
			// pat.at(d) is a single char, so compare it with '.' rather than "."
			if (pat.at(d) == '.' || i == (unsigned char)pat.at(d)) {
				collect(x->next[i], p + (char)i, pat, q);
			}
		}
	}
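	// Example: with keys {"she","sea","shells"}, keythatmatch("s..") tries every
	// branch under 's' at depths 1 and 2 and reports "she" and "sea"; "shells" is
	// skipped because the walk stops once the pattern's three characters are used.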
	string longestprefix(string s) {
		int len = search(root, s, 0, 0);
		return s.substr(0, len);
	}
	// d is the current search index; len records the length of the longest valid key matched so far (a length, not an index)
	int search(node* x, string s, int d, int len) {
		if (x == NULL) return len;
		if (x->val != 0) len = d;   //first call: d=0, x=root, zero letters consumed, so length 0
								  //second call: d=1, x=root->next[c], one letter consumed; if that node is a key, length 1
								  //deepest call: d=n, all of s consumed, so length n
		if (d == s.length()) return len;
		return search(x->next[(unsigned char)s.at(d)], s, d + 1, len);
		//in the deepest case d=n-1: s.at(d) is the last (valid) letter, d+1 == s.length(), and if that node is a key, len = s.length()
	}
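	// Example: with keys {"she","shells"}, longestprefix("shellsort") returns
	// "shells", and longestprefix("shell") returns "she" -- the longest stored
	// key that is a prefix of the query.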
	void del(string s) {
		root = del(root, s, 0);
	}
	//only clear val here; a node is physically removed only when all of its next[] links are NULL
	node* del(node* x, string s, int d) {
		if (x == NULL) {
			cerr << "invalid key" << endl;
			return NULL;   //key not present: keep the parent's link NULL
		}
		if (d == s.length()) x->val = 0;
		else {
			unsigned char c = s.at(d);
			x->next[c] = del(x->next[c], s, d + 1);
			//assigning the return value back into x->next[] lets a child remove itself:
			//NULL unlinks it, anything else simply refreshes the link -- a common idiom
			//in recursive delete routines
		}
		if (x->val != 0) return x;
		//keep nodes that still carry a value: deleting "shell" (value 1) must not
		//remove the "she" node when "she" (value 5) is itself a key
		for (int i = 0; i < R; i++) {
			if (x->next[i] != NULL) return x;   //some subtree survives, so keep this node
		}
		delete x;      //no value and no children: free the node...
		return NULL;   //...and return NULL so the parent clears its link
	}
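	// Illustrative trace: with keys {"she","shells"}, del("shells") clears the value
	// at the "shells" node; unwinding, that node and the now-childless "shell" and
	// "shel" nodes are freed, while the "she" node survives because it still holds
	// a value.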
};
int main() {
	int a, n;
	string s;
	trie t;
	queue<string> q;
	while (cin >> n) {   // one round per input block; exits cleanly on EOF
		while (n--) {
			cin >> s >> a;
			t.put(s, a);
		}
		cin >> s;
		cout << t.get(s) << endl;
		cin >> s;

		q = t.keywithprefix(s);
		while (!q.empty()) {
			cout << q.front() << endl;
			q.pop();
		}
		cin >> s;
		q = t.keythatmatch(s);
		while (!q.empty()) {
			cout << q.front() << endl;
			q.pop();
		}
		cin >> s;
		cout << t.longestprefix(s) << endl;
		cin >> s;
		t.del(s);
		cout << t.longestprefix(s) << endl;
	}
	return 0;
}
Algorithm 5-2
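A sample run of the driver above (inputs shown with the responses they produce; the key set is illustrative, and values must be nonzero):

	3
	she 5
	shells 3
	by 4
	shells        -> prints 3               (get)
	sh            -> prints she, shells     (keywithprefix)
	b.            -> prints by              (keythatmatch)
	shellsort     -> prints shells          (longestprefix)
	she           -> deletes "she", then prints longestprefix("she"), now the
	                 empty string: with "she" gone, no stored key is a prefix of it

The loop then waits for the next block of input.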