greedy tino (a lesson learned!)

```cpp
#include <stdio.h>
#include <string.h>
#include <iostream>

using namespace std;
#define INF 0x7fffffff
#define OFFSET 2000

int dp[101][4001]; // dp[i][j+OFFSET]: best total hung weight with the two ends differing by j in [-2000, 2000]
int list[101];     // tangerine weights (zero-weight entries are filtered out on input)

// Three-way max; note this shadows the std::max pulled in by `using namespace std`.
int max(int a,int b,int c)
{
	a=(a>b)?a:b;
	return a>c?a:c;
}

int main()
{
	int T;
	int cas=0;
	scanf("%d",&T);
	while(T--)
	{
		int n;
		scanf("%d",&n);
		int cnt=1;
		
		bool haszero=false;
		// The bug was in the list-reading loop!
		// Remember this lesson: when accumulating an index such as ind or cnt,
		// either initialize it to 0 and do cnt++ at the TOP of the loop body,
		// or, if it starts at 1 and cnt++ sits at the END of the loop body,
		// you must do cnt-- after the loop, otherwise cnt ends up one too large!
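		// A quick sketch of the two safe patterns (x is a hypothetical value just read):
		//   pattern A: cnt = 0; for (...) { cnt++; list[cnt] = x; }  // cnt == count after the loop
		//   pattern B: cnt = 1; for (...) { list[cnt] = x; cnt++; }  // needs cnt-- after the loop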
		for(int i=1;i<=n;i++)
		{
			scanf("%d",&list[cnt]);
			if(list[cnt]==0)
			{
				cnt--;         // drop the zero-weight tangerine
				haszero=true;  // but remember that one existed
			}
			cnt++;
		}
		cnt--;
		// for(int i=1;i<=cnt;i++) cout<<list[i]<<endl;  // debug: dump the kept weights
		n = cnt;
		// cout << n << endl;  // debug only -- printing n here would break the output format
		// for (int i = 1; i <= n; i ++) { // read the n tangerine weights
			// scanf ("%d",&list[++ cnt]);
			// if (list[cnt] == 0) { // if this tangerine weighs 0
				// cnt --; // drop it
				// haszero = true; // and record that a zero-weight tangerine exists
			// }
		// }
		// n = cnt;
		
		for(int i=-2000;i<=2000;i++)
		{
			dp[0][i+OFFSET]=-INF;  // mark every state unreachable
		}
		// Initializing dp[0] separately avoids an if(i==0) test on every
		// iteration of the main loop below.
		dp[0][0+OFFSET]=0;  // zero tangerines, zero difference, zero weight
		// cnt (the number of kept tangerines) is the bound that matters here!
		for(int i=1;i<=cnt;i++)
		{
			for(int j=-2000;j<=2000;j++)
			{
				int tmp1,tmp2,tmp3;
				// (I originally forgot to initialize tmp2 and tmp3 here)
				tmp1=dp[i-1][j+OFFSET];  // option 1: skip tangerine i
				tmp2=tmp3=-INF;

				// Options 2 and 3: hang tangerine i on one end or the other,
				// on top of the previous state, keeping the difference within
				// [-2000, 2000] and only extending states that are reachable.
				if(j+list[i]<=2000 && dp[i-1][j+OFFSET+list[i]]!=-INF)
				{
					tmp2=dp[i-1][j+OFFSET+list[i]]+list[i];
				}
				if(j-list[i]>=-2000 && dp[i-1][j+OFFSET-list[i]]!=-INF)
				{
					tmp3=dp[i-1][j+OFFSET-list[i]]+list[i];
				}
				dp[i][j+OFFSET]=max(tmp1,tmp2,tmp3);
			}
		}
		cas++;
		// Wrong the first time!
		// I got a presentation error: the colon was missing a trailing space in
		// two printf calls, and when fixing it I changed one but not the other.
		if(dp[n][0+OFFSET]==0)
		{
			// The pole can only balance empty: print 0 if a zero-weight
			// tangerine exists (hang just that one), otherwise -1.
			printf("Case %d: ",cas);
			printf("%d\n",haszero?0:-1);
		}
		else
		{
			printf("Case %d: ",cas);
			printf("%d\n",dp[n][0+OFFSET]/2);
		}
		// printf("Case %d: ",++ cas); // print in the format the problem requires
		// if (dp[n][0 + OFFSET] == 0) { // dp[n][0] is 0
			// puts( haszero == true ? "0" : "-1"); // 0 or -1 depending on whether a zero-weight tangerine exists
		// } else printf("%d\n",dp[n][0 + OFFSET] / 2); // otherwise print dp[n][0] / 2
	}
	return 0;
}
```
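Reading off the transitions above: dp[i][j + OFFSET] holds the maximum total weight that can be hung using the first i tangerines with the two ends differing by j (OFFSET shifts j from [-2000, 2000] into array indices). As a quick hand check on a made-up input {1, 2, 3}: hanging 1 and 2 on one end and 3 on the other balances the pole with total weight 6, so dp[3][0 + OFFSET] = 6 and the program prints 6 / 2 = 3, the weight carried on each end.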


### Epsilon-Greedy Algorithm Implementation and Use Cases

The epsilon-greedy algorithm is a strategy commonly used in reinforcement learning to balance exploration and exploitation. In this context, exploration means trying out new actions to discover potentially better outcomes, while exploitation means selecting the action that has historically provided the best reward.

#### Algorithm Implementation

The epsilon-greedy policy selects a random action with probability ε (epsilon) and the greedy action (the one with the highest estimated value) with probability 1 - ε. This ensures that the agent does not always exploit known information but also explores other options, avoiding getting stuck in suboptimal strategies[^2]. Below is an implementation of the epsilon-greedy algorithm in Python:

```python
import numpy as np

def epsilon_greedy_policy(Q, state, epsilon):
    if np.random.rand() < epsilon:
        # Exploration: select a random action
        return np.random.choice(len(Q[state]))
    else:
        # Exploitation: select the action with the highest estimated value
        return np.argmax(Q[state])
```

In this code snippet, `Q` represents the action-value function estimate for each state-action pair, `state` is the current state, and `epsilon` determines the likelihood of choosing a random action over the optimal one.

#### Use Cases

Epsilon-greedy algorithms are widely applied in domains where decision-making under uncertainty is required. Prominent use cases include:

1. **Reinforcement Learning**: The algorithm is fundamental in training agents to solve Markov Decision Processes (MDPs). For instance, it can be employed in games like chess or Go, where the agent must decide between exploring new moves and exploiting known winning strategies[^1].
2. **Multi-Armed Bandit Problems**: These problems involve maximizing reward by selecting among multiple options (or "arms") with unknown payoff distributions. Epsilon-greedy policies help determine which arm to pull next by balancing exploration and exploitation (see the sketch after this list).
3. **Recommendation Systems**: In online recommendation systems, such as those used by streaming platforms or e-commerce websites, epsilon-greedy algorithms can suggest items to users. By occasionally recommending less popular items, the system can discover new preferences while primarily offering top-rated suggestions[^3].
4. **Autonomous Driving**: Self-driving cars use reinforcement learning techniques to navigate roads safely. An epsilon-greedy approach might allow the vehicle to experiment with different driving styles during testing phases before settling on optimal behaviors[^4].
5. **Resource Allocation**: In cloud computing environments, epsilon-greedy methods can optimize server allocation by dynamically adjusting resources based on historical performance metrics while exploring alternative configurations[^3].
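To make the bandit use case concrete, here is a minimal sketch of epsilon-greedy on a multi-armed bandit. It is not from the text above: the function name `run_bandit`, the Gaussian reward model, the arm values, and the incremental-mean update are all illustrative assumptions.

```python
import numpy as np

def run_bandit(true_means, epsilon, steps, seed=0):
    """Epsilon-greedy on a Gaussian multi-armed bandit (illustrative sketch)."""
    rng = np.random.default_rng(seed)
    k = len(true_means)
    Q = np.zeros(k)       # estimated value of each arm
    counts = np.zeros(k)  # number of times each arm was pulled
    total = 0.0
    for _ in range(steps):
        if rng.random() < epsilon:
            arm = int(rng.integers(k))     # explore: pick a random arm
        else:
            arm = int(np.argmax(Q))        # exploit: best estimate so far
        reward = rng.normal(true_means[arm], 1.0)  # assumed reward model
        counts[arm] += 1
        Q[arm] += (reward - Q[arm]) / counts[arm]  # incremental mean update
        total += reward
    return Q, total

# Hypothetical arm values; with epsilon = 0.1 the estimates should drift
# toward true_means while most pulls go to the best arm.
Q, total = run_bandit(true_means=[0.2, 0.5, 0.8], epsilon=0.1, steps=10_000)
print(Q, total / 10_000)
```

Keeping ε small but nonzero means every arm keeps being sampled occasionally, so a poorly estimated arm can still be corrected later.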