数据结构_38in-suf

这篇博客主要讨论了C++代码实现中缀表达式到后缀表达式转换时如何处理运算符优先级和前缀符号的问题。通过修改代码,解决了在处理括号内的正负号以及单独的正号时不正确输出的问题。文章强调了使用if-elseif结构的清晰性,并提到了isdigit函数在判断数字字符中的应用。同时,代码展示了如何处理括号、乘除、加减操作,并确保计算的正确性。
  • 这个代码是在上面的代码上改的。之前老师讲的都没有考虑前缀符号的问题,所以第四个测试点过不了。第四个测试点:-2*(+3) -----------------> -2 3 *

  • 只有两个改动的地方:
    ①如果符号在第一个位置,-要和数字连着,+不用输出(就不必判断了,这种情况就既不用入栈也不用输出)
    ②如果符号的前一个是(,同上

  • 改动如下

    对于+,没有入栈也没有输出,就是直接跳过了;
    对于这两种情况下的-,就看成数字一起处理

while((isdigit(ss[i])||ss[i]=='.')||(i==0&&ss[i]=='-')||((ss[i]=='-')&&(ss[i-1]=='(')))
else if(((ss[i]=='+')||((ss[i]=='-')&&i!=0))&&ss[i-1]!='(')

开始的时候没有考虑(+3)这种情况——其中的正号不能输出。毕竟日常生活中很少有人这样写表达式,所以一开始没想到。

  • 遇到的小注意点
    ①最好不要if - if - if 这样,要if - else if这样
    ②isdigit可以判断是不是数字,头文件#include<ctype.h>
//用字符数组逐位判断时末尾没有结束符,不方便
//所以这里直接用 string(ss[i] 越界一位时返回 '\0',可作循环终止条件)

#include<bits/stdc++.h>
#include<ctype.h>
using namespace std;
#define Maxsize 100
// Fixed-capacity LIFO stack used to hold operator characters
// during infix -> postfix conversion.
typedef struct Stack{
	int data[Maxsize];//valid slots 0~Maxsize-1; chars are stored widened to int
	int top;//index of the current top element; -1 means the stack is empty
}Stack,*pStack; 

// Allocate an empty stack on the heap (top sentinel = -1).
// Ownership of the returned pointer passes to the caller.
pStack CreateStack(){
	Stack* s = new Stack();
	s->top = -1;
	return s;
}

// True when the stack already holds Maxsize elements.
bool isFull(pStack L){
	return L->top == Maxsize - 1;
}

// True when nothing has been pushed (top is at the -1 sentinel).
bool isEmpty(pStack L){
	return L->top == -1;
}

// Push character e onto the stack.
// Returns false (and stores nothing) when the stack is full.
bool Push(pStack L,char e){
	if(isFull(L)) return false;
	++(L->top);
	L->data[L->top] = e;
	return true;
}

/// Pop and return the top character of the stack.
/// Returns '\0' when the stack is empty (the original wrote
/// `return false;` from a char-returning function — same value 0,
/// but the bool literal was misleading; made the sentinel explicit).
/// Callers must not push '\0' as real data, or they cannot tell
/// it apart from the empty-stack sentinel.
char Pop(pStack L){
	if(isEmpty(L)) return '\0';
	return L->data[(L->top)--];
}

// Reads one infix expression (no spaces) from stdin and prints the
// postfix (RPN) form, tokens separated by single spaces.
// Handles: multi-char/decimal numbers, parentheses, + - * /, and
// prefix signs: a leading '-' or a '-' right after '(' is glued to
// the number; a prefix '+' is silently skipped (neither pushed nor
// printed).
int main(){
	pStack head;
	head=CreateStack();

	string ss;char e;cin>>ss;
	// NOTE(review): flag and flag1 are never used — dead locals.
	int flag=0,flag1=0;
	// ismatch == true while we are still inside the same number, so
	// consecutive digits print without a separating space.
	bool ismatch=true; 
	// std::string's operator[] yields '\0' one past the end, which
	// terminates both this loop and the inner while safely.
	for(int i=0;ss[i];i++){	
		// Consume a whole number; also enter here for a unary '-'
		// (at position 0, or directly after '('), which is emitted
		// as part of the number.
		while((isdigit(ss[i])||ss[i]=='.')||(i==0&&ss[i]=='-')||((ss[i]=='-')&&(ss[i-1]=='('))){
			// very first character: no leading space 
			if(i==0){
				cout<<ss[i];
				i++;
			} 
			// starting a new token (previous char ended one): space first 
			else if(!ismatch&&(i!=0)){
				cout<<" "<<ss[i];
				i++;
			} 
			// continuing the current number: no space 
			else if(ismatch){
				cout<<ss[i];
				i++;
			} 
			// i was advanced above, so ss[i] is now the NEXT char:
			// if it no longer belongs to a number, the token is done.
			if(!isdigit(ss[i])&&ss[i]!='.'){
				ismatch=false;
				break;
			}
			else{
				ismatch=true;
			} 
		
		}
		// ')': pop and print operators until the matching '(' 
		// (the '(' itself is discarded).
		if(ss[i]==')'){
			e=Pop(head);
			while(e!='('){
				cout<<" "<<e;
				e=Pop(head);
			}
		}
		// Binary + or -: pop and print stacked operators (they all have
		// >= precedence) until '(' or empty, then push this one.
		// The ss[i-1]!='(' guard excludes the unary case handled above;
		// a prefix '+' falls through every branch and is simply skipped.
		else if(((ss[i]=='+')||((ss[i]=='-')&&i!=0))&&ss[i-1]!='('){
			if(isEmpty(head)) Push(head,ss[i]); 
			else{
				do{
					e=Pop(head);
					if(e=='('){
						// '(' is a barrier, not an operator: put it back.
						Push(head,'(');
					}
					else cout<<" "<<e;				
				}while(e!='('&&!isEmpty(head));//key point: stop at '(' or when drained 
				Push(head,ss[i]);
			}
		}
		// '*', '/' bind tighter than anything below them; '(' is a
		// barrier — all three are pushed unconditionally.
		else if(ss[i]=='*'||ss[i]=='/'||ss[i]=='('){
			Push(head,ss[i]);			
		} 
	}
	// Flush the remaining operators.
	while(!isEmpty(head)){
		e=Pop(head);
		cout<<" "<<e;
	} 
	// NOTE(review): the heap-allocated stack is never freed — harmless
	// at program exit, but CreateStack's owner should delete it.
	return 0;
}

我的python代码,配合CSH脚本(init.csh),能在linux环境下运行。但在windows调试中,os.environ.get("CSTHOME") 这样的语句无法运行起来,请分析怎样在windows中,也可以运行和调试 以下是init.csh代码 #!/bin/csh -f # # - initial config # ================================================================================================= # -- setup config if ( $4 == "" ) then /bin/echo -e "\033[33;1m >> ERROR : run with (csh $0 'EX-CST' 'CASE' 'DATADT' 'SITECODE')\033[0m" exit 1 endif # 接收4个参数 set PROJECT = $1 set RUNID = $2 set NEWDATE = $3 # 2025-09-25 set NEWID = $4 # 37080002 # 解析日期参数 set YEAR = `echo $NEWDATE | awk -F'-' '{print $1}'` set MONTH = `echo $NEWDATE | awk -F'-' '{print $2}'` set DAY = `echo $NEWDATE | awk -F'-' '{print $3}'` # ================================================================================================= # HOME [模型运行配置] setenv MODHOME "$HOME/pj2" # # config for model NAME and PATH setenv MODNAM "a2" setenv MODLAB "$MODHOME/$MODNAM" # model dirs setenv MODEXE "$MODLAB/exec" setenv MODSRC "$MODLAB/scripts" setenv MODTOL "$MODLAB/tools" setenv MODINI "$MODLAB/static" setenv CSTHOME "$MODLAB/result/$NEWDATE/$NEWID" setenv COMBINE "$CSTHOME/result" setenv BASECST "$CSTHOME/base/base/constraints" setenv BASEOUT "$CSTHOME/base/base/output" setenv MODRUNS "$CSTHOME/runs" setenv MODCFG "$MODRUNS/model/configuration" setenv MODMCM "$MODRUNS/mcm" setenv ENVFILE "$CSTHOME/envs" setenv DATANM "$MODLAB/data/$2.csv" setenv ALLMAP "$MODINI/variablesMapping.txt" set STEP_SIZE = 3600 set RATE_OUT_STEP_SIZE = 3600 set CONSTR = "$1" # -- if clear link files ? 
setenv CLEAN YES 以下是python文件代码: import sys import os import pandas as pd import numpy as np from datetime import timedelta import warnings warnings.filterwarnings("ignore", category=FutureWarning) def base_trans(df, maps): for key, value in maps.items(): if key in df.columns: if value not in ['TEMP', 'PRESS', 'RH', 'J4', 'CO', 'NO', 'NO2']: df[key] = df[key] * 2.46 * 10 ** 10 # 取 Vm = 24.47 L/mol elif value == 'TEMP': df[key] = df[key] + 273.15 elif value == 'CO': df[key] = df[key] * 2.15 * 10 ** 13 elif value == 'NO': df[key] = df[key] * 2.01 * 10 ** 10 elif value == 'NO2': df[key] = df[key] * 1.31 * 10 ** 10 else: continue return df def rir_generator(df_total, df_maps): map_dict = df_maps.to_dict(orient='list') alkanes_list = df_maps['MCM'][df_maps['CATEGORY'] == '烷烃'].values.tolist() olefin_list = df_maps['MCM'][df_maps['CATEGORY'] == '烯烃'].values.tolist() alkyne_list = df_maps['MCM'][df_maps['CATEGORY'] == '炔烃'].values.tolist() aromatics_list = df_maps['MCM'][df_maps['CATEGORY'] == '芳香烃'].values.tolist() halogenated_hydrocarbon = df_maps['MCM'][df_maps['CATEGORY'] == '卤代烃'].values.tolist() ovocs = df_maps['MCM'][df_maps['CATEGORY'] == '含氧'].values.tolist() cat_dict = {'AVOCs': ['NO', 'NO2', 'CO', 'TEMP', 'PRESS', 'RH', 'J4', 'C5H8'], 'BVOCs': ['C5H8'], 'NOx': ['NO', 'NO2'], 'CO': ['CO'], 'TEMP': ['TEMP'], 'RH': ['RH'], 'wt': alkanes_list, 'xt': olefin_list, 'qt': alkyne_list, 'fxt': aromatics_list, 'ldt': halogenated_hydrocarbon, 'OVOCs': ovocs } for minus, species in cat_dict.items(): path1 = os.path.join(os.environ.get("CSTHOME"), 'rir' + os.sep + minus + '_10%' + os.sep) path = os.path.join(path1, 'constraints') out_path = os.path.join(path1, 'output') reac_path = os.path.join(out_path, 'reactionRates') spe_path = os.path.join(path, 'species') env_path = os.path.join(path, 'environment') pho_path = os.path.join(path, 'photolysis') os.makedirs(spe_path, exist_ok=True) os.makedirs(env_path, exist_ok=True) os.makedirs(pho_path, exist_ok=True) 
os.makedirs(reac_path, exist_ok=True) # AVOCs种类繁多,因此处理时取补集 if minus == 'AVOCs': for i in range(len(map_dict['MCM'])): if map_dict['VARIABLE'][i] in df_total.columns: if map_dict['MCM'][i] not in species: df_total[map_dict['MCM'][i]] = df_total[map_dict['VARIABLE'][i]] * 0.9 df_total[['Time', map_dict['MCM'][i]]].to_csv(os.path.join(spe_path, map_dict['MCM'][i]), header=None, index=False, sep=' ') else: df_total[map_dict['MCM'][i]] = df_total[map_dict['VARIABLE'][i]] * 1.0 if map_dict['MCM'][i] in ['TEMP', 'PRESS', 'RH']: df_total[['Time', map_dict['MCM'][i]]].to_csv(os.path.join(env_path, map_dict['MCM'][i]), header=None, index=False, sep=' ') elif map_dict['MCM'][i] in ['J4']: df_total[['Time', map_dict['MCM'][i]]].to_csv(os.path.join(pho_path, map_dict['MCM'][i]), header=None, index=False, sep=' ') else: df_total[['Time', map_dict['MCM'][i]]].to_csv(os.path.join(spe_path, map_dict['MCM'][i]), header=None, index=False, sep=' ') else: for i in range(len(map_dict['MCM'])): if map_dict['VARIABLE'][i] in df_total.columns: if map_dict['MCM'][i] in species: df_total[map_dict['MCM'][i]] = df_total[map_dict['VARIABLE'][i]] * 0.9 else: df_total[map_dict['MCM'][i]] = df_total[map_dict['VARIABLE'][i]] * 1.0 if map_dict['MCM'][i] in ['TEMP', 'PRESS', 'RH']: df_total[['Time', map_dict['MCM'][i]]].to_csv(os.path.join(env_path, map_dict['MCM'][i]), header=None, index=False, sep=' ') elif map_dict['MCM'][i] in ['J4']: df_total[['Time', map_dict['MCM'][i]]].to_csv(os.path.join(pho_path, map_dict['MCM'][i]), header=None, index=False, sep=' ') else: df_total[['Time', map_dict['MCM'][i]]].to_csv(os.path.join(spe_path, map_dict['MCM'][i]), header=None, index=False, sep=' ') def ekma_generator(df_total, map_dict): cat_dict = {'AVOCs': ['NO', 'NO2', 'CO', 'TEMP', 'PRESS', 'RH', 'J4', 'C5H8'], 'NOx': ['NO', 'NO2']} # for i_nox in [0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.2, 1.5, 2.0, 2.5, 3]: # for j_voc in [0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 
1.0, 1.2, 1.5, 2.0]: #for i_nox in [0.01, 0.1, 0.3, 0.5, 0.7, 0.9, 1.2, 2.0, 3]: # for j_voc in [0.01, 0.1, 0.3, 0.5, 0.7, 0.9, 1.2, 2.0]: for i_nox in [0.01, 0.1, 0.5, 1, 1.5,2.0,2.5, 3]: for j_voc in [0.01, 0.1, 0.5, 1,1.5, 2]: for minus, species in cat_dict.items(): path1 = os.path.join(os.environ.get("CSTHOME"), 'ekma' + os.sep + 'NOx' + str(round(i_nox * 100)) + '%_' + 'AVOCs' + str(round(j_voc * 100)) + '%') path = os.path.join(path1, 'constraints') spe_path = os.path.join(path, 'species') env_path = os.path.join(path, 'environment') pho_path = os.path.join(path, 'photolysis') out_path = os.path.join(path1, 'output') reac_path = os.path.join(out_path, 'reactionRates') os.makedirs(spe_path, exist_ok=True) os.makedirs(env_path, exist_ok=True) os.makedirs(pho_path, exist_ok=True) os.makedirs(reac_path, exist_ok=True) if minus == 'AVOCs': for i in range(len(map_dict['MCM'])): if map_dict['VARIABLE'][i] in df_total.columns: if map_dict['MCM'][i] not in species: df_total[map_dict['MCM'][i]] = df_total[map_dict['VARIABLE'][i]] * j_voc elif map_dict['MCM'][i] in ['NO', 'NO2']: df_total[map_dict['MCM'][i]] = df_total[map_dict['VARIABLE'][i]] * i_nox else: df_total[map_dict['MCM'][i]] = df_total[map_dict['VARIABLE'][i]] * 1.0 if map_dict['MCM'][i] in ['TEMP', 'PRESS', 'RH']: df_total[['Time', map_dict['MCM'][i]]].to_csv( os.path.join(env_path, map_dict['MCM'][i]), header=None, index=False, sep=' ') elif map_dict['MCM'][i] in ['J4']: df_total[['Time', map_dict['MCM'][i]]].to_csv( os.path.join(pho_path, map_dict['MCM'][i]), header=None, index=False, sep=' ') else: df_total[['Time', map_dict['MCM'][i]]].to_csv(os.path.join(spe_path, map_dict['MCM'][i]), header=None, index=False, sep=' ') else: for i in range(len(map_dict['MCM'])): if map_dict['VARIABLE'][i] in df_total.columns: if map_dict['MCM'][i] in species: df_total[map_dict['MCM'][i]] = df_total[map_dict['VARIABLE'][i]] * i_nox elif map_dict['MCM'][i] not in ['NO', 'NO2', 'CO', 'TEMP', 'PRESS', 'RH', 'J4', 'C5H8']: 
df_total[map_dict['MCM'][i]] = df_total[map_dict['VARIABLE'][i]] * j_voc else: df_total[map_dict['MCM'][i]] = df_total[map_dict['VARIABLE'][i]] * 1.0 if map_dict['MCM'][i] in ['TEMP', 'PRESS', 'RH']: df_total[['Time', map_dict['MCM'][i]]].to_csv( os.path.join(env_path, map_dict['MCM'][i]), header=None, index=False, sep=' ') elif map_dict['MCM'][i] in ['J4']: df_total[['Time', map_dict['MCM'][i]]].to_csv( os.path.join(pho_path, map_dict['MCM'][i]), header=None, index=False, sep=' ') else: df_total[['Time', map_dict['MCM'][i]]].to_csv(os.path.join(spe_path, map_dict['MCM'][i]), header=None, index=False, sep=' ') def main_process(data_file, map_file): suf = data_file.split('.')[-1] if suf == 'xls' or suf == 'xlsx': df_origin = pd.read_excel(data_file) else: df_origin = pd.read_csv(data_file) df_map = pd.read_csv(map_file, sep="\\s+") map_dict = df_map.set_index(['VARIABLE'])['MCM'].to_dict() # 转换数据类型 df_origin[df_origin.columns[5:]] = df_origin[df_origin.columns[5:]].apply(pd.to_numeric, errors='coerce') lim_nan = df_origin[df_origin.columns[5:]].apply(lambda x: x < 0) df_origin[lim_nan] = np.nan # 确保数据间隔时间为1小时 df_dr = pd.DataFrame(pd.date_range(start=df_origin['开始时间'].values[0], end=df_origin['开始时间'].values[-1], freq='h'), columns=['Date']) # df_dr['Date'] = df_dr['Date'].apply(lambda x: (pd.to_datetime(str(x))).strftime('%Y/%m/%d %H:%M')) df_origin = df_origin.copy() df_origin['Date'] = pd.to_datetime(df_origin['开始时间'], format="%Y-%m-%d %H:%M:%S") df_origin = pd.merge(left=df_dr, right=df_origin, on=['Date'], how='outer') # 对指定列进行插值 df_origin = df_origin.bfill() df_origin = df_origin.infer_objects(copy=False) # 先调用 infer_objects 方法 df_origin[df_origin.columns[5:]] = df_origin[df_origin.columns[5:]].infer_objects(copy=False) # 再进行插值操作 df_origin[df_origin.columns[5:]] = df_origin[df_origin.columns[5:]].interpolate(method='linear') # 部分特殊列的处理,原始数据必须包含这些列 df_origin['Date'] = pd.to_datetime(df_origin['开始时间'], format="%Y-%m-%d %H:%M:%S") # df_origin['BP(mbar)'] = 
df_origin['BP(hPa)'].values # 1mb = 1hPa df_origin = df_origin.copy() # df_origin['jNO2(s-1)'] = df_origin['jNO2(E-10)'] / 10000000000.0 # 新增多日数据平均到24日处理 df_origin = (df_origin.reset_index()).drop('index', axis=1) numeric_columns = df_origin.select_dtypes(include=[np.number]).columns df_numeric = df_origin[numeric_columns] df_origins = (df_numeric.groupby(df_origin['Date'].dt.hour).mean()).drop('序号', axis=1, errors='ignore') df_origins = (df_origin.loc[df_origin.index[:24], df_origin.columns[:6]]).join(df_origins) df_origin = df_origins df_origin = df_origin.dropna(axis=1) hour = df_origin['Date'].dt.hour[0] if df_origin['Date'].dt.freq != 'H': print(f" >>> [Warning]: VOCs组分数据时差存在不为1小时的时段! <<<") df_origin['Time'] = [(hour + 16) * 3600 + 3600 * i for i in range(len(df_origin['Date']))] df_origin['Time'] = df_origin['Time'].astype(int) # sta_loc = station_info(df_origin['城市'].unique()[0]) sta_loc = [-1 * df_origin['经度'].unique()[0], df_origin['纬度'].unique()[0]] os.system("echo set LAT = " + str(sta_loc[1]) + ">> " + os.environ.get('ENVFILE')) os.system("echo set LON = " + str(sta_loc[0]) + ">> " + os.environ.get('ENVFILE')) os.system("echo set STEP_NUM = '" + str(int((df_origin['Time'].values[-1] - df_origin['Time'].values[0]) / 3600)) + "'>> " + os.environ.get('ENVFILE')) lim_ymd = df_origin['Time'].apply(lambda x: x == 16*3600) os.system("echo set bUYY = '" + str((df_origin['Date'][lim_ymd] + timedelta(days=-1)).dt.year.values[0]).zfill(4) + "'>> " + os.environ.get('ENVFILE')) os.system("echo set bUMM = '" + str((df_origin['Date'][lim_ymd] + timedelta(days=-1)).dt.month.values[0]).zfill(2) + "'>> " + os.environ.get('ENVFILE')) os.system("echo set bUDD = '" + str((df_origin['Date'][lim_ymd] + timedelta(days=-1)).dt.day.values[0]).zfill(2) + "'>> " + os.environ.get('ENVFILE')) print("[Notice] >>> 开始数据换算及单位调整 <<<") df_origin = base_trans(df_origin, map_dict) print("[Notice] >>> 完成数据换算及单位调整 <<<") print("[Notice] >>> 开始生成敏感性分析情景数据 <<<") rir_generator(df_origin.copy(), 
df_map) print("[Notice] >>> 完成生成敏感性分析情景数据 <<<") print("[Notice] >>> 开始生成EKMA情景数据 <<<") ekma_generator(df_origin.copy(), df_map.to_dict(orient='list')) print("[Notice] >>> 完成生成EKMA情景数据 <<<") print("[Notice] >>> 开始处理基准情景数据及模型配置 <<<") df_all = df_origin.copy() species = [] env_var = ['H2O', 'DEC', 'BLHEIGHT', 'DILUTE', 'JFAC', 'ROOF', 'ASA'] env_opt = ['CALC', 'CALC', 'NOTUSED', 'NOTUSED', 1.0, 'OPEN', 'NOTUSED'] pho_env = [] spe_path = os.path.join(os.environ.get("BASECST"), 'species') env_path = os.path.join(os.environ.get("BASECST"), 'environment') pho_path = os.path.join(os.environ.get("BASECST"), 'photolysis') reac_path = os.path.join(os.environ.get("BASEOUT"), 'reactionRates') os.makedirs(spe_path, exist_ok=True) os.makedirs(env_path, exist_ok=True) os.makedirs(pho_path, exist_ok=True) os.makedirs(reac_path, exist_ok=True) for key, value in map_dict.items(): if key in df_all.columns: if value not in ['TEMP', 'PRESS', 'RH', 'J4', 'PAN', 'HCHO', 'CH4']: species.append(value) df_all[['Time', key]].to_csv(os.path.join(spe_path, value), header=None, index=False, sep=' ') elif value in ['TEMP', 'PRESS', 'RH']: env_var.append(value) env_opt.append('CONSTRAINED') df_all[['Time', key]].to_csv(os.path.join(env_path, value), header=None, index=False, sep=' ') elif value == 'J4': pho_env.append('{' + value + '}') df_all[['Time', key]].to_csv(os.path.join(pho_path, value), header=None, index=False, sep=' ') else: species.append('{' + value + '}') df_all[['Time', key]].to_csv(os.path.join(spe_path, value), header=None, index=False, sep=' ') else: if value == 'TEMP': env_var.append(value) env_opt.append(298.15) elif value == 'RH': env_var.append(value) env_opt.append(70) elif value == 'PRESS': env_var.append(value) env_opt.append(1013.25) else: continue env_ind = [i + 1 for i in range(len(env_var))] env_dict = {'ind': env_ind, 'var': env_var, 'opt': env_opt} df_env = pd.DataFrame(env_dict) df_env.to_csv(os.path.join(os.environ.get("MODCFG"), 'environmentVariables.config'), 
header=None, index=False, sep=' ') s_j = pd.DataFrame(pho_env) s_j.to_csv(os.path.join(os.environ.get("MODCFG"), 'photolysisConstrained.config'), header=None, index=False, sep=' ') s_species = pd.Series(species) s_species.to_csv(os.path.join(os.environ.get("MODCFG"), 'speciesConstrained.config'), header=None, index=False, sep=' ') print("[Notice] >>> 完成处理基准情景数据及模型配置 <<<") if __name__ == '__main__': all_data_file = sys.argv[1] all_map_file = sys.argv[2] main_process(all_data_file, all_map_file)
09-26
评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值