Name,Alias,ColDesc的作用

博客介绍了VTable的columns中三个属性的概念。Name是当前vtable关联表中的字段名字,Alias是关联表的别名,在vtable的tables里定义,ColDesc是该字段在本vtable中的名字。
在VTable的columns中,这三个属性的概念分别是:
Name-当前vtable的关联表中的字段名字
Alias-关联表的别名,在vtable的tables里面定义
ColDesc-这个字段在本vtable中的名字
在VTable的columns中,这三个属性的概念分别是:
Name-当前vtable的关联表中的字段名字
Alias-关联表的别名,在vtable的tables里面定义
ColDesc-这个字段在本vtable中的名字
-- Bonded-wafer EDC report (Oracle + FineReport templating).
-- q1: for every component, resolve which physical wafer is the "base" side
--     (…b / fsiwaferid columns) and which is the bonded "top" side
--     (…t / cwwaferid columns), driven by the lot type of the original parent.
-- q2: attach EDC measurements in the half-open window [dt1, dt2), once keyed
--     on the base wafer id and once on the bonded wafer id.
-- ${...} fragments are FineReport parameter substitutions and kept verbatim.
-- NOTE: in Oracle the '' literal below is NULL, not an empty string.
WITH q1 AS (
    SELECT
        -- base-side wafer id
        CASE fc_get_lottype(p.originalparent)
            WHEN 'Base'   THEN p.componentid
            WHEN 'Top'    THEN s.bondedwaferid
            WHEN 'Handle' THEN p.componentid
            WHEN 'Device' THEN s.bondedwaferid
            WHEN 'NEG'    THEN p.componentid
            ELSE p.componentid
        END AS fsiwaferid,
        -- base-side original lot
        CASE fc_get_lottype(p.originalparent)
            WHEN 'Base'   THEN p.originalparent
            WHEN 'Top'    THEN p2.originalparent
            WHEN 'Handle' THEN p.originalparent
            WHEN 'Device' THEN p2.originalparent
            WHEN 'NEG'    THEN p.originalparent
            ELSE p.originalparent
        END AS orgb,
        -- base-side current lot
        CASE fc_get_lottype(p.originalparent)
            WHEN 'Base'   THEN p.currentparent
            WHEN 'Top'    THEN p2.currentparent
            WHEN 'Handle' THEN p.currentparent
            WHEN 'Device' THEN p2.currentparent
            WHEN 'NEG'    THEN p.currentparent
            ELSE p.currentparent
        END AS lmb_curlotb,  -- see note: original alias was curlotb
        -- base-side alias name
        CASE fc_get_lottype(p.originalparent)
            WHEN 'Base'   THEN s.aliasname
            WHEN 'Top'    THEN s2.aliasname
            WHEN 'Handle' THEN s.aliasname
            WHEN 'Device' THEN s2.aliasname
            WHEN 'NEG'    THEN s.aliasname
            ELSE s.aliasname
        END AS lmb,
        -- top-side wafer id; 'NVL' marks "no bonded counterpart"
        CASE NVL(fc_get_lottype(p2.originalparent), 'NVL')
            WHEN 'Base'   THEN p.componentid
            WHEN 'Top'    THEN s.bondedwaferid
            WHEN 'Handle' THEN p.componentid
            WHEN 'Device' THEN s.bondedwaferid
            WHEN 'NEG'    THEN p.componentid
            WHEN 'NVL'    THEN ''
            ELSE s.bondedwaferid
        END AS cwwaferid,
        -- top-side original lot
        CASE NVL(fc_get_lottype(p2.originalparent), 'NVL')
            WHEN 'Base'   THEN p.originalparent
            WHEN 'Top'    THEN p2.originalparent
            WHEN 'Handle' THEN p.originalparent
            WHEN 'Device' THEN p2.originalparent
            WHEN 'NEG'    THEN p2.originalparent
            WHEN 'NVL'    THEN ''
            ELSE p.originalparent
        END AS orgt,
        -- top-side current lot
        CASE NVL(fc_get_lottype(p2.originalparent), 'NVL')
            WHEN 'Base'   THEN p.currentparent
            WHEN 'Top'    THEN p2.currentparent
            WHEN 'Handle' THEN p.currentparent
            WHEN 'Device' THEN p2.currentparent
            WHEN 'NEG'    THEN p2.currentparent
            WHEN 'NVL'    THEN ''
            ELSE p.currentparent
        END AS curlott,
        -- top-side alias name
        CASE NVL(fc_get_lottype(p2.originalparent), 'NVL')
            WHEN 'Base'   THEN s.aliasname
            WHEN 'Top'    THEN s2.aliasname
            WHEN 'Handle' THEN s.aliasname
            WHEN 'Device' THEN s2.aliasname
            WHEN 'NEG'    THEN s2.aliasname
            WHEN 'NVL'    THEN ''
            ELSE s.aliasname
        END AS lmt,
        fc_get_lottype(p.originalparent)  AS lottype1,
        fc_get_lottype(p2.originalparent) AS lottype2
    FROM fwcomponent p
    -- was an implicit comma join with "p.sysid = s.parent" buried in WHERE;
    -- made explicit so the join condition cannot be dropped accidentally
    INNER JOIN fabcomponentext s
        ON p.sysid = s.parent
    LEFT JOIN fwcomponent p2
        ON p2.componentid = s.bondedwaferid
    LEFT JOIN fabcomponentext s2
        ON s2.parent = p2.sysid
    WHERE 1 = 1
      -- and p.componentid in ('${waferid}')
      ${if(len(clot)=0,""," and p.currentparent in( '"+replace(clot,"\n","','")+"')")}
      ${if(len(waferid)=0,""," and p.componentid in( '"+replace(waferid,"\n","','")+"')")}
      ${if(len(lm)=0,""," and s.aliasname in( '"+replace(lm,"\n","','")+"')")}
),
q2 AS (
    -- EDC rows matched on the base wafer id …
    SELECT *
    FROM q1
    INNER JOIN tbl_comp_edc b
        ON q1.fsiwaferid = b.componentid
       AND fc_translate_datetime(b.measureddatetime) >= to_date('${dt1}', 'yyyy-mm-dd')
       AND fc_translate_datetime(b.measureddatetime) <  to_date('${dt2}', 'yyyy-mm-dd')
    -- … plus EDC rows matched on the bonded (top) wafer id.
    -- UNION ALL replaces UNION: the final SELECT DISTINCT deduplicates the
    -- combined result anyway, so the per-branch sort/dedup was wasted work.
    UNION ALL
    SELECT *
    FROM q1
    INNER JOIN tbl_comp_edc t
        ON q1.cwwaferid = t.componentid
       AND fc_translate_datetime(t.measureddatetime) >= to_date('${dt1}', 'yyyy-mm-dd')
       AND fc_translate_datetime(t.measureddatetime) <  to_date('${dt2}', 'yyyy-mm-dd')
)
SELECT DISTINCT *
FROM q2
08-12
<!--
  findMissionTraceByTaskNumber: builds a chronological trace ("title", status,
  createTime, finishTime) for one task number by UNION-ing many near-identical
  SELECTs over the conveyor (src_conveyor_task*), stacker (stk_task, steps 0/1),
  MLS (mls_task_d/mls_task_m, steps 0/1/2), robot-hand (apc_machine_hand_task),
  CTU (ctu_task/ctu_task_d), TI mission (ti_mission_log/ti_mission) and dock
  lifter / loading (alc_lifter_task, alc_task_m) tables, then sorts the merged
  rows by createTime.

  NOTE(review) - issues to confirm/fix, left unchanged here:
  1. SQL injection: the apc branch uses string substitution
     (task_number_list LIKE '${taskNumber},%'); prefer a bound parameter, e.g.
     LIKE CONCAT(#{taskNumber}, ',%') or FIND_IN_SET(#{taskNumber}, apc.task_number_list)
     - FIND_IN_SET is already used elsewhere in this same statement.
  2. The three apc LIKE conditions are byte-identical; presumably they were
     meant to match the head/middle/tail of the comma-separated list - verify
     original intent before deduplicating.
  3. Operator precedence: every alc_lifter_task branch has
     "x_time IS NOT NULL AND col1_task_number=#{taskNumber} or col2_task_number=... OR col3_task_number=..."
     - AND binds tighter than OR, so the IS NOT NULL guard applies only to the
     col1 arm; the OR arms almost certainly need parentheses.
  4. UNION (not UNION ALL) silently drops rows that happen to be identical
     across branches; if repeated identical events are possible this hides them.
  5. Comma-separated ids in task_number_list / container_no_list force
     LIKE / FIND_IN_SET scans (no index use); a junction table would be cleaner.
-->
<select id="findMissionTraceByTaskNumber" resultType="com.blueswords.sc2.module.api.entity.db.MissionTrace"> SELECT* FROM( SELECT CONCAT('从',from_device.alias,'[',from_device.device_name,']','至',to_device.alias,'[',to_device.device_name,']') AS title, d.status, d.create_time as createTime, d.finish_time as finishTime FROM src_conveyor_task_detail d LEFT JOIN src_conveyor_task t ON t.id =d.task_m_id LEFT JOIN wcs_basic_device from_device ON d.from_device_name = from_device.device_name LEFT JOIN wcs_basic_device to_device ON d.to_device_name = to_device.device_name WHERE t.task_number =#{taskNumber} UNION SELECT CASE md.`status` WHEN 0 THEN CONCAT('',md.execute_device_name,'-创建完成-任务起始地址-',md.from_unit,'-任务目的地址-',md.to_unit,'-',md.remark) ELSE CONCAT('',md.execute_device_name,'-创建完成-任务起始地址-',md.from_unit,'-任务目的地址-',md.to_unit) END AS title, md.`status`, md.create_time AS createTime, md.create_time AS finishTime FROM stk_task md WHERE md.task_number = #{taskNumber} AND md.step = 0 UNION SELECT CASE md.`status` WHEN 1 THEN CONCAT('',md.execute_device_name,'-开始执行-',md.remark) ELSE CONCAT('',md.execute_device_name,'-开始执行') END AS title, md.`status`, md.execute_time AS createTime, md.execute_time AS finishTime FROM stk_task md WHERE md.task_number = #{taskNumber} AND md.`status` >= 1 AND md.step = 0 UNION SELECT CONCAT('',md.execute_device_name,'-任务完成-',md.remark) AS title, md.`status`, md.finish_time AS createTime, md.finish_time AS finishTime FROM stk_task md WHERE md.task_number = #{taskNumber} AND md.`status` = 2 AND md.step = 0 UNION SELECT CASE md.`status` WHEN 0 THEN CONCAT('',md.execute_device_name,'-创建完成-任务起始地址-',md.from_unit,'-任务目的地址-',md.to_unit,'-',md.remark) ELSE CONCAT('',md.execute_device_name,'-创建完成-任务起始地址-',md.from_unit,'-任务目的地址-',md.to_unit) END AS title, md.`status`, md.create_time AS createTime, md.create_time AS finishTime FROM stk_task md WHERE md.task_number = #{taskNumber} AND md.step = 1 UNION SELECT CASE md.`status` WHEN 1 THEN
CONCAT('',md.execute_device_name,'-开始执行-',md.remark) ELSE CONCAT('',md.execute_device_name,'-开始执行') END AS title, md.`status`, md.execute_time AS createTime, md.execute_time AS finishTime FROM stk_task md WHERE md.task_number = #{taskNumber} AND md.`status` >= 1 AND md.step = 1 UNION SELECT CONCAT('',md.execute_device_name,'-任务完成-',md.remark) AS title, md.`status`, md.finish_time AS createTime, md.finish_time AS finishTime FROM stk_task md WHERE md.task_number = #{taskNumber} AND md.`status` = 2 AND md.step = 1 UNION SELECT CASE md.`status` WHEN 0 THEN CONCAT('',md.execute_device_name,'-创建完成-任务起始地址-',md.from_unit,'-任务目的地址-',md.to_unit,'-',md.remark) ELSE CONCAT('',md.execute_device_name,'-创建完成-任务起始地址-',md.from_unit,'-任务目的地址-',md.to_unit) END AS title, md.`status`, md.create_time AS createTime, md.create_time AS finishTime FROM mls_task_d md LEFT JOIN mls_task_m mm ON mm.id =md.task_m_id WHERE mm.task_number = #{taskNumber} AND md.step = 0 UNION SELECT CASE md.`status` WHEN 1 THEN CONCAT('',md.execute_device_name,'-开始执行-',md.remark) ELSE CONCAT('',md.execute_device_name,'-开始执行') END AS title, md.`status`, md.execute_time AS createTime, md.execute_time AS finishTime FROM mls_task_d md LEFT JOIN mls_task_m mm ON mm.id =md.task_m_id WHERE mm.task_number = #{taskNumber} AND md.`status` >= 1 AND md.step = 0 UNION SELECT CONCAT('',md.execute_device_name,'-任务完成-',md.remark) AS title, md.`status`, md.finish_time AS createTime, md.finish_time AS finishTime FROM mls_task_d md LEFT JOIN mls_task_m mm ON mm.id =md.task_m_id WHERE mm.task_number = #{taskNumber} AND md.`status` = 2 AND md.step = 0 UNION SELECT CASE md.`status` WHEN 0 THEN CONCAT('',dev.alias,md.execute_device_name,'-创建完成-任务起始地址-',md.from_unit,'-任务目的地址-',md.to_unit,'-',md.remark) ELSE CONCAT('',dev.alias,md.execute_device_name,'-创建完成-任务起始地址-',md.from_unit,'-任务目的地址-',md.to_unit) END AS title,md.`status`, md.create_time AS createTime, md.create_time AS finishTime FROM mls_task_d md LEFT JOIN mls_task_m mm ON mm.id
=md.task_m_id LEFT JOIN wcs_basic_device dev ON md.execute_device_name = dev.device_name WHERE mm.task_number = #{taskNumber} AND md.step = 1 UNION SELECT CASE md.`status` WHEN 1 THEN CONCAT('',dev.alias,md.execute_device_name,'-开始执行-',md.remark) ELSE CONCAT('',dev.alias,md.execute_device_name,'-开始执行') END AS title, md.`status`, md.execute_time AS createTime, md.execute_time AS finishTime FROM mls_task_d md LEFT JOIN mls_task_m mm ON mm.id =md.task_m_id LEFT JOIN wcs_basic_device dev ON md.execute_device_name = dev.device_name WHERE mm.task_number = #{taskNumber} AND md.`status` >= 1 AND md.step = 1 UNION SELECT CONCAT('',dev.alias,md.execute_device_name,'-任务完成-',md.remark) AS title, md.`status`, md.finish_time AS createTime, md.finish_time AS finishTime FROM mls_task_d md LEFT JOIN mls_task_m mm ON mm.id =md.task_m_id LEFT JOIN wcs_basic_device dev ON md.execute_device_name = dev.device_name WHERE mm.task_number = #{taskNumber} AND md.`status` = 2 AND md.step = 1 UNION SELECT CASE md.`status` WHEN 0 THEN CONCAT('',dev.alias,md.execute_device_name,'-创建完成-任务起始地址-',md.from_unit,'-任务目的地址-',md.to_unit,'-',md.remark) ELSE CONCAT('',dev.alias,md.execute_device_name,'-创建完成-任务起始地址-',md.from_unit,'-任务目的地址-',md.to_unit) END AS title,md.`status`, md.create_time AS createTime, md.create_time AS finishTime FROM mls_task_d md LEFT JOIN mls_task_m mm ON mm.id =md.task_m_id LEFT JOIN wcs_basic_device dev ON md.execute_device_name = dev.device_name WHERE mm.task_number = #{taskNumber} AND md.step = 2 UNION SELECT CASE md.`status` WHEN 1 THEN CONCAT('',dev.alias,md.execute_device_name,'-开始执行-',md.remark) ELSE CONCAT('',dev.alias,md.execute_device_name,'-开始执行') END AS title, md.`status`, md.execute_time AS createTime, md.execute_time AS finishTime FROM mls_task_d md LEFT JOIN mls_task_m mm ON mm.id =md.task_m_id LEFT JOIN wcs_basic_device dev ON md.execute_device_name = dev.device_name WHERE mm.task_number = #{taskNumber} AND md.`status` >= 1 AND md.step = 2 UNION SELECT
CONCAT('',dev.alias,md.execute_device_name,'-任务完成-',md.remark) AS title, md.`status`, md.finish_time AS createTime, md.finish_time AS finishTime FROM mls_task_d md LEFT JOIN mls_task_m mm ON mm.id =md.task_m_id LEFT JOIN wcs_basic_device dev ON md.execute_device_name = dev.device_name WHERE mm.task_number = #{taskNumber} AND md.`status` = 2 AND md.step = 2 UNION SELECT concat(dev.alias,apc.device_name,',类型',apc.task_type) AS title, apc.`status`, apc.create_time as createTime, apc.finish_time as finishTime FROM apc_machine_hand_task apc LEFT JOIN wcs_basic_device dev ON apc.device_name = dev.device_name WHERE apc.task_number_list LIKE '${taskNumber},%' OR apc.task_number_list LIKE '${taskNumber},%' OR apc.task_number_list LIKE '${taskNumber},%' UNION SELECT CONCAT(case when cd.task_type = 1 then 'CTU_预任务' when cd.task_type = 2 then 'CTU_正式任务' when cd.task_type = 3 then 'CTU_允许取放' END,'_',if(cd.status=-1,cd.remark,'')) AS title, cd.`status`, cd.create_time as createTime, cd.finish_time as finishTime FROM ctu_task cm left join ctu_task_d cd ON cm.id = cd.task_m_id WHERE cm.task_number = #{taskNumber} UNION SELECT concat(if(ml.operation_type='A','创建任务','任务完成'),'_',ml.remark) AS title, if(ml.handle_status=0,0,2) AS STATUS, ml.req_time AS 'createTime', ml.update_time AS 'finishTime' FROM ti_mission_log ml LEFT JOIN ti_mission tm ON tm.id = ml.mission_id WHERE tm.task_number = #{taskNumber} union SELECT CONCAT( '月台提升机:',alc.device_name,'创建任务:','目的层:',alc.to) AS title, alc.status AS STATUS, alc.create_time AS 'createTime', alc.create_time AS 'finishTime' FROM alc_lifter_task alc WHERE alc.create_time IS NOT NULL AND alc.col1_task_number=#{taskNumber} or alc.col2_task_number=#{taskNumber} OR alc.col3_task_number=#{taskNumber} union SELECT CONCAT( '月台提升机:',alc.device_name,'执行任务:') AS title, alc.status AS STATUS, alc.execute_time AS 'createTime', alc.execute_time AS 'finishTime' FROM alc_lifter_task alc WHERE alc.execute_time IS NOT null AND alc.col1_task_number=#{taskNumber}
or alc.col2_task_number=#{taskNumber} OR alc.col3_task_number=#{taskNumber} union SELECT CONCAT( '月台提升机:',alc.device_name,'完成任务:') AS title, alc.status AS STATUS, alc.finish_time AS 'createTime', alc.finish_time AS 'finishTime' FROM alc_lifter_task alc WHERE alc.finish_time IS NOT NULL AND alc.col1_task_number=#{taskNumber} or alc.col2_task_number=#{taskNumber} OR alc.col3_task_number=#{taskNumber} union SELECT CONCAT( '装载月台:',t.device_name,',分配车辆:',t.plate,if( t.step=4,'装车完成','装车中')) AS title, t.status AS STATUS, M.create_time AS 'createTime', M.finish_time AS 'finishTime' FROM alc_task_m T,( SELECT alc.col1_container_no,alc.col2_container_no,alc.col3_container_no,create_time,finish_time FROM alc_lifter_task alc WHERE alc.finish_time IS NOT NULL AND alc.col1_task_number=#{taskNumber} OR alc.col2_task_number=#{taskNumber} OR alc.col3_task_number=#{taskNumber} )M WHERE find_IN_set (M.col1_container_no,container_no_list) or find_IN_set (M.col2_container_no,container_no_list) or find_IN_set (M.col3_container_no,container_no_list) ) AS temp ORDER BY temp.createTime </select>
最新发布
11-12
from pyspark.sql import SparkSession
from pyspark.sql.functions import avg, col, corr, count, sum, when
from functools import wraps


def save_results(output_path):
    """Decorator factory: write the DataFrame returned by the wrapped analysis
    function to ``output_path`` as JSON (overwriting previous output), then
    return the DataFrame to the caller unchanged."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            frame = func(*args, **kwargs)
            # NOTE(review): no coalesce is applied, so Spark may emit several
            # part files under output_path rather than a single JSON file.
            frame.write.json(output_path, mode='overwrite')
            return frame
        return wrapper
    return decorator


def create_spark_session():
    """Create (or reuse) the SparkSession for the NBA player analysis job."""
    return (
        SparkSession.builder
        .appName("NBA Player Analysis")
        .getOrCreate()
    )


def load_data(spark, file_path):
    """Read a headered CSV (schema inferred) into a Spark DataFrame."""
    return spark.read.csv(file_path, header=True, inferSchema=True)


@save_results("json_output/draft_number_vs_performance")
def analyze_draft_number_vs_performance(df):
    """Average points, rebounds and assists per draft position."""
    return (
        df.groupBy("draft_number")
        .agg(
            avg("pts").alias("avg_points"),
            avg("reb").alias("avg_rebounds"),
            avg("ast").alias("avg_assists"),
        )
        .orderBy("draft_number")
    )


@save_results("json_output/season_team_performance")
def analyze_season_team_performance(df):
    """Per (season, team) scoring/rebounding/assist averages, net rating and
    roster size."""
    return (
        df.groupBy("season", "team_abbreviation")
        .agg(
            avg("pts").alias("avg_points"),
            avg("reb").alias("avg_rebounds"),
            avg("ast").alias("avg_assists"),
            avg("net_rating").alias("avg_net_rating"),
            count("player_name").alias("player_count"),
        )
        .orderBy("season", "team_abbreviation")
    )


@save_results("json_output/team_gp_analysis")
def analyze_team_gp_stats(df):
    """Games-played (gp) statistics per team: mean, total, approximate median
    and player count."""
    from pyspark.sql.functions import expr

    return (
        df.groupBy("team_abbreviation")
        .agg(
            avg("gp").alias("avg_gp"),
            sum("gp").alias("total_gp"),
            # approximate median of games played
            expr("percentile_approx(gp, 0.5)").alias("median_gp"),
            count("player_name").alias("player_count"),
        )
        .orderBy("team_abbreviation")
    )


@save_results("json_output/draft_number_gp_analysis")
def analyze_draft_number_gp_stats(df):
    """Games-played profile per draft position: mean, approximate median,
    standard deviation and player count."""
    from pyspark.sql.functions import expr, stddev

    return (
        df.groupBy("draft_number")
        .agg(
            avg("gp").alias("avg_gp"),
            expr("percentile_approx(gp, 0.5)").alias("median_gp"),
            stddev("gp").alias("stddev_gp"),
            count("player_name").alias("player_count"),
        )
        .orderBy("draft_number")
    )


def main():
    """Run every analysis against the processed all-seasons dataset."""
    spark = create_spark_session()
    df = load_data(spark, "data/all_seasons_processed.csv")

    analyze_draft_number_vs_performance(df)
    analyze_season_team_performance(df)
    analyze_team_gp_stats(df)
    analyze_draft_number_gp_stats(df)

    spark.stop()


if __name__ == "__main__":
    main()
06-03
from pyspark.sql import SparkSession
from pyspark.sql.functions import avg, col, corr, count, sum, when
from functools import wraps


def save_results(output_path):
    """Decorator factory: write the DataFrame returned by the wrapped analysis
    function to ``output_path`` as JSON (overwriting previous output), then
    return the DataFrame to the caller unchanged."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            frame = func(*args, **kwargs)
            # NOTE(review): no coalesce is applied, so Spark may emit several
            # part files under output_path rather than a single JSON file.
            frame.write.json(output_path, mode='overwrite')
            return frame
        return wrapper
    return decorator


def create_spark_session():
    """Create (or reuse) the SparkSession for the NBA player analysis job."""
    return (
        SparkSession.builder
        .appName("NBA Player Analysis")
        .getOrCreate()
    )


def load_data(spark, file_path):
    """Read a headered CSV (schema inferred) into a Spark DataFrame."""
    return spark.read.csv(file_path, header=True, inferSchema=True)


@save_results("json_output/player_performance_by_age")
def analyze_player_performance_by_age(df):
    """Average points, assists and rebounds grouped by player age."""
    typed = df.select(
        col('age').cast('integer'),
        col('pts').cast('float'),
        col('ast').cast('float'),
        col('reb').cast('float'),
    )
    return (
        typed.groupBy('age')
        .agg(
            avg('pts').alias('avg_points'),
            avg('ast').alias('avg_assists'),
            avg('reb').alias('avg_rebounds'),
        )
        .orderBy('age')
    )


@save_results("json_output/correlations_over_time")
def analyze_correlations_over_time(df):
    """Per-season correlations between height/weight and assists/rebounds."""
    typed = df.select(
        col('season'),
        col('player_height').cast('float'),
        col('player_weight').cast('float'),
        col('ast').cast('float').alias('average_assists'),
        col('reb').cast('float').alias('average_rebounds'),
    )
    return (
        typed.groupBy('season')
        .agg(
            corr('player_height', 'average_rebounds').alias('height_rebounds_corr'),
            corr('player_height', 'average_assists').alias('height_assists_corr'),
            corr('player_weight', 'average_rebounds').alias('weight_rebounds_corr'),
            corr('player_weight', 'average_assists').alias('weight_assists_corr'),
        )
        .orderBy('season')
    )


@save_results("json_output/performance_by_university")
def analyze_performance_by_university(df):
    """Average scoring, assist and rebound numbers per college/university."""
    per_player = df.select(
        col('college').alias('university'),
        col('pts').cast('float').alias('points'),
        col('ast').cast('float').alias('assists'),
        col('reb').cast('float').alias('rebounds'),
    )
    return (
        per_player.groupBy('university')
        .agg(
            avg('points').alias('avg_points'),
            avg('assists').alias('avg_assists'),
            avg('rebounds').alias('avg_rebounds'),
        )
        .orderBy('university')
    )


@save_results("json_output/net_rating_by_draft_number")
def analyze_net_rating_by_draft_number(df):
    """Average net rating per draft position."""
    typed = df.select(
        col('draft_number').cast('integer'),
        col('net_rating').cast('float'),
    )
    return (
        typed.groupBy('draft_number')
        .agg(avg('net_rating').alias('avg_net_rating'))
        .orderBy('draft_number')
    )


# NOTE(review): this output path ends in ".json" unlike every other analysis,
# which writes to an extensionless directory name — confirm downstream
# consumers expect that before normalizing it.
@save_results("json_output/non_usa_players_proportion_by_season.json")
def analyze_non_usa_players_proportion_by_season(df):
    """Share of non-USA players per season."""
    flagged = df.withColumn(
        'is_non_usa',
        when(col('country') != 'USA', 1).otherwise(0),
    )
    per_season = flagged.groupBy('season').agg(
        count('*').alias('total_players'),
        sum('is_non_usa').alias('non_usa_players'),
    )
    return (
        per_season
        .withColumn('non_usa_proportion', col('non_usa_players') / col('total_players'))
        .select('season', 'non_usa_proportion')
        .orderBy('season')
    )


@save_results("json_output/height_age_by_season")
def analyze_height_age_by_season(df):
    """Average player height and age per season."""
    return (
        df.groupBy('season')
        .agg(
            avg('player_height').alias('avg_height'),
            avg('age').alias('avg_age'),
        )
        .orderBy('season')
    )


@save_results("json_output/top_colleges_by_stats")
def analyze_top_colleges_by_stats(df):
    """Top 50 colleges by each of total points, rebounds and assists, merged
    into one deduplicated frame."""
    totals = df.groupBy('college').agg(
        sum('pts').alias('total_points'),
        sum('reb').alias('total_rebounds'),
        sum('ast').alias('total_assists'),
    )
    leaders_points = totals.sort(totals.total_points.desc()).limit(50)
    leaders_rebounds = totals.sort(totals.total_rebounds.desc()).limit(50)
    leaders_assists = totals.sort(totals.total_assists.desc()).limit(50)
    # A college leading several categories appears once thanks to distinct().
    return leaders_points.union(leaders_rebounds).union(leaders_assists).distinct()


def main():
    """Run every analysis against the processed all-seasons dataset."""
    spark = create_spark_session()

    data_path: str = "data/all_seasons_processed.csv"  # Update this path
    df = load_data(spark, data_path)

    analyze_player_performance_by_age(df)
    analyze_correlations_over_time(df)
    analyze_performance_by_university(df)
    analyze_net_rating_by_draft_number(df)
    analyze_non_usa_players_proportion_by_season(df)
    analyze_height_age_by_season(df)
    analyze_top_colleges_by_stats(df)

    spark.stop()


if __name__ == "__main__":
    main()
06-03
评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值