设置type="range"为vertical

本文详细介绍了如何通过CSS属性-webkit-appearance设置input type='range'为垂直显示,并提供了相关链接以供进一步学习。

摘要生成于 C知道,由 DeepSeek-R1 满血版支持,前往体验 >

我的pyecharts为1.9.0 --------------------------------------------------------------------------- TypeError Traceback (most recent call last) Cell In[52], line 33 16 mark_x_value = x_data[mark_x_index] 18 # 添加垂直于X轴的标记线(从y=0到最大y值) 19 line_chart.set_global_opts( 20 title_opts=opts.TitleOpts(title="电池温度监测图表(带垂直线标记)"), 21 tooltip_opts=opts.TooltipOpts(trigger="axis"), 22 toolbox_opts=opts.ToolboxOpts(is_show=True), 23 xaxis_opts=opts.AxisOpts(type_="category"), 24 yaxis_opts=opts.AxisOpts(type_="value"), 25 datazoom_opts=[ 26 opts.DataZoomOpts(type_="slider", range_start=0, range_end=100), 27 opts.DataZoomOpts(type_="inside", range_start=0, range_end=100), 28 ], 29 # 添加标记线配置 30 # 修改MarkLineItem配置,添加type_='vertical'明确垂直线类型 31 markline_opts=opts.MarkLineOpts( 32 data=[ ---> 33 opts.MarkLineItem( 34 type_='vertical', # 关键:声明垂直线类型 35 x=mark_x_value, 36 symbol="none", 37 linestyle_opts=opts.LineStyleOpts( 38 color="#FF0000", 39 width=2, 40 type_="dashed" 41 ), 42 label_opts=opts.LabelOpts( 43 position="insideStartTop", 44 formatter=f"标记点: {mark_x_value}" 45 ) 46 ) 47 ] 48 ) 49 ) 52 # # 全局设置 53 # line_chart.set_global_opts( 54 # title_opts=opts.TitleOpts(title="信号折线图"), # 图表标题 (...) 62 # ] 63 # ) 65 line_chart.render("./result_analyze_map/"+vehicle_no+"_basic_map.html") TypeError: __init__() got an unexpected keyword argument 'linestyle_opts'
07-18
07-18
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

import os
import argparse

import torch

from src.loader import load_images, DataSampler
from src.utils import initialize_exp, bool_flag, attr_flag, check_attr
from src.model import AutoEncoder, LatentDiscriminator, PatchDiscriminator, Classifier
from src.training import Trainer
from src.evaluation import Evaluator


# Command-line interface: every hyper-parameter of the experiment is an
# argparse option so runs are fully reproducible from the command line.
parser = argparse.ArgumentParser(description='Images autoencoder')

# -- experiment / data ------------------------------------------------------
parser.add_argument("--name", type=str, default="default",
                    help="Experiment name")
parser.add_argument("--img_sz", type=int, default=256,
                    help="Image sizes (images have to be squared)")
parser.add_argument("--img_fm", type=int, default=3,
                    help="Number of feature maps (1 for grayscale, 3 for RGB)")
parser.add_argument("--attr", type=attr_flag, default="Smiling,Male",
                    help="Attributes to classify")

# -- architecture -----------------------------------------------------------
parser.add_argument("--instance_norm", type=bool_flag, default=False,
                    help="Use instance normalization instead of batch normalization")
parser.add_argument("--init_fm", type=int, default=32,
                    help="Number of initial filters in the encoder")
parser.add_argument("--max_fm", type=int, default=512,
                    help="Number maximum of filters in the autoencoder")
parser.add_argument("--n_layers", type=int, default=6,
                    help="Number of layers in the encoder / decoder")
parser.add_argument("--n_skip", type=int, default=0,
                    help="Number of skip connections")
parser.add_argument("--deconv_method", type=str, default="convtranspose",
                    help="Deconvolution method")
parser.add_argument("--hid_dim", type=int, default=512,
                    help="Last hidden layer dimension for discriminator / classifier")
parser.add_argument("--dec_dropout", type=float, default=0.,
                    help="Dropout in the decoder")
parser.add_argument("--lat_dis_dropout", type=float, default=0.3,
                    help="Dropout in the latent discriminator")

# -- adversarial training schedule ------------------------------------------
parser.add_argument("--n_lat_dis", type=int, default=1,
                    help="Number of latent discriminator training steps")
parser.add_argument("--n_ptc_dis", type=int, default=0,
                    help="Number of patch discriminator training steps")
parser.add_argument("--n_clf_dis", type=int, default=0,
                    help="Number of classifier discriminator training steps")
parser.add_argument("--smooth_label", type=float, default=0.2,
                    help="Smooth label for patch discriminator")

# -- loss coefficients ------------------------------------------------------
parser.add_argument("--lambda_ae", type=float, default=1,
                    help="Autoencoder loss coefficient")
parser.add_argument("--lambda_lat_dis", type=float, default=0.0001,
                    help="Latent discriminator loss feedback coefficient")
parser.add_argument("--lambda_ptc_dis", type=float, default=0,
                    help="Patch discriminator loss feedback coefficient")
parser.add_argument("--lambda_clf_dis", type=float, default=0,
                    help="Classifier discriminator loss feedback coefficient")
parser.add_argument("--lambda_schedule", type=float, default=500000,
                    help="Progressively increase discriminators' lambdas (0 to disable)")

# -- data augmentation ------------------------------------------------------
parser.add_argument("--v_flip", type=bool_flag, default=False,
                    help="Random vertical flip for data augmentation")
parser.add_argument("--h_flip", type=bool_flag, default=True,
                    help="Random horizontal flip for data augmentation")

# -- optimization -----------------------------------------------------------
parser.add_argument("--batch_size", type=int, default=32,
                    help="Batch size")
parser.add_argument("--ae_optimizer", type=str, default="adam,lr=0.0002",
                    help="Autoencoder optimizer (SGD / RMSprop / Adam, etc.)")
parser.add_argument("--dis_optimizer", type=str, default="adam,lr=0.0002",
                    help="Discriminator optimizer (SGD / RMSprop / Adam, etc.)")
parser.add_argument("--clip_grad_norm", type=float, default=5,
                    help="Clip gradient norms (0 to disable)")
parser.add_argument("--n_epochs", type=int, default=1000,
                    help="Total number of epochs")
parser.add_argument("--epoch_size", type=int, default=50000,
                    help="Number of samples per epoch")

# -- checkpoint reloading ---------------------------------------------------
parser.add_argument("--ae_reload", type=str, default="",
                    help="Reload a pretrained encoder")
parser.add_argument("--lat_dis_reload", type=str, default="",
                    help="Reload a pretrained latent discriminator")
parser.add_argument("--ptc_dis_reload", type=str, default="",
                    help="Reload a pretrained patch discriminator")
parser.add_argument("--clf_dis_reload", type=str, default="",
                    help="Reload a pretrained classifier discriminator")
parser.add_argument("--eval_clf", type=str, default="",
                    help="Load an external classifier for evaluation")
parser.add_argument("--debug", type=bool_flag, default=False,
                    help="Debug mode (only load a subset of the whole dataset)")

params = parser.parse_args()

# Validate the parsed configuration before doing any expensive work.
check_attr(params)
assert len(params.name.strip()) > 0
assert params.n_skip <= params.n_layers - 1
assert params.deconv_method in ['convtranspose', 'upsampling', 'pixelshuffle']
assert 0 <= params.smooth_label < 0.5
# Reload paths are optional, but when given they must exist on disk.
assert not params.ae_reload or os.path.isfile(params.ae_reload)
assert not params.lat_dis_reload or os.path.isfile(params.lat_dis_reload)
assert not params.ptc_dis_reload or os.path.isfile(params.ptc_dis_reload)
assert not params.clf_dis_reload or os.path.isfile(params.clf_dis_reload)
# The evaluation classifier is mandatory: this assert fails on the
# default empty string, so --eval_clf must always be supplied.
assert os.path.isfile(params.eval_clf)
# A non-zero loss coefficient requires at least one training step.
assert params.lambda_lat_dis == 0 or params.n_lat_dis > 0
assert params.lambda_ptc_dis == 0 or params.n_ptc_dis > 0
assert params.lambda_clf_dis == 0 or params.n_clf_dis > 0

# Initialize the experiment and load the dataset (train / valid splits).
logger = initialize_exp(params)
data, attributes = load_images(params)
train_data = DataSampler(data[0], attributes[0], params)
valid_data = DataSampler(data[1], attributes[1], params)

# Build the model. Each discriminator is only instantiated when its
# number of training steps is non-zero.
ae = AutoEncoder(params).cuda()
if params.n_lat_dis:
    lat_dis = LatentDiscriminator(params).cuda()
else:
    lat_dis = None
if params.n_ptc_dis:
    ptc_dis = PatchDiscriminator(params).cuda()
else:
    ptc_dis = None
if params.n_clf_dis:
    clf_dis = Classifier(params).cuda()
else:
    clf_dis = None
eval_clf = torch.load(params.eval_clf).cuda().eval()

# Trainer / evaluator wrappers around the model components.
trainer = Trainer(ae, lat_dis, ptc_dis, clf_dis, train_data, params)
evaluator = Evaluator(ae, lat_dis, ptc_dis, clf_dis, eval_clf, valid_data, params)

# Main training loop: alternate discriminator steps and autoencoder
# steps within each epoch, then evaluate and checkpoint.
for n_epoch in range(params.n_epochs):

    logger.info('Starting epoch %i...' % n_epoch)

    for n_iter in range(0, params.epoch_size, params.batch_size):

        # latent discriminator training
        for _ in range(params.n_lat_dis):
            trainer.lat_dis_step()

        # patch discriminator training
        for _ in range(params.n_ptc_dis):
            trainer.ptc_dis_step()

        # classifier discriminator training
        for _ in range(params.n_clf_dis):
            trainer.clf_dis_step()

        # autoencoder training
        trainer.autoencoder_step()

        # print training statistics
        trainer.step(n_iter)

    # run all evaluations / save best or periodic model
    to_log = evaluator.evaluate(n_epoch)
    trainer.save_best_periodic(to_log)
    logger.info('End of epoch %i.\n' % n_epoch)

# NOTE(review): the original post asked "are there any paths that need
# changing?" — from the asserts above, only --eval_clf is required to
# point to an existing file; the *_reload paths may stay empty.
05-30
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值