Event.FRAME_CONSTRUCTED

This article looks at a problem in ActionScript: when switching frames with MovieClip's gotoAndStop method, getChildByName can return null. The fix is to register an event listener and retrieve the component in its callback, once the frame has actually been constructed.

The setup: _oPBox is a MovieClip with 3 frames, and frame 1 contains a TextField named txt_leftTime.

_oPBox.gotoAndStop(1);
var txtLeftTime:TextField = _oPBox.getChildByName("txt_leftTime") as TextField;

In some cases, I found that getChildByName failed: the object it returned was null.

I later tracked down the cause: when a MovieClip switches frames with gotoAndStop, the variables and contents of the target frame are reinitialized. If you call getChildByName before that initialization has finished, the lookup may come back null.

The fix is to register a FRAME_CONSTRUCTED listener before switching frames:

_oPBox.addEventListener(Event.FRAME_CONSTRUCTED, constructedHandler);

_oPBox.gotoAndStop(1);


Then, inside constructedHandler, fetch the field with _oPBox.getChildByName("txt_leftTime") as TextField. FRAME_CONSTRUCTED is dispatched after the constructors of the frame's display objects have run, so the child exists by the time the handler fires.
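
For reference, here is a minimal end-to-end sketch of the pattern described above. The null check, the text assignment, and the listener removal are illustrative additions, not part of the original snippet; FRAME_CONSTRUCTED fires on every frame construction, so detaching after the first use avoids repeated callbacks.

import flash.events.Event;
import flash.text.TextField;

// Register the listener BEFORE switching frames so the event is not missed.
_oPBox.addEventListener(Event.FRAME_CONSTRUCTED, constructedHandler);
_oPBox.gotoAndStop(1);

function constructedHandler(e:Event):void {
    // By the time FRAME_CONSTRUCTED is dispatched, the frame's children exist.
    var txtLeftTime:TextField = _oPBox.getChildByName("txt_leftTime") as TextField;
    if (txtLeftTime != null) {
        txtLeftTime.text = "10"; // hypothetical use; not in the original post
    }
    // FRAME_CONSTRUCTED fires on every frame construction, so detach after the first hit.
    _oPBox.removeEventListener(Event.FRAME_CONSTRUCTED, constructedHandler);
}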
