跟踪算法评测平台使用总结
注:适用于Python算法。
1.OTB平台
准备:
-
OTB数据集:OTB50,OTB2013,OTB2015(OTB100)
- 小知识:OTB50和OTB2013包含的视频序列不同
-
OTBjson文件:https://pan.baidu.com/s/1pLyDFVj2vaGC3tbxTCxgHA
提取码:ijt0
-
Pysot测试程序包(非官方但可用,官方没看懂): https://github.com/StrangerZhang/pysot-toolkit (可以直接下载,也可以克隆仓库)
开始:
解压pysot-toolkit
后的文件结构:
-
环境准备
git clone https://github.com/StrangerZhang/pysot-toolkit pip install -r requirements.txt cd pysot\\utils\\ python setup.py build_ext --inplace # 这一步很重要 # if you need to draw graph, you need latex installed on your system
-
数据集文件结构
Download json files used in our toolkit baidu pan or Google Drive
-
Put CVPR13.json, OTB100.json, OTB50.json in OTB100 dataset directory (you need to copy Jogging to Jogging-1 and Jogging-2, and copy Skating2 to Skating2-1 and Skating2-2 or using softlink)
The directory should have the below format
| – OTB100/
| – Basketball
| …
| – Woman
| – OTB100.json
| --OTB50/
| – Basketball
| …
| – Woman
| – OTB50.json
| – VOT2016/
| – bag
| …
| – wiper
| – VOT2016.json
…
-
Put all other jsons in the dataset directory like in step 1
-
-
1.1. Evaluation on OTB100(UAV123, NFS, LaSOT)
converted *.txt tracking results will be released soon
OTB100和OTB50*.txt跟踪结果:
链接:https://pan.baidu.com/s/13jeOhKTxswsJ6kSJzoavVg
提取码:wwmg
cd path\\pysot-toolkit python bin\\eval.py
--dataset_dir path\\OTB100\\ # dataset path
--dataset OTB100 # dataset name(OTB100, UAV123, NFS, LaSOT)
--tracker_result_dir path\\results # tracker dir
--trackers SiamRPN++ C-COT DaSiamRPN ECO # tracker names 在路径下要有相关tracker的txt跟踪结果
--num 4 # evaluation thread
--show_video_level # whether to show video results
--vis # draw graph
# --vis是画图用的,如果latex相关的包没装的话,可以先不用这个命令
# you will see (Normalized Precision not used in OTB evaluation)
-----------------------------------------------------
|Tracker name| Success | Norm Precision | Precision |
-----------------------------------------------------
| SiamRPN++ | 0.696 | 0.000 | 0.914 |
| ECO | 0.691 | 0.000 | 0.910 |
| C-COT | 0.671 | 0.000 | 0.898 |
| DaSiamRPN | 0.658 | 0.000 | 0.880 |
-----------------------------------------------------
-----------------------------------------------------------------------------------------
| Tracker name | SiamRPN++ | DaSiamRPN | ECO |
-----------------------------------------------------------------------------------------
| Video name | success | precision | success | precision | success | precision |
-----------------------------------------------------------------------------------------
| Basketball | 0.423 | 0.555 | 0.677 | 0.865 | 0.653 | 0.800 |
| Biker | 0.728 | 0.932 | 0.319 | 0.448 | 0.506 | 0.832 |
| Bird1 | 0.207 | 0.360 | 0.274 | 0.508 | 0.192 | 0.302 |
| Bird2 | 0.629 | 0.742 | 0.604 | 0.697 | 0.775 | 0.882 |
| BlurBody | 0.823 | 0.879 | 0.759 | 0.767 | 0.713 | 0.894 |
| BlurCar1 | 0.803 | 0.917 | 0.837 | 0.895 | 0.851 | 0.934 |
| BlurCar2 | 0.864 | 0.926 | 0.794 | 0.872 | 0.883 | 0.931 |
......
| Vase | 0.564 | 0.698 | 0.554 | 0.742 | 0.544 | 0.752 |
| Walking | 0.761 | 0.956 | 0.745 | 0.932 | 0.709 | 0.955 |
| Walking2 | 0.362 | 0.476 | 0.263 | 0.371 | 0.793 | 0.941 |
| Woman | 0.615 | 0.908 | 0.648 | 0.887 | 0.771 | 0.936 |
-----------------------------------------------------------------------------------------
OTB100 Success Plot | OTB100 Precision Plot |
---|---|
![]() | ![]() |
更新日期:2020-11-07
需要安装matlab
step1: 获取pysot
平台中跑出的结果
a:在pysot-toolkit中找到draw_success_precision.py文件
# 文件包含关系如下
pysot-toolkit -> pysot -> visualization -> draw_success_precision.py
b:找到结果输出位置
def _append_json_record(filename, record):
    """Append *record* to the JSON list stored in *filename*.

    If the file does not exist yet, or does not hold valid JSON, a fresh
    list is started — the file no longer has to be pre-seeded with "[]".
    """
    try:
        with open(filename, 'r', encoding='utf-8') as f:
            items = json.load(f)
    except (IOError, OSError, ValueError):
        items = []
    items.append(record)
    # BUG FIX: the original code passed encoding='utf=8' (a typo) to open(),
    # which raises "LookupError: unknown encoding"; 'utf-8' is correct.
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(items, f, ensure_ascii=False)


def draw_success_precision(success_ret, name, videos, attr, precision_ret=None,
                           norm_precision_ret=None, bold_name=None, axis=[0, 1]):
    """Draw OPE success/precision plots and persist the curves as JSON.

    Parameters
    ----------
    success_ret : dict
        tracker name -> {video name -> per-threshold success curve}.
    name : str
        Dataset name, used in the title when ``attr == 'ALL'``.
    videos : iterable
        Video names to average over.
    attr : str
        Attribute name ('ALL' for the whole dataset).
    precision_ret : dict or None
        Same layout as *success_ret* with precision curves; skipped if None.
    norm_precision_ret : unused
        Kept only for interface compatibility with the pysot toolkit.
    bold_name : str or None
        Tracker whose legend entry is rendered in bold.
    axis : list
        Extra y-range appended to the fixed x-range.  The mutable default is
        safe here because the list is only read, never mutated.

    Side effects: shows two matplotlib figures and appends the plotted curves
    to accuracy_result.json / precision_result.json in the working directory.
    """
    # ---------------- success plot ----------------
    fig, ax = plt.subplots()
    ax.grid(True)
    ax.set_aspect(1)
    plt.xlabel('Overlap threshold')
    plt.ylabel('Success rate')
    if attr == 'ALL':
        plt.title(r'\textbf{Success plots of OPE on %s}' % (name))
    else:
        plt.title(r'\textbf{Success plots of OPE - %s}' % (attr))
    plt.axis([0, 1] + axis)
    success = {}
    thresholds = np.arange(0, 1.05, 0.05)
    for tracker_name in success_ret.keys():
        value = [v for k, v in success_ret[tracker_name].items() if k in videos]
        success[tracker_name] = np.mean(value)
    # Trackers are drawn in descending AUC order so the legend is ranked.
    for idx, (tracker_name, auc) in enumerate(
            sorted(success.items(), key=lambda x: x[1], reverse=True)):
        if tracker_name == bold_name:
            label = r"\textbf{[%.3f] %s}" % (auc, tracker_name)
        else:
            label = "[%.3f] " % (auc) + tracker_name
        value = [v for k, v in success_ret[tracker_name].items() if k in videos]
        # Persist the accuracy curve so it can be re-plotted later
        # (e.g. with the MATLAB script further down in this document).
        _append_json_record("accuracy_result.json",
                            {'video_attr': attr,
                             'tracker_name': tracker_name,
                             'accuracy': auc,
                             'thresholds': thresholds.tolist(),
                             'mean_value': np.mean(value, axis=0).tolist()})
        plt.plot(thresholds, np.mean(value, axis=0), color=COLOR[idx],
                 linestyle=LINE_STYLE[idx], label=label, linewidth=2)
    ax.legend(loc='lower left', labelspacing=0.2)
    ax.autoscale(enable=True, axis='both', tight=True)
    xmin, xmax, ymin, ymax = plt.axis()
    ax.autoscale(enable=False)
    ymax += 0.03
    ymin = 0
    plt.axis([xmin, xmax, ymin, ymax])
    plt.xticks(np.arange(xmin, xmax + 0.01, 0.1))
    plt.yticks(np.arange(ymin, ymax, 0.1))
    ax.set_aspect((xmax - xmin) / (ymax - ymin))
    plt.show()

    if precision_ret:
        # ---------------- precision plot ----------------
        fig, ax = plt.subplots()
        ax.grid(True)
        ax.set_aspect(50)
        plt.xlabel('Location error threshold')
        plt.ylabel('Precision')
        if attr == 'ALL':
            plt.title(r'\textbf{Precision plots of OPE on %s}' % (name))
        else:
            plt.title(r'\textbf{Precision plots of OPE - %s}' % (attr))
        plt.axis([0, 50] + axis)
        precision = {}
        thresholds = np.arange(0, 51, 1)
        for tracker_name in precision_ret.keys():
            value = [v for k, v in precision_ret[tracker_name].items() if k in videos]
            # Rank trackers by precision at the conventional 20-pixel threshold.
            precision[tracker_name] = np.mean(value, axis=0)[20]
        for idx, (tracker_name, pre) in enumerate(
                sorted(precision.items(), key=lambda x: x[1], reverse=True)):
            if tracker_name == bold_name:
                label = r"\textbf{[%.3f] %s}" % (pre, tracker_name)
            else:
                label = "[%.3f] " % (pre) + tracker_name
            value = [v for k, v in precision_ret[tracker_name].items() if k in videos]
            # Persist the precision curve alongside the plot.
            _append_json_record("precision_result.json",
                                {'video_attr': attr,
                                 'tracker_name': tracker_name,
                                 'precision': pre,
                                 'location_error': thresholds.tolist(),
                                 'mean_value': np.mean(value, axis=0).tolist()})
            plt.plot(thresholds, np.mean(value, axis=0),
                     color=COLOR[idx], linestyle=LINE_STYLE[idx], label=label, linewidth=2)
        ax.legend(loc='lower right', labelspacing=0.2)
        ax.autoscale(enable=True, axis='both', tight=True)
        xmin, xmax, ymin, ymax = plt.axis()
        ax.autoscale(enable=False)
        ymax += 0.03
        ymin = 0
        plt.axis([xmin, xmax, ymin, ymax])
        plt.xticks(np.arange(xmin, xmax + 0.01, 5))
        plt.yticks(np.arange(ymin, ymax, 0.1))
        ax.set_aspect((xmax - xmin) / (ymax - ymin))
        plt.show()
保存后的json文件如下所示。
以otb100的accuracy
为例
[
{
"video_attr": "ALL",
"tracker_name": "DaSiamRPN",
"accuracy": 0.6578104106074384,
"thresholds": [
0.0,
0.05,
0.1,
0.15000000000000002,
0.2,
0.25,
0.30000000000000004,
0.35000000000000003,
0.4,
0.45,
0.5,
0.55,
0.6000000000000001,
0.65,
0.7000000000000001,
0.75,
0.8,
0.8500000000000001,
0.9,
0.9500000000000001,
1.0
],
"mean_value": [
0.9442233914404641,
0.9386702705241673,
0.9358224717968006,
0.9329490228341751,
0.9294467194126408,
0.9242290436430551,
0.918775949005531,
0.9120206475895232,
0.9030748049982816,
0.8892517872628702,
0.8651162378188131,
0.8221880079818227,
0.7589487728879333,
0.672489943960896,
0.5608077548971072,
0.42703502833315193,
0.2779701329365757,
0.1420961105674596,
0.05026509393708969,
0.008637430927851134,
0.0
]
},
...
]
百度云链接:
https://pan.baidu.com/s/1tg5MV7wbHTntMFEuJNR7uA
链接:https://pan.baidu.com/s/1tg5MV7wbHTntMFEuJNR7uA
提取码:fa35
# 算法 发表日期 OTB100 OTB50
CFNet 2017 √ √
DaSiamRPN 2018 √ √
DeepSRDCF 2015 √ √
GradNet 2019 √ √
SiamDWfc 2019 √ √
SiamDWrpn 2019 × ×
SiamFC 2016 √ √
SiamRPN 2018 √ √
Staple 2016 √ √
SRDCF 2015 √ √
fDSST 2017 √ √
step2: 在matlab
中画图 ^ _ ^,matlab总是能画出高清矢量图
把thresholds
作为x轴坐标 mean_value
作为y轴坐标,accuracy
作为图例中的参数,matlab代码如下所示
注意:我用的是笨方法(一个一个导入,可费劲了 -_-),逐条输入数据。你们可以让matlab自动读入数据,自动画图。
% Plot OTB100 success curves in MATLAB from the values exported to
% accuracy_result.json: x holds the overlap thresholds, each y_i a tracker's
% mean success-rate curve, and aucs the legend AUC scores.
% NOTE(review): aucs and the y{...} cell must stay aligned with tracker_names.
x = [0.0, 0.05, 0.1,0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1];
% DaSiamRPN
y1 = [0.9442233914404641,0.9386702705241673,0.9358224717968006,0.9329490228341751,0.9294467194126408,0.9242290436430551,0.918775949005531,0.9120206475895232,0.9030748049982816,0.8892517872628702,0.8651162378188131,0.8221880079818227,0.7589487728879333,0.672489943960896,0.5608077548971072,0.42703502833315193,0.2779701329365757,0.1420961105674596,0.05026509393708969,0.008637430927851134,0.0];
% GradNet
y2 = [0.9263890592030127,0.9218735240699784,0.9180412466836074,0.9127812779935524,0.9041223754825147,0.8966298031432164,0.8853508338000691,0.87057875287613,0.8513960351013828,0.8289370631565266,0.7991511226444641,0.7555322219703103,0.7060119546890604,0.6390773684333284,0.5568406090656736,0.4486608397681775,0.3164612695936204,0.18370840425201745,0.07358659837845453,0.017753140520270425,0.0];
% DeepSRDCF
y3 = [0.9295271862209883,0.923564608930089,0.9156696191746453,0.9064032039074287,0.8917670371030297,0.8790174777978871,0.863456570635697,0.8448654214894227,0.8254515139185511,0.802168940787343,0.7727848765724907,0.7333073463110571,0.6851679976700313,0.6275305128569844,0.554241793966035,0.4670845219748036,0.36238573466845286,0.2340082307211255,0.10137347730290586,0.022128601347098976,0.0];
% SiamRPN
y4 = [0.9326766663901934,0.9255093274885584,0.9192133043519823,0.9136078036834928,0.9074477960737707,0.8989501070538686,0.8912679201132064,0.882685993915506,0.8686661593995951,0.8444571822190547,0.8133285089505476,0.7706641617556328,0.71102371407216,0.6227243722020586,0.5122682209199629,0.3828773582703887,0.24408493798654715,0.12331002199308587,0.03778983976289278,0.004494551749426426,0.0];
% SiamDWfc
y5 = [0.9073859407131067,0.9010755773457866,0.8959148900672925,0.8903851976534368,0.8838893031816778,0.8772825676028024,0.8680768769635214,0.8558826013609128,0.8382919876499139,0.8156116434308225,0.7864772972635317,0.7480002308650145,0.6934843482246645,0.620176933653182,0.5395945497875223,0.4402527933461412,0.31911536339025137,0.19337848020348197,0.07978362337400152,0.016731017267435408,0.0];
% SRDCF
y6 = [0.8668064582004582,0.8607457284994612,0.8558321351852903,0.8476239199593887,0.8377001002853492,0.8275946792019205,0.8166957682883003,0.8005720423400038,0.7794128247362038,0.7537710822057495,0.7286118600843976,0.6981424274681797,0.6592349773321172,0.606291581417098,0.5340779001665525,0.4437965132347132,0.3353806626049053,0.20333498782970838,0.08281285987756977,0.01918824189997814,0.0];
% CFNet
y7 = [0.8545821143325211,0.8505685608306334,0.8472334743196367,0.8424287875916306,0.8360067630585298,0.828284405476835,0.8143046383609033,0.8011905308206508,0.7816948675237534,0.7574987291741957,0.7280635057572741,0.6936511578938139,0.6444722079337629,0.5757790611801646,0.4947852743544796,0.3984019876580399,0.2908731675612074,0.18312170057556673,0.08390257095991266,0.021383078431070488,0.0];
% SiamFC
y8 = [0.8545171995952235,0.8469311400179531,0.8418608927151742,0.8360257556280485,0.8308315196594619,0.8233318811647429,0.8114615239678525,0.798049266221633,0.7799392516263487,0.7574352478717283,0.7306011065744853,0.689176637957252,0.6418671593563158,0.5848415673533651,0.5124502884105385,0.4166055475262083,0.3008903455058429,0.17838459825278036,0.06776497508907947,0.013871717907547624,0.0];
% Staple
y9 = [0.851514531428886,0.8429061886397691,0.8358422111901332,0.8280623836473364,0.8182027104586048,0.8081127769778973,0.7915709685374521,0.7717263149451264,0.7517459480384794,0.7262557772735901,0.699110956929411,0.6645317809619083,0.6287365415888684,0.5804212891127789,0.5154574757678585,0.42398714524561626,0.3113226826739766,0.1857374881736212,0.07585342411376178,0.019092064943305845,0.0];
% fDSST
y10 = [0.7840765750133489,0.7726510881012206,0.758088268642461,0.7469882478399419,0.7335544633487623,0.7165357934898212,0.6970075626360174,0.6759587348448904,0.6530882882330847,0.6299143718517752,0.6058091574814283,0.5805499117014249,0.5491592397632852,0.5115603084178555,0.46052010438209545,0.38926357137032286,0.2968780547682986,0.18956815141711864,0.08922156350324331,0.02449715223862947,0.0];
y = {y1,y2, y3, y4, y5, y6, y7, y8, y9, y10};
% NOTE(review): 'Staple' 'fDSST' are space-separated — valid in a MATLAB cell
% literal, but a comma would be clearer.
tracker_names = { 'DaSiamRPN', 'GradNet', 'DeepSRDCF', 'SiamRPN', 'SiamDWfc', 'SRDCF', 'CFNet', 'SiamFC', 'Staple' 'fDSST'};
aucs = [0.658 0.639, 0.635, 0.629, 0.627, 0.598, 0.587, 0.587, 0.578, 0.517];
lines_style = {'-', '--', ':', '-', '--', ':', '-', '--', ':', '-', '--', ':', '-', '--', ':'};
lines_color = {[1, 0, 0 ], [0, 1, 0], [1, 0, 1],[1, 1, 0],[0, 162/255, 232/255],[0.5, 0.5, 0.5], [0, 0, 1],[0, 1, 1],[136/255, 0, 21/255], [255/255, 127/255, 39/255], [0,0,0],[0, 150/255, 150/255], [150/255, 0, 150/255], [150/255, 150/255, 0], [150/255, 120/255, 110/255]};
% line_pro = {'r-', 'g--', 'b:', 'c:.', 'm-', 'y--', 'k:', 'g:', 'b--', 'c--', 'y-'};
legend_str = {}; % cell array that accumulates the legend entries
for i=1:length(tracker_names)
% look up this tracker's line style and colour (unwrap the 1x1 cell)
style_line = lines_style(i);
style_line = style_line{1};
color_line = lines_color(i);
color_line = color_line{1};
plot(x,y{1,i},'Color', color_line, 'LineStyle', style_line, 'LineWidth', 2);
% legend entry looks like " [0.658]DaSiamRPN"
legend_str(end+1) = strcat(' [', num2str(aucs(i)),']', tracker_names(i));
hold on;
end
% Emit the legend once after the loop; calling legend() inside the loop
% would let each call overwrite the entries of the previous ones.
legend(legend_str, 'Location','SouthWest');
grid on;
set(gca,'xtick', 0:0.1:1);
set(gca,'ytick', 0:0.1:1);
set(gca, 'FontName', '微软雅黑', 'FontSize',8.5);
xlabel('重叠率阈值','FontName','微软雅黑');
ylabel('成功率','FontName','微软雅黑');
step3: 将matlab图保存为emf
格式,然后复制到word中就可以得到高清图了(用电脑看图软件打开是模糊的,但是复制到word后是高清图)。类似地可以输出precision
图,需要更改matlab中的x坐标和y坐标,还有图例中的变量值和tracker名字。
PS: VOT评测图暂时我没画,用表格数据对比吧。
On the way
2. VOT平台
注:目前Pysot我没有完全看懂,EAO(Expected Average Overlap)值输出有问题,只能输出A(Accuracy)和R(Robustness)。
EAO值只能用官方给的输出:QAQ
官方工具包链接: https://github.com/votchallenge/vot-toolkit (配置的时候有可能有问题)
我是在Ubuntu下的Matlab用的VOT包,我的优快云博客: https://blog.youkuaiyun.com/qq_29894613/article/details/101172584
准备:和OTB准备一样
开始:
cd path\\pysot-toolkit python bin\\eval.py
--dataset_dir path\\VOT2016\\ # dataset path
--dataset VOT2016 # dataset name(VOT2018, VOT2016)
--tracker_result_dir path\\results\\ # tracker dir
--trackers ECO UPDT SiamRPNpp # tracker names 在路径下要有相关tracker的txt跟踪结果
# you will see
------------------------------------------------------------
|Tracker Name| Accuracy | Robustness | Lost Number | EAO |
------------------------------------------------------------
| SiamRPNpp | 0.600 | 0.234 | 50.0 | 0.415 |
| UPDT | 0.536 | 0.184 | 39.2 | 0.378 |
| ECO | 0.484 | 0.276 | 59.0 | 0.280 |
------------------------------------------------------------
注意:我运行这段代码时会出现问题,我改了以下的代码:
# 代码所在的位置为pysot-toolkit\\pysot\\datasets\\vot.py
# class VOTVideo -> def load_tracker() -> 75行
# Original code
for name in tracker_names:
    traj_files = glob(os.path.join(path, name, 'baseline', self.name, '*0*.txt'))
# Modified code: keep the '*.txt' pattern (the original '*0*.txt' misses result
# files whose names contain no zero), but build the path with os.path.join
# instead of manual '/' concatenation so it stays portable across platforms.
for name in tracker_names:
    traj_files = glob(os.path.join(path, name, 'baseline', self.name, '*.txt'))
3.生成自己tracker的*.txt文件
注:OTB平台和VOT的*.txt文件都可以在Pysot工具中生成。
代码格式:
from pysot.datasets import DatasetFactory
from pysot.utils.region import vot_overlap
# Template: run a tracker over a pysot dataset and collect per-frame results
# in the VOT convention (1 = init frame, 2 = failure, 0 = skipped frame,
# otherwise the predicted box).
dataset = DatasetFactory.create_dataset(name=dataset_name,
                                        dataset_root=dataset_root,  # fixed typo: was 'datset_root'
                                        load_img=False)
frame_counter = 0
pred_bboxes = []
for video in dataset:
    for idx, (img, gt_bbox) in enumerate(video):
        if idx == frame_counter:
            # init your tracker here
            # fixed: was 'pred_bbox.append(1)' — the list is 'pred_bboxes'
            pred_bboxes.append(1)
        elif idx > frame_counter:
            # get tracking result here
            pred_bbox = ...  # <- replace with your tracker's predicted box
            overlap = vot_overlap(pred_bbox, gt_bbox, (img.shape[1], img.shape[0]))
            if overlap > 0:
                # continue tracking
                pred_bboxes.append(pred_bbox)
            else:
                # lost target: mark the failure and restart 5 frames later
                pred_bboxes.append(2)
                frame_counter = idx + 5
        else:
            # frames between a failure and the next re-initialisation
            pred_bboxes.append(0)
自己的tracker按照上面的代码格式写,然后就可以在相关目录下生成*.txt文件
下面是我的代码:
import glob
from os.path import join, realpath, dirname
import numpy as np
import cv2
import os
from defaults import _C as cfg
import time
import torch
from tadt_tracker import Tadt_Tracker
from backbone_v2 import build_vgg16
from pysot.datasets import DatasetFactory
# VOT和OTB的ground truth表示不一样。OTB为左上角坐标和w,h VOT为四个点的坐标
# 此函数为转化函数,嘻嘻,在网上找的,忘了出自哪里了。
def get_axis_aligned_bbox(region):
    """Convert a VOT region to an axis-aligned box (x, y, w, h).

    OTB ground truth is (top-left x, top-left y, width, height), while VOT
    ground truth is a rotated quadrilateral given as 8 coordinates
    (x1, y1, ..., x4, y4).  For an 8-value region the returned box is centred
    on the polygon centroid and scaled so its area approximates the rotated
    rectangle's area (the standard VOT-toolkit conversion).

    Parameters
    ----------
    region : sequence of 4 or 8 numbers (list, tuple or ndarray)

    Returns
    -------
    (x, y, w, h) : top-left corner, width and height
    """
    # np.asarray generalizes the original list-only conversion: tuples and
    # ndarrays are accepted too, with identical results for list input.
    region = np.asarray(region)
    if region.size == 8:
        cx = np.mean(region[0::2])
        cy = np.mean(region[1::2])
        x1 = min(region[0::2])
        x2 = max(region[0::2])
        y1 = min(region[1::2])
        y2 = max(region[1::2])
        # A1: area of the rotated rectangle (product of two adjacent edges);
        # A2: area of its axis-aligned bounding box.
        A1 = np.linalg.norm(region[0:2] - region[2:4]) * \
             np.linalg.norm(region[2:4] - region[4:6])
        A2 = (x2 - x1) * (y2 - y1)
        s = np.sqrt(A1 / A2)  # shrink factor so the output box keeps A1's area
        w = s * (x2 - x1) + 1
        h = s * (y2 - y1) + 1
        x = cx - w / 2
        y = cy - h / 2
    else:
        # Already axis-aligned (OTB format): pass through unchanged.
        x = region[0]
        y = region[1]
        w = region[2]
        h = region[3]
    return x, y, w, h
# ---------------------------------------------------------------------------
# Example driver: run a tracker over a pysot dataset and dump one result
# file per video for the pysot-toolkit evaluation scripts.
# ---------------------------------------------------------------------------
# dataset_name = "OTB50"
dataset_name = "OTB100"
# dataset_name = "VOT2016"
dataset_root = "E:\\0_OTB_benchmark\\Pysot\\dataset\\OTB100"
# dataset_root = "E:\\0_OTB_benchmark\\Pysot\\dataset\\VOT2016"
# Path of the dataset's json annotation file (built here but unused below).
datasets_name_path = os.path.join(dataset_root, dataset_name+'.json')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = My_model()
tracker = My_Tracker(cfg, model=model, device=device, display=False)  # My_Tracker is a placeholder; substitute your own tracker class
dataset = DatasetFactory.create_dataset(name=dataset_name,
                                        dataset_root=dataset_root,
                                        load_img=False)
# NOTE(review): frame_counter is never advanced, so the tracker is only
# initialised on frame 0 of each video (no VOT-style restart on failure).
frame_counter = 0
# pred_bboxes = []
video_num = 0
for video in dataset:
    video_num = video_num + 1
    # print("----eval_video_name----", video.name)
    # video_num >= 0 is always true; raise the bound to resume a partial run.
    if video_num >= 0:
        pred_bboxes = []  # one entry per frame of the current video
        for idx, (img, gt_bbox) in enumerate(video):
            print("----eval_video_name----", video.name, idx)
            # Convert VOT's 8-point polygon (or OTB's 4-value box) to (x, y, w, h).
            initial_gt_bbox = get_axis_aligned_bbox(gt_bbox)
            if idx == frame_counter:
                # init tracker here
                print("----initialize model----")
                tracker.initialize_tadt(img, initial_gt_bbox)
                if 'VOT' in dataset_name:
                    # gt_bbox = [int(gt_bbox[0]), int(gt_bbox[1]), int(gt_bbox[2]), int(gt_bbox[3])]
                    # VOT result files use integer coordinates for the init frame.
                    initial_gt_bbox = [int(value) for value in initial_gt_bbox]
                    print(initial_gt_bbox)
                    pred_bboxes.append(initial_gt_bbox)
                else:
                    pred_bboxes.append(initial_gt_bbox)
                print("----initialize successfully----")
            elif idx > frame_counter:
                # get tracking result here
                pred_bbox = tracker.tracking(img, visualize=False)
                pred_bboxes.append(pred_bbox)
        print("----this video track successfully")
        # print("----eval_video_name----", video.name)
        print("----start save tracking result----")
        # NOTE(review): mode "a" appends, so re-running the script duplicates
        # lines in an existing result file — clear old results first.
        with open("E:\\0_OTB_benchmark\\Pysot\\pysot-toolkit\\result\\" + video.name + ".txt", "a", encoding='utf-8') as w:
            for i in range(len(pred_bboxes)):
                # print(pred_bbox)
                # One box per line, comma-separated: strip the surrounding
                # list/tuple brackets from str(...) and remove the spaces.
                w.write(str(pred_bboxes[i]).strip('[').strip(']').strip('(').strip(')').replace(" ", "") + '\n')
            w.close()  # NOTE(review): redundant — the with block already closes the file
        print("----save", video.name, "successfully")
5.转化别人的raw OTB跟踪结果
有些人是在VOT标准评测工具包上做的跟踪评测,生成的是.mat文件,但是在Pysot中不能用。然后我就写了一个简短的代码把.mat文件转化成.txt文件
import scipy.io as scio
import pandas as pd
import numpy as np
import os
import h5py
import hdf5storage
import shutil
# ------------------------------
# Convert .mat tracking results into .txt files
# ------------------------------
# NOTE: the key layout differs between .mat variants — print the file's keys to inspect.
path_matfile = "E:\\0_OTB_benchmark\\OTB结果\\OTB50\\C-COT_raw_results\\C-COT_OTB_2015_results"
outpath = "E:\\0_OTB_benchmark\\OTB结果\\OTB50\\C-COT_raw_results\\txt_OTB"
mat_names = os.listdir(path_matfile)
mat_paths = [os.path.join(path_matfile, fname) for fname in mat_names]
txt_names = [fname.replace('OPE_', '').replace('.mat', '') + '.txt' for fname in mat_names]
for mat_path, txt_name in zip(mat_paths, txt_names):
    # data = scio.loadmat(mat_path)  # some .mat variants (v7.3) cannot be read this way
    data = hdf5storage.loadmat(mat_path)  # compatible with more .mat flavours
    # The two lines below differ between .mat layouts.
    boxes = data['results']
    # print(boxes[0][0][0][1])
    boxes = boxes[0][0][0][1]
    print(txt_name)
    np.savetxt(os.path.join(outpath, txt_name), boxes, delimiter=',', fmt='%d')
6. 文件重命名
# -----------------------
# File renaming
# -----------------------
# Some trackers produce OTB100/OTB50 result files that start with a lower-case
# letter; capitalise the first letter so they match otb100.json / otb50.json.
# Afterwards, check that the txt names match the dataset — roughly 10 differ
# and need a manual tweak.  Adapt the paths to your own setup.
src_path = "E:\\0_OTB_benchmark\\Test\\Basketball\\img"
dst_path = "E:\\0_OTB_benchmark\\Test\\dst"
all_filename = os.listdir(src_path)
print(all_filename)
# BUG FIX: the original built the old-path list from only the '.txt' files but
# the new-path list from ALL files, so zip() could pair a source file with the
# wrong destination name.  Filter once and derive both paths from that list.
txt_filenames = [filename for filename in all_filename if '.txt' in filename]
all_filename_path_old = [os.path.join(src_path, filename) for filename in txt_filenames]
all_filename_path_new = [os.path.join(dst_path, filename.capitalize()) for filename in txt_filenames]  # capitalize() upper-cases the first letter
for (old_file, new_file) in zip(all_filename_path_old, all_filename_path_new):
    os.renames(old_file, new_file)  # moves (not copies) the file — keep a backup first
7.从OTB100中转移OTB50
# Copy the OTB50 subset of result files out of the OTB100 results.
src_otb100_path = "E:\\0_OTB_benchmark\\Pysot\\pysot-toolkit\\result\\Results_OTB100"
dst_otb50_path = "E:\\0_OTB_benchmark\\Pysot\\pysot-toolkit\\result\\Results_OTB50"
# The 'Initial_Method' folder serves as the template listing the OTB50 videos.
template_tracker = os.path.join(dst_otb50_path, 'Initial_Method')
template_videos = os.listdir(template_tracker)
all_trackername = os.listdir(src_otb100_path)
src_trackers = [os.path.join(src_otb100_path, name) for name in all_trackername]
# Ensure every tracker has a destination folder.
for name in all_trackername:
    if name not in os.listdir(dst_otb50_path):
        os.mkdir(os.path.join(dst_otb50_path, name))
print(template_videos)
print(all_trackername)
# Copy only the template (OTB50) videos for each tracker.
for name, tracker_dir in zip(all_trackername, src_trackers):
    each_tracker_videos = os.listdir(tracker_dir)  # kept for parity with the original (unused)
    for video in template_videos:
        shutil.copyfile(os.path.join(tracker_dir, video),
                        os.path.join(dst_otb50_path, name, video))