#1408 : The Lastest Time

本文介绍了一个简单的编程问题,即利用四个数字组成所有可能的有效时间,并找出其中最大的合法时间。通过循环式暴力搜索的方法,实现了一段简洁的 C++ 代码来解决这个问题。

描述

What is the latest time you can make with 4 digits A, B, C and D?

For example, if the 4 digits are 1, 0, 0, 0, you can make 4 times with them: 00:01, 00:10, 01:00, 10:00. The latest time will be 10:00. Note a valid time is between 00:00 and 23:59.

输入

One line with 4 digits A, B, C and D, separated by a space. (0 <= A, B, C, D <= 9)

输出

Output the latest time in the format "hh:mm". If there is no valid time, output "NOT POSSIBLE".

样例输入
0 9 0 0
样例输出
09:00

循环式暴力搜索。

做水题好快乐。

代码:

#include <bits/stdc++.h>

using namespace std;
int v[5];
int shi=-1,fen=-1;
void work(int q,int w,int e,int r)
{
    int sh=q*10+w;
    int fe=e*10+r;
    if(sh<=23&&fe<=59)
    {
        if(sh>shi||(sh==shi&&fe>fen))
        {
            shi=sh;
            fen=fe;
        }
    }
}
/**
 * Return 1 if x has exactly two decimal digits (10..99), else 0.
 * Used by main to decide whether a printed value needs a leading zero.
 * Note shu(0) == 0: the digit-counting loop never runs for x == 0.
 */
int shu(int x)
{
    int digits = 0;
    for (; x != 0; x /= 10)
        ++digits;
    return digits == 2 ? 1 : 0;
}
/**
 * Read 4 digits from stdin and print the latest valid time "hh:mm" they
 * can form, or "NOT POSSIBLE" if no permutation is a legal time.
 *
 * Improvements over the original:
 *  - the hand-rolled 4-level nested loops with a visited array are replaced
 *    by std::next_permutation, which enumerates every ordering of the four
 *    digits (duplicates among equal digits don't matter for a maximum);
 *  - manual zero-padding via shu() is replaced by printf("%02d").
 */
int main()
{
    int a[4];
    for (int i = 0; i < 4; i++)
    {
        scanf("%d", &a[i]);
    }

    // next_permutation visits all orderings only when starting from the
    // sorted sequence, so sort first.
    sort(a, a + 4);
    do
    {
        work(a[0], a[1], a[2], a[3]);  // records the best time in shi/fen
    } while (next_permutation(a, a + 4));

    // shi and fen are always set together, so checking one is enough.
    if (shi == -1)
    {
        printf("NOT POSSIBLE\n");
    }
    else
    {
        printf("%02d:%02d\n", shi, fen);  // %02d zero-pads single digits
    }
    return 0;
}



解释以下脚本:@echo off&setlocal enabledelayedexpansion :: 日志路径 set var_date=%date:~0,10% set var_date=%var_date:-=% set log_path=%temp%\ibdata%var_date%.log echo ******* start process ******** echo. echo %time% start process >> %log_path% echo %log_path% pause :: 脚本的存放位置固定为 D:\naura echo %cd% pause if %cd% NEQ D:\naura ( echo %time% work path is %cd% >> %log_path% echo 请将脚本文件及ibdata1文件放在 D:\naura 目录下 echo. pause exit 0 ) :: 检查ibdata1 文件是否存在 if not exist ibdata1 ( echo ibdata1 文件不存在,请检查脚本文件所在目录 echo. pause exit 0 ) :: 检查ibdata1 文件是否存在 if not exist tables.txt ( echo tables.txt 文件不存在,请检查脚本文件所在目录 echo. pause exit 0 ) :: 校验 ibdata1 的大小是否符合预期 for /f %%i in (&#39;dir /b ibdata1&#39;) do ( echo %time% lastest ibdata1 file size is %%~zi >> %log_path% if %%~zi NEQ 10485760 ( echo ibdata1 文件大小不对,请核对脚本目录下 ibdata1 文件 echo. pause exit 0 ) ) set excute_h=1 set excute_m=1 set excute_s=1 :: 是否需要手动干预的标志 set handwork=0 :: 检查数据库是否启动 同时检查是否有数据库操作权限 tasklist|find /i "mysqld.exe" >> %log_path% if %errorlevel% NEQ 0 ( echo %time% mysql is not running and started to start >> %log_path% net start mysql >> %log_path% 2>&1 if !errorlevel! NEQ 0 ( echo %time% failed to start mysql echo mysql 启动失败,请手动启动mysql服务之后,再执行脚本 echo. pause exit 0 ) ) else ( echo %time% verify that we have execute net permission >> %log_path% net help >> %log_path% 2>&1 if !errorlevel! NEQ 0 ( echo %time% we don&#39;t have authority >> %log_path% echo 由于权限问题,清理过程需要您手动干预,请不要离开机台,注意脚本运行!! echo. pause echo. set handwork=1 ) ) echo %time% begin excute clean mysql data echo. 
:: 定义变量,方便修改 set mysqlServer=localhost set mysqlUser=root set mysqlPassword=8888 set database_fa300=fa300 set database_scope=scope set curObj.table=0 set curObj.field=0 set export_ingnore_table=%database_scope% set dataSavePath=naura_scope set tryCount=0 :: 获取所有数据记录表,表中数据需要根据时间备份 set tableCounts=0 set obj=0 for /f "delims=: tokens=1-2" %%i in (tables.txt) do ( for /f "skip=1" %%a in (&#39;mysql -h%mysqlServer% -u%mysqlUser% -p%mysqlPassword% -e "select count(0) from information_schema.tables where table_name=&#39;%%i&#39; and table_schema=&#39;%database_scope%&#39;"&#39;) do ( if %%a EQU 1 ( set obj[!tableCounts!].table=%%i set obj[!tableCounts!].field=%%j set /a tableCounts=!tableCounts!+1 ) ) ) :: 脚本执行过程中产生的临时参数保存路径 set process_param=%temp%\ibdata_param.log :: 临时数据保存的磁盘 set param_volume=0 :: 脚本执行到第几步 set param_step=0 :: 参数文件产生日期 set param_date=0 :: 备份天数 set param_day=0 :: ibdata 文件路径 set param_ibdataPath=0; :: 导出或导入数组 obj 的下标 set param_tableIndex=0; set begin_time=%time% if not exist %process_param% goto:step0 echo %time% %process_param% file exist >> %log_path% echo ----------------------- >> %log_path% type %process_param% >> %log_path% echo ----------------------- >> %log_path% for /f "delims== tokens=1-2" %%i in (%process_param%) do ( if %%i EQU param_date ( set param_date=%%j ) ) echo %time% param_date: %param_date% >> %log_path% :: 判断已经存在的参数,是否为同一天,默认同一天的可以继续使用 if %date:~0,10% NEQ %param_date% goto:step0 :: 获取所有的参数 for /f "delims== tokens=1-2" %%i in (%process_param%) do ( if %%i EQU volume ( set param_volume=%%j ) else if %%i EQU step ( set param_step=%%j ) else if %%i EQU day ( set param_day=%%j ) else if %%i EQU ibdataPath ( set param_ibdataPath=%%j ) else ( set param_tableIndex=%%j ) ) echo %time% param: %param_volume% %param_step% %param_day% %param_ibdataPath% %param_tableIndex%>> %log_path% if not exist "%param_ibdataPath%" goto:step0 goto:step%param_step% :: 环境准备 :: 1 获取临时数据存储位置 :: 2 获取 mysql data 的位置 :: 3 判断临时数据存储空间是否满足 :: 4 估算脚本运行时间 :step0 echo 
%time% begin excute step0 >> %log_path% del /Q/F %process_param% >> %log_path% 2>&1 :: 自动获取可用空间最大的非系统盘, 如果存在 E 盘,则直接默认 E 盘 set param_volume=0 set volume_freespace=0 for /f "Skip=2 tokens=1-2" %%i in (&#39;Wmic LogicalDisk where "DriveType=3" Get FreeSpace^,Name&#39;) do ( echo %time% %%j %%i >> %log_path% set tempSize=%%i set /a tempSize=!tempSize:~0,-6! / 1074 > nul 2>&1 if %%j EQU E: ( set param_volume=%%j set /a volume_freespace=!tempSize! goto:endVolume ) if !tempSize! GTR !volume_freespace! ( set param_volume=%%j set /a volume_freespace=!tempSize! ) ) :endVolume echo %time% the path to save the temp data is %param_volume%, freespce is %volume_freespace%GB >> %log_path% if not exist %param_volume%\%dataSavePath% md %param_volume%\%dataSavePath% :: 获取 ibdata1 文件路径 echo %time% show variables datadir >> %log_path% mysql -h %mysqlServer% -u %mysqlUser% -p%mysqlPassword% -e "show variables like &#39;datadir&#39;" > %param_volume%\%dataSavePath%\data.txt 2>>%log_path% type %param_volume%\%dataSavePath%\data.txt >> %log_path% for /f "delims= tokens=1" %%i in (%param_volume%\%dataSavePath%\data.txt) do ( echo %%i | findstr "Value" >nul && echo. >nul || set param_ibdataPath=%%i ) set param_ibdataPath=%param_ibdataPath:~8%ibdata1 set param_ibdataPath=%param_ibdataPath:\\=\% if not exist "%param_ibdataPath%" ( mysql -h %mysqlServer% -u %mysqlUser% -p%mysqlPassword% -e "show variables like &#39;innodb_data_home_dir&#39;" > %param_volume%\%dataSavePath%\data.txt 2>>%log_path% type %param_volume%\%dataSavePath%\data.txt >> %log_path% for /f "delims= tokens=1" %%i in (%param_volume%\%dataSavePath%\data.txt) do ( echo %%i | findstr "Value" >nul && echo. >nul || set param_ibdataPath=%%i ) set param_ibdataPath=!param_ibdataPath:~21!ibdata1 set param_ibdataPath=!param_ibdataPath:\\=\! ) echo. echo !param_ibdataPath! echo ibdata1 file path !param_ibdataPath! >> %log_path% echo. if not exist "%param_ibdataPath%" ( echo 没有找到 ibdata 文件,请联系软件工程师!!!! echo. 
pause exit ) :: 获取客户端 ibdata file size set oldIbdataSize=0 for /f "tokens=3" %%i in (&#39;dir "%param_ibdataPath%"^|find /i "1 个文件"&#39;) do ( set oldIbdataSize=%%i ) set oldIbdataSize=%oldIbdataSize:,=% echo %time% old ibdata file size is %oldIbdataSize% >> %log_path% :: 判断临时数据存储位置的空间是否够用 set /a oldIbdataSize=%oldIbdataSize:~0,-6% / 1074 echo %time% old ibdata file size is %oldIbdataSize%GB >> %log_path% set /a oldIbdataSize=%oldIbdataSize%+10 if %volume_freespace% LSS %oldIbdataSize% ( echo %param_volume% freespce is too small >> %log_path% echo %param_volume% 剩余空间太小,至少需要 %oldIbdataSize%GB 空间,请手动清理后再次运行脚本 echo. pause exit 0 ) set /a oldIbdataSize=%oldIbdataSize%*3 echo 整个清理过程大约需要 %oldIbdataSize% min echo. set param_step=1 call:fun_save_param echo %time% end excute step0 >> %log_path% :: 输入所需备份多久的数据,主要备份腔室表和 transfer表 :step1 echo %time% begin excute step1 >> %log_path% set /p param_day=请输入需要备份数据的天数(最大为120): echo. echo %param_day%|findstr "^[0-9]*$">nul if %errorlevel% NEQ 0 ( echo 请输入整数 echo. goto:step1 ) if %param_day% GTR 120 ( echo 备份时间超过 120 ,请重新输入 echo. goto:step1 ) set param_step=2 call:fun_save_param echo %time% end excute step1 >> %log_path% :: 备份客户端 ibdata :step2 echo %time% begin excute step2 >> %log_path% set tryCount=0 :backup set /a tryCount=%tryCount% + 1 if %tryCount% EQU 4 ( echo !!!!!备份 %param_ibdataPath% 失败,请手动备份至其他磁盘后,再继续执行脚本!!!!! echo. pause set param_step=3 call:fun_save_param exit 0 ) echo %time% begin backup ibdata1 echo %time% begin backup ibdata1 >> %log_path% copy /Y/V/Z "%param_ibdataPath%" %param_volume%\%dataSavePath%\ibdata1 if %errorlevel% NEQ 0 goto:backup echo %time% end backup ibdata1 echo. 
set param_step=3 call:fun_save_param echo %time% end backup ibdata1 >> %log_path% echo %time% end excute step2 >> %log_path% :: 清理数据库中备份表,及备份表中数据 :step3 echo %time% begin excute step3 >> %log_path% echo %time% begin clean history table for /f "skip=1" %%a in (&#39;mysql -h%mysqlServer% -u%mysqlUser% -p%mysqlPassword% -e "select count(0) from information_schema.tables where table_name=&#39;backuphistory&#39; and table_schema=&#39;%database_scope%&#39;"&#39;) do ( if %%a EQU 1 ( goto:history ) else ( goto:skiphistory ) ) :history for /f "skip=1 tokens=1" %%i in (&#39;mysql -h%mysqlServer% -u%mysqlUser% -p%mysqlPassword% --database %database_scope% -e "select backupname from backuphistory where backuptime < date_sub(curdate(), interval %param_day% day)"&#39;) do ( echo %time% delete history table %%i echo %time% delete history table %%i >> %log_path% mysql -h%mysqlServer% -u%mysqlUser% -p%mysqlPassword% --database %database_scope% -e "delete from backuphistory where backupname=&#39;%%i&#39;" 2>>%log_path% mysql -h%mysqlServer% -u%mysqlUser% -p%mysqlPassword% --database %database_scope% -e "drop table %%i" 2>>%log_path% ) :skiphistory echo %time% end clean history table echo. set param_step=4 call:fun_save_param echo %time% end excute step3 >> %log_path% :: 导出 scope 相关的表 :step4 echo %time% begin excute step4 >> %log_path% :: 循环导出 obj 数组中的表 set currentIndex=0 :exportStartLoop if %currentIndex% EQU %tableCounts% goto:exportEndLoop for /f "usebackq delims==. tokens=1-3" %%i in (`set obj[%currentIndex%]`) do ( set curObj.%%j=%%k ) set export_ingnore_table=%export_ingnore_table% --ignore-table=%database_scope%.%curObj.table% if %currentIndex% LSS %param_tableIndex% ( set /a currentIndex=%currentIndex% + 1 goto:exportStartLoop ) echo %time% begin export table %curObj.table% echo %time% begin export table %curObj.table% >> %log_path% set tryCount=0 :: 导出失败进行 3 次尝试 :exporttable if %tryCount% EQU 3 ( echo 导出表 %curObj.table% 失败 无法继续 请联系软件工程人员 echo. 
pause exit 0 ) if %param_day% EQU 0 ( mysqldump -h%mysqlServer% -u%mysqlUser% -p%mysqlPassword% --no-data --databases %database_scope% --tables %curObj.table% > %param_volume%\%dataSavePath%\%curObj.table%.sql 2>>%log_path% ) else ( mysqldump -h%mysqlServer% -u%mysqlUser% -p%mysqlPassword% --databases %database_scope% --tables %curObj.table% --where="%curObj.field%>date_sub(curdate(), interval %param_day% day)" > %param_volume%\%dataSavePath%\%curObj.table%.sql 2>>%log_path% ) if %errorlevel% NEQ 0 ( echo %time% failed to export table:%curObj.table% >> %log_path% set /a tryCount=%tryCount% + 1 goto:exporttable ) echo %time% end export table %curObj.table% echo. echo %time% end export table %curObj.table% >> %log_path% set /a currentIndex=%currentIndex% + 1 set /a param_tableIndex=%currentIndex% call:fun_save_param goto:exportStartLoop :exportEndLoop :: 导出 scope 库 set tryCount=0 :exportscope if %tryCount% EQU 3 ( echo 导出库 %database_scope% 失败 无法继续 请联系软件工程人员 echo. pause exit 0 ) if %database_scope%% NEQ null ( echo %time% begin export database %database_scope% echo %time% begin export database %database_scope% : %export_ingnore_table% >> %log_path% mysqldump -h%mysqlServer% -u%mysqlUser% -p%mysqlPassword% --databases --events --routines %export_ingnore_table%> %param_volume%\%dataSavePath%\%database_scope%.sql 2>>%log_path% echo %time% end export database %database_scope% echo. echo %time% end export database %database_scope% >> %log_path% ) if %errorlevel% NEQ 0 ( echo %time% failed to export database:%database_scope% >> %log_path% set /a tryCount=%tryCount% + 1 goto:exportscope ) set param_step=5 call:fun_save_param echo %time% end excute step4 >> %log_path% :: 导出fa300 库 :step5 echo %time% begin excute step5 >> %log_path% set tryCount=0 :exportfa300 if %tryCount% EQU 3 ( echo 导出库 %database_fa300% 失败 无法继续 请联系软件工程人员 echo. 
pause exit 0 ) if %database_fa300% NEQ null ( echo %time% begin export database %database_fa300% echo %time% begin export database %database_fa300% >> %log_path% mysqldump -h%mysqlServer% -u%mysqlUser% -p%mysqlPassword% --databases --events --routines %database_fa300% > %param_volume%\%dataSavePath%\%database_fa300%.sql 2>>%log_path% echo %time% end export database %database_fa300% echo. echo %time% end export database %database_fa300% >> %log_path% ) if %errorlevel% NEQ 0 ( echo %time% failed to export database:%database_fa300% >> %log_path% set /a tryCount=%tryCount% + 1 goto:exportfa300 ) set param_step=6 set param_tableIndex=0 call:fun_save_param echo ********** export data successfully *********** echo. echo %time% exprot data successfully >> %log_path% echo %time% end excute step5 >> %log_path% :: 将干净的 ibdata 替换至客户端 :step6 echo %time% begin excute step6 >> %log_path% echo %time% begin stop mysql echo %time% begin stop mysql >> %log_path% call:fun_stopmysql echo %time% end stop mysql echo. set tryCount=0 :replace :: 三次失败尝试,如果都失败,就需要收到操作 set /a tryCount=%tryCount% + 1 if %tryCount% EQU 4 ( echo !!!!!替换 %param_ibdataPath% 失败!!!!! echo. echo 请手动将脚本目录下 ibdata1 文件替换至 %param_ibdataPath% echo. pause set param_step=7 call:fun_save_param exit 0 ) ::替换 ibdata1 文件 echo %time% begin replace ibdata1 echo %time% begin replace ibdata1 >> %log_path% copy /Y/V/Z ibdata1 "%param_ibdataPath%" if %errorlevel% NEQ 0 goto:replace echo %time% end replace ibdata1 echo. echo %time% end replace ibdata1 >> %log_path% set param_step=7 call:fun_save_param echo ********** replace ibdate1 successfully *********** echo. echo %time% end excute step6 >> %log_path% :step7 echo %time% begin excute step7 >> %log_path% echo %time% begin start mysql echo %time% begin start mysql >> %log_path% call:fun_startmysql echo %time% end start mysql echo. echo %time% end start mysql >> %log_path% :: 导入fa300库 set tryCount=0 :importfa300 if %tryCount% EQU 3 ( echo 导入库 %database_fa300% 失败 无法继续 请联系软件工程人员 echo. 
pause exit 0 ) if %database_fa300% NEQ null ( echo %time% begin import basedata %database_fa300% echo %time% begin import basedata %database_fa300% >> %log_path% mysql -h %mysqlServer% -u %mysqlUser% -p%mysqlPassword% < %param_volume%\%dataSavePath%\%database_fa300%.sql 2>>%log_path% echo %time% end import basedata %database_fa300% echo. echo %time% end import basedata %database_fa300% >> %log_path% ) if %errorlevel% NEQ 0 ( echo %time% failed to import database:%database_fa300% >> %log_path% set /a tryCount=%tryCount% + 1 goto:importfa300 ) set param_step=8 call:fun_save_param echo %time% end excute step7 >> %log_path% :step8 echo %time% begin excute step8 >> %log_path% call:fun_startmysql :: 导入 scope 库 set tryCount=0 :importscope if %tryCount% EQU 3 ( echo 导入库 %database_scope% 失败 无法继续 请联系软件工程人员 echo. pause exit 0 ) if %database_scope% NEQ null ( echo %time% begin import basedata %database_scope% echo %time% begin import basedata %database_scope% >> %log_path% mysql -h %mysqlServer% -u %mysqlUser% -p%mysqlPassword% < %param_volume%\%dataSavePath%\%database_scope%.sql 2>>%log_path% echo %time% end import basedata %database_scope% echo. echo %time% end import basedata %database_scope% >> %log_path% ) if %errorlevel% NEQ 0 ( echo %time% failed to import database:%database_scope% >> %log_path% set /a tryCount=%tryCount% + 1 goto:importscope ) set param_step=9 call:fun_save_param echo %time% end excute step8 >> %log_path% :step9 echo %time% begin excute step9 >> %log_path% call:fun_startmysql :: 循环导入数组中的表 :importStartLoop if %param_tableIndex% EQU %tableCounts% goto:importEndLoop for /f "usebackq delims==. tokens=1-3" %%i in (`set obj[%param_tableIndex%]`) do ( set curObj.%%j=%%k ) echo %time% begin import table %curObj.table% echo echo %time% begin import table %curObj.table% >> %log_path% set tryCount=0 :importtable if %tryCount% EQU 3 ( echo 导入表 %curObj.table% 错误无法继续,请联系软件工程人员处理 echo. 
pause exit 0 ) mysql -h %mysqlServer% -u %mysqlUser% -p%mysqlPassword% %database_scope% < %param_volume%\%dataSavePath%\%curObj.table%.sql 2>>%log_path% if %errorlevel% NEQ 0 ( echo %time% failed to import table:%curObj.table% >> %log_path% set /a tryCount=%tryCount%+1 goto:importtable ) echo %time% end import table %curObj.table% echo. echo %time% end import table %curObj.table% >> %log_path% set /a param_tableIndex=%param_tableIndex% + 1 call:fun_save_param goto:importStartLoop :importEndLoop set param_step=10 call:fun_save_param echo %time% end excute step9 >> %log_path% echo ********* imported data successfully *********** echo. :: 校验表是否导入 :step10 echo %time% begin check results echo %time% begin check results >> %log_path% set currentIndex=0 :checkStartLoop if %currentIndex% EQU %tableCounts% goto:checkEndLoop for /f "usebackq delims==. tokens=1-3" %%i in (`set obj[%currentIndex%]`) do ( set curObj.%%j=%%k ) mysql -h %mysqlServer% -u %mysqlUser% -p%mysqlPassword% %database_scope% -e "select %curObj.field% from %curObj.table% limit 1" >>%log_path% 2>&1 if %errorlevel% NEQ 0 ( echo %curObj.table% 表校验失败,请联系软件工程师处理!!! echo. pause exit 0 ) set /a currentIndex=%currentIndex% + 1 goto:checkStartLoop :checkEndLoop echo %time% end check results echo %time% end check results >> %log_path% echo ******** program executed successfully ******** echo. 
rename %param_volume%\%dataSavePath% %database_scope%_%var_date% >> %log_path% 2>&1 del /Q/F %process_param% >> %log_path% 2>&1 :: 计算任务执行时间 set end_time=%time% echo end time %end_time% call:fun_time_diff %begin_time%,%end_time% echo total time %excute_h%:%excute_m%:%excute_s% pause :fun_time_diff set param1=%1 set param2=%2 set /a h1=%param1:~0,2% 2>nul set /a m1=1%param1:~3,2% - 100 2>nul set /a s1=1%param1:~6,2% - 100 2>nul set /a h2=%param2:~0,2% 2>nul set /a m2=1%param2:~3,2% - 100 2>nul set /a s2=1%param2:~6,2% - 100 2>nul if %h2% LSS %h1% set /a h2=%h2%+24 set /a ts1=%h1%*3600 + %m1%*60 + %s1% set /a ts2=%h2%*3600 + %m2%*60 + %s2% set /a ts=%ts2% - %ts1% set /a excute_h=%ts%/3600 set /a excute_m=(%ts%-%excute_h%*3600)/60 set /a excute_s=%ts%%%%(60) goto:EOF :fun_save_param echo param_date=%date:~0,10%>%process_param% echo step=%param_step% >>%process_param% echo day=%param_day% >>%process_param% echo volume=%param_volume%>>%process_param% echo ibdataPath=%param_ibdataPath%>>%process_param% echo tableIndex=%param_tableIndex% >>%process_param% goto:EOF :fun_stopmysql tasklist|find /i "mysqld.exe" >> %log_path% 2>&1 if %errorlevel% NEQ 0 goto:EOF if %handwork% EQU 0 goto:stopmysql echo !!!!!! 请手动停止mysql 服务,然后继续 !!!!!! echo. pause echo. goto:fun_stopmysql ::使用命令停止 mysql 服务 :stopmysql net stop mysql >> %log_path% 2>&1 if %errorlevel% NEQ 0 set handwork=1 goto:fun_stopmysql goto:EOF :fun_startmysql tasklist|find /i "mysqld.exe" >> %log_path% 2>&1 if %errorlevel% EQU 0 goto:EOF if %handwork% EQU 0 goto:startmysql echo !!!!!!请手动启动 mysql 服务,然后继续!!!!!! echo. pause echo. goto:fun_startmysql :startmysql net start mysql >> %log_path% 2>&1 if %errorlevel% NEQ 0 set handwork=1 goto:fun_startmysql goto:EOF
06-20
IMAGENET_MEAN = [0.485, 0.456, 0.406] IMAGENET_STD = [0.229, 0.224, 0.225] def get_args_parser(): parser = argparse.ArgumentParser() # dataset parser.add_argument(&#39;--checkpoint_dir&#39;, default=&#39;tmp&#39;, type=str, help=&#39;where to save the training log and models&#39;) parser.add_argument(&#39;--stage&#39;, default=&#39;sceneflow&#39;, type=str, help=&#39;training stage on different datasets&#39;) parser.add_argument(&#39;--val_dataset&#39;, default=[&#39;kitti15&#39;], type=str, nargs=&#39;+&#39;) parser.add_argument(&#39;--max_disp&#39;, default=400, type=int, help=&#39;exclude very large disparity in the loss function&#39;) parser.add_argument(&#39;--img_height&#39;, default=288, type=int) parser.add_argument(&#39;--img_width&#39;, default=512, type=int) parser.add_argument(&#39;--padding_factor&#39;, default=16, type=int) # training parser.add_argument(&#39;--batch_size&#39;, default=64, type=int) parser.add_argument(&#39;--num_workers&#39;, default=8, type=int) parser.add_argument(&#39;--lr&#39;, default=1e-3, type=float) parser.add_argument(&#39;--weight_decay&#39;, default=1e-4, type=float) parser.add_argument(&#39;--seed&#39;, default=326, type=int) # resume pretrained model or resume training parser.add_argument(&#39;--resume&#39;, default=None, type=str, help=&#39;resume from pretrained model or resume from unexpectedly terminated training&#39;) parser.add_argument(&#39;--strict_resume&#39;, action=&#39;store_true&#39;, help=&#39;strict resume while loading pretrained weights&#39;) parser.add_argument(&#39;--no_resume_optimizer&#39;, action=&#39;store_true&#39;) parser.add_argument(&#39;--resume_exclude_upsampler&#39;, action=&#39;store_true&#39;) # model: learnable parameters parser.add_argument(&#39;--task&#39;, default=&#39;stereo&#39;, choices=[&#39;flow&#39;, &#39;stereo&#39;, &#39;depth&#39;], type=str) parser.add_argument(&#39;--num_scales&#39;, default=1, type=int, help=&#39;feature scales: 1/8 or 1/8 + 1/4&#39;) 
parser.add_argument(&#39;--feature_channels&#39;, default=128, type=int) parser.add_argument(&#39;--upsample_factor&#39;, default=8, type=int) parser.add_argument(&#39;--num_head&#39;, default=1, type=int) parser.add_argument(&#39;--ffn_dim_expansion&#39;, default=4, type=int) parser.add_argument(&#39;--num_transformer_layers&#39;, default=6, type=int) parser.add_argument(&#39;--reg_refine&#39;, action=&#39;store_true&#39;, help=&#39;optional task-specific local regression refinement&#39;) # model: parameter-free parser.add_argument(&#39;--attn_type&#39;, default=&#39;self_swin2d_cross_1d&#39;, type=str, help=&#39;attention function&#39;) parser.add_argument(&#39;--attn_splits_list&#39;, default=[2], type=int, nargs=&#39;+&#39;, help=&#39;number of splits in attention&#39;) parser.add_argument(&#39;--corr_radius_list&#39;, default=[-1], type=int, nargs=&#39;+&#39;, help=&#39;correlation radius for matching, -1 indicates global matching&#39;) parser.add_argument(&#39;--prop_radius_list&#39;, default=[-1], type=int, nargs=&#39;+&#39;, help=&#39;self-attention radius for propagation, -1 indicates global attention&#39;) parser.add_argument(&#39;--num_reg_refine&#39;, default=1, type=int, help=&#39;number of additional local regression refinement&#39;) # evaluation parser.add_argument(&#39;--eval&#39;, action=&#39;store_true&#39;) parser.add_argument(&#39;--inference_size&#39;, default=None, type=int, nargs=&#39;+&#39;) parser.add_argument(&#39;--count_time&#39;, action=&#39;store_true&#39;) parser.add_argument(&#39;--save_vis_disp&#39;, action=&#39;store_true&#39;) parser.add_argument(&#39;--save_dir&#39;, default=None, type=str) parser.add_argument(&#39;--middlebury_resolution&#39;, default=&#39;F&#39;, choices=[&#39;Q&#39;, &#39;H&#39;, &#39;F&#39;]) # submission parser.add_argument(&#39;--submission&#39;, action=&#39;store_true&#39;) parser.add_argument(&#39;--eth_submission_mode&#39;, default=&#39;train&#39;, type=str, choices=[&#39;train&#39;, &#39;test&#39;]) 
parser.add_argument(&#39;--middlebury_submission_mode&#39;, default=&#39;training&#39;, type=str, choices=[&#39;training&#39;, &#39;test&#39;]) parser.add_argument(&#39;--output_path&#39;, default=&#39;output&#39;, type=str) # log parser.add_argument(&#39;--summary_freq&#39;, default=100, type=int, help=&#39;Summary frequency to tensorboard (iterations)&#39;) parser.add_argument(&#39;--save_ckpt_freq&#39;, default=1000, type=int, help=&#39;Save checkpoint frequency (steps)&#39;) parser.add_argument(&#39;--val_freq&#39;, default=1000, type=int, help=&#39;validation frequency in terms of training steps&#39;) parser.add_argument(&#39;--save_latest_ckpt_freq&#39;, default=1000, type=int) parser.add_argument(&#39;--num_steps&#39;, default=100000, type=int) # distributed training parser.add_argument(&#39;--distributed&#39;, action=&#39;store_true&#39;) parser.add_argument(&#39;--local_rank&#39;, type=int, default=0) parser.add_argument(&#39;--launcher&#39;, default=&#39;none&#39;, type=str) parser.add_argument(&#39;--gpu_ids&#39;, default=0, type=int, nargs=&#39;+&#39;) # inference parser.add_argument(&#39;--inference_dir&#39;, default=None, type=str) parser.add_argument(&#39;--inference_dir_left&#39;, default=None, type=str) parser.add_argument(&#39;--inference_dir_right&#39;, default=None, type=str) parser.add_argument(&#39;--pred_bidir_disp&#39;, action=&#39;store_true&#39;, help=&#39;predict both left and right disparities&#39;) parser.add_argument(&#39;--pred_right_disp&#39;, action=&#39;store_true&#39;, help=&#39;predict right disparity&#39;) parser.add_argument(&#39;--save_pfm_disp&#39;, action=&#39;store_true&#39;, help=&#39;save predicted disparity as .pfm format&#39;) parser.add_argument(&#39;--debug&#39;, action=&#39;store_true&#39;) return parser def main(args): print_info = not args.eval and not args.submission and args.inference_dir is None and \ args.inference_dir_left is None and args.inference_dir_right is None if print_info and args.local_rank == 0: 
print(args) misc.save_args(args) misc.check_path(args.checkpoint_dir) misc.save_command(args.checkpoint_dir) misc.check_path(args.output_path) torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) np.random.seed(args.seed) torch.backends.cudnn.benchmark = True if args.launcher == &#39;none&#39;: args.distributed = False device = torch.device(&#39;cuda&#39; if torch.cuda.is_available() else &#39;cpu&#39;) else: args.distributed = True # adjust batch size for each gpu assert args.batch_size % torch.cuda.device_count() == 0 args.batch_size = args.batch_size // torch.cuda.device_count() dist_params = dict(backend=&#39;nccl&#39;) init_dist(args.launcher, **dist_params) # re-set gpu_ids with distributed training mode _, world_size = get_dist_info() args.gpu_ids = range(world_size) device = torch.device(&#39;cuda:{}&#39;.format(args.local_rank)) setup_for_distributed(args.local_rank == 0) # model model = UniMatch(feature_channels=args.feature_channels, num_scales=args.num_scales, upsample_factor=args.upsample_factor, num_head=args.num_head, ffn_dim_expansion=args.ffn_dim_expansion, num_transformer_layers=args.num_transformer_layers, reg_refine=args.reg_refine, task=args.task).to(device) if print_info: print(model) if args.distributed: model = torch.nn.parallel.DistributedDataParallel( model.to(device), device_ids=[args.local_rank], output_device=args.local_rank) model_without_ddp = model.module else: if torch.cuda.device_count() > 1: print(&#39;Use %d GPUs&#39; % torch.cuda.device_count()) model = torch.nn.DataParallel(model) model_without_ddp = model.module else: model_without_ddp = model num_params = sum(p.numel() for p in model.parameters()) if print_info: print(&#39;=> Number of trainable parameters: %d&#39; % num_params) if not args.eval and not args.submission and args.inference_dir is None: save_name = &#39;%d_parameters&#39; % num_params open(os.path.join(args.checkpoint_dir, save_name), &#39;a&#39;).close() optimizer = 
torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) start_epoch = 0 start_step = 0 if args.resume: print("=> Load checkpoint: %s" % args.resume) loc = &#39;cuda:{}&#39;.format(args.local_rank) if torch.cuda.is_available() else &#39;cpu&#39; checkpoint = torch.load(args.resume, map_location=loc) model_without_ddp.load_state_dict(checkpoint[&#39;model&#39;], strict=args.strict_resume) if &#39;optimizer&#39; in checkpoint and &#39;step&#39; in checkpoint and &#39;epoch&#39; in checkpoint and not \ args.no_resume_optimizer: print(&#39;Load optimizer&#39;) optimizer.load_state_dict(checkpoint[&#39;optimizer&#39;]) start_step = checkpoint[&#39;step&#39;] start_epoch = checkpoint[&#39;epoch&#39;] if print_info: print(&#39;start_epoch: %d, start_step: %d&#39; % (start_epoch, start_step)) if args.submission: if &#39;kitti15&#39; in args.val_dataset or &#39;kitti12&#39; in args.val_dataset: create_kitti_submission(model_without_ddp, output_path=args.output_path, padding_factor=args.padding_factor, attn_type=args.attn_type, attn_splits_list=args.attn_splits_list, corr_radius_list=args.corr_radius_list, prop_radius_list=args.prop_radius_list, num_reg_refine=args.num_reg_refine, inference_size=args.inference_size, ) if &#39;eth3d&#39; in args.val_dataset: create_eth3d_submission(model_without_ddp, output_path=args.output_path, padding_factor=args.padding_factor, attn_type=args.attn_type, attn_splits_list=args.attn_splits_list, corr_radius_list=args.corr_radius_list, prop_radius_list=args.prop_radius_list, num_reg_refine=args.num_reg_refine, inference_size=args.inference_size, submission_mode=args.eth_submission_mode, save_vis_disp=args.save_vis_disp, ) if &#39;middlebury&#39; in args.val_dataset: create_middlebury_submission(model_without_ddp, output_path=args.output_path, padding_factor=args.padding_factor, attn_type=args.attn_type, attn_splits_list=args.attn_splits_list, corr_radius_list=args.corr_radius_list, 
prop_radius_list=args.prop_radius_list, num_reg_refine=args.num_reg_refine, inference_size=args.inference_size, submission_mode=args.middlebury_submission_mode, save_vis_disp=args.save_vis_disp, ) return if args.eval: val_results = {} if &#39;things&#39; in args.val_dataset: results_dict = validate_things(model_without_ddp, max_disp=args.max_disp, padding_factor=args.padding_factor, inference_size=args.inference_size, attn_type=args.attn_type, attn_splits_list=args.attn_splits_list, corr_radius_list=args.corr_radius_list, prop_radius_list=args.prop_radius_list, num_reg_refine=args.num_reg_refine, ) if args.local_rank == 0: val_results.update(results_dict) if &#39;kitti15&#39; in args.val_dataset or &#39;kitti12&#39; in args.val_dataset: results_dict = validate_kitti15(model_without_ddp, padding_factor=args.padding_factor, inference_size=args.inference_size, attn_type=args.attn_type, attn_splits_list=args.attn_splits_list, corr_radius_list=args.corr_radius_list, prop_radius_list=args.prop_radius_list, num_reg_refine=args.num_reg_refine, count_time=args.count_time, debug=args.debug, ) if args.local_rank == 0: val_results.update(results_dict) if &#39;eth3d&#39; in args.val_dataset: results_dict = validate_eth3d(model_without_ddp, padding_factor=args.padding_factor, inference_size=args.inference_size, attn_type=args.attn_type, attn_splits_list=args.attn_splits_list, corr_radius_list=args.corr_radius_list, prop_radius_list=args.prop_radius_list, num_reg_refine=args.num_reg_refine, ) if args.local_rank == 0: val_results.update(results_dict) if &#39;middlebury&#39; in args.val_dataset: results_dict = validate_middlebury(model_without_ddp, padding_factor=args.padding_factor, inference_size=args.inference_size, attn_type=args.attn_type, attn_splits_list=args.attn_splits_list, corr_radius_list=args.corr_radius_list, prop_radius_list=args.prop_radius_list, num_reg_refine=args.num_reg_refine, resolution=args.middlebury_resolution, ) if args.local_rank == 0: 
val_results.update(results_dict) return if args.inference_dir or (args.inference_dir_left and args.inference_dir_right): inference_stereo(model_without_ddp, inference_dir=args.inference_dir, inference_dir_left=args.inference_dir_left, inference_dir_right=args.inference_dir_right, output_path=args.output_path, padding_factor=args.padding_factor, inference_size=args.inference_size, attn_type=args.attn_type, attn_splits_list=args.attn_splits_list, corr_radius_list=args.corr_radius_list, prop_radius_list=args.prop_radius_list, num_reg_refine=args.num_reg_refine, pred_bidir_disp=args.pred_bidir_disp, pred_right_disp=args.pred_right_disp, save_pfm_disp=args.save_pfm_disp, ) return train_data = build_dataset(args) print(&#39;=> {} training samples found in the training set&#39;.format(len(train_data))) if args.distributed: train_sampler = torch.utils.data.distributed.DistributedSampler( train_data, num_replicas=torch.cuda.device_count(), rank=args.local_rank ) else: train_sampler = None train_loader = DataLoader(dataset=train_data, batch_size=args.batch_size, shuffle=train_sampler is None, num_workers=args.num_workers, pin_memory=True, drop_last=True, sampler=train_sampler, ) last_epoch = start_step if args.resume and not args.no_resume_optimizer else -1 lr_scheduler = torch.optim.lr_scheduler.OneCycleLR( optimizer, args.lr, args.num_steps + 10, pct_start=0.05, cycle_momentum=False, anneal_strategy=&#39;cos&#39;, last_epoch=last_epoch, ) if args.local_rank == 0: summary_writer = SummaryWriter(args.checkpoint_dir) total_steps = start_step epoch = start_epoch print(&#39;=> Start training...&#39;) while total_steps < args.num_steps: model.train() # mannually change random seed for shuffling every epoch if args.distributed: train_sampler.set_epoch(epoch) if args.local_rank == 0: summary_writer.add_scalar(&#39;lr&#39;, lr_scheduler.get_last_lr()[0], total_steps + 1) for i, sample in enumerate(train_loader): left = sample[&#39;left&#39;].to(device) # [B, 3, H, W] right = 
sample[&#39;right&#39;].to(device) gt_disp = sample[&#39;disp&#39;].to(device) # [B, H, W] mask = (gt_disp > 0) & (gt_disp < args.max_disp) if not mask.any(): continue pred_disps = model(left, right, attn_type=args.attn_type, attn_splits_list=args.attn_splits_list, corr_radius_list=args.corr_radius_list, prop_radius_list=args.prop_radius_list, num_reg_refine=args.num_reg_refine, task=&#39;stereo&#39;, )[&#39;flow_preds&#39;] disp_loss = 0 all_loss = [] # loss weights loss_weights = [0.9 ** (len(pred_disps) - 1 - power) for power in range(len(pred_disps))] for k in range(len(pred_disps)): pred_disp = pred_disps[k] weight = loss_weights[k] curr_loss = F.smooth_l1_loss(pred_disp[mask], gt_disp[mask], reduction=&#39;mean&#39;) disp_loss += weight * curr_loss all_loss.append(curr_loss) total_loss = disp_loss # more efficient zero_grad for param in model.parameters(): param.grad = None total_loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) optimizer.step() lr_scheduler.step() total_steps += 1 if total_steps % args.summary_freq == 0 and args.local_rank == 0: img_summary = dict() img_summary[&#39;left&#39;] = left img_summary[&#39;right&#39;] = right img_summary[&#39;gt_disp&#39;] = gt_disp img_summary[&#39;pred_disp&#39;] = pred_disps[-1] pred_disp = pred_disps[-1] img_summary[&#39;disp_error&#39;] = disp_error_img(pred_disp, gt_disp) save_images(summary_writer, &#39;train&#39;, img_summary, total_steps) epe = F.l1_loss(gt_disp[mask], pred_disp[mask], reduction=&#39;mean&#39;) print(&#39;step: %06d \t epe: %.3f&#39; % (total_steps, epe.item())) summary_writer.add_scalar(&#39;train/epe&#39;, epe.item(), total_steps) summary_writer.add_scalar(&#39;train/disp_loss&#39;, disp_loss.item(), total_steps) summary_writer.add_scalar(&#39;train/total_loss&#39;, total_loss.item(), total_steps) # save all losses for s in range(len(all_loss)): save_name = &#39;train/loss&#39; + str(len(all_loss) - s - 1) save_value = all_loss[s] 
summary_writer.add_scalar(save_name, save_value, total_steps) d1 = d1_metric(pred_disp, gt_disp, mask) summary_writer.add_scalar(&#39;train/d1&#39;, d1.item(), total_steps) # always save the latest model for resuming training if args.local_rank == 0 and total_steps % args.save_latest_ckpt_freq == 0: # Save lastest checkpoint after each epoch checkpoint_path = os.path.join(args.checkpoint_dir, &#39;checkpoint_latest.pth&#39;) save_dict = { &#39;model&#39;: model_without_ddp.state_dict(), &#39;optimizer&#39;: optimizer.state_dict(), &#39;step&#39;: total_steps, &#39;epoch&#39;: epoch, } torch.save(save_dict, checkpoint_path) # save checkpoint of specific epoch if args.local_rank == 0 and total_steps % args.save_ckpt_freq == 0: print(&#39;Save checkpoint at step: %d&#39; % total_steps) checkpoint_path = os.path.join(args.checkpoint_dir, &#39;step_%06d.pth&#39; % total_steps) save_dict = { &#39;model&#39;: model_without_ddp.state_dict(), } torch.save(save_dict, checkpoint_path) # validation if total_steps % args.val_freq == 0: val_results = {} if &#39;things&#39; in args.val_dataset: results_dict = validate_things(model_without_ddp, max_disp=args.max_disp, padding_factor=args.padding_factor, inference_size=args.inference_size, attn_type=args.attn_type, attn_splits_list=args.attn_splits_list, corr_radius_list=args.corr_radius_list, prop_radius_list=args.prop_radius_list, num_reg_refine=args.num_reg_refine, ) if args.local_rank == 0: val_results.update(results_dict) if &#39;kitti15&#39; in args.val_dataset or &#39;kitti12&#39; in args.val_dataset: results_dict = validate_kitti15(model_without_ddp, padding_factor=args.padding_factor, inference_size=args.inference_size, attn_type=args.attn_type, attn_splits_list=args.attn_splits_list, corr_radius_list=args.corr_radius_list, prop_radius_list=args.prop_radius_list, num_reg_refine=args.num_reg_refine, count_time=args.count_time, ) if args.local_rank == 0: val_results.update(results_dict) if &#39;eth3d&#39; in 
args.val_dataset: results_dict = validate_eth3d(model_without_ddp, padding_factor=args.padding_factor, inference_size=args.inference_size, attn_type=args.attn_type, attn_splits_list=args.attn_splits_list, corr_radius_list=args.corr_radius_list, prop_radius_list=args.prop_radius_list, num_reg_refine=args.num_reg_refine, ) if args.local_rank == 0: val_results.update(results_dict) if &#39;middlebury&#39; in args.val_dataset: results_dict = validate_middlebury(model_without_ddp, padding_factor=args.padding_factor, inference_size=args.inference_size, attn_type=args.attn_type, attn_splits_list=args.attn_splits_list, corr_radius_list=args.corr_radius_list, prop_radius_list=args.prop_radius_list, num_reg_refine=args.num_reg_refine, resolution=args.middlebury_resolution, ) if args.local_rank == 0: val_results.update(results_dict) if args.local_rank == 0: # save to tensorboard for key in val_results: tag = key.split(&#39;_&#39;)[0] tag = tag + &#39;/&#39; + key summary_writer.add_scalar(tag, val_results[key], total_steps) # save validation results to file val_file = os.path.join(args.checkpoint_dir, &#39;val_results.txt&#39;) with open(val_file, &#39;a&#39;) as f: f.write(&#39;step: %06d\n&#39; % total_steps) # order of metrics metrics = [&#39;things_epe&#39;, &#39;things_d1&#39;, &#39;kitti15_epe&#39;, &#39;kitti15_d1&#39;, &#39;kitti15_3px&#39;, &#39;eth3d_epe&#39;, &#39;eth3d_1px&#39;, &#39;middlebury_epe&#39;, &#39;middlebury_2px&#39;, ] eval_metrics = [] for metric in metrics: if metric in val_results.keys(): eval_metrics.append(metric) metrics_values = [val_results[metric] for metric in eval_metrics] num_metrics = len(eval_metrics) f.write(("| {:>20} " * num_metrics + &#39;\n&#39;).format(*eval_metrics)) f.write(("| {:20.4f} " * num_metrics).format(*metrics_values)) f.write(&#39;\n\n&#39;) model.train() if total_steps >= args.num_steps: print(&#39;Training done&#39;) return epoch += 1 if __name__ == &#39;__main__&#39;: parser = get_args_parser() args = 
parser.parse_args() if &#39;LOCAL_RANK&#39; not in os.environ: os.environ[&#39;LOCAL_RANK&#39;] = str(args.local_rank) main(args)分析代码
最新发布
07-15
评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值