Week Training: 542. 01 Matrix

This post describes a method that uses breadth-first search (BFS) to compute, for every element of a matrix, the distance to the nearest 0. All positions that initially contain 0 are pushed into a queue; the search then expands step by step to the adjacent 1s, updating their distance values, until the distances for the whole matrix are filled in.


Our goal is to find, for each element of the matrix, the distance to the nearest 0. My first thought was to run a BFS from every single element, but doing that efficiently is the challenge. Since the answer is itself a matrix, it also matters that each distance be stored at the right position. The trick is a multi-source BFS: first push the position of every 0 in the matrix into a queue; then, during the traversal, pop a cell and visit its four neighbors (up, down, left, right), checking whether each holds a 0 or a 1. Whenever we reach a 1 whose distance can be improved, we record the new distance and push its position, just as in an ordinary BFS. Because every expansion step increases the distance by exactly 1, each cell ends up with the correct shortest distance, and the whole algorithm runs in O(rows × cols) time.

#include <climits>
#include <queue>
#include <utility>
#include <vector>
using namespace std;

class Solution {
public:
    vector<vector<int>> updateMatrix(vector<vector<int>>& matrix) {
        int row = matrix.size();
        int col = matrix[0].size();
        queue<pair<int,int>> q;
        // Multi-source BFS: every 0 cell is a source at distance 0;
        // every 1 cell starts out "unreached" (INT_MAX).
        for(int i=0;i<row;i++){
            for(int j=0;j<col;j++){
                if(matrix[i][j]==0){
                    q.push(make_pair(i,j));
                }
                else{
                    matrix[i][j]=INT_MAX;
                }
            }
        }
        // The four orthogonal directions: up, down, left, right.
        int dist[4][2] = {{-1,0},{1,0},{0,-1},{0,1}};
        while(!q.empty()){
            pair<int,int> p = q.front();
            q.pop();
            for(int i=0;i<4;i++){
                int r = p.first + dist[i][0];
                int c = p.second + dist[i][1];
                // Skip out-of-bounds neighbors and neighbors whose
                // recorded distance is already at least as good.
                if(r<0||r>=row||c<0||c>=col||matrix[r][c]<=matrix[p.first][p.second]+1)
                    continue;
                matrix[r][c] = matrix[p.first][p.second]+1;
                q.push(make_pair(r,c));
            }
        }
        return matrix;
    }
};
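As a quick sanity check, the small driver below (my addition, not part of the submitted solution) compiles together with the Solution class above and runs updateMatrix on the example matrix [[0,0,0],[0,1,0],[1,1,1]]; the expected output is [[0,0,0],[0,1,0],[1,2,1]].

#include <cstdio>

int main() {
    vector<vector<int>> mat = {{0,0,0},{0,1,0},{1,1,1}};
    vector<vector<int>> res = Solution().updateMatrix(mat);
    for (const auto& r : res) {            // expected: 0 0 0 / 0 1 0 / 1 2 1
        for (int v : r) printf("%d ", v);
        printf("\n");
    }
    return 0;
}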

