1094. The Largest Generation (25)

本文提供了一种解决PAT-A-1094题目的方法,使用BFS算法遍历树形结构,找出节点最多的层级及其数量。特别处理了特殊情况,当只有一个根节点时,直接返回特定结果。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

题目详情:https://www.patest.cn/contests/pat-a-practise/1094

提交情况:
(提交结果截图,原图片缺失)

提交代码:

#include <iostream>
#include <vector>
using namespace std;
#define Max 110
// level[i]  : generation (BFS depth) of node i; the root is generation 1
// maxLevel  : deepest generation reached by BFS
// visit[i]  : set to 1 once node i has been enqueued
int n,m,level[Max],maxLevel,visit[Max];
vector<int> child[Max];  // child[i] holds the children of node i
int queue[Max],front,rear;  // hand-rolled array queue; front/rear start at -1

/**
 * Breadth-first traversal from `start`, filling level[] with each node's
 * generation number and updating the global maxLevel with the deepest
 * generation reached.
 *
 * Precondition: front == rear == -1, and visit[]/level[] are zeroed.
 *
 * Fixes vs. the original:
 *  - level[start] = 1 (was the hard-coded `level[1] = 1`, which broke
 *    a BFS started from any node other than 1);
 *  - maxLevel is seeded with the root's level, so a childless root
 *    still yields maxLevel == 1 instead of leaving it at 0.
 */
void BFS( int start )
{
    visit[start] = 1;
    level[start] = 1;  // the start node is generation 1
    if( maxLevel < 1 )
        maxLevel = 1;  // a lone root still counts as one generation
    queue[++rear] = start;
    while( front != rear )
    {
        int index = queue[++front];
        for( int i=0;i<(int)child[index].size();++i )
        {
            int kid = child[index][i];  // name the child once, for readability
            queue[++rear] = kid;  // enqueue the child
            level[kid] = level[index] + 1;  // one generation deeper than its parent
            visit[kid] = 1;  // mark as enqueued
            if( maxLevel < level[kid] )  // track the deepest generation
                maxLevel = level[kid];
        }
    }
}
int main()
{
    cin>>n>>m;
    if(  n != 0 && m == 0 )
    {
        cout<<"1 1"<<endl;
        return 0;
    }
    for( int i=0;i<m;++i )  //处理输入
    {
        int father,kids,num;
        cin>>father>>num;
        for( int j=0;j<num;++j )
        {
            cin>>kids;
            child[father].push_back(kids);
        }
    }
    //BFS()前的初始化
    front = rear = -1; 
    BFS(1);
//  for( int i =1;i<=n;++i )  //查看level[]数组 
//  {
//      if( i == n )
//          cout<<level[i]<<endl;
//      else
//          cout<<level[i]<<" ";
//  }
//  cout<<"maxLevel is "<<maxLevel<<endl;
    int largest = 0,generation;
    //寻找人数最多的一代人和代数,即某一层上节点最多的节点数和层数
    for( int i=1;i<=maxLevel;++i )   
    {
        int temp = 0;
        for( int j=1;j<=n;++j )
        {
            if( i == level[j] )
                ++temp;
        }
        if( temp > largest )
        {
            largest = temp;
            generation = i;
        }
    }
    cout<<largest<<" "<<generation<<endl;
    return 0;
}

一开始测试点1没有通过,加了这几行代码:

if(  n != 0 && m == 0 )
    {
        cout<<"1 1"<<endl;
        return 0;
    }

才对的。原因其实可以说清楚:m == 0 表示树中没有任何边,而题目保证输入是一棵树(只有一个根节点),所以此时整棵树只有根节点这一个节点,它位于第 1 层。于是人数最多的一代自然就是第 1 层,人数为 1,输出 "1 1"。并不是"n 个孤立节点都算第一层"的巧合,而是无边的树只剩根节点这一种合法情况。

import time import torch, torch_npu from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig # 替换成本地的模型权重路径 MODEL_PATH = "/models/z50051264/Qwen2.5-7B-Instruct" bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16, # Support torch.float16, torch.float32, torch.bfloat16 bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=False, bnb_4bit_quant_storage=torch.uint8 ) torch.npu.synchronize() start_time = time.time() model = AutoModelForCausalLM.from_pretrained( MODEL_PATH, device_map={"":0}, quantization_config=bnb_config, low_cpu_mem_usage=True, torch_dtype=torch.float16 # Support torch.float16, torch.float32, torch.bfloat16 ) torch.npu.synchronize() print(f"[+] load time: {time.time() - start_time:.6}s") tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH) model.eval() prompt = "Once upon a time, " inputs = tokenizer([prompt], return_tensors="pt") input_ids = inputs.input_ids.npu() attention_mask = inputs.attention_mask.npu() torch.npu.synchronize() start_time = time.time() generated_ids = model.generate( input_ids=input_ids, attention_mask=attention_mask, max_new_tokens=32, do_sample=False, ) torch.npu.synchronize() print(f"[+] inference time: {time.time() - start_time:.6}s") print(tokenizer.batch_decode(generated_ids)) 我在使用npu版本的bitsandbytes,但是执行以上代码,出现错误: [root@190f3c453709 inference]# python nf4.py /usr/local/python3.10.17/lib/python3.10/site-packages/torch_npu/utils/storage.py:38: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() if self.device.type != 'cpu': Loading checkpoint shards: 100%|█████████████████████████████████████████████████████████| 4/4 [00:13<00:00, 3.26s/it] [+] load time: 14.9728s The following generation flags are not valid and may be ignored: ['temperature', 'top_p', 'top_k']. Set `TRANSFORMERS_VERBOSITY=info` for more details. [+] inference time: 3.78472s ['Once upon a time, 123456789 was the largest known prime number. If a new prime number, 123456789'] 请分析问题原因,并给出详细解决方法
最新发布
07-23
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值