00110_The Class class

1. Class objects are constructed automatically by the Java Virtual Machine as classes are loaded, and by calls to the defineClass method in class loaders.
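
Class has no public constructor, so application code never builds a Class object directly. To make the defineClass point concrete, here is a minimal sketch of a custom class loader that turns raw bytecode into a Class object; DiskClassLoader and the file-path handling are illustrative assumptions, not part of the original post.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

// Sketch: a class loader that reads a compiled .class file from disk.
public class DiskClassLoader extends ClassLoader {
    private final Path classFile;

    public DiskClassLoader(Path classFile) {
        this.classFile = classFile;
    }

    @Override
    protected Class<?> findClass(String name) throws ClassNotFoundException {
        try {
            // Read the raw bytecode and hand it to defineClass,
            // which asks the JVM to construct the corresponding Class object.
            byte[] bytes = Files.readAllBytes(classFile);
            return defineClass(name, bytes, 0, bytes.length);
        } catch (IOException e) {
            throw new ClassNotFoundException(name, e);
        }
    }
}

Calling loadClass(name) on such a loader first delegates to its parent loader and only falls back to findClass when the parent cannot resolve the name, so this path is taken for classes that are not already on the classpath.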

2. Three ways to obtain a Class object

  (1) Way 1: the getClass() method defined in the Object class

Person p = new Person();
Class c = p.getClass();

  (2) Way 2: the ClassName.class syntax, the class literal, which yields the bytecode-file object (every data type carries a static class property, so this looks simpler than the first way)

Class c2 = Person.class;

  (3) Way 3: the static forName method of the Class class (pass the fully qualified class name to it as a string); the sketch after these three snippets contrasts the three ways.

Class c3 = Class.forName("Person"); // use the fully qualified name when the class lives in a package
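
The three ways also differ in what they need and what they return. Below is a minimal sketch contrasting them; the Animal/Dog pair and the ThreeWaysDemo class are hypothetical names used only for illustration.

// Sketch contrasting the three ways of obtaining a Class object.
class Animal {}
class Dog extends Animal {}

public class ThreeWaysDemo {
    public static void main(String[] args) throws ClassNotFoundException {
        // 1. getClass() returns the RUNTIME class, even through a supertype reference.
        Animal a = new Dog();
        System.out.println(a.getClass());        // class Dog

        // 2. A class literal needs no instance and also works for primitives and arrays.
        System.out.println(int.class);           // int
        System.out.println(String[].class);      // class [Ljava.lang.String;

        // 3. forName() takes the fully qualified name as a string and throws
        //    ClassNotFoundException if no such class is on the classpath.
        System.out.println(Class.forName("java.lang.String")); // class java.lang.String
    }
}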

3. How the third way differs from the first two

  (1) The first two ways require the Person type to be known explicitly at compile time;

  (2) The third way only needs a string naming the type, which makes it far more extensible: the code does not have to know the class in advance, it just receives the string, for example from a configuration file, and loads the class at run time (see the sketch below).
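
A minimal sketch of that configuration-file idea follows. The file name config.properties and the key className are illustrative assumptions, not from the original post.

// Sketch: load a class named in a configuration file, then instantiate it reflectively.
// Assumes a config.properties file in the working directory containing, e.g.:
//   className=cn.gzdlh_01_Reflect.Person
import java.io.FileInputStream;
import java.util.Properties;

public class ConfigDrivenDemo {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        try (FileInputStream in = new FileInputStream("config.properties")) {
            props.load(in);
        }

        String className = props.getProperty("className");
        // Only a string is needed here; the concrete type never appears in the code,
        // so the implementation can be swapped by editing the file, without recompiling.
        Class<?> clazz = Class.forName(className);
        Object instance = clazz.getDeclaredConstructor().newInstance();
        System.out.println(instance);
    }
}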

4. Code demo

package cn.gzdlh_01_Reflect;

/*
 * Ways to obtain the .class bytecode-file (Class) object
 *         1: the getClass() method defined in the Object class
 *         2: the ClassName.class class literal
 *         3: the reflection method
 *             public static Class<?> forName(String className) throws ClassNotFoundException
 *             Returns the Class object associated with the class or interface with the given string name
 */
public class ReflectDemo {
    public static void main(String[] args) throws ClassNotFoundException {
        // 1: the getClass() method defined in the Object class
        // Person p1 = new Person();
        // Class c1 = p1.getClass();
        // System.out.println("c1 = " + c1);

        // 2: the ClassName.class class literal
        // Class c2 = Person.class;
        // System.out.println("c2 = " + c2);

        // 3: the reflection method Class.forName
        Class c3 = Class.forName("cn.gzdlh_01_Reflect.Person"); // package.ClassName (fully qualified)
        System.out.println("c3 = " + c3);
    }
}
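
If the commented-out snippets above are restored, the three Class references can be compared directly. The following sketch shows what that comparison yields and ties back to point 1: the JVM builds exactly one Class object per loaded class, per class loader. ClassIdentityDemo is a hypothetical class name, not part of the original post.

package cn.gzdlh_01_Reflect;

// Sketch: all three ways hand back the same Class object for Person.
public class ClassIdentityDemo {
    public static void main(String[] args) throws ClassNotFoundException {
        Person p1 = new Person();                  // prints "no-arg constructor"
        Class<?> c1 = p1.getClass();
        Class<?> c2 = Person.class;
        Class<?> c3 = Class.forName("cn.gzdlh_01_Reflect.Person");

        System.out.println(c1 == c2);              // true
        System.out.println(c2 == c3);              // true
    }
}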

  The Person class

package cn.gzdlh_01_Reflect;

public class Person {
    // member fields
    public String name;
    public int age;
    private String address;

    // constructors
    public Person() {
        System.out.println("no-arg constructor");
    }

    public Person(String name) {
        this.name = name;
        System.out.println("constructor taking a String");
    }

    // private constructor
    private Person(String name, int age) {
        this.name = name;
        this.age = age;
        System.out.println("constructor taking a String and an int");
    }

    public Person(String name, int age, String address) {
        this.name = name;
        this.age = age;
        this.address = address;
        System.out.println("constructor taking a String, an int and a String");
    }

    // member methods
    // method with no return value and no parameters
    public void method1() {
        System.out.println("method with no return value and no parameters");
    }

    // method with no return value and one parameter
    public void method2(String name) {
        System.out.println("method with no return value and one parameter, name = " + name);
    }

    // method with a return value and no parameters
    public int method3() {
        System.out.println("method with a return value and no parameters");
        return 123;
    }

    // method with a return value and one parameter
    public String method4(String name) {
        System.out.println("method with a return value and one parameter");
        return "haha " + name;
    }

    // private method
    private void method5() {
        System.out.println("private method");
    }

    @Override
    public String toString() {
        return "Person [name=" + name + ", age=" + age + ", address=" + address
                + "]";
    }
}
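
Person deliberately mixes public and private constructors, fields, and methods so that reflection examples have something to inspect. As a brief preview, the Class object obtained above can already enumerate those members; InspectPersonDemo is a hypothetical name, not part of the original post.

package cn.gzdlh_01_Reflect;

import java.lang.reflect.Constructor;
import java.lang.reflect.Method;

// Sketch: once the Class object is in hand, it exposes the class's structure.
public class InspectPersonDemo {
    public static void main(String[] args) throws ClassNotFoundException {
        Class<?> c = Class.forName("cn.gzdlh_01_Reflect.Person");

        // getDeclaredConstructors() includes the private (String, int) constructor.
        for (Constructor<?> con : c.getDeclaredConstructors()) {
            System.out.println(con);
        }

        // getDeclaredMethods() includes the private method5(), but not inherited methods.
        for (Method m : c.getDeclaredMethods()) {
            System.out.println(m);
        }
    }
}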

Reposted from: https://www.cnblogs.com/gzdlh/p/8159200.html
