06-Student Becomes the Teacher

本文介绍了一个简单的Python程序,用于管理学生的作业、测验和考试成绩,并计算最终的平均分及对应的字母等级。通过定义多个函数来实现成绩计算和学生数据的处理。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

06-Student Becomes the Teacher

在Codecademy上学习Python课程,刷题的笔记记录如下,欢迎交流!

目录

Good Morning Class!

i. Lesson Number One 1/9
# Gradebook records: one dict per student; score lists start empty and are
# filled in by a later exercise.
lloyd = dict(name="Lloyd", homework=[], quizzes=[], tests=[])
alice = dict(name="Alice", homework=[], quizzes=[], tests=[])
tyler = dict(name="Tyler", homework=[], quizzes=[], tests=[])
ii. What’s the Score? 2/9
def _make_student(name, homework, quizzes, tests):
    # Bundle one student's name and score lists into a record dict.
    return {"name": name, "homework": homework, "quizzes": quizzes, "tests": tests}

# Section 2/9: the same three records, now populated with scores.
lloyd = _make_student("Lloyd", [90.0, 97.0, 75.0, 92.0], [88.0, 40.0, 94.0], [75.0, 90.0])
alice = _make_student("Alice", [100.0, 92.0, 98.0, 100.0], [82.0, 83.0, 91.0], [89.0, 97.0])
tyler = _make_student("Tyler", [0.0, 87.0, 75.0, 22.0], [0.0, 75.0, 78.0], [100.0, 100.0])
iii. Put It Together 3/9
# Section 3/9: the three student records collected into one list.
lloyd = dict(name="Lloyd",
             homework=[90.0, 97.0, 75.0, 92.0],
             quizzes=[88.0, 40.0, 94.0],
             tests=[75.0, 90.0])
alice = dict(name="Alice",
             homework=[100.0, 92.0, 98.0, 100.0],
             quizzes=[82.0, 83.0, 91.0],
             tests=[89.0, 97.0])
tyler = dict(name="Tyler",
             homework=[0.0, 87.0, 75.0, 22.0],
             quizzes=[0.0, 75.0, 78.0],
             tests=[100.0, 100.0])

# Keep references (not copies) so later code can iterate over every student.
students = [lloyd, alice, tyler]
iv. For the Record 4/9
# Section 4/9: define the records, gather them, and print every field.
lloyd = {
    "name": "Lloyd",
    "homework": [90.0, 97.0, 75.0, 92.0],
    "quizzes": [88.0, 40.0, 94.0],
    "tests": [75.0, 90.0]
}
alice = {
    "name": "Alice",
    "homework": [100.0, 92.0, 98.0, 100.0],
    "quizzes": [82.0, 83.0, 91.0],
    "tests": [89.0, 97.0]
}
tyler = {
    "name": "Tyler",
    "homework": [0.0, 87.0, 75.0, 22.0],
    "quizzes": [0.0, 75.0, 78.0],
    "tests": [100.0, 100.0]
}
students = [lloyd, alice, tyler]

# Fix: the original used Python-2-only `print x` statements, which are a
# syntax error on Python 3.  `print(...)` with a single argument produces
# identical output on both Python 2 and Python 3.
for student in students:
    print(student['name'])
    print(student['homework'])
    print(student['quizzes'])
    print(student['tests'])

String it Together

i. reverse 7/15
def reverse(text):
    """Return *text* with its characters in reverse order."""
    chars = []
    for ch in text:
        # Prepending each character reverses the order as we go.
        chars.insert(0, ch)
    return ''.join(chars)
ii. Just Weight and See 6/9
# Section 6/9: rebuild the three student records from a small data table.
lloyd, alice, tyler = (
    {"name": n, "homework": h, "quizzes": q, "tests": t}
    for n, h, q, t in (
        ("Lloyd", [90.0, 97.0, 75.0, 92.0], [88.0, 40.0, 94.0], [75.0, 90.0]),
        ("Alice", [100.0, 92.0, 98.0, 100.0], [82.0, 83.0, 91.0], [89.0, 97.0]),
        ("Tyler", [0.0, 87.0, 75.0, 22.0], [0.0, 75.0, 78.0], [100.0, 100.0]),
    )
)

# Add your function below!
def average(numbers):
    """Return the arithmetic mean of *numbers* as a float.

    float() keeps the division exact under Python 2's integer division.
    """
    return float(sum(numbers)) / len(numbers)

def get_average(student):
    """Weighted overall score: homework 10%, quizzes 30%, tests 60%."""
    weights = (("homework", 0.1), ("quizzes", 0.3), ("tests", 0.6))
    return sum(average(student[key]) * weight for key, weight in weights)
iii. Sending a Letter 7/9
# Section 7/9: student records, built by zipping values onto a fixed key tuple.
_KEYS = ("name", "homework", "quizzes", "tests")

lloyd = dict(zip(_KEYS, ("Lloyd", [90.0, 97.0, 75.0, 92.0], [88.0, 40.0, 94.0], [75.0, 90.0])))
alice = dict(zip(_KEYS, ("Alice", [100.0, 92.0, 98.0, 100.0], [82.0, 83.0, 91.0], [89.0, 97.0])))
tyler = dict(zip(_KEYS, ("Tyler", [0.0, 87.0, 75.0, 22.0], [0.0, 75.0, 78.0], [100.0, 100.0])))

# Add your function below!
def average(numbers):
    """Return the mean of the values in *numbers* (always a float)."""
    count = len(numbers)
    total = float(sum(numbers))
    return total / count

def get_average(student):
    """Combine this student's section means with weights 0.1 / 0.3 / 0.6."""
    hw = average(student["homework"])
    qz = average(student["quizzes"])
    ts = average(student["tests"])
    return hw * 0.1 + qz * 0.3 + ts * 0.6

def get_letter_grade(score):
    """Translate a numeric score into a letter grade.

    Cutoffs: 90 -> A, 80 -> B, 70 -> C, 60 -> D, below 60 -> F.
    """
    for cutoff, letter in ((90, "A"), (80, "B"), (70, "C"), (60, "D")):
        if score >= cutoff:
            return letter
    return "F"

# Exercise check: compute Lloyd's weighted average; the return value is
# neither printed nor stored (the grader only verifies the call succeeds).
get_average(lloyd)
iv. Part of the Whole 8/9
# Method 1: filtering — keep every character that is not a vowel.
def anti_vowel(text):
    """Return *text* with every vowel (upper- and lower-case) removed."""
    kept = []
    for ch in text:
        # Membership test replaces the original 10-way `and` comparison chain.
        if ch not in "aeiouAEIOU":
            kept.append(ch)
    return "".join(kept)

# Method 2: deletion — strip each vowel from a working copy of the text.
def anti_vowel(text):
    """Return *text* with all vowels deleted, via repeated str.replace."""
    stripped = text
    for vowel in "aeiouAEIOU":
        # replace() removes every occurrence of this vowel at once.
        stripped = stripped.replace(vowel, "")
    return stripped
v. How is Everybody Doing? 9/9
def _student(name, homework, quizzes, tests):
    # Assemble one gradebook record from its four fields.
    return {"name": name, "homework": homework, "quizzes": quizzes, "tests": tests}

# Section 9/9: the final copies of the three student records.
lloyd = _student("Lloyd", [90.0, 97.0, 75.0, 92.0], [88.0, 40.0, 94.0], [75.0, 90.0])
alice = _student("Alice", [100.0, 92.0, 98.0, 100.0], [82.0, 83.0, 91.0], [89.0, 97.0])
tyler = _student("Tyler", [0.0, 87.0, 75.0, 22.0], [0.0, 75.0, 78.0], [100.0, 100.0])

# Add your function below!
def average(numbers):
    """Mean of *numbers*; dividing by float(len) forces true division on Py2."""
    return sum(numbers) / float(len(numbers))

def get_average(student):
    """Weighted average of one student: 10% homework, 30% quizzes, 60% tests."""
    section_means = [average(student[key]) for key in ("homework", "quizzes", "tests")]
    hw, qz, ts = section_means
    return 0.1 * hw + 0.3 * qz + 0.6 * ts

def get_letter_grade(score):
    """Letter grade for *score*: A >= 90, B >= 80, C >= 70, D >= 60, else F."""
    return ("A" if score >= 90 else
            "B" if score >= 80 else
            "C" if score >= 70 else
            "D" if score >= 60 else
            "F")

students = [lloyd, alice, tyler]

def get_class_average(students):
    """Return the mean of every student's weighted average score."""
    per_student = [get_average(person) for person in students]
    return average(per_student)

# Fix: the original used Python-2-only `print` statements (a syntax error on
# Python 3) and computed get_class_average(students) twice.  Compute once,
# then report the class average and its letter grade; print(...) with a
# single argument behaves identically on Python 2 and Python 3.
class_average = get_class_average(students)
print(class_average)
print(get_letter_grade(class_average))
<think>好的,用户需要寻找关于教师-学生(Teacher-Student)模型训练的相关代码,尤其是在PyTorch或TensorFlow框架中的实现。首先,我应该回忆一下教师-学生模型的基本概念和应用场景。教师模型通常是较大的预训练模型,用于指导学生模型的学习,而学生模型则更轻量,旨在保持性能的同时减少计算资源消耗。这种模型常用于知识蒸馏(Knowledge Distillation)中。 接下来,我需要确定用户的具体需求。他们可能是在寻找知识蒸馏的示例代码,或者更广义的教师-学生框架的实现。比如,在图像分类任务中,教师模型生成软标签(soft labels),学生模型通过模仿这些软标签来学习。另外,用户可能对如何在不同框架中实现这一点感兴趣,比如PyTorch或TensorFlow。 接下来,我需要考虑如何构建一个简单的示例代码。在PyTorch中,通常的做法是定义教师和学生模型,加载预训练的教师模型,然后在训练循环中结合学生模型的输出和教师模型的输出来计算损失。损失函数通常包括学生预测与真实标签的交叉熵,以及学生输出与教师输出的KL散度或其他相似性度量。 同时,我需要确保代码符合最佳实践,比如使用预训练模型(如ResNet-18作为教师,较小的网络作为学生),冻结教师模型的参数,并正确设置优化器只更新学生模型的参数。另外,数据预处理和加载也是重要部分,需要包括适当的数据增强和标准化。 然后,我应该考虑用户可能需要的相关扩展问题,比如如何调整温度参数(temperature scaling)、如何处理不同的损失函数权重、如何应用于其他任务(如自然语言处理)等。这些可以作为相关问题提出,帮助用户进一步探索。 还需要检查是否有可用的库或工具可以简化实现,比如Hugging Face的Transformers库在NLP任务中可能提供预训练的教师和学生模型,或者TensorFlow的模型压缩工具包(如TensorFlow Model Optimization Toolkit)中的知识蒸馏模块。 最后,要确保引用相关的参考文献,尤其是用户提供的引用中是否有相关的内容。例如,引用[2]提到了transformer和强化学习,可能在NLP任务中的教师-学生模型中有应用。引用[3]中的VAE可能在某些生成模型的知识蒸馏中使用到。虽然用户提供的引用中没有直接提到教师-学生模型,但可以适当关联相关技术。 总结步骤:解释教师-学生模型的基本原理,提供PyTorch和TensorFlow的代码示例,说明关键实现点,然后生成相关问题帮助用户深入理解。</think>以下是关于教师-学生模型训练的代码实现说明及框架示例: ### 教师-学生模型基本原理 教师-学生模型通过知识蒸馏(Knowledge Distillation)实现,教师模型(大模型)的输出作为软标签指导学生模型(小模型)训练,提升小模型性能[^2]。典型流程: 1. 训练/加载预训练教师模型 2. 定义轻量级学生模型 3. 
设计联合损失函数:$L = \alpha L_{CE}(y, \sigma(z_s)) + (1-\alpha)L_{KL}(\sigma(z_t/T), \sigma(z_s/T))$ - $L_{CE}$: 学生预测与真实标签的交叉熵 - $L_{KL}$: 教师与学生输出的KL散度 - $T$: 温度参数,$\alpha$: 权重系数 ### PyTorch 实现示例 ```python import torch import torch.nn as nn from torchvision.models import resnet18 # 定义教师模型(冻结参数) teacher = resnet18(pretrained=True) teacher.eval() for param in teacher.parameters(): param.requires_grad = False # 定义学生模型(简化结构) student = nn.Sequential( nn.Conv2d(3, 64, 3, padding=1), nn.ReLU(), nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(64, 1000) ) # 定义蒸馏损失 criterion_ce = nn.CrossEntropyLoss() criterion_kl = nn.KLDivLoss(reduction='batchmean') optimizer = torch.optim.Adam(student.parameters(), lr=0.001) def train_step(images, labels, T=3, alpha=0.7): with torch.no_grad(): teacher_logits = teacher(images) student_logits = student(images) loss_ce = criterion_ce(student_logits, labels) loss_kl = criterion_kl( torch.log_softmax(student_logits/T, dim=1), torch.softmax(teacher_logits/T, dim=1) ) * (T**2) # 缩放梯度 total_loss = alpha*loss_ce + (1-alpha)*loss_kl optimizer.zero_grad() total_loss.backward() optimizer.step() return total_loss ``` ### TensorFlow 实现要点 ```python import tensorflow as tf from tensorflow.keras.applications import ResNet50 # 教师模型 teacher = ResNet50(weights='imagenet', include_top=True) teacher.trainable = False # 学生模型 student = tf.keras.Sequential([ tf.keras.layers.Conv2D(64, 3, activation='relu', input_shape=(224,224,3)), tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dense(1000) ]) # 自定义训练循环 @tf.function def train_step(images, labels, T=3, alpha=0.7): with tf.GradientTape() as tape: teacher_logits = teacher(images, training=False) student_logits = student(images, training=True) ce_loss = tf.reduce_mean( tf.keras.losses.sparse_categorical_crossentropy(labels, student_logits)) teacher_probs = tf.nn.softmax(teacher_logits/T) student_probs = tf.nn.log_softmax(student_logits/T) kl_loss = tf.reduce_mean( 
tf.keras.losses.kullback_leibler_divergence(teacher_probs, student_probs)) total_loss = alpha*ce_loss + (1-alpha)*kl_loss gradients = tape.gradient(total_loss, student.trainable_variables) optimizer.apply_gradients(zip(gradients, student.trainable_variables)) return total_loss ``` ### 关键实现细节 1. **温度参数**:软化概率分布时使用$T>1$,推理时恢复$T=1$ 2. **参数冻结**:教师模型需设置为`eval()`模式并冻结梯度 3. **数据增强**:应使用与教师模型训练时相同的预处理 4. **扩展方法**:可结合注意力迁移[^4]、中间层特征匹配等技巧
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值