python打卡DAY30

部署运行你感兴趣的模型镜像

import torch

import torch.nn as nn

import torch.optim as optim

from sklearn.datasets import load_iris

from sklearn.model_selection import train_test_split

import matplotlib.pyplot as plt

import numpy as np

import time

# # 加载鸢尾花数据集

# iris=load_iris()

# x=iris.data

# y=iris.target

# x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=42,shuffle=True)

# # 归一化数据,神经网络对于输入数据的尺寸敏感,归一化是最常见的处理方式

# ##自变量有多个,且量纲不尽相同对其进行归一化减少量纲对结果的影响

# from sklearn.preprocessing import MinMaxScaler

# scaler=MinMaxScaler()

# x_train=scaler.fit_transform(x_train)

# x_test=scaler.transform(x_test)

# # 将数据转换为 PyTorch 张量,因为 PyTorch 使用张量进行训练

# # y_train和y_test是整数,所以需要转化为long类型,如果是float32,会输出1.0 0.0

# x_train=torch.FloatTensor(x_train)

# y_train=torch.LongTensor(y_train)

# x_test=torch.FloatTensor(x_test)

# y_test=torch.LongTensor(y_test)  # bug fixed: was a duplicate y_train conversion, leaving y_test unconverted

# class MLP(nn.Module):

# def __init__(self):

# super(MLP,self).__init__()

# self.fc1=nn.Linear(4,10)

# self.relu=nn.ReLU()

# self.fc2=nn.Linear(10,3)

# def forward(self,x):

# x=self.relu(self.fc1(x))

# x=self.fc2(x)

# return x

# model=MLP()

# #交叉熵损失函数

# criterion=nn.CrossEntropyLoss()

# optimizer=optim.Adam(model.parameters(),lr=0.001)

# num_epochs=20000

# losses=[]

# start_time=time.time()

# for epoch in range(num_epochs):

# outputs=model.forward(x_train)

# loss=criterion(outputs,y_train)

# optimizer.zero_grad()

# loss.backward()

# optimizer.step()

# losses.append(loss.item())

# if(epoch+1)%1000==0:

# print(f'epoch[{epoch+1}/{num_epochs}],loss:{loss.item():.4f}')

# time_all=time.time()-start_time

# print(f'Training time: {time_all:.2f} seconds')


 

# plt.plot(range(num_epochs),losses)

# plt.xlabel('Epoch')

# plt.ylabel('Loss')

# plt.title('Training Loss over Epochs')

# plt.show()

# ##GPU

# if torch.backends.mps.is_available():

# device=torch.device('mps')

# else:

# device=torch.device('cpu')


 

# iris=load_iris()

# x=iris.data

# y=iris.target

# x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=42,shuffle=True)

# # # 归一化数据,神经网络对于输入数据的尺寸敏感,归一化是最常见的处理方式

# # ##自变量有多个,且量纲不尽相同对其进行归一化减少量纲对结果的影响

# from sklearn.preprocessing import MinMaxScaler

# scaler=MinMaxScaler()

# x_train=scaler.fit_transform(x_train)

# x_test=scaler.transform(x_test)  # bug fixed: fit_transform on the test set leaks test statistics; reuse the train-fitted scaler

# # 将数据转换为PyTorch张量并移至GPU

# # 分类问题交叉熵损失要求标签为long类型

# # 张量具有to(device)方法,可以将张量移动到指定的设备上

# x_train=torch.FloatTensor(x_train).to(device)

# y_train=torch.LongTensor(y_train).to(device)

# x_test=torch.FloatTensor(x_test).to(device)

# y_test=torch.LongTensor(y_test).to(device)

# class MLP(nn.Module):

# def __init__(self):

# super(MLP,self).__init__()

# self.fc1=nn.Linear(4,10)

# self.relu=nn.ReLU()

# self.fc2=nn.Linear(10,3)

# def forward(self,x):

# x=self.relu(self.fc1(x))

# x=self.fc2(x)

# return x

# # 实例化模型并移至GPU

# # MLP继承nn.Module类,所以也具有to(device)方法

# model=MLP().to(device)

# criterion=nn.CrossEntropyLoss()

# optimizer=optim.Adam(model.parameters(),lr=0.001)

# num_epochs=20000

# losses=[]

# start_time=time.time()

# for epoch in range(num_epochs):

# outputs=model(x_train)

# loss=criterion(outputs,y_train)

# optimizer.zero_grad()

# loss.backward()

# optimizer.step()

# # losses.append(loss.item())

# if (epoch + 1) % 1000 == 0:

# print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

# time_all = time.time() - start_time

# print(f'Training time: {time_all:.2f} seconds')

# # plt.plot(range(num_epochs),losses)

# # plt.xlabel('Epoch')

# # plt.ylabel('Loss')

# # plt.title('Training Loss over Epochs')

# # plt.show()


 

# class Counter:

# def __init__(self):

# self.count=0

# self.a=6

# def __call__(self):

# self.count+=1

# return self.count

# count=Counter()

# print(count())

# print(count.a)

class Adder:
    """Callable object demonstrating __call__.

    The instance attribute ``name`` (520) is unrelated to the ``name``
    keyword of ``__call__`` — the call uses its own parameter/default.
    """

    def __init__(self):
        # Instance attribute; note it does NOT shadow the __call__ kwarg.
        self.name = 520

    def __call__(self, a, b, name=555):
        """Return 'name:a+b' as a string.

        Args:
            a: first addend.
            b: second addend.
            name: label prefix; defaults to 555 (not self.name).
        """
        # Fixed from the pasted original: class/method indentation was
        # flattened to column 0 (a SyntaxError); also dropped the
        # redundant parentheses around the f-string.
        return f'{name}:{a + b}'

# Demonstrate the callable instance: prints "666:8".
adder = Adder()
result = adder(3, 5, name=666)
print(result)

您可能感兴趣的与本文相关的镜像

PyTorch 2.5

PyTorch 2.5

PyTorch
Cuda

PyTorch 是一个开源的 Python 机器学习库,基于 Torch 库,底层由 C++ 实现,应用于人工智能领域,如计算机视觉和自然语言处理

Python中实现打卡兑换礼物的功能,通常会涉及到以下几个步骤: 1. **数据结构设计**:创建一个数据库或数据结构来存储用户的打卡记录,比如字典或列表,其中每个元素包含用户ID、日期等信息。 ```python users_gifts = {} # 使用字典,key为用户ID,value为打卡记录 ``` 2. **添加打卡功能**:编写函数,当用户调用时,检查用户是否存在并更新打卡次数。例如,可以使用`datetime`库来记录每日打卡时间。 ```python import datetime def check_in(user_id): today = datetime.datetime.now().strftime("%Y-%m-%d") if user_id not in users_gifts: users_gifts[user_id] = {today: 1} else: if today not in users_gifts[user_id]: users_gifts[user_id][today] = 1 else: users_gifts[user_id][today] += 1 ``` 3. **条件判断与兑换规则**:设定一个规则,如连续7天打卡即可兑换一份礼物。可以遍历用户的打卡记录,检查是否符合条件。 ```python def can_exchange(user_id): user_history = users_gifts.get(user_id, {}) consecutive_days = {} for date, count in user_history.items(): if date - consecutive_days.get(date, '') <= datetime.timedelta(days=6): # 连续6天 consecutive_days[date] = count if len(consecutive_days) == 7: # 找到7连日 return True return False ``` 4. **兑换操作**:如果满足兑换条件,可以删除已达到兑换的打卡记录,并通知用户兑换成功。 ```python def redeem_gift(user_id): if can_exchange(user_id): for day, _ in list(users_gifts[user_id].items())[:7]: # 删除前7天的打卡记录 del users_gifts[user_id][day] print(f"恭喜用户{user_id},您的7天连续打卡已成功兑换礼物!") ```
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值