Python AI代码

 

 

1. 图像分类(使用CNN)

 

```python

import tensorflow as tf

from tensorflow import keras

from tensorflow.keras import layers

import numpy as np

import matplotlib.pyplot as plt

 

# 构建CNN模型

def create_cnn_model():
    """Build a small CNN classifier for 28x28x1 grayscale images, 10 classes.

    Architecture: three Conv2D stages (32/64/64 filters, 3x3 kernels, ReLU)
    with 2x2 max-pooling after the first two, then flatten, a 64-unit dense
    layer, 50% dropout, and a softmax output over the 10 digit classes.

    Returns:
        An uncompiled ``keras.Sequential`` model.
    """
    model = keras.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(10, activation='softmax'))
    return model

 

# 加载数据

# Load the MNIST handwritten-digit dataset (28x28 grayscale images).
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

# Preprocessing: add a trailing channel axis and scale pixels to [0, 1].
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0
x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255.0

# One-hot encode the 10 classes to match the categorical_crossentropy loss.
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)

# Build and compile the model.
cnn_model = create_cnn_model()
cnn_model.compile(optimizer='adam',
                 loss='categorical_crossentropy',
                 metrics=['accuracy'])

# Train for 10 epochs, holding out 20% of the training data for validation.
history = cnn_model.fit(x_train, y_train,
                       epochs=10,
                       batch_size=128,
                       validation_split=0.2)

# Evaluate on the held-out test set and report accuracy.
test_loss, test_acc = cnn_model.evaluate(x_test, y_test)
print(f'CNN测试准确率: {test_acc:.4f}')

```

 

2. 文本情感分析

 

```python

import tensorflow as tf

from tensorflow import keras

import numpy as np

from tensorflow.keras.preprocessing.text import Tokenizer

from tensorflow.keras.preprocessing.sequence import pad_sequences

 

# 示例数据

# Toy training data: eight short Chinese movie reviews.
texts = [
    "这部电影太棒了,演员表演出色!",
    "完全浪费时间,剧情糟糕",
    "还不错,可以一看",
    "史上最烂电影,后悔看了",
    "精彩绝伦,推荐给大家",
    "一般般,没什么特别",
    "非常感人的故事,哭了",
    "无聊透顶,差点睡着"
]
labels = [1, 0, 1, 0, 1, 0, 1, 0] # 1: positive, 0: negative

# Text preprocessing: fit a 1000-word vocabulary (with an out-of-vocabulary
# token), convert texts to integer sequences, and pad them to length 20.
# NOTE(review): the Keras Tokenizer splits on whitespace/punctuation, so an
# unsegmented Chinese sentence mostly maps to a single token — acceptable
# for a demo, but a word segmenter would be needed for real use.
tokenizer = Tokenizer(num_words=1000, oov_token="<OOV>")
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
padded_sequences = pad_sequences(sequences, maxlen=20)

# Model: embedding -> global average pooling -> small dense head with a
# sigmoid output for binary sentiment classification.
model = keras.Sequential([
    keras.layers.Embedding(1000, 16, input_length=20),
    keras.layers.GlobalAveragePooling1D(),
    keras.layers.Dense(16, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')
])

model.compile(optimizer='adam',
             loss='binary_crossentropy',
             metrics=['accuracy'])

# Train silently for 50 epochs on the full (tiny) dataset.
history = model.fit(padded_sequences, np.array(labels),
                   epochs=50, verbose=0)

 

# 预测新文本

def predict_sentiment(text):
    """Classify one text as positive or negative with the trained model.

    The text is tokenized by the fitted tokenizer, padded to length 20, and
    scored by the model; scores above 0.5 are treated as positive.

    Returns:
        A formatted summary string with the predicted sentiment and score.
    """
    padded = pad_sequences(tokenizer.texts_to_sequences([text]), maxlen=20)
    prediction = model.predict(padded)[0][0]
    if prediction > 0.5:
        sentiment = "正面"
    else:
        sentiment = "负面"
    return f"文本: '{text}' -> 情感: {sentiment} (置信度: {prediction:.4f})"

 

# 测试

# Quick smoke test on three new phrases.
test_texts = ["这部电影很好", "太糟糕了", "还不错"]
for text in test_texts:
    print(predict_sentiment(text))

```

 

3. 房价预测(回归问题)

 

```python

import tensorflow as tf

from tensorflow import keras

import numpy as np

import matplotlib.pyplot as plt

from sklearn.datasets import fetch_california_housing

from sklearn.model_selection import train_test_split

from sklearn.preprocessing import StandardScaler

 

# 加载数据

# Load the California housing dataset (numeric features, continuous target).
housing = fetch_california_housing()
X, y = housing.data, housing.target

# Split 80/20 into train/test, then carve a validation set out of train.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)

# Standardize features; the scaler is fit on the training split only so no
# validation/test statistics leak into training.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_val_scaled = scaler.transform(X_val)
X_test_scaled = scaler.transform(X_test)

 

# 构建回归模型

def create_regression_model():
    """Build a fully-connected regression network for the housing features.

    Hidden ReLU layers of 64, 64 and 32 units feed a single linear output
    unit that predicts the median house value.

    Returns:
        An uncompiled ``keras.Sequential`` model.
    """
    model = keras.Sequential()
    model.add(keras.layers.Dense(64, activation='relu', input_shape=[X_train.shape[1]]))
    for units in (64, 32):
        model.add(keras.layers.Dense(units, activation='relu'))
    model.add(keras.layers.Dense(1))
    return model

 

model = create_regression_model()
# MSE loss for regression; track mean absolute error as a readable metric.
model.compile(optimizer='adam', loss='mse', metrics=['mae'])

# Train for 100 epochs, monitoring the validation split every epoch.
history = model.fit(X_train_scaled, y_train,
                   epochs=100,
                   validation_data=(X_val_scaled, y_val),
                   verbose=0)

# Evaluate on the test set; the printout scales the MAE by 100,000 because
# the dataset's target is expressed in units of $100,000.
test_loss, test_mae = model.evaluate(X_test_scaled, y_test)
print(f'测试集MAE: ${test_mae*100000:.2f}')

 

# 预测示例

def predict_house(features):
    """Predict the median house value for one raw (unscaled) sample.

    Args:
        features: A single feature vector in the same order used for
            training; it is standardized with the fitted scaler first.

    Returns:
        The model's scalar prediction (same scale as the training targets).
    """
    scaled = scaler.transform([features])
    return model.predict(scaled)[0][0]

 

# 示例预测

# Compare a prediction against the ground truth for one test sample
# (both scaled by 100,000 to display dollar amounts).
sample_house = X_test[0]
predicted_price = predict_house(sample_house)
actual_price = y_test[0]
print(f"预测价格: ${predicted_price*100000:.2f}")
print(f"实际价格: ${actual_price*100000:.2f}")

```

 

4. 简单推荐系统

 

```python

import numpy as np

import pandas as pd

from sklearn.metrics.pairwise import cosine_similarity

from sklearn.feature_extraction.text import TfidfVectorizer

 

# 示例电影数据

# Sample movie catalogue: title, comma-separated genres, and director.
movies = {
    'title': ['泰坦尼克号', '阿凡达', '星际穿越', '盗梦空间', '黑客帝国', '教父', '肖申克的救赎', '这个杀手不太冷'],
    'genre': ['爱情,剧情,灾难', '科幻,动作,冒险', '科幻,冒险,剧情', '动作,科幻,惊悚',
              '动作,科幻', '犯罪,剧情', '剧情', '动作,犯罪,剧情'],
    'director': ['詹姆斯·卡梅隆', '詹姆斯·卡梅隆', '克里斯托弗·诺兰', '克里斯托弗·诺兰',
                '沃卓斯基姐妹', '弗朗西斯·福特·科波拉', '弗兰克·德拉邦特', '吕克·贝松']
}

df = pd.DataFrame(movies)

# Content-based profile: concatenate genre and director into one text field.
df['content'] = df['genre'] + ' ' + df['director']

# Vectorize the content field with TF-IDF.
# FIX: TfidfVectorizer's stop_words parameter only accepts 'english', an
# explicit list, or None — the original stop_words='chinese' raises
# ValueError when fit_transform is called, so no stop list is used here.
tfidf = TfidfVectorizer()
tfidf_matrix = tfidf.fit_transform(df['content'])

# Pairwise cosine similarity between every pair of movie content vectors.
cosine_sim = cosine_similarity(tfidf_matrix, tfidf_matrix)

 

def get_recommendations(title, cosine_sim=cosine_sim):
    """Return the titles of the 3 movies most similar to *title*.

    Similarity comes from the precomputed cosine-similarity matrix over the
    TF-IDF content vectors; the queried movie itself is excluded.

    Returns:
        A pandas Series with the top-3 recommended titles.
    """
    idx = df[df['title'] == title].index[0]
    ranked = sorted(enumerate(cosine_sim[idx]),
                    key=lambda pair: pair[1], reverse=True)
    # Skip position 0 (the movie itself) and keep the next three.
    top_indices = [pos for pos, _ in ranked[1:4]]
    return df['title'].iloc[top_indices]

 

# 测试推荐系统

# Demo: print the top-3 recommendations for two seed titles.
print("基于'星际穿越'的推荐:")
print(get_recommendations('星际穿越'))
print("\n基于'教父'的推荐:")
print(get_recommendations('教父'))

```

 

5. 图像生成(简单DCGAN)

 

```python

import tensorflow as tf

from tensorflow import keras

from tensorflow.keras import layers

import numpy as np

import matplotlib.pyplot as plt

 

# 生成器

def make_generator_model():
    """Build the DCGAN generator: 100-dim noise -> 28x28x1 image in [-1, 1].

    A dense projection to 7x7x256 is upsampled through transposed
    convolutions (7x7 -> 14x14 -> 28x28); BatchNormalization + LeakyReLU
    follow every layer except the final tanh output.

    Returns:
        An uncompiled ``keras.Sequential`` model.
    """
    model = keras.Sequential()
    model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())
    model.add(layers.Reshape((7, 7, 256)))
    # Two upsampling stages: 128 filters at stride 1, then 64 at stride 2.
    for filters, strides in ((128, (1, 1)), (64, (2, 2))):
        model.add(layers.Conv2DTranspose(filters, (5, 5), strides=strides,
                                         padding='same', use_bias=False))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())
    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same',
                                     use_bias=False, activation='tanh'))
    return model

 

# 判别器

def make_discriminator_model():
    """Build the DCGAN discriminator: 28x28x1 image -> real/fake score.

    Two strided convolutions (64 and 128 filters) with LeakyReLU and 30%
    dropout downsample the image, then a sigmoid unit scores it.

    Returns:
        An uncompiled ``keras.Sequential`` model.
    """
    model = keras.Sequential()
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
                            input_shape=[28, 28, 1]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Flatten())
    model.add(layers.Dense(1, activation='sigmoid'))
    return model

 

# 生成示例图像

def generate_and_save_images(model, epoch, test_input):
    """Run the generator on *test_input* and save a 4x4 grid of images.

    Args:
        model: The generator; called in inference mode (training=False).
        epoch: Epoch number used in the output file name.
        test_input: Batch of latent vectors (expected to contain 16 samples
            so the 4x4 grid is filled).
    """
    predictions = model(test_input, training=False)
    fig = plt.figure(figsize=(4, 4))
    for idx in range(predictions.shape[0]):
        ax = plt.subplot(4, 4, idx + 1)
        # Map the tanh output from [-1, 1] back to [0, 255] pixel values.
        ax.imshow(predictions[idx, :, :, 0] * 127.5 + 127.5, cmap='gray')
        ax.axis('off')
    plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
    plt.show()

 

# 示例使用

# Instantiate the (untrained) generator and discriminator.
generator = make_generator_model()
discriminator = make_discriminator_model()

# Sample 16 standard-normal latent vectors and generate images from them.
noise = tf.random.normal([16, 100])
generated_images = generator(noise, training=False)

# Display the generated images (random noise, since the GAN is untrained)
# in a 4x4 grid.
plt.figure(figsize=(4, 4))
for i in range(generated_images.shape[0]):
    plt.subplot(4, 4, i+1)
    # Map the tanh output from [-1, 1] back to [0, 255] pixel values.
    plt.imshow(generated_images[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
    plt.axis('off')
plt.show()

```

 

6. 时间序列预测

 

```python

import numpy as np

import matplotlib.pyplot as plt

from tensorflow import keras

from tensorflow.keras import layers

 

# 生成示例时间序列数据

def generate_time_series():
    """Create a synthetic noisy signal composed of two sine waves.

    Returns:
        (time, series): sample times from 0 to 1000 in steps of 0.1, and
        the signal sin(0.1*t) + sin(0.05*t) plus Gaussian noise (std 0.1).
    """
    time = np.arange(0, 1000, 0.1)
    noise = 0.1 * np.random.randn(len(time))
    series = np.sin(0.1 * time) + np.sin(0.05 * time) + noise
    return time, series

 

time, series = generate_time_series()

 

# 准备数据

def create_dataset(series, time_steps=10):
    """Convert a 1-D series into supervised (window, next-value) pairs.

    Args:
        series: 1-D sequence of observations.
        time_steps: Length of each sliding input window (default 10).

    Returns:
        (X, y): X has shape (len(series) - time_steps, time_steps) holding
        the sliding windows; y holds the value right after each window.
    """
    n_samples = len(series) - time_steps
    windows = [series[start:start + time_steps] for start in range(n_samples)]
    targets = [series[start + time_steps] for start in range(n_samples)]
    return np.array(windows), np.array(targets)

 

# Window the series into 20-step inputs with a one-step-ahead target.
time_steps = 20
X, y = create_dataset(series, time_steps)

# FIX: Keras LSTM layers require 3-D input (samples, time_steps, features),
# but create_dataset returns 2-D windows; without this reshape model.fit
# raises a shape error against input_shape=(time_steps, 1).
X = X.reshape(-1, time_steps, 1)

# Chronological 80/20 train/test split (no shuffling for time series).
split_time = int(0.8 * len(X))
X_train, X_test = X[:split_time], X[split_time:]
y_train, y_test = y[:split_time], y[split_time:]

 

# 构建LSTM模型

# Two-layer stacked LSTM: the first layer returns full sequences so the
# second LSTM can consume them; a dense head emits the next-step value.
model = keras.Sequential([
    layers.LSTM(50, activation='relu', return_sequences=True, input_shape=(time_steps, 1)),
    layers.LSTM(50, activation='relu'),
    layers.Dense(1)
])

# Mean-squared-error loss: this is one-step-ahead regression.
model.compile(optimizer='adam', loss='mse')

 

# 训练

# Train for 20 epochs, holding out the last 20% of the training windows
# for validation (verbose=0 suppresses per-epoch logging).
history = model.fit(X_train, y_train, epochs=20, validation_split=0.2, verbose=0)

# One-step-ahead predictions on both splits.
train_predict = model.predict(X_train)
test_predict = model.predict(X_test)

 

# 可视化结果

# Visualize actual vs. predicted values on their time axes.
plt.figure(figsize=(12, 6))
# FIX: y_train covers series indices time_steps .. time_steps + split_time - 1,
# so its x-axis is time[time_steps:split_time + time_steps]. The original
# time[time_steps:split_time] was time_steps samples too short and made
# plt.plot raise a dimension-mismatch error.
plt.plot(time[time_steps:split_time + time_steps], y_train, label='训练数据')
plt.plot(time[split_time + time_steps:], y_test, label='测试数据')
plt.plot(time[split_time + time_steps:], test_predict, label='预测')
plt.legend()
plt.show()

```

 

运行说明

 

每个实例都可以独立运行,需要安装以下依赖:

 

```bash

pip install tensorflow matplotlib numpy scikit-learn pandas

```

 

这些实例涵盖了:

 

· 计算机视觉(图像分类、图像生成)

· 自然语言处理(情感分析)

· 推荐系统

· 回归预测

· 时间序列分析

 

你可以根据需要选择运行特定的实例,或者将它们组合起来构建更复杂的AI应用!

评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值