import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
# fetch_mldata was removed from scikit-learn (mldata.org is gone); fetch_openml replaces it
MNIST = datasets.fetch_openml('mnist_784', version=1, as_frame=False)
X, y = MNIST['data'], MNIST['target'].astype(np.float64)  # OpenML returns the labels as strings
# Scaler = StandardScaler()
# X = Scaler.fit_transform(X)
pca = PCA(0.9)            # keep enough components to explain 90% of the variance
X = pca.fit_transform(X)  # a single fit_transform call is sufficient
# print(y.shape)
# Alternative: load the sonar dataset instead of MNIST
# data = np.array(pd.read_csv(r'E:\dataset\clusterData\sonar.csv', header=None))
# Scaler = StandardScaler()
# X = Scaler.fit_transform(data[:, :-1])
# pca = PCA(n_components=10)
# X = pca.fit_transform(X)
# y = data[:, -1]
y = y.reshape(-1, 1)  # make y a column vector so it can be stacked next to X
data = np.hstack((X,y))
print(data.shape)
data = pd.DataFrame(data)
data.to_csv(r'E:\dataset\clusterData\MNIST_PCA.csv',header=None,index=None)
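To see how aggressively PCA(0.9) compressed the 784 raw pixels, a quick check on the fitted pca object from the script above (a sketch; the exact component count depends on the data):

print(pca.n_components_)                    # number of components kept
print(pca.explained_variance_ratio_.sum())  # total variance retained, should be >= 0.9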
Then open the generated CSV file and add an attribute name for each column in the first row. Without one, the first data row is treated as the header, and the dataset loaded in Java ends up one row short. (The header can also be written from pandas directly; see the sketch below.)
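A minimal sketch of writing the header row from pandas itself, so no manual editing is needed. It assumes the data DataFrame and output path from the script above; the column names f0..fN and label are invented for illustration:

n_features = data.shape[1] - 1
data.columns = ['f%d' % i for i in range(n_features)] + ['label']
data.to_csv(r'E:\dataset\clusterData\MNIST_PCA.csv', header=True, index=None)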
package classifier;

import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;
import weka.core.converters.ArffSaver;
import java.io.File;

public class TransformCSV_arff {
    public static void main(String[] args) throws Exception {
        // load the CSV written above; Weka reads the first row as attribute names
        Instances allData = DataSource.read("E:\\dataset\\clusterData\\MNIST_PCA.csv");
        ArffSaver saver = new ArffSaver();
        saver.setInstances(allData);
        saver.setFile(new File("E:\\dataset\\clusterData\\MNIST_PCA.arff"));
        saver.writeBatch();
        System.out.println("Converted to an ARFF file");
    }
}
Remember to install the Weka package (weka.jar on the classpath) first, or none of the above will run!
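As a final sanity check (an optional sketch, not part of the original workflow), the ARFF file can be read back in Python with scipy.io.arff to confirm that no rows were lost in the conversion:

from scipy.io import arff
import pandas as pd

raw, meta = arff.loadarff(r'E:\dataset\clusterData\MNIST_PCA.arff')
df = pd.DataFrame(raw)
print(df.shape)  # row count should match the data written to the CSV earlier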