# Background / explanation (in Chinese): http://blog.youkuaiyun.com/mmc2015/article/details/50939265
# Author's note: rough demo code, for reference only.
#!usr/bin/env python
# -*- coding:utf-8 -*-
from nltk.corpus import wordnet as wn
import pandas as pd
import numpy as np
from scipy import stats
from sklearn.preprocessing import MinMaxScaler, Imputer
# Load the word-pair similarity dataset (expects combined.csv in the CWD).
data = pd.read_csv("combined.csv")
# Columns 0-1 hold the word pairs; column 2 holds the human similarity score.
wordsList = np.asarray(data.iloc[:, [0, 1]])
simScore = np.asarray(data.iloc[:, [2]])
# One predicted score per pair, filled in by the loop below.
predScoreList = np.zeros((len(simScore), 1))
for i, (word1, word2) in enumerate(wordsList):
print "process #%d words pair [%s,%s]" % (i, word1, word2)
count=0
synsets1=wn.synsets(word1)
synsets2=wn.synsets(word2)
for synset1 in synsets1:
for synset2 in synsets2:
score=synset1.path_similarity(synset2)