FileParser.py
# -*- coding: utf-8 -*-
import codecs
import pandas as pd
import numpy as np
import Utils
import PredefinedValues as pv

def truncate(sourceFile, targetFile, truncateLineCount):
    """Copy the first truncateLineCount lines of sourceFile into targetFile."""
    reader = codecs.open(sourceFile, 'r', 'utf-8')
    writer = codecs.open(targetFile, 'w', 'utf-8')
    count = 0
    while count < truncateLineCount:
        writer.write(reader.readline())
        count += 1
    reader.close()
    writer.close()
    if pv.outputDebugMsg:
        Utils.logMessage("\nTruncate file finished")

def preprocess(sourceFile, targetFile, targetFields):
    """Copy sourceFile to targetFile, keeping only the columns named in targetFields."""
    reader = codecs.open(sourceFile, 'r', 'utf-8')
    writer = codecs.open(targetFile, 'w', 'utf-8')
    # The first line is the header; map the requested field names to column indexes.
    line = reader.readline()
    fields = line.replace('\n', '').split(',')
    fieldsIdx = []
    for item in targetFields:
        if item in fields:
            fieldsIdx.append(fields.index(item))
    # Write the filtered header, then every following data line.
    while line:
        writer.write(filterFields(line, fieldsIdx))
        line = reader.readline()
    reader.close()
    writer.close()
    if pv.outputDebugMsg:
        Utils.logMessage("\nFilter fields finished")

def filterFields(line, fieldsIdx):
    """Return a CSV line containing only the columns listed in fieldsIdx."""
    fields = line.replace('\n', '').split(',')
    return ','.join(fields[i] for i in fieldsIdx) + '\n'

def readData(filepath):
    """Load a UTF-8 CSV file (with header row) into a DataFrame."""
    return pd.read_csv(filepath, encoding='utf-8')

def readSimMatrix(filePath):
    """Load a similarity matrix stored as a headerless CSV."""
    return pd.read_csv(filePath, header=None, encoding='utf-8')

def recordSimMatrix(simMat, filePath):
    """Write a similarity matrix to CSV without header or index."""
    pd.DataFrame(simMat).to_csv(filePath, header=False, index=False, encoding='utf-8')

def outputMatrix(matrix, targetFile):
    """Write an arbitrary matrix to CSV without header or index."""
    pd.DataFrame(matrix).to_csv(targetFile, header=False, index=False)

def outputNodesInSameCluster(model, unifiedRDDVecs, rawDataFrame, clusterIDCenterFilePath, clusterIDFilePath):
    """Attach a cluster ID and cluster center to every row and write both CSV outputs."""
    # For each feature vector, look up the center of the cluster the model assigns it to.
    centers = unifiedRDDVecs.map(lambda item: model.clusterCenters[model.predict(item)]).collect()
    rawDataFrame['clusterID'] = convertClusterID(centers)
    pd.DataFrame(rawDataFrame).to_csv(clusterIDFilePath, header=False, index=False, encoding='utf-8')
    rawDataFrame['center'] = centers
    groupUserByCluster(rawDataFrame).to_csv(clusterIDCenterFilePath, index=False, encoding='utf-8')
    if pv.outputDebugMsg:
        Utils.logMessage("\nOutput cluster finished")

def convertClusterID(centers):
    """Map each cluster center to a small sequential integer ID, in order of first appearance."""
    clusterIDMap = {}
    clusterId = 0
    ret = []
    for center in centers:
        # Use the raw bytes of the center vector as a hashable dictionary key.
        scenter = center.tostring()
        if scenter not in clusterIDMap:
            clusterIDMap[scenter] = clusterId
            clusterId += 1
        ret.append(clusterIDMap[scenter])
    if pv.outputDebugMsg:
        Utils.logMessage('\nConvert to cluster ID finished')
    return ret

def groupUserByCluster(rawDataFrame):
    """Return a copy of rawDataFrame with rows reordered so that rows of the same cluster are adjacent."""
    # Group row indexes by the bytes of their cluster center.
    clusterIdxMap = {}
    rows = rawDataFrame.shape[0]
    for i in xrange(rows):
        cluster = rawDataFrame.loc[i, 'center'].tostring()
        if cluster not in clusterIdxMap:
            clusterIdxMap[cluster] = []
        clusterIdxMap[cluster].append(i)
    # Rebuild the frame cluster by cluster.
    assigned = False
    retDF = None
    for cluster, indexes in clusterIdxMap.items():
        for idx in indexes:
            if not assigned:
                retDF = pd.DataFrame(rawDataFrame.loc[idx]).T
                assigned = True
            else:
                retDF = retDF.append(rawDataFrame.loc[idx], ignore_index=True)
    return retDF

def shuffleRawData(filePath):
    """Randomly permute the rows of a CSV file in place."""
    df = pd.read_csv(filePath, encoding='utf-8')
    df = df.reindex(np.random.permutation(df.index))
    df.to_csv(filePath, index=False, encoding='utf-8')
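
# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how these helpers might be chained on a raw CSV export.
# The file names, line count, and field list below are hypothetical placeholders.
if __name__ == '__main__':
    truncate('raw_checkins.csv', 'checkins_head.csv', 10000)    # keep only the first 10,000 lines
    preprocess('checkins_head.csv', 'checkins_filtered.csv', ['userID', 'latitude', 'longitude'])
    shuffleRawData('checkins_filtered.csv')                     # randomize row order in place
    df = readData('checkins_filtered.csv')
    print(df.head())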