-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathPreprocessing.py
More file actions
103 lines (91 loc) · 4.1 KB
/
Preprocessing.py
File metadata and controls
103 lines (91 loc) · 4.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
import spacy

# German pipeline; switch to 'de_core_news_lg' for higher accuracy if needed.
nlp = spacy.load('de_core_news_md')
print(nlp.pipe_names)


def _read_text(path):
    """Read a whole UTF-8 text file and join its lines into one string.

    errors='ignore' silently drops undecodable bytes (PDF-extraction
    artifacts etc.), matching the original behavior.
    """
    with open(path, encoding='utf-8', errors='ignore') as fh:
        return " ".join(line.rstrip() for line in fh)


def _clean_tokens(doc, exclude=()):
    """Lemmas of *doc*'s tokens after the standard cleanup.

    Drops stop words, punctuation, whitespace, numbers (POS 'NUM'),
    all-uppercase tokens, and any surface form listed in *exclude*
    (used to strip party names / bullet glyphs per manifesto).
    """
    return [token.lemma_ for token in doc
            if not token.is_stop
            and not token.is_punct
            and not token.is_space
            and token.pos_ != 'NUM'
            and not token.is_upper
            and token.text not in exclude]


# NOTE(review): could be optimised by batching all six files through
# nlp.pipe(...) instead of six separate nlp(...) calls.
dateiGruene = _read_text('../resources/GRUENE.txt')
dateiSPD = _read_text('../resources/SPD.txt')
dateiLinke = _read_text('../resources/LINKE.txt')
dateiCDU = _read_text('../resources/CDU.txt')
dateiFDP = _read_text('../resources/FDP.txt')
dateiAfD = _read_text('../resources/AFD.txt')

textGruene = nlp(dateiGruene)
textSPD = nlp(dateiSPD)
textLinke = nlp(dateiLinke)
textCDU = nlp(dateiCDU)
textFDP = nlp(dateiFDP)
textAfD = nlp(dateiAfD)

wordsGruene = _clean_tokens(textGruene)
wordsSPD = _clean_tokens(textSPD, exclude=('>',))
wordsLinke = _clean_tokens(textLinke, exclude=('\uf0a7',))
wordsCDU = _clean_tokens(textCDU)
# BUG FIX: the original filter required
#   token.text == 'Freie' and token.text == ' Demokraten'
# simultaneously — always False, so wordsFDP was always empty. Mirroring the
# AfD list, the intent was presumably to EXCLUDE the party-name tokens
# instead — TODO confirm against the FDP source text.
wordsFDP = _clean_tokens(textFDP, exclude=('Freie', 'Demokraten'))
wordsAfD = _clean_tokens(textAfD, exclude=('AfD',))

# (disabled) sentence-iteration example:
# sentsGruene = textGruene.sents
# for sent in sentsGruene:
#     print(list(sentsGruene))
def filterNouns(text):
    """Return the lemmas of all nouns and proper nouns in *text*.

    Stop words, punctuation and whitespace tokens are skipped.
    *text* is an iterable of spaCy-like tokens (e.g. a Doc or Span).
    """
    # BUG FIX: the original tested `not token.is_stop` twice; once suffices.
    return [token.lemma_ for token in text
            if not token.is_stop
            and not token.is_punct
            and not token.is_space
            and token.pos_ in ('NOUN', 'PROPN')]
def filterverbs(text):
    """Collect the lemma of every verb in *text*.

    Stop words, punctuation and whitespace tokens are ignored.
    """
    verbs = []
    for tok in text:
        # Guard clauses mirror the cleanup used by the other filters.
        if tok.is_stop or tok.is_punct or tok.is_space:
            continue
        if tok.pos_ == 'VERB':
            verbs.append(tok.lemma_)
    return verbs
def filterAdj(text):
    """Lemmas of all adjectives in *text*.

    Stop words, punctuation and whitespace tokens are skipped.
    """
    def _keep(tok):
        # Same cleanup as the noun/verb filters, restricted to adjectives.
        return (not (tok.is_stop or tok.is_punct or tok.is_space)
                and tok.pos_ == 'ADJ')

    return [tok.lemma_ for tok in text if _keep(tok)]
def filterArg(sents):
    """Lemmas of content tokens from sentences containing at least one
    adjective (crude candidate filter for 'argumentative' sentences).

    *sents* is an iterable of sentence spans (e.g. ``doc.sents``).
    """
    # BUG FIX: the original wrote `for sent, token in sents`, which tries to
    # unpack each sentence span into a pair and raises for a doc.sents
    # iterator. Its condition `filterAdj(sent) or filterAdj(sent) and
    # filterNouns(sent)` also reduces (by precedence) to just
    # `filterAdj(sent)`, which is what we keep here.
    argus = []
    for sent in sents:
        if not filterAdj(sent):
            continue
        for token in sent:
            if not token.is_stop and not token.is_punct and not token.is_space:
                argus.append(token.lemma_)
    # TODO: refine the condition for detecting argumentative sentences.
    return argus