#!/usr/bin/python
# -*- coding:utf-8 -*-
'''
Created on 21 Oct 2013

@author: mlenart
'''
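
# This script builds a finite-state automaton (FSA) for the Morfeusz morphological
# analyzer or generator from PoliMorf-style dictionary input, using the tagset and
# segmentation-rule files given on the command line (see _parseOptions below).
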
import os
import sys
import logging
import codecs
from morfeuszbuilder.fsa import encode
from morfeuszbuilder.fsa import convertinput
from morfeuszbuilder.fsa.fsa import FSA
from morfeuszbuilder.fsa.serializer import Serializer
from morfeuszbuilder.tagset.tagset import Tagset
from morfeuszbuilder.segrules import rulesParser
from optparse import OptionParser

# class InputFormat():
#     ENCODED = 'ENCODED'
#     POLIMORF = 'POLIMORF'
#     PLAIN = 'PLAIN'

class SerializationMethod():
    SIMPLE = 'SIMPLE'
    V1 = 'V1'
    V2 = 'V2'

def _checkOption(opt, parser, msg):
    if opt is None:
        print >> sys.stderr, msg
        parser.print_help()
        exit(1)

def _checkExactlyOneOptionSet(optsList, parser, msg):
    if optsList.count(True) != 1:
        print >> sys.stderr, msg
        parser.print_help()
        exit(1)

def _parseListCallback(option, opt, value, parser):
    setattr(parser.values, option.dest, value.split(','))
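# For example, --input-files=a.tab,b.tab sets opts.inputFiles to ['a.tab', 'b.tab']
# (the file names here are purely illustrative).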

def _checkOpen(filename, mode):
    try:
        with open(filename, mode) as _:
            pass
    except IOError as ex:
        print >> sys.stderr, str(ex)
        exit(1)

def _parseOptions():
    """
    Parses commandline args
    """
    parser = OptionParser()
    parser.add_option('--input-files',
                      type='string',
                      dest='inputFiles',
                      action='callback',
                      callback=_parseListCallback,
                      metavar='FILES',
                      help='comma separated list of files')
    parser.add_option('--tagset-file',
                      dest='tagsetFile',
                      metavar='FILE',
                      help='path to the file with tagset')
    parser.add_option('--segments-file',
                      dest='segmentsFile',
                      metavar='FILE',
                      help='path to the file with segment rules')
    parser.add_option('--trim-supneg',
                      dest='trimSupneg',
                      default=False,
                      action='store_true',
                      help='trim "naj" and "nie" prefixes from words tagged as "%:sup" and "%:neg" respectively. Valid only for analysis.')
    parser.add_option('-o', '--output-file',
                      dest='outputFile',
                      metavar='FILE',
                      help='path to output file')
    parser.add_option('-a', '--analyzer',
                      dest='analyzer',
                      action='store_true',
                      default=False,
                      help='Generate FSA for morphological analysis')
    parser.add_option('-g', '--generator',
                      dest='generator',
                      action='store_true',
                      default=False,
                      help='Generate FSA for morphological synthesis')
    parser.add_option('--cpp',
                      dest='cpp',
                      action='store_true',
                      default=False,
                      help='Encode binary data in a C++ file')
    parser.add_option('--use-arrays',
                      dest='useArrays',
                      action='store_true',
                      default=False,
                      help='store states reachable by 2 transitions in arrays (should speed up recognition, available only when --serialization-method=V1)')
    parser.add_option('--serialization-method',
                      dest='serializationMethod',
                      help="FSA serialization method: \
                      SIMPLE - fixed-length transitions, fastest and weakest compression \
                      V1 - variable-length transitions, compressed labels - strongest compression \
                      V2 - format similar to the default in Jan Daciuk's fsa package - variable-length transitions, non-compressed labels - good compression, good speed")
    #~ parser.add_option('--visualize',
    #~                   dest='visualize',
    #~                   action='store_true',
    #~                   default=False,
    #~                   help='visualize result')
    parser.add_option('--train-file',
                      dest='trainFile',
                      help='A text file used for training. Should contain words from some large corpus - one word in each line')
    parser.add_option('--debug',
                      dest='debug',
                      action='store_true',
                      default=False,
                      help='output some debugging info')
    #~ parser.add_option('--profile',
    #~                   dest='profile',
    #~                   action='store_true',
    #~                   default=False,
    #~                   help='show profiling graph (requires pycallgraph and graphviz)')

    opts, args = parser.parse_args()

    _checkOption(opts.inputFiles, parser, "Input file is missing")
    _checkOption(opts.outputFile, parser, "Output file is missing")
    _checkOption(opts.tagsetFile, parser, "Tagset file is missing")
    _checkOption(opts.serializationMethod, parser, "Serialization method is missing")
    _checkExactlyOneOptionSet([opts.analyzer, opts.generator],
                              parser, 'Must set exactly one FSA type: --analyzer or --generator')

    _checkOpen(opts.tagsetFile, 'r')
    for filename in opts.inputFiles:
        _checkOpen(filename, 'r')
    _checkOpen(opts.outputFile, 'w')

    _checkOption(opts.segmentsFile, parser, "Segment rules file is missing")
    if opts.analyzer:
        _checkOpen(opts.segmentsFile, 'r')

    if not opts.serializationMethod.upper() in [SerializationMethod.SIMPLE, SerializationMethod.V1, SerializationMethod.V2]:
        print >> sys.stderr, '--serialization-method must be one of (' + str([SerializationMethod.SIMPLE, SerializationMethod.V1, SerializationMethod.V2]) + ')'
        parser.print_help()
        exit(1)

    return opts
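
# Example invocation of this script (illustrative; the dictionary, tagset and
# segmentation file names below are hypothetical):
#   python buildfsa.py --analyzer \
#       --input-files=sgjp.tab,dodatki.tab \
#       --tagset-file=tagset.dat \
#       --segments-file=segmenty.dat \
#       --serialization-method=V1 \
#       -o analyzer.fsa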

def _concatFiles(inputFiles):
    # return open(inputFiles[0], 'r')
    for inputFile in inputFiles:
        if inputFile:
            with open(inputFile, 'r') as f:
                for line in f:
                    if not ' ' in ''.join(line.split('\t')[:2]):
                        yield line
                    else:
                        logging.warn(u'Ignoring line: "%s" - contains space in text form or lemma' % line.strip().decode('utf8'))
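
# Note: _concatFiles treats each input line as tab-separated dictionary data
# (roughly: text form, lemma, tag, ...) and skips entries whose form or lemma
# contains a space; the exact column layout is defined by the PoliMorf converters
# used below.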

def _readPolimorfInput4Analyzer(inputFiles, tagset, encoder, segmentRulesManager, trimSupneg):
    logging.info('reading analyzer data from %s', str(inputFiles))
    for entry in convertinput.PolimorfConverter4Analyzer(tagset, encoder, 'utf8', segmentRulesManager, trimSupneg).convert(_concatFiles(inputFiles)):
        yield entry

def _readPolimorfInput4Generator(inputFiles, tagset, encoder, segmentRulesManager):
    logging.info('reading generator data from %s', str(inputFiles))
    for entry in convertinput.PolimorfConverter4Generator(tagset, encoder, 'utf8', segmentRulesManager).convert(_concatFiles(inputFiles)):
        yield entry

def _readTrainData(trainFile):
    with codecs.open(trainFile, 'r', 'utf8') as f:
        for line in f:
            yield line.strip()

def _printStats(fsa):
    acceptingNum = 0
    sinkNum = 0
    arrayNum = 0
    for s in fsa.dfs():
        if s.isAccepting():
            acceptingNum += 1
        if s.transitionsNum == 0:
            sinkNum += 1
        if s.serializeAsArray:
            arrayNum += 1
    logging.info('states num: ' + str(fsa.getStatesNum()))
    logging.info('transitions num: ' + str(fsa.getTransitionsNum()))
    logging.info('accepting states num: ' + str(acceptingNum))
    logging.info('sink states num: ' + str(sinkNum))
    logging.info('array states num: ' + str(arrayNum))

def buildAnalyzerFromPoliMorf(inputFiles, tagset, segmentRulesManager, trimSupneg):
    encoder = encode.MorphEncoder()
    fsa = FSA(encoder, tagset)
    for word, data in _readPolimorfInput4Analyzer(inputFiles, tagset, encoder, segmentRulesManager, trimSupneg):
        # print word, data
        fsa.addEntry(word, data)
    fsa.close()
    logging.info('------')
    logging.info('Analyzer FSA stats:')
    logging.info('------')
    _printStats(fsa)
    return fsa, encoder.qualifiersMap
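
# Minimal programmatic sketch of building an analyzer FSA outside of main(),
# mirroring the calls made there (assumes the tagset, segmentation and dictionary
# files exist; the names are hypothetical):
#   tagset = Tagset('tagset.dat')
#   rules = rulesParser.RulesParser(tagset, rulesParser.RulesParser.PARSE4ANALYZER).parse('segmenty.dat')
#   fsa, qualifiersMap = buildAnalyzerFromPoliMorf(['sgjp.tab'], tagset, rules, trimSupneg=False)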

def buildGeneratorFromPoliMorf(inputFiles, tagset, segmentRulesManager):
    encoder = encode.Encoder4Generator()
    fsa = FSA(encoder, tagset)
    inputData = _readPolimorfInput4Generator(inputFiles, tagset, encoder, segmentRulesManager)
    for word, data in inputData:
        fsa.addEntry(word, data)
    fsa.close()
    logging.info('------')
    logging.info('Generator FSA stats:')
    logging.info('------')
    _printStats(fsa)
    return fsa, encoder.qualifiersMap

def main(opts):
    if opts.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    if opts.analyzer:
        logging.info('*** building analyzer ***')
    else:
        logging.info('*** building generator ***')

    logging.info('reading tagset from %s', opts.tagsetFile)
    tagset = Tagset(opts.tagsetFile)

    rulesType = rulesParser.RulesParser.PARSE4ANALYZER if opts.analyzer else rulesParser.RulesParser.PARSE4GENERATOR
    segmentRulesManager = rulesParser.RulesParser(tagset, rulesType).parse(opts.segmentsFile)
    segmentationRulesData = segmentRulesManager.serialize()

    if opts.analyzer:
        fsa, qualifiersMap = buildAnalyzerFromPoliMorf(opts.inputFiles, tagset, segmentRulesManager, opts.trimSupneg)
    else:
        fsa, qualifiersMap = buildGeneratorFromPoliMorf(opts.inputFiles, tagset, segmentRulesManager)
    print qualifiersMap

    if opts.trainFile:
        logging.info('training with ' + opts.trainFile + ' ...')
        fsa.train(_readTrainData(opts.trainFile))
        logging.info('done training')

    # serializer = {
    #     SerializationMethod.SIMPLE: SimpleSerializer,
    #     SerializationMethod.V1: VLengthSerializer1,
    #     SerializationMethod.V2: VLengthSerializer2,
    # }[opts.serializationMethod](fsa)
    serializer = Serializer.getSerializer(opts.serializationMethod, fsa, tagset, qualifiersMap, segmentationRulesData)
    if opts.cpp:
        serializer.serialize2CppFile(opts.outputFile, isGenerator=opts.generator)
    else:
        serializer.serialize2BinaryFile(opts.outputFile, isGenerator=opts.generator)
    logging.info('total FSA size (in bytes): ' + str(fsa.initialState.reverseOffset))
    # {
    #     OutputFormat.CPP: serializer.serialize2CppFile,
    #     OutputFormat.BINARY: serializer.serialize2BinaryFile
    # }[opts.outputFormat](opts.outputFile)

if __name__ == '__main__':
    import os
    opts = _parseOptions()
    try:
        main(opts)
    # except Exception as ex:
    #     print >> sys.stderr, unicode(ex).encode('utf8')
    #     sys.exit(1)
    finally:
        pass