Commit a5c6c89ebb1323d43a4db20f5dcc1ff5393d80dc
1 parent 435b22c2
domyślne wykluczanie usuniętych rzeczy ("default exclusion of deleted things")
Showing 21 changed files with 179 additions and 114 deletions
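The commit replaces the explicit deleted=False, lexeme__deleted=False and to_lexeme__deleted=False filters that were repeated throughout the codebase with default model managers in dictionary/models.py (NotDeletedManager, LexemeNotDeletedManager, CRManager) that exclude soft-deleted rows automatically. Each affected model keeps an unfiltered all_objects = Manager() for the few call sites, such as history rendering and deleted-entry pagination, that still need to see deleted rows.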
INSTALL
... | ... | @@ -33,8 +33,6 @@ $ sudo su postgres |
33 | 33 | # create user kuznia with password 'kuznia'; |
34 | 34 | # create database kuznia; |
35 | 35 | # grant all privileges on database kuznia to kuznia; |
36 | -# \c kuznia | |
37 | -# create language plpgsql; -- not needed in PostgreSQL 9.1 | 
38 | 36 | Ctrl-D |
39 | 37 | Ctrl-D |
40 | 38 | |
... | ... |
dictionary/ajax_lexeme_view.py
... | ... | @@ -114,7 +114,7 @@ def lexeme_edit_form(request, id): |
114 | 114 | prefix='lip' + str(lip.pk), user=request.user, editable=editable), |
115 | 115 | lip.qualifiers.filter(vocabulary=owner) if ro_owner else []) |
116 | 116 | for lip in lips] |
117 | - crs = l.refs_to.filter(to_lexeme__deleted=False).order_by('type__index') | |
117 | + crs = l.refs_to.order_by('type__index') | |
118 | 118 | to_return['cross_references'] = crs |
119 | 119 | return to_return |
120 | 120 | |
... | ... | @@ -326,7 +326,7 @@ def check_pos(request, pos_id, ic_id): |
326 | 326 | def check_pattern(request, pattern_name, ic_id): |
327 | 327 | lips = LexemeInflectionPattern.objects.filter( |
328 | 328 | inflection_characteristic__pk=ic_id, pattern__name=pattern_name) |
329 | - lips = lips.exclude(lexeme__status='cand', lexeme__deleted=True) | |
329 | + lips = lips.exclude(lexeme__status='cand') | |
330 | 330 | if lips.exists(): |
331 | 331 | return {'answer': 'yes'} |
332 | 332 | else: |
... | ... |
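The hunks above can drop their deleted-row conditions because the related models now install filtering default managers (see dictionary/models.py below): refs_to yields CrossReference objects through CRManager, and LexemeInflectionPattern.objects becomes a LexemeNotDeletedManager. A minimal sketch of the idea with hypothetical Node/Ref models, using the pre-Django-1.6 get_query_set spelling that matches this codebase:

    from django.db.models import (Model, Manager, ForeignKey, BooleanField,
                                  IntegerField)

    class RefManager(Manager):
        use_for_related_fields = True  # related descriptors use this manager too

        def get_query_set(self):
            # hide references whose target row is soft-deleted
            return super(RefManager, self).get_query_set().filter(
                target__deleted=False)

    class Node(Model):
        deleted = BooleanField(default=False)

    class Ref(Model):
        source = ForeignKey(Node, related_name='refs_to')
        target = ForeignKey(Node, related_name='refs_from')
        index = IntegerField(default=0)

        objects = RefManager()   # first manager declared becomes the default
        all_objects = Manager()  # unfiltered escape hatch

    # Reverse accessors are built on the default manager, so
    #     node.refs_to.order_by('index')
    # is now equivalent to the removed
    #     node.refs_to.filter(target__deleted=False).order_by('index')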
dictionary/forms.py
... | ... | @@ -25,8 +25,7 @@ class QualifiersField(ModelMultipleChoiceField): |
25 | 25 | vocabularies = lexeme.editable_vocabularies(user) |
26 | 26 | else: |
27 | 27 | vocabularies = qualified.editable_vocabularies(user) |
28 | - qualifiers = Qualifier.objects.filter( | |
29 | - vocabulary__in=vocabularies, deleted=False) | |
28 | + qualifiers = Qualifier.objects.filter(vocabulary__in=vocabularies) | |
30 | 29 | else: |
31 | 30 | qualifiers = editable_qualifiers(user) |
32 | 31 | if not qualifiers: |
... | ... | @@ -53,7 +52,7 @@ class QualifiersField(ModelMultipleChoiceField): |
53 | 52 | |
54 | 53 | class QualifierField(ModelChoiceField): |
55 | 54 | def __init__(self, **kwargs): |
56 | - qualifiers = Qualifier.objects.filter(deleted=False) | |
55 | + qualifiers = Qualifier.objects.all() | |
57 | 56 | super(QualifierField, self).__init__(qualifiers, **kwargs) |
58 | 57 | self.set_qualifiers(qualifiers) |
59 | 58 | |
... | ... | @@ -276,11 +275,11 @@ class ChangeClassForm(ModelForm): |
276 | 275 | q = self.instance |
277 | 276 | if ec: |
278 | 277 | ec_qualifiers = ec.qualifier_set.all() |
279 | - if Lexeme.objects.filter(deleted=False, qualifiers=q).filter( | |
278 | + if Lexeme.objects.filter(qualifiers=q).filter( | |
280 | 279 | qualifiers__in=ec_qualifiers): |
281 | 280 | raise ValidationError(u'Kolizja w klasie wykluczania') |
282 | 281 | if LexemeInflectionPattern.objects.filter( |
283 | - deleted=False, qualifiers=q).filter(qualifiers__in=ec_qualifiers): | |
282 | + qualifiers=q).filter(qualifiers__in=ec_qualifiers): | |
284 | 283 | raise ValidationError(u'Kolizja w klasie wykluczania') |
285 | 284 | if Ending.objects.filter(qualifiers=q).filter( |
286 | 285 | qualifiers__in=ec_qualifiers): |
... | ... |
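All three hunks above lean on the same change: Qualifier, Lexeme and LexemeInflectionPattern now get default managers that pre-filter deleted rows (see dictionary/models.py below), so the explicit deleted=False conditions are redundant. Code that still needs to reach deleted qualifiers, such as dictionary/history.py, switches to Qualifier.all_objects instead.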
dictionary/history.py
... | ... | @@ -84,7 +84,7 @@ lip_attribute_order = [ |
84 | 84 | def prepare_value(table, column, value): |
85 | 85 | try: |
86 | 86 | if column == 'qualifier_id': |
87 | - prepared = Qualifier.objects.get(pk=int(value)).label | |
87 | + prepared = Qualifier.all_objects.get(pk=int(value)).label | |
88 | 88 | elif column == 'status': |
89 | 89 | prepared = dict(Lexeme.STATUS_CHOICES).get(value) |
90 | 90 | elif column == 'charfl': |
... | ... | @@ -92,7 +92,7 @@ def prepare_value(table, column, value): |
92 | 92 | elif column == 'w_id': |
93 | 93 | prepared = Pattern.objects.get(pk=int(value)).name |
94 | 94 | elif column == 'classificationvalue_id': |
95 | - cv = ClassificationValue.objects.get(pk=int(value)) | |
95 | + cv = ClassificationValue.all_objects.get(pk=int(value)) | |
96 | 96 | prepared = (cv.label, cv.classification.name) |
97 | 97 | else: |
98 | 98 | prepared = value |
... | ... | @@ -110,7 +110,7 @@ def transaction_table(transaction_data): |
110 | 110 | vocabs = [] |
111 | 111 | crs = {} |
112 | 112 | lip_qualifiers = {} |
113 | - deleted=False | |
113 | + deleted = False | |
114 | 114 | for item1 in transaction_data: |
115 | 115 | table = item1.table_name |
116 | 116 | column = item1.column_name |
... | ... | @@ -173,7 +173,7 @@ def transaction_table(transaction_data): |
173 | 173 | for i in (0, 1): |
174 | 174 | try: |
175 | 175 | if cr_data['l_id_do'][i] is not None: |
176 | - l = Lexeme.objects.get(pk=int(cr_data['l_id_do'][i])) | |
176 | + l = Lexeme.all_objects.get(pk=int(cr_data['l_id_do'][i])) | |
177 | 177 | ic = l.lip_data()['inflection_characteristics'] |
178 | 178 | cr_type = CrossReferenceType.objects.get(pk=cr_data['typods_id'][i]) |
179 | 179 | prepared = ' '.join((cr_type.symbol, unicode(l), ic)) |
... | ... | @@ -199,7 +199,7 @@ def transaction_table(transaction_data): |
199 | 199 | lip_dict[lip_id].append((attr, q_data['qualifier_id'])) |
200 | 200 | lip_tables = [] |
201 | 201 | for lip_id, lip_data in lip_dict.iteritems(): |
202 | - lip = LexemeInflectionPattern.objects.filter(pk=lip_id) | |
202 | + lip = LexemeInflectionPattern.all_objects.filter(pk=lip_id) | |
203 | 203 | if lip: |
204 | 204 | lip = lip[0] |
205 | 205 | header = '%s/%s' % (lip.inflection_characteristic.entry, lip.pattern.name) |
... | ... |
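dictionary/history.py moves in the opposite direction: a change log must still resolve rows that have since been soft-deleted, so its lookups switch from objects to the new unfiltered all_objects manager. A self-contained sketch of the resulting two-manager pattern, with a hypothetical Tag model standing in for Qualifier and friends:

    from django.db.models import Model, Manager, BooleanField, CharField

    class NotDeletedManager(Manager):
        use_for_related_fields = True

        def get_query_set(self):
            # every query through this manager silently drops deleted rows
            return super(NotDeletedManager, self).get_query_set().filter(
                deleted=False)

    class Tag(Model):
        label = CharField(max_length=64)
        deleted = BooleanField(default=False)

        objects = NotDeletedManager()  # default: live rows only
        all_objects = Manager()        # unfiltered: history, audits, undelete

    # Tag.objects.get(pk=pk) raises DoesNotExist once the row is soft-deleted;
    # Tag.all_objects.get(pk=pk) still finds it, so old history entries can
    # keep displaying the label of a deleted qualifier.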
dictionary/management/commands/check_ispell.py
... | ... | @@ -21,8 +21,7 @@ def check_ispell(input_file): |
21 | 21 | for line in open(input_file): |
22 | 22 | line = line.decode('utf-8').strip() |
23 | 23 | entry, flags = line.split('/', 1) |
24 | - lexemes = Lexeme.objects.filter(deleted=False, entry=entry).exclude( | |
25 | - status='cand') | |
24 | + lexemes = Lexeme.objects.filter(entry=entry).exclude(status='cand') | |
26 | 25 | if flags not in results: |
27 | 26 | results[flags] = {'pos': {}, 'ics': {}} |
28 | 27 | for l in lexemes: |
... | ... | @@ -30,5 +29,6 @@ def check_ispell(input_file): |
30 | 29 | inc_count(results[flags]['pos'], l.part_of_speech.symbol) |
31 | 30 | inc_count(results[flags]['ics'], ics) |
32 | 31 | for flags, res in results.iteritems(): |
33 | - print ('%s=%s,%s' % ((flags,) + tuple( | |
34 | - dict_repr(res[x]) for x in ('pos', 'ics')))).encode('utf-8') | |
35 | 32 | \ No newline at end of file |
33 | + print ('%s=%s,%s' % | |
34 | + ((flags, dict_repr(res['pos']), | |
35 | + dict_repr(res['ics'])))).encode('utf-8') | 
36 | 36 | \ No newline at end of file |
... | ... |
dictionary/management/commands/check_morfologik.py
... | ... | @@ -40,7 +40,7 @@ def get_forms(l, lc_sym): |
40 | 40 | if lc_sym != 'v': |
41 | 41 | l_forms = set(l.lexemeform_set.values_list('form', flat=True)) |
42 | 42 | if lc_sym == 'adj': |
43 | - neg = l.refs_to.filter(type__symbol='adjnie', to_lexeme__deleted=False) | |
43 | + neg = l.refs_to.filter(type__symbol='adjnie') | |
44 | 44 | if neg: |
45 | 45 | l_neg = neg[0].to_lexeme |
46 | 46 | neg_forms = l_neg.lexemeform_set.values_list('form', flat=True) |
... | ... | @@ -48,11 +48,11 @@ def get_forms(l, lc_sym): |
48 | 48 | l_forms |= set(form for form in neg_forms if form not in added_forms) |
49 | 49 | else: |
50 | 50 | tags = ['allq'] |
51 | - if l.refs_to.filter(type__symbol='verpact', to_lexeme__deleted=False): | |
51 | + if l.refs_to.filter(type__symbol='verpact'): | |
52 | 52 | tags.append('pact') |
53 | - if l.refs_to.filter(type__symbol='verppas', to_lexeme__deleted=False): | |
53 | + if l.refs_to.filter(type__symbol='verppas'): | |
54 | 54 | tags.append('ppas') |
55 | - if l.refs_to.filter(type__symbol='verger', to_lexeme__deleted=False): | |
55 | + if l.refs_to.filter(type__symbol='verger'): | |
56 | 56 | tags.append('ger') |
57 | 57 | lips = l.lexemeinflectionpattern_set.all() |
58 | 58 | if not lips: |
... | ... | @@ -87,7 +87,7 @@ def check_forms(lc_sym, forms): |
87 | 87 | entry = forms[0] |
88 | 88 | forms = set(forms) |
89 | 89 | morf_lexemes = Lexeme.objects.filter( |
90 | - deleted=False, lexemeassociation__vocabulary__id='Morfologik', entry=entry, | |
90 | + lexemeassociation__vocabulary__id='Morfologik', entry=entry, | |
91 | 91 | part_of_speech__lexical_class__symbol=lc_sym) |
92 | 92 | for l in morf_lexemes: |
93 | 93 | if l.part_of_speech.lexical_class.symbol != lc_sym: |
... | ... |
dictionary/management/commands/extra_crs.py
... | ... | @@ -63,7 +63,7 @@ def add_crs(type, path): |
63 | 63 | morfologik = Vocabulary.objects.get(id='Morfologik') |
64 | 64 | morf = morfologik.owned_lexemes_pk() |
65 | 65 | lexemes = Lexeme.objects.filter( |
66 | - deleted=False, lexemeassociation__vocabulary__id='Morfologik') | |
66 | + lexemeassociation__vocabulary__id='Morfologik') | |
67 | 67 | adv = lexemes.filter(part_of_speech__symbol__in=('adv', 'advndm')) |
68 | 68 | adj = lexemes.filter(part_of_speech__symbol='adj') |
69 | 69 | advcom = lexemes.filter(part_of_speech__symbol='advcom') |
... | ... | @@ -106,11 +106,9 @@ def add_crs(type, path): |
106 | 106 | debug(advneg_e, u'Brak %s' % lack) |
107 | 107 | else: |
108 | 108 | advs = [cr.to_lexeme.pk for cr |
109 | - in adjs[0].refs_to.filter( | |
110 | - type='adjadv', to_lexeme__deleted=False)] | |
109 | + in adjs[0].refs_to.filter(type='adjadv')] | |
111 | 110 | advs += [cr.from_lexeme.pk for cr |
112 | - in adjs[0].refs_from.filter( | |
113 | - type='advadj', from_lexeme__deleted=False)] | |
111 | + in adjs[0].refs_from.filter(type='advadj')] | |
114 | 112 | advs = adv.filter(pk__in=advs).distinct() |
115 | 113 | if len(advs) > 1: |
116 | 114 | details = [] |
... | ... |
dictionary/management/commands/filter_new.py
... | ... | @@ -21,8 +21,7 @@ def filter_new(input_file): |
21 | 21 | if not not_found: |
22 | 22 | pos, ic, forms = line.split(':')[1].split(';') |
23 | 23 | lexemes = Lexeme.objects.distinct().filter( |
24 | - vocabularies__id__in=('Morfologik', 'zbytkiM'), entry=entry, | |
25 | - deleted=False) | |
24 | + vocabularies__id__in=('Morfologik', 'zbytkiM'), entry=entry) | |
26 | 25 | if pos == 'subst': |
27 | 26 | lexemes = lexemes.filter(part_of_speech__symbol__in=( |
28 | 27 | 'subst', 'osc', 'skrs')) |
... | ... | @@ -54,8 +53,7 @@ def filter_new(input_file): |
54 | 53 | output('AMBIGUITY: %s' % line) |
55 | 54 | if not_found: |
56 | 55 | lexemes = Lexeme.objects.filter( |
57 | - vocabularies__id__in=('Morfologik', 'zbytkiM'), entry=entry, | |
58 | - deleted=False) | |
56 | + vocabularies__id__in=('Morfologik', 'zbytkiM'), entry=entry) | |
59 | 57 | if len(lexemes) == 0: |
60 | 58 | output('NOT FOUND: %s' % line) |
61 | 59 | elif len(lexemes) == 1: |
... | ... |
dictionary/management/commands/fix_homonym.py
... | ... | @@ -17,10 +17,10 @@ def fix_homonym(): |
17 | 17 | no_history() |
18 | 18 | homonyms = ( |
19 | 19 | Lexeme.objects.values('entry', 'part_of_speech') |
20 | - .filter(deleted=False).annotate(count=Count('pk')).filter(count__gt=1)) | |
20 | + .annotate(count=Count('pk')).filter(count__gt=1)) | |
21 | 21 | for homonym in homonyms: |
22 | 22 | lexemes = Lexeme.objects.filter( |
23 | - deleted=False, entry=homonym['entry'], | |
23 | + entry=homonym['entry'], | |
24 | 24 | part_of_speech=homonym['part_of_speech']).order_by('pk') |
25 | 25 | for i, lexeme in enumerate(lexemes, 1): |
26 | 26 | lexeme.homonym_number = i |
... | ... |
dictionary/management/commands/fix_morfologik.py
... | ... | @@ -15,7 +15,7 @@ class Command(BaseCommand): |
15 | 15 | |
16 | 16 | morfologik = Vocabulary.objects.get(id='Morfologik') |
17 | 17 | morf = morfologik.owned_lexemes_pk() |
18 | -existing = Lexeme.objects.filter(deleted=False) | |
18 | +existing = Lexeme.objects # deleted rows are filtered out automatically | 
19 | 19 | |
20 | 20 | def sgtant_qualifiers(): |
21 | 21 | sgtant = existing.filter(comment__contains='singulare tantum') |
... | ... |
dictionary/management/commands/fix_osc.py
... | ... | @@ -20,7 +20,7 @@ def fix_osc(): |
20 | 20 | no_history() |
21 | 21 | morfologik = Vocabulary.objects.get(id='Morfologik') |
22 | 22 | morf = morfologik.owned_lexemes_pk() |
23 | - existing = Lexeme.objects.filter(deleted=False) | |
23 | + existing = Lexeme.objects | |
24 | 24 | morf_osc = existing.filter( |
25 | 25 | pk__in=morf, part_of_speech__symbol='subst', entry__endswith=u'ość') |
26 | 26 | for lexeme in morf_osc: |
... | ... | @@ -38,7 +38,7 @@ def fix_osc(): |
38 | 38 | lexeme.part_of_speech = PartOfSpeech.objects.get(symbol='osc') |
39 | 39 | lexeme.save() |
40 | 40 | negs = CrossReference.objects.filter( |
41 | - from_lexeme__in=adjs, to_lexeme__deleted=False, type__symbol='nieadj') | |
41 | + from_lexeme__in=adjs, type__symbol='nieadj') | |
42 | 42 | if negs: |
43 | 43 | # all adjectives from Morfologik take the negation nie+, not nie-+ | 
44 | 44 | # it looks like M has no nie-...-ość forms... | 
... | ... |
dictionary/management/commands/fix_surnames.py
... | ... | @@ -18,7 +18,7 @@ def fix_surnames(): |
18 | 18 | SGJP = Vocabulary.objects.get(id='SGJP') |
19 | 19 | morf = morfologik.owned_lexemes_pk() |
20 | 20 | sgjp = SGJP.owned_lexemes_pk() |
21 | - existing = Lexeme.objects.filter(deleted=False) | |
21 | + existing = Lexeme.objects | |
22 | 22 | sgjp_subst = existing.filter( # filtering by SGJP leaves nothing... | 
23 | 23 | part_of_speech__symbol='subst', |
24 | 24 | entry__regex=u'^[A-ZĄĆĘŁŃÓŚŻŹ]') |
... | ... |
dictionary/management/commands/import_morfologik.py
... | ... | @@ -20,7 +20,7 @@ DEBUG = False |
20 | 20 | |
21 | 21 | #sgjp = Vocabulary.objects.get(id='SGJP').owned_lexemes_pk() |
22 | 22 | morf = Vocabulary.objects.get(id='Morfologik').owned_lexemes_pk() |
23 | -sgjp = Lexeme.objects.filter(deleted=False, source='SGJP') | |
23 | +sgjp = Lexeme.objects.filter(source='SGJP') | |
24 | 24 | |
25 | 25 | # there may be an unrecognized pltant | 
26 | 26 | ending_genders = { |
... | ... | @@ -499,8 +499,7 @@ def check_sgjp_v(entry, ic, patterns, base_forms, derived): |
499 | 499 | if set(patterns) == sgjp_patterns: |
500 | 500 | sgjp_derived = {} |
501 | 501 | for pos in ('ger', 'pact', 'ppas'): |
502 | - derived_part = lexeme.refs_to.filter( | |
503 | - type='ver' + pos, to_lexeme__deleted=False) | |
502 | + derived_part = lexeme.refs_to.filter(type='ver' + pos) | |
504 | 503 | if pos == 'ppas': |
505 | 504 | derived_part = derived_part |
506 | 505 | if derived_part: |
... | ... | @@ -525,8 +524,7 @@ def check_sgjp(lc_sym, entry, form_set, forms, **extra): |
525 | 524 | lexemes = lexemes.filter(pk__in=sgjp) |
526 | 525 | matched_lexemes = [] |
527 | 526 | for lexeme in lexemes: |
528 | - if lc_sym == 'adj' and lexeme.refs_to.filter( | |
529 | - type='nieadj', to_lexeme__deleted=False): | |
527 | + if lc_sym == 'adj' and lexeme.refs_to.filter(type='nieadj'): | |
530 | 528 | continue |
531 | 529 | if lc_sym == 'subst' and extra['tantum'] == 'sg': |
532 | 530 | sgjp_forms = lexeme.all_forms(affixes=False, label_filter=r'sg:') |
... | ... | @@ -564,7 +562,7 @@ def check_sgjp(lc_sym, entry, form_set, forms, **extra): |
564 | 562 | # dla przymiotników |
565 | 563 | def get_negation(lc_sym, lexeme): |
566 | 564 | negations = [cf.to_lexeme for cf in CrossReference.objects.filter( |
567 | - from_lexeme=lexeme, to_lexeme__deleted=False, type='adjnie')] | |
565 | + from_lexeme=lexeme, type='adjnie')] | |
568 | 566 | good_negation = None |
569 | 567 | for n in negations: |
570 | 568 | if n.entry == u'nie' + lexeme.entry: |
... | ... | @@ -575,7 +573,7 @@ def get_negation(lc_sym, lexeme): |
575 | 573 | |
576 | 574 | def closest_lexeme_subst(entry, gender, patterns, included=None): |
577 | 575 | lexemes = Lexeme.objects.filter( |
578 | - deleted=False, part_of_speech__lexical_class__symbol='subst') | |
576 | + part_of_speech__lexical_class__symbol='subst') | |
579 | 577 | lexemes = lexemes.distinct() |
580 | 578 | # same gender | 
581 | 579 | gender = gender[0] if gender != 'm1' else 'm1' |
... | ... |
dictionary/management/commands/import_resztki.py
... | ... | @@ -20,7 +20,7 @@ DEBUG = False |
20 | 20 | GENDERS = ('m1', 'm2', 'm3', 'm', 'f', 'n1', 'n2', 'p1', 'p2', 'p3') |
21 | 21 | |
22 | 22 | #morf = Vocabulary.objects.get(id='Morfologik').owned_lexemes.all() |
23 | -sgjp = Lexeme.objects.filter(deleted=False).exclude(source='Morfologik') | |
23 | +sgjp = Lexeme.objects.exclude(source='Morfologik') | |
24 | 24 | |
25 | 25 | def get_basic_endings(parts_of_speech, genders=None): |
26 | 26 | ics = InflectionCharacteristic.objects.filter( |
... | ... | @@ -269,8 +269,7 @@ def check_sgjp(lc_sym, entry, form_set, **extra): |
269 | 269 | lexemes = lexemes.filter(pk__in=sgjp) |
270 | 270 | matched_lexemes = [] |
271 | 271 | for lexeme in lexemes: |
272 | - if lc_sym == 'adj' and lexeme.refs_to.filter( | |
273 | - type='nieadj', to_lexeme__deleted=False): | |
272 | + if lc_sym == 'adj' and lexeme.refs_to.filter(type='nieadj'): | |
274 | 273 | continue |
275 | 274 | if lc_sym == 'subst' and extra['tantum'] == 'sg': |
276 | 275 | sgjp_forms = lexeme.all_forms(affixes=False, label_filter=r'sg:') |
... | ... | @@ -307,7 +306,7 @@ def check_sgjp(lc_sym, entry, form_set, **extra): |
307 | 306 | |
308 | 307 | def closest_lexeme_subst(entry, gender, patterns, included=None): |
309 | 308 | lexemes = Lexeme.objects.filter( |
310 | - deleted=False, part_of_speech__lexical_class__symbol='subst') | |
309 | + part_of_speech__lexical_class__symbol='subst') | |
311 | 310 | lexemes = lexemes.distinct() |
312 | 311 | # same gender | 
313 | 312 | genders = expand_gender(gender) |
... | ... |
dictionary/management/commands/import_variant.py
... | ... | @@ -13,12 +13,14 @@ class Command(BaseCommand): |
13 | 13 | import_variant(variant, open(filename)) |
14 | 14 | |
15 | 15 | def import_variant(variant, input_file): |
16 | + TableTemplate.objects.filter(variant__id=variant).delete() | |
16 | 17 | for line in input_file: |
17 | 18 | if line.startswith(variant): |
18 | 19 | line = line.strip().decode('utf-8') |
19 | 20 | data = line.split('\t') |
20 | 21 | _variant, pos, p_type, ic, row, col, rspan, cspan, bfl, tag, pref, suf, \ |
21 | 22 | index = data |
23 | + assert variant == _variant | |
22 | 24 | # a bit of copy-paste | 
23 | 25 | lc = PartOfSpeech.objects.get(symbol=pos).lexical_class |
24 | 26 | v, _created = Variant.objects.get_or_create(id=variant) |
... | ... |
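The two added lines apparently make re-imports idempotent and safer: previously imported table templates for the variant are wiped before loading, and since input lines are selected with startswith(variant), the assert catches prefix collisions (a requested variant '1' would otherwise also swallow the lines of a hypothetical variant '10').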
dictionary/management/commands/init_polimorf.py
... | ... | @@ -16,8 +16,8 @@ def init_polimorf(): |
16 | 16 | no_history() |
17 | 17 | pm, created = Vocabulary.objects.get_or_create(id='PoliMorf') |
18 | 18 | sgjp = Vocabulary.objects.get(id='SGJP') |
19 | - existing = Lexeme.objects.filter(deleted=False) | |
20 | - for l in sgjp.owned_lexemes.filter(deleted=False): | |
19 | + existing = Lexeme.objects | |
20 | + for l in sgjp.owned_lexemes.all(): | |
21 | 21 | LexemeAssociation.objects.get_or_create(lexeme=l, vocabulary=pm) |
22 | 22 | for l in existing.filter(owner_vocabulary__id='Morfologik'): |
23 | 23 | if not existing.filter(entry=l.entry, owner_vocabulary__id='SGJP'): |
... | ... |
dictionary/management/commands/load_import.py
... | ... | @@ -87,8 +87,8 @@ def check_der(verb, pos, entry, patterns): |
87 | 87 | ic = lips[0].inflection_characteristic.entry |
88 | 88 | matched = [] |
89 | 89 | for l in Lexeme.objects.filter( |
90 | - deleted=False, entry=entry, part_of_speech__symbol=pos, | |
91 | - lexemeinflectionpattern__inflection_characteristic__entry=ic): | |
90 | + entry=entry, part_of_speech__symbol=pos, | |
91 | + lexemeinflectionpattern__inflection_characteristic__entry=ic): | |
92 | 92 | l_lips = l.lexemeinflectionpattern_set.all() |
93 | 93 | if l_lips[0].inflection_characteristic.entry == ic: |
94 | 94 | l_patterns = set(l.patterns.values_list('name', flat=True)) |
... | ... |
dictionary/management/commands/uncertain_ppas.py
... | ... | @@ -13,7 +13,7 @@ class Command(BaseCommand): |
13 | 13 | def get_derived(lexeme): |
14 | 14 | derived = set() |
15 | 15 | for pos in ('ger', 'pact', 'ppas'): |
16 | - if lexeme.refs_to.filter(type__symbol='ver' + pos, to_lexeme__deleted=False): | |
16 | + if lexeme.refs_to.filter(type__symbol='ver' + pos): | |
17 | 17 | derived.add(pos) |
18 | 18 | return derived |
19 | 19 | |
... | ... | @@ -23,21 +23,20 @@ def list_verbs(): |
23 | 23 | SGJP = Vocabulary.objects.get(id='SGJP') |
24 | 24 | morf = morfologik.owned_lexemes_pk() |
25 | 25 | sgjp = SGJP.owned_lexemes_pk() |
26 | - existing = Lexeme.objects.filter(deleted=False) | |
27 | - sgjp_verbs = existing.filter(pk__in=sgjp, part_of_speech__symbol='v') | |
28 | - morf_verbs = existing.filter(pk__in=morf, part_of_speech__symbol='v') | |
26 | + verbs = Lexeme.objects.filter(part_of_speech__symbol='v') | |
27 | + sgjp_verbs = verbs.filter(pk__in=sgjp) | |
28 | + morf_verbs = verbs.filter(pk__in=morf) | |
29 | 29 | sgjp_entries = set(sgjp_verbs.values_list('entry', flat=True)) |
30 | 30 | morf_entries = set(morf_verbs.values_list('entry', flat=True)) |
31 | 31 | common_entries = sgjp_entries & morf_entries |
32 | 32 | lexemes = morf_verbs.filter(entry__in=common_entries) |
33 | 33 | for lexeme in lexemes: |
34 | - homonyms = existing.filter( | |
35 | - pk__in=sgjp, entry=lexeme.entry, part_of_speech__symbol='v') | |
34 | + homonyms = verbs.filter(pk__in=sgjp, entry=lexeme.entry) | |
36 | 35 | for hom in homonyms: |
37 | 36 | if set(lexeme.patterns.all()) == set(hom.patterns.all()): |
38 | 37 | m_der = get_derived(lexeme) |
39 | 38 | s_der = get_derived(hom) |
40 | - if (m_der.issuperset(s_der) and m_der - s_der == set(['ppas']) | |
41 | - or s_der.issuperset(m_der) and s_der - m_der == set(['ppas'])): | |
39 | + if (m_der.issuperset(s_der) and m_der - s_der == {'ppas'} | |
40 | + or s_der.issuperset(m_der) and s_der - m_der == {'ppas'}): | |
42 | 41 | print lexeme.entry.encode('utf-8'), |
43 | 42 | print lexeme.pk, hom.pk, 'morf' if 'ppas' in m_der else 'sgjp' |
... | ... |
dictionary/models.py
... | ... | @@ -4,6 +4,21 @@ from django.db.models import * |
4 | 4 | from django.contrib.auth.models import User, Permission |
5 | 5 | from common.util import no_history |
6 | 6 | |
7 | +class NotDeletedManager(Manager): | |
8 | + use_for_related_fields = True | 
9 | + | |
10 | + def get_query_set(self): | |
11 | + return super(NotDeletedManager, self).get_query_set().filter(deleted=False) | |
12 | + | |
13 | +class LexemeNotDeletedManager(Manager): | |
14 | + use_for_related_fields = True | 
15 | + | |
16 | + def get_query_set(self): | |
17 | + return super( | |
18 | + LexemeNotDeletedManager, self).get_query_set().filter( | |
19 | + lexeme__deleted=False) | |
20 | + | |
21 | + | |
7 | 22 | class QualifierExclusionClass(Model): |
8 | 23 | name = CharField( |
9 | 24 | unique=True, max_length=64, db_column='nazwa', verbose_name=u'nazwa') |
... | ... | @@ -25,6 +40,7 @@ def get_exclusion_classes(): |
25 | 40 | exclusion_classes[ec.pk] = q_list |
26 | 41 | return exclusion_classes |
27 | 42 | |
43 | + | |
28 | 44 | # qualifiers for lexemes, inflection patterns, | 
29 | 45 | # endings and exceptional forms | 
30 | 46 | class Qualifier(Model): |
... | ... | @@ -37,11 +53,14 @@ class Qualifier(Model): |
37 | 53 | verbose_name=u'klasa wykluczania') |
38 | 54 | deleted = BooleanField(db_column='usuniety') |
39 | 55 | |
56 | + objects = NotDeletedManager() | |
57 | + all_objects = Manager() | |
58 | + | |
40 | 59 | def is_empty(self): |
41 | 60 | return not ( |
42 | - self.lexeme_set.filter(deleted=False).exists() or | |
61 | + self.lexeme_set.exists() or | |
43 | 62 | self.ending_set.exists() or |
44 | - self.lexemeinflectionpattern_set.filter(lexeme__deleted=False).exists()) | |
63 | + self.lexemeinflectionpattern_set.exists()) | |
45 | 64 | |
46 | 65 | def set_for(self, something, set): |
47 | 66 | if set: |
... | ... | @@ -64,12 +83,11 @@ class Qualifier(Model): |
64 | 83 | ordering = ['label'] |
65 | 84 | |
66 | 85 | def visible_qualifiers(user): |
67 | - return Qualifier.objects.filter( | |
68 | - vocabulary__in=visible_vocabularies(user), deleted=False) | |
86 | + return Qualifier.objects.filter(vocabulary__in=visible_vocabularies(user)) | |
69 | 87 | |
70 | 88 | def editable_qualifiers(user): |
71 | - return Qualifier.objects.filter( | |
72 | - vocabulary__in=editable_vocabularies(user), deleted=False) | |
89 | + return Qualifier.objects.filter(vocabulary__in=editable_vocabularies(user)) | |
90 | + | |
73 | 91 | |
74 | 92 | class Classification(Model): |
75 | 93 | name = CharField( |
... | ... | @@ -78,13 +96,16 @@ class Classification(Model): |
78 | 96 | |
79 | 97 | def value_tree(self): |
80 | 98 | parts = [] |
81 | - for v in self.values.filter(parent_node__isnull=True, deleted=False): | |
99 | + for v in self.values.filter(parent_node__isnull=True): | |
82 | 100 | parts.append(v.subtree()) |
83 | 101 | return parts |
84 | 102 | |
85 | 103 | def make_choices(self): |
86 | 104 | return make_choices(self.value_tree()) |
87 | 105 | |
106 | + def __unicode__(self): | |
107 | + return self.name | |
108 | + | |
88 | 109 | class Meta: |
89 | 110 | db_table = 'klasyfikacje' |
90 | 111 | |
... | ... | @@ -96,6 +117,7 @@ def make_choices(tree): |
96 | 117 | choices += [(pk, u' ' + label) for (pk, label) in subchoices] # |
97 | 118 | return choices |
98 | 119 | |
120 | + | |
99 | 121 | class ClassificationValue(Model): |
100 | 122 | label = CharField( |
101 | 123 | unique=True, max_length=64, db_column='nazwa', |
... | ... | @@ -108,14 +130,17 @@ class ClassificationValue(Model): |
108 | 130 | lexemes = ManyToManyField('Lexeme', blank=True) |
109 | 131 | deleted = BooleanField(db_column='usunieta') |
110 | 132 | |
133 | + objects = NotDeletedManager() | |
134 | + all_objects = Manager() | |
135 | + | |
111 | 136 | def subtree(self): |
112 | 137 | subtrees = [] |
113 | - for child in self.child_nodes.filter(deleted=False): | |
138 | + for child in self.child_nodes.all(): | |
114 | 139 | subtrees.append(child.subtree()) |
115 | 140 | return self, subtrees |
116 | 141 | |
117 | 142 | def is_empty(self): |
118 | - return not self.lexemes.filter(deleted=False).exists() | |
143 | + return not self.lexemes.exists() | |
119 | 144 | |
120 | 145 | def __unicode__(self): |
121 | 146 | return self.label |
... | ... | @@ -123,6 +148,7 @@ class ClassificationValue(Model): |
123 | 148 | class Meta: |
124 | 149 | db_table = 'wartosci_klasyfikacji' |
125 | 150 | |
151 | + | |
126 | 152 | class LexicalClass(Model): |
127 | 153 | symbol = CharField(primary_key=True, max_length=16, db_column='czm') |
128 | 154 | |
... | ... | @@ -132,6 +158,7 @@ class LexicalClass(Model): |
132 | 158 | class Meta: |
133 | 159 | db_table = 'czescimowy' |
134 | 160 | |
161 | + | |
135 | 162 | class PartOfSpeech(Model): |
136 | 163 | symbol = CharField(primary_key=True, max_length=16, db_column='pos') |
137 | 164 | lexical_class = ForeignKey(LexicalClass, db_column='czm') |
... | ... | @@ -143,6 +170,7 @@ class PartOfSpeech(Model): |
143 | 170 | db_table = 'klasygramatyczne' |
144 | 171 | ordering = ['symbol'] |
145 | 172 | |
173 | + | |
146 | 174 | class BaseFormLabel(Model): |
147 | 175 | entry = CharField(max_length=32, blank=True, db_column='efobaz') |
148 | 176 | |
... | ... | @@ -152,6 +180,7 @@ class BaseFormLabel(Model): |
152 | 180 | class Meta: |
153 | 181 | db_table = 'efobazy' |
154 | 182 | |
183 | + | |
155 | 184 | class InflectionCharacteristic(Model): |
156 | 185 | entry = CharField(max_length=16, blank=True, db_column='charfl') |
157 | 186 | part_of_speech = ForeignKey(PartOfSpeech, db_column='pos') |
... | ... | @@ -164,6 +193,7 @@ class InflectionCharacteristic(Model): |
164 | 193 | db_table = 'charfle' |
165 | 194 | unique_together = ['entry', 'part_of_speech'] |
166 | 195 | |
196 | + | |
167 | 197 | class PatternType(Model): |
168 | 198 | lexical_class = ForeignKey( |
169 | 199 | LexicalClass, db_column='czm', verbose_name=u'cz. mowy') |
... | ... | @@ -180,10 +210,13 @@ class PatternType(Model): |
180 | 210 | .values_list('endings__base_form_label', flat=True)) |
181 | 211 | return BaseFormLabel.objects.filter(pk__in=bfl_pks) |
182 | 212 | |
213 | + def __unicode__(self): | |
214 | + return '%s (%s)' % (self.entry, self.lexical_class.symbol) | |
215 | + | |
183 | 216 | class Meta: |
184 | 217 | db_table = 'typywzorow' |
185 | 218 | |
186 | -# inflection pattern | 
219 | + | |
187 | 220 | class Pattern(Model): |
188 | 221 | PATTERN_STATUS_CHOICES = ( |
189 | 222 | # (<symbol>, u'<opis>'), |
... | ... | @@ -229,6 +262,7 @@ class Pattern(Model): |
229 | 262 | ('view_pattern', u'Może oglądać wzory'), |
230 | 263 | ) |
231 | 264 | |
265 | + | |
232 | 266 | def prepare_table(table): |
233 | 267 | for row in table: |
234 | 268 | for cell in row: |
... | ... | @@ -250,6 +284,31 @@ def prepare_table(table): |
250 | 284 | return new |
251 | 285 | cell['label'] = filter(new, cell['label']) |
252 | 286 | |
287 | + | |
288 | +# ending of a base form | 
289 | +class Ending(Model): | |
290 | + pattern = ForeignKey(Pattern, related_name='endings', db_column='w_id') | |
291 | + # label (tag) of the base form | 
292 | + base_form_label = ForeignKey(BaseFormLabel, db_column='efobaz') | |
293 | + # ordering among endings with the same base_form_label | 
294 | + index = IntegerField(db_column='zind') | |
295 | + string = CharField(max_length=16, db_column='zak', blank=True) | |
296 | + qualifiers = ManyToManyField( | |
297 | + Qualifier, blank=True, db_table='kwalifikatory_zakonczen') | |
298 | + | |
299 | + def editable_vocabularies(self, user): | |
300 | + return editable_vocabularies(user) | |
301 | + | |
302 | + def __unicode__(self): | |
303 | + return '%s : %s : %s' % ( | |
304 | + self.pattern.name, self.string, self.base_form_label) | |
305 | + | |
306 | + class Meta: | |
307 | + db_table = 'zakonczenia' | |
308 | + unique_together = ('pattern', 'base_form_label', 'index') | |
309 | + ordering = ['index'] | |
310 | + | |
311 | + | |
253 | 312 | class Lexeme(Model): |
254 | 313 | STATUS_CHOICES = ( |
255 | 314 | ('cand', u'kandydat'), |
... | ... | @@ -280,6 +339,9 @@ class Lexeme(Model): |
280 | 339 | patterns = ManyToManyField(Pattern, through='LexemeInflectionPattern') |
281 | 340 | deleted = BooleanField(db_column='usuniety') |
282 | 341 | |
342 | + objects = NotDeletedManager() | |
343 | + all_objects = Manager() | |
344 | + | |
283 | 345 | def inflection_tables(self, variant, qualifiers=None): |
284 | 346 | lips = self.lexemeinflectionpattern_set.order_by('index') |
285 | 347 | ics = [] |
... | ... | @@ -362,7 +424,7 @@ class Lexeme(Model): |
362 | 424 | |
363 | 425 | def fix_homonym_number(self): |
364 | 426 | homonym_numbers = (Lexeme.objects.filter( |
365 | - deleted=False, entry=self.entry, part_of_speech=self.part_of_speech) | |
427 | + entry=self.entry, part_of_speech=self.part_of_speech) | |
366 | 428 | .exclude(pk=self.pk)).values_list('homonym_number', flat=True) |
367 | 429 | for i in range(1, len(homonym_numbers) + 2): |
368 | 430 | if i not in homonym_numbers: |
... | ... | @@ -397,13 +459,10 @@ class Lexeme(Model): |
397 | 459 | ('lexeme_priority', u'Ważniejszy głos przy modyfikowaniu leksemów') |
398 | 460 | ) |
399 | 461 | |
400 | -def filter_visible(lexemes, user, deleted=False): | |
462 | +def filter_visible(lexemes, user): | |
401 | 463 | vocab_ids = [v.id for v in visible_vocabularies(user)] |
402 | 464 | # avoiding a subquery improves performance *a lot*! | 
403 | - lexemes = lexemes.filter(vocabularies__id__in=vocab_ids).distinct() | |
404 | - if not deleted: | |
405 | - lexemes = lexemes.filter(deleted=False) | |
406 | - return lexemes | |
465 | + return lexemes.filter(vocabularies__id__in=vocab_ids).distinct() | |
407 | 466 | |
408 | 467 | def get_root(basic_form, pos, pattern, ic, use_pattern_ending=False): |
409 | 468 | bfl = ic.basic_form_label |
... | ... | @@ -444,6 +503,9 @@ class LexemeInflectionPattern(Model): |
444 | 503 | qualifiers = ManyToManyField( |
445 | 504 | Qualifier, blank=True, db_table='kwalifikatory_odmieniasiow') |
446 | 505 | |
506 | + objects = LexemeNotDeletedManager() | |
507 | + all_objects = Manager() | |
508 | + | |
447 | 509 | def table_template(self, variant): |
448 | 510 | return TableTemplate.objects.get( |
449 | 511 | variant=variant, pattern_type=self.pattern.type, |
... | ... | @@ -593,45 +655,21 @@ def combine_qualifiers(l_qualifiers, lip_qualifiers, e_qualifiers): |
593 | 655 | |
594 | 656 | def filter_visible_lips(lips, user): |
595 | 657 | vocabs = visible_vocabularies(user) |
596 | - return lips.filter( | |
597 | - lexeme__vocabularies__in=vocabs, lexeme__deleted=False).distinct() | |
598 | - | |
599 | -# ending of a base form | 
600 | -class Ending(Model): | |
601 | - pattern = ForeignKey(Pattern, related_name='endings', db_column='w_id') | |
602 | - # label (tag) of the base form | 
603 | - base_form_label = ForeignKey(BaseFormLabel, db_column='efobaz') | |
604 | - # ordering among endings with the same base_form_label | 
605 | - index = IntegerField(db_column='zind') | |
606 | - string = CharField(max_length=16, db_column='zak', blank=True) | |
607 | - qualifiers = ManyToManyField( | |
608 | - Qualifier, blank=True, db_table='kwalifikatory_zakonczen') | |
609 | - | |
610 | - def editable_vocabularies(self, user): | |
611 | - return editable_vocabularies(user) | |
612 | - | |
613 | - def __unicode__(self): | |
614 | - return '%s : %s : %s' % ( | |
615 | - self.pattern.name, self.string, self.base_form_label) | |
616 | - | |
617 | - class Meta: | |
618 | - db_table = 'zakonczenia' | |
619 | - unique_together = ('pattern', 'base_form_label', 'index') | |
620 | - ordering = ['index'] | |
658 | + return lips.filter(lexeme__vocabularies__in=vocabs).distinct() | |
621 | 659 | |
622 | 660 | |
623 | 661 | # Used to attach flags to individual forms | 
624 | 662 | # of individual lexemes | 
625 | -class UncommonForm(Model): | |
626 | - lexeme_inflection_pattern = ForeignKey( | |
627 | - LexemeInflectionPattern, db_column='o_id') | |
628 | - #rather a tag, but from which tagset? | 
629 | - #base_form_label/tag = | |
630 | - qualifiers = ManyToManyField( | |
631 | - Qualifier, blank=True, db_table='kwalifikatory_form') | |
632 | - | |
633 | - class Meta: | |
634 | - db_table = 'formy_wyjatkowe' | |
663 | +#class UncommonForm(Model): | |
664 | +# lexeme_inflection_pattern = ForeignKey( | |
665 | +# LexemeInflectionPattern, db_column='o_id') | |
666 | +# #rather a tag, but from which tagset? | 
667 | +# #base_form_label/tag = | |
668 | +# qualifiers = ManyToManyField( | |
669 | +# Qualifier, blank=True, db_table='kwalifikatory_form') | |
670 | +# | |
671 | +# class Meta: | |
672 | +# db_table = 'formy_wyjatkowe' | |
635 | 673 | # unique_together = ( |
636 | 674 | # 'lexeme_inflection_pattern', |
637 | 675 | # 'tag', |
... | ... | @@ -681,10 +719,17 @@ def visible_vocabularies(user): |
681 | 719 | def editable_vocabularies(user): |
682 | 720 | return user.editable_vocabularies.all() |
683 | 721 | |
722 | + | |
684 | 723 | class LexemeAssociation(Model): |
685 | 724 | lexeme = ForeignKey(Lexeme, db_column='l_id') |
686 | 725 | vocabulary = ForeignKey(Vocabulary, db_column='slownik') |
687 | 726 | |
727 | + objects = LexemeNotDeletedManager() | |
728 | + all_objects = Manager() | |
729 | + | |
730 | + def __unicode__(self): | |
731 | + return '%s/%s' % (self.lexeme.entry, self.vocabulary.id) | |
732 | + | |
688 | 733 | class Meta: |
689 | 734 | db_table = 'leksemy_w_slownikach' |
690 | 735 | |
... | ... | @@ -709,10 +754,20 @@ class CrossReferenceType(Model): |
709 | 754 | PartOfSpeech, db_column='pos2', related_name='crtype_from') |
710 | 755 | #reverse = ForeignKey('self', db_column='odwrotny') |
711 | 756 | |
757 | + def __unicode__(self): | |
758 | + return self.symbol | |
759 | + | |
712 | 760 | class Meta: |
713 | 761 | db_table = 'typyodsylaczy' |
714 | 762 | |
715 | 763 | |
764 | +class CRManager(Manager): | |
765 | + use_for_related_fields = True | 
766 | + | |
767 | + def get_query_set(self): | |
768 | + return super(CRManager, self).get_query_set().filter( | |
769 | + from_lexeme__deleted=False, to_lexeme__deleted=False) | |
770 | + | |
716 | 771 | class CrossReference(Model): |
717 | 772 | from_lexeme = ForeignKey(Lexeme, db_column='l_id_od', related_name='refs_to') |
718 | 773 | to_lexeme = ForeignKey( |
... | ... | @@ -721,6 +776,13 @@ class CrossReference(Model): |
721 | 776 | type = ForeignKey( |
722 | 777 | CrossReferenceType, db_column='typods_id', verbose_name=u'typ') |
723 | 778 | |
779 | + objects = CRManager() | |
780 | + all_objects = Manager() | |
781 | + | |
782 | + def __unicode__(self): | |
783 | + return '%s: %s -> %s' % ( | |
784 | + self.type.symbol, self.from_lexeme.entry, self.to_lexeme.entry) | |
785 | + | |
724 | 786 | class Meta: |
725 | 787 | db_table = 'odsylacze' |
726 | 788 | |
... | ... | @@ -761,6 +823,11 @@ class Cell(Model): |
761 | 823 | #ordering of the cell within the paradigm | 
762 | 824 | index = IntegerField(db_column='kind') |
763 | 825 | |
826 | + def __unicode__(self): | |
827 | + return '%s [%s] %s- -%s {%s}' % ( | |
828 | + self.table_template, self.base_form_label, self.prefix, self.suffix, | |
829 | + self.tag) | |
830 | + | |
764 | 831 | class Meta: |
765 | 832 | db_table = 'klatki' |
766 | 833 | ordering = ['index'] |
... | ... | @@ -772,6 +839,10 @@ class TableCell(Model): |
772 | 839 | rowspan = IntegerField() |
773 | 840 | colspan = IntegerField() |
774 | 841 | |
842 | + def __unicode__(self): | |
843 | + return '%s [%s->%s,%s->%s]' % ( | |
844 | + self.cell, self.row, self.rowspan, self.col, self.colspan) | |
845 | + | |
775 | 846 | class Meta: |
776 | 847 | db_table = 'komorki_tabel' |
777 | 848 | |
... | ... | @@ -813,6 +884,9 @@ class LexemeForm(Model): |
813 | 884 | lexeme = ForeignKey(Lexeme) |
814 | 885 | form = CharField(max_length=128, db_index=True) |
815 | 886 | |
887 | + objects = LexemeNotDeletedManager() | |
888 | + all_objects = Manager() | |
889 | + | |
816 | 890 | class SavedFilter(Model): |
817 | 891 | serialized_filter = TextField() |
818 | 892 | name = CharField(max_length=64) |
... | ... |
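A few notes on the manager declarations above: Django takes the first manager declared on a model as its _default_manager, which related-object descriptors and the admin build on, so objects = NotDeletedManager() must precede all_objects = Manager(), as it does here. The get_query_set spelling matches Django before 1.6 (renamed get_queryset from 1.6 on), and use_for_related_fields was replaced by Meta.base_manager_name in later versions, so porting this pattern forward needs both names adjusted. Finally, a bare Lexeme.objects without .all(), as assigned to existing in fix_morfologik.py and fix_osc.py above, is a Manager rather than a QuerySet; that works here because the scripts only ever call queryset methods such as filter on it.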
dictionary/pagination_types.py
... | ... | @@ -10,7 +10,7 @@ types = {} |
10 | 10 | def lexeme_history_list(params, user): |
11 | 11 | objects = History.objects.exclude(lexeme=None).distinct().values( |
12 | 12 | 'lexeme', 'user', 'transaction_began') |
13 | - lexemes = filter_visible(Lexeme.objects, user, deleted=True).exclude(entry='') | |
13 | + lexemes = filter_visible(Lexeme.all_objects, user).exclude(entry='') | |
14 | 14 | for field, lookup, value in params['filters']: |
15 | 15 | if field == 'user': |
16 | 16 | if lookup == 'eq': |
... | ... |
dictionary/views.py
... | ... | @@ -255,7 +255,7 @@ def manage_qualifiers(request): |
255 | 255 | else: |
256 | 256 | remove_exclusion_class_form = None |
257 | 257 | add_qualifier_form = AddQualifierForm(vocabulary=vocabulary) |
258 | - qualifiers = Qualifier.objects.filter(vocabulary=vocabulary, deleted=False) | |
258 | + qualifiers = Qualifier.objects.filter(vocabulary=vocabulary) | |
259 | 259 | qualifier_forms = [] |
260 | 260 | for q in qualifiers: |
261 | 261 | change_class_form = ChangeClassForm(instance=q, prefix='cec%s' % q.pk) |
... | ... |