Commit d59102a7736598c5ab3d667dfa7e1da4c43494bf
1 parent c001aac9
lip -> inflection
--HG-- branch : beta
Showing 28 changed files with 377 additions and 359 deletions
common/static/css/general.css
dictionary/ajax_lexeme_slickgrid.py
... | ... | @@ -50,10 +50,10 @@ class LexemeQuery(SlickGridQuery): |
50 | 50 | 'entry': lambda lexeme: lexeme.entry, |
51 | 51 | 'abbr_pos': lambda lexeme: lexeme.part_of_speech.pos_name.abbr, |
52 | 52 | 'pos': lambda lexeme: lexeme.part_of_speech_id, |
53 | - 'patterns': lambda lexeme: lexeme.lip_data()['patterns'], | |
53 | + 'patterns': lambda lexeme: lexeme.inflection_data()['patterns'], | |
54 | 54 | 'genders': lambda lexeme: |
55 | 55 | getattr(lexeme.attribute_value(ASPECT), 'value', '') or |
56 | - lexeme.lip_data()['genders'], | |
56 | + lexeme.inflection_data()['genders'], | |
57 | 57 | 'vocabs': lambda lexeme: '/'.join(lexeme.vocab_list()), |
58 | 58 | 'owner': lambda lexeme: lexeme.owner_vocabulary_id, |
59 | 59 | 'status': lambda lexeme: |
... | ... | @@ -81,7 +81,7 @@ class LexemeQuery(SlickGridQuery): |
81 | 81 | 'containing_vocabulary': 'vocabularies__id', |
82 | 82 | 'owner_vocabulary': 'owner_vocabulary_id', |
83 | 83 | 'lexeme_qualifier': 'qualifiers__id', |
84 | - 'lip_qualifier': 'inflection__qualifiers__id', | |
84 | + 'inflection_qualifier': 'inflection__qualifiers__id', | |
85 | 85 | 'qualifier': 'qualifiers_cache__id', |
86 | 86 | 'classification_value': 'classificationvalue__id', |
87 | 87 | 'borrowing_source': 'borrowing_source_id', |
... | ... | @@ -111,7 +111,7 @@ class LexemeQuery(SlickGridQuery): |
111 | 111 | 'w_id', 'odmieniasie', 'wzory', 'w_id', 'id', 'l_id', ident), |
112 | 112 | 'pattern_type': ( |
113 | 113 | 'typ', 'odmieniasie', 'wzory', 'w_id', 'id', 'l_id', int_list), |
114 | - 'lip_qualifier': ( | |
114 | + 'inflection_qualifier': ( | |
115 | 115 | 'qualifier_id', 'odmieniasie', 'kwalifikatory_odmieniasiow', |
116 | 116 | 'id', 'inflection_id', 'l_id', int_list), |
117 | 117 | } |
... | ... |
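
The LexemeQuery column getters and filter fields are plain dictionary entries, so the rename amounts to new key names plus the renamed Lexeme method they call. A minimal, simplified sketch of how such a getter resolves a grid cell; it assumes only that the lexeme object exposes the renamed inflection_data() method (defined in dictionary/models.py further down), and the render_cell helper is hypothetical:

    # Hypothetical, simplified illustration of the renamed SlickGrid column getters;
    # `lexeme` is assumed to expose inflection_data() as in dictionary/models.py below.
    column_getters = {
        'patterns': lambda lexeme: lexeme.inflection_data()['patterns'],
        'genders': lambda lexeme: lexeme.inflection_data()['genders'],
    }

    def render_cell(lexeme, column_name):
        # Look up the getter for the requested column and apply it to the row object.
        return column_getters[column_name](lexeme)
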
dictionary/ajax_lexeme_view.py
... | ... | @@ -10,8 +10,8 @@ from common.decorators import ajax, AjaxError, render_ajax |
10 | 10 | from common.util import error_messages, bisect_left, format_date |
11 | 11 | from dictionary.ajax_lexeme_slickgrid import LexemeQuery |
12 | 12 | from dictionary.auto_derivatives import lexeme_derivatives, create_derivative |
13 | -from dictionary.forms import LexemeEditForm, LIPEditForm, ClassificationForm, \ | |
14 | - CrossReferenceForm, ActionFieldForm, ACTION_FIELDS,\ | |
13 | +from dictionary.forms import LexemeEditForm, InflectionEditForm, \ | |
14 | + ClassificationForm, CrossReferenceForm, ActionFieldForm, ACTION_FIELDS,\ | |
15 | 15 | LexemeOpenAttributeForm, LexemeClosedAttributeForm, \ |
16 | 16 | LexemeMultipleAttributeForm |
17 | 17 | from dictionary.models import Lexeme, Inflection, PartOfSpeech, Vocabulary, \ |
... | ... | @@ -142,13 +142,14 @@ def lexeme_edit_form(request, id): |
142 | 142 | to_return['classification_forms'] = make_classification_forms( |
143 | 143 | l, editable=editable) |
144 | 144 | |
145 | - lips = l.inflection_set.all() | |
146 | - to_return['lip_forms'] = [ | |
147 | - (LIPEditForm( | |
148 | - part_of_speech=l.part_of_speech, instance=lip, | |
149 | - prefix='lip' + str(lip.pk), user=request.user, editable=editable), | |
150 | - lip.qualifiers.filter(vocabulary=owner) if ro_owner else []) | |
151 | - for lip in lips] | |
145 | + inflections = l.inflection_set.all() | |
146 | + to_return['inflection_forms'] = [ | |
147 | + (InflectionEditForm( | |
148 | + part_of_speech=l.part_of_speech, instance=inflection, | |
149 | + prefix='inf' + str(inflection.pk), user=request.user, | |
150 | + editable=editable), | |
151 | + inflection.qualifiers.filter(vocabulary=owner) if ro_owner else []) | |
152 | + for inflection in inflections] | |
152 | 153 | to_return['cross_references'] = l.cross_references(request.user) |
153 | 154 | if l.homonym_count(request.user) > 0: |
154 | 155 | to_return['homonym'] = l.homonym_number |
... | ... | @@ -198,16 +199,16 @@ def classification_forms(request, lexeme_id, vocab_id, pos): |
198 | 199 | |
199 | 200 | |
200 | 201 | @render_ajax(template='lexeme_edit_form_row.html', method='get') |
201 | -def new_lip_edit_row(request, lexeme_id, pos_id, num): | |
202 | +def new_inflection_edit_row(request, lexeme_id, pos_id, num): | |
202 | 203 | l = Lexeme.all_objects.get(pk=lexeme_id) |
203 | 204 | if not l.perm(request.user, 'change'): |
204 | 205 | raise AjaxError('access denied') |
205 | 206 | if not pos_id: |
206 | 207 | raise AjaxError(_(u'Unspecified part of speech.')) |
207 | 208 | pos = PartOfSpeech.objects.get(pk=pos_id) |
208 | - lip_form = LIPEditForm( | |
209 | - part_of_speech=pos, prefix='lip_add_%s' % num, user=request.user) | |
210 | - return {'lip_form': lip_form, 'editable': True} | |
209 | + inflection_form = InflectionEditForm( | |
210 | + part_of_speech=pos, prefix='inf_add_%s' % num, user=request.user) | |
211 | + return {'inflection_form': inflection_form, 'editable': True} | |
211 | 212 | |
212 | 213 | |
213 | 214 | @render_ajax(template='cross_reference_row.html', method='get') |
... | ... | @@ -314,38 +315,41 @@ def update_lexeme(request, form_data): |
314 | 315 | 'no_other_refs': rev_cr.from_lexeme.refs_to.count() == 1, |
315 | 316 | }) |
316 | 317 | cr.delete() |
317 | - submitted_lips = [] | |
318 | + submitted_inflections = [] | |
318 | 319 | submitted_crs = [] |
319 | 320 | pattern_types = set() |
320 | 321 | for pair in form_data: |
321 | 322 | name = pair['name'] |
322 | 323 | prefix = name.split('-')[0] |
323 | - if name.startswith('lip') and prefix not in submitted_lips: | |
324 | - submitted_lips.append(prefix) | |
325 | - if prefix.startswith('lip_add'): | |
326 | - lip = Inflection() | |
327 | - lip.lexeme = l | |
324 | + if name.startswith('inf') and prefix not in submitted_inflections: | |
325 | + submitted_inflections.append(prefix) | |
326 | + if prefix.startswith('inf_add'): | |
327 | + inflection = Inflection() | |
328 | + inflection.lexeme = l | |
328 | 329 | else: |
329 | 330 | pk = int(prefix[3:]) |
330 | - lip = Inflection.objects.get(pk=pk) | |
331 | + inflection = Inflection.objects.get(pk=pk) | |
331 | 332 | form_dict[prefix + '-qualifiers'] = get_list( |
332 | 333 | form_data, prefix + '-qualifiers') |
333 | - lip_form = LIPEditForm( | |
334 | + inflection_form = InflectionEditForm( | |
334 | 335 | part_of_speech=l.part_of_speech, data=form_dict, prefix=prefix, |
335 | - instance=lip, user=request.user, index=len(submitted_lips)) | |
336 | - if lip_form.is_valid(): | |
337 | - lip = lip_form.save() | |
338 | - lip.root = l.get_root(lip.pattern, lip.gender) | |
339 | - if lip.root is None: | |
336 | + instance=inflection, user=request.user, | |
337 | + index=len(submitted_inflections)) | |
338 | + if inflection_form.is_valid(): | |
339 | + inflection = inflection_form.save() | |
340 | + inflection.root = l.get_root( | |
341 | + inflection.pattern, inflection.gender) | |
342 | + if inflection.root is None: | |
340 | 343 | raise AjaxError( |
341 | 344 | _(u'Basic form ending doesn\'t match the pattern')) |
342 | - for qualifier in lip_form.fields['qualifiers'].queryset: | |
345 | + for qualifier in inflection_form.fields['qualifiers'].queryset: | |
343 | 346 | qualifier.set_for( |
344 | - lip, qualifier in lip_form.cleaned_data['qualifiers']) | |
345 | - lip.save() | |
346 | - pattern_types.add(lip.pattern) | |
347 | + inflection, | |
348 | + qualifier in inflection_form.cleaned_data['qualifiers']) | |
349 | + inflection.save() | |
350 | + pattern_types.add(inflection.pattern) | |
347 | 351 | else: |
348 | - raise AjaxError(error_messages(lip_form)) | |
352 | + raise AjaxError(error_messages(inflection_form)) | |
349 | 353 | if name.startswith('cr_add') and prefix not in submitted_crs: |
350 | 354 | submitted_crs.append(prefix) |
351 | 355 | cr_form = CrossReferenceForm(data=form_dict, prefix=prefix) |
... | ... | @@ -364,7 +368,7 @@ def update_lexeme(request, form_data): |
364 | 368 | }) |
365 | 369 | else: |
366 | 370 | raise AjaxError(error_messages(cr_form)) |
367 | - if len(submitted_lips) == 0 and l.is_public(): | |
371 | + if len(submitted_inflections) == 0 and l.is_public(): | |
368 | 372 | raise AjaxError( |
369 | 373 | _(u'Enter inflection or set “candidate” or “litter” status.')) |
370 | 374 | if l.part_of_speech_id not in ('subst', 'v') and len(pattern_types) > 1: |
... | ... | @@ -462,10 +466,10 @@ def create_derivatives(request, lexeme_id, chosen_derivatives): |
462 | 466 | def cr_tuple(cr): |
463 | 467 | pos = cr.to_lexeme.part_of_speech.symbol |
464 | 468 | if pos in ('ppas', 'appas'): |
465 | - lip = cr.to_lexeme.inflection_set.get() | |
469 | + inflection = cr.to_lexeme.inflection_set.get() | |
466 | 470 | ending9 = Ending.objects.get( |
467 | - pattern=lip.pattern, base_form_label__symbol='9') | |
468 | - plnommo = lip.root + ending9.string | |
471 | + pattern=inflection.pattern, base_form_label__symbol='9') | |
472 | + plnommo = inflection.root + ending9.string | |
469 | 473 | return cr.type.symbol, cr.to_lexeme.entry, plnommo, pos |
470 | 474 | else: |
471 | 475 | return cr.type.symbol, cr.to_lexeme.entry, pos |
... | ... | @@ -534,17 +538,18 @@ def update_lexeme_qualifiers(lexeme, user, form_dict, form_data): |
534 | 538 | lexeme, unicode(qualifier.pk) in form_dict.get('qualifiers', ())) |
535 | 539 | for vocab in editable_vocabs: |
536 | 540 | vocab.set_lexeme(lexeme, vocab.pk in form_dict['vocabularies']) |
537 | - submitted_lips = [] | |
541 | + submitted_inflections = [] | |
538 | 542 | for pair in form_data: |
539 | 543 | name = pair['name'] |
540 | 544 | prefix = name.split('-')[0] |
541 | - if name.startswith('lip') and prefix not in submitted_lips: | |
542 | - submitted_lips.append(prefix) | |
545 | + if name.startswith('inf') and prefix not in submitted_inflections: | |
546 | + submitted_inflections.append(prefix) | |
543 | 547 | pk = int(prefix[3:]) |
544 | - lip = Inflection.objects.get(pk=pk) | |
545 | - lip_qualifiers = get_list(form_data, prefix + '-qualifiers') | |
548 | + inflection = Inflection.objects.get(pk=pk) | |
549 | + inflection_qualifiers = get_list(form_data, prefix + '-qualifiers') | |
546 | 550 | for qualifier in qualifiers: |
547 | - qualifier.set_for(lip, unicode(qualifier.pk) in lip_qualifiers) | |
551 | + qualifier.set_for( | |
552 | + inflection, unicode(qualifier.pk) in inflection_qualifiers) | |
548 | 553 | return {} |
549 | 554 | |
550 | 555 | |
... | ... | @@ -582,10 +587,10 @@ def check_pos(request, pos_id, had_gender): |
582 | 587 | |
583 | 588 | @ajax(method='get') |
584 | 589 | def check_pattern(request, pattern_name, gender_id): |
585 | - lips = Inflection.objects.filter( | |
590 | + inflections = Inflection.objects.filter( | |
586 | 591 | gender_id=gender_id, pattern__name=pattern_name) |
587 | - lips = lips.exclude(lexeme__status__in=Lexeme.HIDDEN_STATUSES) | |
588 | - if lips.exists(): | |
592 | + inflections = inflections.exclude(lexeme__status__in=Lexeme.HIDDEN_STATUSES) | |
593 | + if inflections.exists(): | |
589 | 594 | return {'answer': 'yes'} |
590 | 595 | else: |
591 | 596 | return {'answer': 'no'} |
... | ... | @@ -636,11 +641,12 @@ def clone_lexeme(request, lexeme_id): |
636 | 641 | new_lexeme.fix_homonym_number() # nie zadziałał :/ |
637 | 642 | new_lexeme.save() |
638 | 643 | new_lexeme.qualifiers = lexeme.qualifiers.all() |
639 | - for lip in lexeme.inflection_set.all(): | |
640 | - new_lip = Inflection.objects.create( | |
641 | - lexeme=new_lexeme, index=lip.index, pattern=lip.pattern, | |
642 | - gender=lip.gender, root=lip.root) | |
643 | - new_lip.qualifiers = lip.qualifiers.all() | |
644 | + for inflection in lexeme.inflection_set.all(): | |
645 | + new_inflection = Inflection.objects.create( | |
646 | + lexeme=new_lexeme, index=inflection.index, | |
647 | + pattern=inflection.pattern, gender=inflection.gender, | |
648 | + root=inflection.root) | |
649 | + new_inflection.qualifiers = inflection.qualifiers.all() | |
644 | 650 | for lav in lexeme.lexemeav_set.all(): |
645 | 651 | LexemeAV.objects.create( |
646 | 652 | lexeme=new_lexeme, attribute_value=lav.attribute_value) |
... | ... | @@ -672,32 +678,33 @@ def cr_homonyms(request, entry, cr_type): |
672 | 678 | if pos in aspect.parts_of_speech.all(): |
673 | 679 | middle_labels = (_(u'Aspect'),) |
674 | 680 | |
675 | - def middle_fields(lexeme, lip_data): | |
681 | + def middle_fields(lexeme, inflection_data): | |
676 | 682 | aspect_value = lexeme.attribute_value(aspect) |
677 | 683 | return aspect_value.value if aspect_value else '', |
678 | 684 | elif pos.inflection_type_id == 'subst': |
679 | 685 | middle_labels = (_(u'Gender'),) |
680 | 686 | |
681 | - def middle_fields(lexeme, lip_data): | |
682 | - return lip_data['genders'], | |
687 | + def middle_fields(lexeme, inflection_data): | |
688 | + return inflection_data['genders'], | |
683 | 689 | else: |
684 | 690 | middle_labels = () |
685 | 691 | |
686 | - def middle_fields(lexeme, lip_data): | |
692 | + def middle_fields(lexeme, inflection_data): | |
687 | 693 | return () |
688 | 694 | labels = ( |
689 | 695 | (_(u'Hom. №'),) + middle_labels + |
690 | 696 | (_(u'Pattern'), _(u'Dictionaries'), _(u'Status'))) |
691 | 697 | |
692 | - def make_row(lexeme, lip_data): | |
698 | + def make_row(lexeme, inflection_data): | |
693 | 699 | status = force_unicode(dict(Lexeme.STATUS_CHOICES)[lexeme.status]) |
694 | 700 | vocabs = '/'.join(lexeme.vocab_list()) |
695 | 701 | return ( |
696 | - (lexeme.homonym_number,) + middle_fields(lexeme, lip_data) + | |
697 | - (lip_data['patterns'], vocabs, status)) | |
702 | + (lexeme.homonym_number,) + middle_fields(lexeme, inflection_data) + | |
703 | + (inflection_data['patterns'], vocabs, status)) | |
698 | 704 | |
699 | 705 | lexemes_data = [ |
700 | - make_row(l, l.lip_data()) for l in lexemes.order_by('homonym_number') | |
706 | + make_row(l, l.inflection_data()) | |
707 | + for l in lexemes.order_by('homonym_number') | |
701 | 708 | ] |
702 | 709 | return {'labels': labels, 'lexemes': lexemes_data} |
703 | 710 | |
... | ... |
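
One detail worth noting in update_lexeme and update_lexeme_qualifiers above: the form prefixes move from 'lip<pk>' / 'lip_add_<n>' to 'inf<pk>' / 'inf_add_<n>', and since 'inf' is three characters long just like 'lip', the existing pk = int(prefix[3:]) slice keeps working unchanged. A self-contained sketch of the prefix convention (the helper name is hypothetical; the 'inf' / 'inf_add' prefixes come from the diff):

    def parse_inflection_prefix(prefix):
        """Return ('new', n) for rows added in the browser and ('existing', pk)
        for rows bound to an existing Inflection, mirroring update_lexeme."""
        if prefix.startswith('inf_add_'):
            return 'new', int(prefix[len('inf_add_'):])
        # 'inf' is as long as the old 'lip', so the pk slice stays at [3:].
        return 'existing', int(prefix[3:])

    assert parse_inflection_prefix('inf42') == ('existing', 42)
    assert parse_inflection_prefix('inf_add_3') == ('new', 3)
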
dictionary/ajax_prompter.py
... | ... | @@ -8,37 +8,39 @@ from patterns.pattern_blacklist import blacklist |
8 | 8 | |
9 | 9 | commonness = Classification.objects.get(name=u'pospolitość') |
10 | 10 | |
11 | -LIP_ROWS = 10 | |
11 | +INFLECTION_ROWS = 10 | |
12 | 12 | |
13 | 13 | |
14 | 14 | def make_list(user, entry, pos, gender, cvs, bl_check): |
15 | - lips = Inflection.objects.distinct() | |
16 | - lips = Inflection.filter_visible(lips, user) | |
17 | - lips = lips.filter( | |
15 | + inflections = Inflection.objects.distinct() | |
16 | + inflections = Inflection.filter_visible(inflections, user) | |
17 | + inflections = inflections.filter( | |
18 | 18 | lexeme__part_of_speech__symbol=pos).exclude( |
19 | 19 | lexeme__status__in=Lexeme.HIDDEN_STATUSES) |
20 | 20 | if gender: |
21 | - lips = lips.filter(gender=gender) | |
21 | + inflections = inflections.filter(gender=gender) | |
22 | 22 | if cvs: |
23 | - lips = lips.filter(lexeme__classificationvalue__in=cvs) | |
24 | - lips = lips.select_related('pattern', 'gender', 'lexeme') | |
25 | - lips = lips.order_by('lexeme__entry') | |
23 | + inflections = inflections.filter(lexeme__classificationvalue__in=cvs) | |
24 | + inflections = inflections.select_related('pattern', 'gender', 'lexeme') | |
25 | + inflections = inflections.order_by('lexeme__entry') | |
26 | 26 | feature_sets = set() |
27 | 27 | bad_inflections = set() |
28 | - chosen_lips = [] | |
28 | + chosen_inflections = [] | |
29 | 29 | for suf_len in xrange(len(entry), 0, -1): |
30 | 30 | suf = suffix(entry, suf_len) |
31 | - suf_lips = lips.filter(lexeme__entry__endswith=suf) | |
31 | + suf_inflections = inflections.filter(lexeme__entry__endswith=suf) | |
32 | 32 | if suf_len < len(entry): |
33 | 33 | suf1 = suffix(entry, suf_len + 1) |
34 | - suf_lips = suf_lips.exclude(lexeme__entry__endswith=suf1) | |
34 | + suf_inflections = suf_inflections.exclude( | |
35 | + lexeme__entry__endswith=suf1) | |
35 | 36 | for p0, gender0 in bad_inflections: |
36 | - suf_lips.exclude(pattern=p0, gender=gender0) | |
37 | + suf_inflections.exclude(pattern=p0, gender=gender0) | |
37 | 38 | for p0, gender0 in feature_sets: |
38 | - suf_lips = suf_lips.exclude(pattern=p0, gender=gender0) | |
39 | - for lip in suf_lips: | |
40 | - p = lip.pattern | |
41 | - l_gender = lip.gender | |
39 | + suf_inflections = suf_inflections.exclude( | |
40 | + pattern=p0, gender=gender0) | |
41 | + for inflection in suf_inflections: | |
42 | + p = inflection.pattern | |
43 | + l_gender = inflection.gender | |
42 | 44 | if p.name in blacklist and bl_check: |
43 | 45 | continue |
44 | 46 | if (p, l_gender) in bad_inflections: |
... | ... | @@ -46,21 +48,22 @@ def make_list(user, entry, pos, gender, cvs, bl_check): |
46 | 48 | if (p, l_gender) in feature_sets: |
47 | 49 | continue |
48 | 50 | if p.get_root(entry, l_gender) is not None: |
49 | - l_cvs = lip.lexeme.classification_values(commonness) | |
50 | - l_root = lip.lexeme.get_root(p, l_gender) | |
51 | - l_end = lip.lexeme.entry[len(l_root):] | |
51 | + l_cvs = inflection.lexeme.classification_values(commonness) | |
52 | + l_root = inflection.lexeme.get_root(p, l_gender) | |
53 | + l_end = inflection.lexeme.entry[len(l_root):] | |
52 | 54 | l_entry = u'%s·%s' % (l_root, l_end) |
53 | 55 | if len(l_end) < len(suf): |
54 | 56 | suf = suffix(l_entry, suf_len + 1) |
55 | - chosen_lips.append((lip, l_cvs, cut_end(l_entry, suf), suf)) | |
57 | + chosen_inflections.append( | |
58 | + (inflection, l_cvs, cut_end(l_entry, suf), suf)) | |
56 | 59 | feature_sets.add((p, l_gender)) |
57 | - if len(chosen_lips) == LIP_ROWS: | |
60 | + if len(chosen_inflections) == INFLECTION_ROWS: | |
58 | 61 | break |
59 | 62 | else: |
60 | 63 | bad_inflections.add((p, l_gender)) |
61 | - if len(chosen_lips) == LIP_ROWS: | |
64 | + if len(chosen_inflections) == INFLECTION_ROWS: | |
62 | 65 | break |
63 | - return chosen_lips | |
66 | + return chosen_inflections | |
64 | 67 | |
65 | 68 | |
66 | 69 | @ajax(method='get', template='prompter_list.html', |
... | ... | @@ -72,6 +75,6 @@ def prompter_list(request, entry, pos_id, commonness_ids, |
72 | 75 | cvs = list(ClassificationValue.objects.filter(pk__in=commonness_ids)) |
73 | 76 | else: |
74 | 77 | cvs = None |
75 | - lips = make_list(request.user, entry, pos_id, gender, cvs, bl_check) | |
78 | + inflections = make_list(request.user, entry, pos_id, gender, cvs, bl_check) | |
76 | 79 | # zakładamy, że symbol == pk |
77 | - return {'lips': lips} | |
80 | + return {'inflections': inflections} | |
... | ... |
dictionary/auto_derivatives.py
... | ... | @@ -28,9 +28,9 @@ NO_ZLOZ = LexemeAttributeValue.objects.get( |
28 | 28 | value=u'nieobecna', attribute__name=u'forma złoż.') |
29 | 29 | |
30 | 30 | |
31 | -def ppas_data(lips, pos='ppas'): | |
32 | - for lip in lips: | |
33 | - pattern = lip.pattern | |
31 | +def ppas_data(inflections, pos='ppas'): | |
32 | + for inflection in inflections: | |
33 | + pattern = inflection.pattern | |
34 | 34 | endings10 = Ending.objects.filter( |
35 | 35 | pattern=pattern, base_form_label__symbol='10') |
36 | 36 | endings12 = Ending.objects.filter( |
... | ... | @@ -40,37 +40,37 @@ def ppas_data(lips, pos='ppas'): |
40 | 40 | yield { |
41 | 41 | 'pos': pos, |
42 | 42 | 'cr_type': 'verppas', |
43 | - 'entry': lip.root + ending.string + 'y', | |
44 | - 'pl': lip.root + ending12.string, | |
45 | - 'index': lip.index, | |
43 | + 'entry': inflection.root + ending.string + 'y', | |
44 | + 'pl': inflection.root + ending12.string, | |
45 | + 'index': inflection.index, | |
46 | 46 | } |
47 | 47 | |
48 | 48 | |
49 | -def pact_data(lips): | |
50 | - for lip in lips: | |
51 | - pattern = lip.pattern | |
49 | +def pact_data(inflections): | |
50 | + for inflection in inflections: | |
51 | + pattern = inflection.pattern | |
52 | 52 | endings3 = Ending.objects.filter( |
53 | 53 | pattern=pattern, base_form_label__symbol='3') |
54 | 54 | for ending in endings3: |
55 | 55 | yield { |
56 | 56 | 'pos': 'pact', |
57 | 57 | 'cr_type': 'verpact', |
58 | - 'entry': lip.root + ending.string + 'cy', | |
59 | - 'index': lip.index, | |
58 | + 'entry': inflection.root + ending.string + 'cy', | |
59 | + 'index': inflection.index, | |
60 | 60 | } |
61 | 61 | |
62 | 62 | |
63 | -def ger_data(lips): | |
64 | - for lip in lips: | |
65 | - pattern = lip.pattern | |
63 | +def ger_data(inflections): | |
64 | + for inflection in inflections: | |
65 | + pattern = inflection.pattern | |
66 | 66 | endings11 = Ending.objects.filter( |
67 | 67 | pattern=pattern, base_form_label__symbol='11') |
68 | 68 | for ending in endings11: |
69 | 69 | yield { |
70 | 70 | 'pos': 'ger', |
71 | 71 | 'cr_type': 'verger', |
72 | - 'entry': lip.root + ending.string + 'ie', | |
73 | - 'index': lip.index, | |
72 | + 'entry': inflection.root + ending.string + 'ie', | |
73 | + 'index': inflection.index, | |
74 | 74 | } |
75 | 75 | |
76 | 76 | |
... | ... | @@ -101,8 +101,8 @@ def make_negation(s): |
101 | 101 | |
102 | 102 | |
103 | 103 | def lexeme_derivatives(lexeme): |
104 | - lips = list(lexeme.inflection_set.all()) | |
105 | - if not lips: | |
104 | + inflections = list(lexeme.inflection_set.all()) | |
105 | + if not inflections: | |
106 | 106 | return |
107 | 107 | if lexeme.part_of_speech.symbol == 'v': |
108 | 108 | proper = lexeme.lexemeattributevalue_set.filter( |
... | ... | @@ -116,12 +116,12 @@ def lexeme_derivatives(lexeme): |
116 | 116 | attribute__name=u'aspekt').exclude(value='dk') |
117 | 117 | if trans or q_trans: |
118 | 118 | pos = 'ppas' if trans else 'appas' |
119 | - for data in ppas_data(lips, pos): | |
119 | + for data in ppas_data(inflections, pos): | |
120 | 120 | yield data |
121 | 121 | if imperf: |
122 | - for data in pact_data(lips): | |
122 | + for data in pact_data(inflections): | |
123 | 123 | yield data |
124 | - for data in ger_data(lips): | |
124 | + for data in ger_data(inflections): | |
125 | 125 | yield data |
126 | 126 | elif lexeme.part_of_speech.symbol == 'adj': |
127 | 127 | # adjcom, adv, advcom, osc, nieadj |
... | ... | @@ -162,50 +162,51 @@ def create_derivative(lexeme, part_of_speech, cr_type, entry, index, pl=None): |
162 | 162 | der.fix_homonym_number() |
163 | 163 | lexeme.owner_vocabulary.add_lexeme(der) |
164 | 164 | if not negation: |
165 | - lip = Inflection(lexeme=der, index=1) | |
165 | + inflection = Inflection(lexeme=der, index=1) | |
166 | 166 | if part_of_speech in ('ppas', 'appas'): |
167 | 167 | # -ty/-ci |
168 | 168 | if entry.endswith('ty'): |
169 | - lip.pattern = Pppas_ty | |
169 | + inflection.pattern = Pppas_ty | |
170 | 170 | # -iony/-eni |
171 | 171 | elif entry.endswith('iony') and not pl.endswith('ieni'): |
172 | - lip.pattern = Pppas_iony_ieni | |
172 | + inflection.pattern = Pppas_iony_ieni | |
173 | 173 | # -ony/-eni |
174 | 174 | elif entry.endswith('eni'): |
175 | - lip.pattern = Pppas_ony_eni | |
175 | + inflection.pattern = Pppas_ony_eni | |
176 | 176 | # -ny/-ni |
177 | 177 | else: |
178 | - lip.pattern = Pppas_ny_ni | |
178 | + inflection.pattern = Pppas_ny_ni | |
179 | 179 | elif part_of_speech == 'pact': |
180 | - lip.pattern = Ppact | |
180 | + inflection.pattern = Ppact | |
181 | 181 | elif part_of_speech == 'ger': |
182 | - lip.gender = n2 | |
182 | + inflection.gender = n2 | |
183 | 183 | if entry.endswith('cie'): |
184 | - lip.pattern = Pger_cie | |
184 | + inflection.pattern = Pger_cie | |
185 | 185 | else: # -nie |
186 | - lip.pattern = Pger_nie | |
186 | + inflection.pattern = Pger_nie | |
187 | 187 | elif part_of_speech == 'osc': |
188 | - lip.pattern = Posc | |
189 | - lip.gender = f | |
188 | + inflection.pattern = Posc | |
189 | + inflection.gender = f | |
190 | 190 | elif part_of_speech == 'adjcom': |
191 | - lip.pattern = Pcom | |
191 | + inflection.pattern = Pcom | |
192 | 192 | elif part_of_speech in ('adv', 'advcom'): |
193 | - lip.pattern = Pndm | |
194 | - lip.root = lip.get_root() | |
195 | - lip.save() | |
196 | - orig_lip = Inflection.objects.get(lexeme=lexeme, index=index) | |
193 | + inflection.pattern = Pndm | |
194 | + inflection.root = inflection.get_root() | |
195 | + inflection.save() | |
196 | + orig_inflection = Inflection.objects.get(lexeme=lexeme, index=index) | |
197 | 197 | # może kopiować kwalifikatory odmieniasia do leksemu? |
198 | - for q in orig_lip.qualifiers.all(): | |
199 | - lip.qualifiers.add(q) # der zamiast lip? | |
198 | + for q in orig_inflection.qualifiers.all(): | |
199 | + inflection.qualifiers.add(q) # der zamiast inflection? | |
200 | 200 | else: |
201 | - for orig_lip in lexeme.inflection_set.all(): | |
202 | - lip = Inflection( | |
203 | - lexeme=der, index=orig_lip.index, pattern=orig_lip.pattern, | |
204 | - gender=orig_lip.gender) | |
205 | - lip.root = lip.get_root() | |
206 | - lip.save() | |
207 | - for q in orig_lip.qualifiers.all(): | |
208 | - lip.qualifiers.add(q) | |
201 | + for orig_inflection in lexeme.inflection_set.all(): | |
202 | + inflection = Inflection( | |
203 | + lexeme=der, index=orig_inflection.index, | |
204 | + pattern=orig_inflection.pattern, | |
205 | + gender=orig_inflection.gender) | |
206 | + inflection.root = inflection.get_root() | |
207 | + inflection.save() | |
208 | + for q in orig_inflection.qualifiers.all(): | |
209 | + inflection.qualifiers.add(q) | |
209 | 210 | for attr, attr_val in lexeme.attributes_values(): |
210 | 211 | if attr not in (ATTR_POPRZ, ATTR_ZLOZ) and attr_val \ |
211 | 212 | and attr.parts_of_speech.filter(symbol=part_of_speech): |
... | ... |
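
ppas_data, pact_data and ger_data keep their logic and only take the renamed inflections sequence; each yields one derivative dict per (inflection, ending) pair. A self-contained sketch of that consumption pattern, with plain tuples standing in for the ORM objects (all values illustrative, not taken from the database):

    def ger_data_sketch(inflections):
        # Stand-in for ger_data(): `inflections` is a list of
        # (root, index, ending_strings) tuples instead of Inflection objects.
        for root, index, endings in inflections:
            for ending in endings:
                yield {'pos': 'ger', 'cr_type': 'verger',
                       'entry': root + ending + 'ie', 'index': index}

    rows = list(ger_data_sketch([('pisa', 1, ['n'])]))
    assert rows == [{'pos': 'ger', 'cr_type': 'verger', 'entry': 'pisanie', 'index': 1}]
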
dictionary/forms.py
... | ... | @@ -200,18 +200,18 @@ class LexemeMultipleAttributeForm(LexemeAttributeForm): |
200 | 200 | self.fields['value'].help_text = '' |
201 | 201 | |
202 | 202 | |
203 | -class LIPEditForm(ModelForm): | |
203 | +class InflectionEditForm(ModelForm): | |
204 | 204 | pattern_name = CharField(widget=TextInput( |
205 | 205 | attrs={'class': 'pattern', 'size': '10'}), label=_(u'Pattern')) |
206 | 206 | qualifiers = QualifiersField( |
207 | 207 | required=False, label=_(u'Qual.'), |
208 | - widget=SelectMultiple(attrs={'class': 'lip-qualifiers'})) | |
208 | + widget=SelectMultiple(attrs={'class': 'inflection-qualifiers'})) | |
209 | 209 | inflection_type = CharField( |
210 | - widget=HiddenInput(attrs={'class': 'lip-it'}), label=u'') | |
210 | + widget=HiddenInput(attrs={'class': 'inflection-it'}), label=u'') | |
211 | 211 | |
212 | 212 | def __init__(self, part_of_speech, user, editable=True, index=None, |
213 | 213 | **kwargs): |
214 | - super(LIPEditForm, self).__init__(**kwargs) | |
214 | + super(InflectionEditForm, self).__init__(**kwargs) | |
215 | 215 | if part_of_speech.inflection_type_id != 'subst': |
216 | 216 | self.fields['gender'].widget = HiddenInput() |
217 | 217 | self.fields['gender'].label = '' |
... | ... | @@ -242,13 +242,13 @@ class LIPEditForm(ModelForm): |
242 | 242 | return cleaned_data |
243 | 243 | |
244 | 244 | def save(self, *args, **kwargs): |
245 | - lip = self.instance | |
246 | - lip.pattern = self.cleaned_data['pattern'] | |
245 | + inflection = self.instance | |
246 | + inflection.pattern = self.cleaned_data['pattern'] | |
247 | 247 | if self.index: |
248 | - lip.index = self.index | |
249 | - super(LIPEditForm, self).save(*args, **kwargs) | |
250 | - lip.save() | |
251 | - return lip | |
248 | + inflection.index = self.index | |
249 | + super(InflectionEditForm, self).save(*args, **kwargs) | |
250 | + inflection.save() | |
251 | + return inflection | |
252 | 252 | |
253 | 253 | class Meta: |
254 | 254 | model = Inflection |
... | ... | @@ -256,7 +256,7 @@ class LIPEditForm(ModelForm): |
256 | 256 | widgets = { |
257 | 257 | 'gender': Select(attrs={'class': 'gender'}), |
258 | 258 | 'pronunciation': TextInput( |
259 | - attrs={'class': 'lip-pronunciation-field'}), | |
259 | + attrs={'class': 'inflection-pronunciation-field'}), | |
260 | 260 | } |
261 | 261 | |
262 | 262 | |
... | ... |
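
For callers, only the class name, the widget CSS classes and the prefix convention change; InflectionEditForm keeps LIPEditForm's constructor signature. A hedged sketch of how the views build the form (dependencies are passed in explicitly so the snippet stands alone; the 'inf<pk>' prefix matches lexeme_edit_form above):

    # Hypothetical helper mirroring the form construction in ajax_lexeme_view.py;
    # InflectionEditForm and the model objects come from the project code.
    def build_inflection_form(InflectionEditForm, pos, user, inflection, editable=True):
        return InflectionEditForm(
            part_of_speech=pos,              # a PartOfSpeech instance
            user=user,                       # request.user in the views
            instance=inflection,             # existing Inflection; omitted for new rows
            prefix='inf%d' % inflection.pk,  # was 'lip%d' before this commit
            editable=editable,
        )
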
dictionary/management/commands/import_witek.py
... | ... | @@ -60,32 +60,32 @@ def import_subst(elements, comment): |
60 | 60 | raise |
61 | 61 | assert pos == 'subst' |
62 | 62 | genders = [Gender.objects.get(symbol=g) for g in gender.split('/')] |
63 | - lip_data = [p.rsplit(' ', 1) for p in pattern_data.split('/')] | |
64 | - if len(genders) > 1 and len(lip_data) > 1: | |
63 | + inflection_data = [p.rsplit(' ', 1) for p in pattern_data.split('/')] | |
64 | + if len(genders) > 1 and len(inflection_data) > 1: | |
65 | 65 | print >>sys.stderr, 'mnogie wzory i rodzaje', elements |
66 | 66 | return |
67 | 67 | if len(genders) == 1: |
68 | - lip_data = [(ld, genders[0]) for ld in lip_data] | |
68 | + inflection_data = [(ld, genders[0]) for ld in inflection_data] | |
69 | 69 | else: |
70 | - lip_data = [(lip_data[0], g) for g in genders] | |
70 | + inflection_data = [(inflection_data[0], g) for g in genders] | |
71 | 71 | l = new_lexeme(entry, 'subst', comment) |
72 | 72 | comm_value = ClassificationValue.objects.get( |
73 | 73 | classification__name=u'pospolitoลฤ', label=commonness) |
74 | 74 | LexemeCV.objects.create(lexeme=l, classification_value=comm_value) |
75 | - for i, (ld, gender) in enumerate(lip_data, 1): | |
76 | - lip = Inflection(lexeme=l, index=i, gender=gender) | |
75 | + for i, (ld, gender) in enumerate(inflection_data, 1): | |
76 | + inflection = Inflection(lexeme=l, index=i, gender=gender) | |
77 | 77 | if len(ld) == 1: |
78 | 78 | pattern = ld[0] |
79 | 79 | qualifier = None |
80 | 80 | else: |
81 | 81 | qualifier, pattern = ld |
82 | - lip.pattern = Pattern.objects.get(name=pattern) | |
83 | - lip.root = lip.get_root() | |
84 | - if lip.get_root() is None: | |
82 | + inflection.pattern = Pattern.objects.get(name=pattern) | |
83 | + inflection.root = inflection.get_root() | |
84 | + if inflection.get_root() is None: | |
85 | 85 | raise ValueError(u"%s: can't find root" % repr(entry)) |
86 | - lip.save() | |
86 | + inflection.save() | |
87 | 87 | if qualifier: |
88 | - lip.qualifiers.add(Qualifier.objects.get(label=qualifier)) | |
88 | + inflection.qualifiers.add(Qualifier.objects.get(label=qualifier)) | |
89 | 89 | |
90 | 90 | |
91 | 91 | NDM = Pattern.objects.get(name='ndm') # hardcoded pattern |
... | ... | @@ -100,9 +100,9 @@ def import_adv(elements, comment): |
100 | 100 | raise |
101 | 101 | assert pos == 'adv' and pattern_name == 'ndm' |
102 | 102 | l = new_lexeme(entry, 'adv', comment) |
103 | - lip = Inflection(lexeme=l, index=1, pattern=NDM) | |
104 | - lip.root = lip.get_root() | |
105 | - lip.save() | |
103 | + inflection = Inflection(lexeme=l, index=1, pattern=NDM) | |
104 | + inflection.root = inflection.get_root() | |
105 | + inflection.save() | |
106 | 106 | adjs = Lexeme.objects.filter(entry=adj_entry) |
107 | 107 | if len(adjs) == 1: |
108 | 108 | adj = adjs.get() |
... | ... | @@ -143,6 +143,6 @@ def import_adj(elements, comment): |
143 | 143 | else: |
144 | 144 | NO_POPRZ.add_lexeme(l) |
145 | 145 | pattern = Pattern.objects.get(name=pattern_name) |
146 | - lip = Inflection(lexeme=l, index=1, pattern=pattern) | |
147 | - lip.root = lip.get_root() | |
148 | - lip.save() | |
149 | 146 | \ No newline at end of file |
147 | + inflection = Inflection(lexeme=l, index=1, pattern=pattern) | |
148 | + inflection.root = inflection.get_root() | |
149 | + inflection.save() | |
150 | 150 | \ No newline at end of file |
... | ... |
dictionary/models.py
... | ... | @@ -342,36 +342,36 @@ class Lexeme(Model): |
342 | 342 | all_objects = Manager() |
343 | 343 | |
344 | 344 | def inflection_tables(self, variant, qualifiers=None): |
345 | - lips = self.inflection_set.order_by( | |
345 | + inflections = self.inflection_set.order_by( | |
346 | 346 | 'index').select_related('gender', 'pattern') |
347 | 347 | genders = OrderedDict() |
348 | - for lip in lips: | |
349 | - g = lip.gender | |
348 | + for inflection in inflections: | |
349 | + g = inflection.gender | |
350 | 350 | if g not in genders: |
351 | 351 | genders[g] = {'patterns': [], 'pronunciations': []} |
352 | - genders[g]['patterns'].append(lip.pattern) | |
353 | - if lip.pronunciation: | |
354 | - genders[g]['pronunciations'].append(lip.pronunciation) | |
352 | + genders[g]['patterns'].append(inflection.pattern) | |
353 | + if inflection.pronunciation: | |
354 | + genders[g]['pronunciations'].append(inflection.pronunciation) | |
355 | 355 | return [ |
356 | 356 | (gender, val['patterns'], val['pronunciations']) + |
357 | 357 | self.inflection_table(variant, gender, qualifiers=qualifiers) |
358 | 358 | for gender, val in genders.iteritems()] |
359 | 359 | |
360 | 360 | def inflection_table(self, variant, gender, qualifiers=None): |
361 | - lips = self.inflection_set.prefetch_related( | |
361 | + inflections = self.inflection_set.prefetch_related( | |
362 | 362 | 'qualifiers').select_related('pattern__type', 'gender') |
363 | 363 | if gender: |
364 | - lips = lips.filter(gender=gender) | |
364 | + inflections = inflections.filter(gender=gender) | |
365 | 365 | span = unicode(variant) == u'0' |
366 | - gender_qualifiers = lips[0].qualifiers.all() & qualifiers | |
366 | + gender_qualifiers = inflections[0].qualifiers.all() & qualifiers | |
367 | 367 | attr = LexemeAttribute.objects.get(name=u'nacechowana forma') |
368 | 368 | depr = self.attribute_value(attr) |
369 | 369 | tables = [ |
370 | - lip.inflection_table( | |
370 | + inflection.table( | |
371 | 371 | variant, qualifiers=qualifiers, span=span, |
372 | 372 | depr=depr, cell_qualifier=True, |
373 | 373 | edit_view=(i == 0)) # brzydko, ale cóż |
374 | - for i, lip in enumerate(lips)] | |
374 | + for i, inflection in enumerate(inflections)] | |
375 | 375 | table1 = tables[0] |
376 | 376 | for table2 in tables[1:]: |
377 | 377 | while len(table2) > len(table1): |
... | ... | @@ -395,10 +395,11 @@ class Lexeme(Model): |
395 | 395 | |
396 | 396 | def all_forms(self, label_filter=None, variant='1'): |
397 | 397 | forms = set() |
398 | - for lip in self.inflection_set.all(): | |
398 | + for inflection in self.inflection_set.all(): | |
399 | 399 | forms |= set( |
400 | 400 | form for (indexes, form, qualifiers) |
401 | - in lip.all_forms(label_filter=label_filter, variant=variant)) | |
401 | + in inflection.all_forms( | |
402 | + label_filter=label_filter, variant=variant)) | |
402 | 403 | return forms |
403 | 404 | |
404 | 405 | def refresh_forms(self): |
... | ... | @@ -408,10 +409,10 @@ class Lexeme(Model): |
408 | 409 | |
409 | 410 | def all_qualifiers(self, variant='1'): |
410 | 411 | qualifiers = set(self.qualifiers.all()) |
411 | - for lip in self.inflection_set.all(): | |
412 | + for inflection in self.inflection_set.all(): | |
412 | 413 | qualifiers |= set( |
413 | 414 | qualifier for (indexes, form, qualifiers) |
414 | - in lip.all_forms(variant=variant) | |
415 | + in inflection.all_forms(variant=variant) | |
415 | 416 | for qualifier in qualifiers) |
416 | 417 | return qualifiers |
417 | 418 | |
... | ... | @@ -444,24 +445,24 @@ class Lexeme(Model): |
444 | 445 | classification=classification) |
445 | 446 | |
446 | 447 | def pattern_list(self): |
447 | - lips = self.inflection_set.all() | |
448 | + inflections = self.inflection_set.all() | |
448 | 449 | patterns = [] |
449 | - for lip in lips: | |
450 | - if patterns == [] or lip.pattern != patterns[-1]: | |
451 | - patterns.append(lip.pattern) | |
450 | + for inflection in inflections: | |
451 | + if patterns == [] or inflection.pattern != patterns[-1]: | |
452 | + patterns.append(inflection.pattern) | |
452 | 453 | return patterns |
453 | 454 | |
454 | - def lip_data(self, lips=None): | |
455 | - if lips is None: | |
456 | - lips = self.inflection_set.all() | |
455 | + def inflection_data(self, inflections=None): | |
456 | + if inflections is None: | |
457 | + inflections = self.inflection_set.all() | |
457 | 458 | patterns = [] |
458 | 459 | genders = [] |
459 | - for lip in lips: | |
460 | - if patterns == [] or lip.pattern.name != patterns[-1]: | |
461 | - patterns.append(lip.pattern.name) | |
462 | - if (lip.gender and | |
463 | - (genders == [] or lip.gender.symbol != genders[-1])): | |
464 | - genders.append(lip.gender.symbol) | |
460 | + for inflection in inflections: | |
461 | + if patterns == [] or inflection.pattern.name != patterns[-1]: | |
462 | + patterns.append(inflection.pattern.name) | |
463 | + if (inflection.gender and | |
464 | + (genders == [] or inflection.gender.symbol != genders[-1])): | |
465 | + genders.append(inflection.gender.symbol) | |
465 | 466 | patterns = '/'.join(patterns) |
466 | 467 | genders = '/'.join(genders) |
467 | 468 | return {'patterns': patterns, 'genders': genders} |
... | ... | @@ -538,8 +539,8 @@ class Lexeme(Model): |
538 | 539 | def attributes(self, part_of_speech=None, genders=None): |
539 | 540 | pos = part_of_speech or self.part_of_speech |
540 | 541 | if genders is None and pos.inflection_type_id == 'subst': |
541 | - lips = self.inflection_set.all() | |
542 | - genders = [lip.gender for lip in lips] | |
542 | + inflections = self.inflection_set.all() | |
543 | + genders = [inflection.gender for inflection in inflections] | |
543 | 544 | elif genders is None: |
544 | 545 | genders = [] |
545 | 546 | attrs = LexemeAttribute.objects.all() |
... | ... | @@ -739,22 +740,22 @@ class Inflection(Model): |
739 | 740 | def base_endings(self, label_filter=None): |
740 | 741 | return self.pattern.base_endings(label_filter) |
741 | 742 | |
742 | - def inflection_table(self, variant, edit_view=False, attr_vals=None, | |
743 | - pos=None, depr=None, **kwargs): | |
743 | + def table(self, variant, edit_view=False, attr_vals=None, | |
744 | + pos=None, depr=None, **kwargs): | |
744 | 745 | part_of_speech = pos or self.lexeme.part_of_speech |
745 | 746 | tt = self.table_template( |
746 | 747 | variant, attr_vals=attr_vals, pos=part_of_speech) |
747 | 748 | if tt is None: |
748 | 749 | return [] |
749 | - lip_qualifiers = self.qualifiers.all() if not edit_view else [] | |
750 | + inflection_qualifiers = self.qualifiers.all() if not edit_view else [] | |
750 | 751 | if attr_vals is None: |
751 | 752 | attr_vals = self.lexeme.lexemeattributevalue_set.all() |
752 | 753 | if depr and (not self.gender or self.gender.symbol != 'm1'): |
753 | 754 | depr = None |
754 | 755 | return tt.render_with_pattern( |
755 | 756 | self.pattern, self.gender, attr_vals, |
756 | - root=self.root, edit_view=edit_view, lip_index=self.index, | |
757 | - lip_qualifiers=lip_qualifiers, depr=depr, **kwargs) | |
757 | + root=self.root, edit_view=edit_view, inflection_index=self.index, | |
758 | + inflection_qualifiers=inflection_qualifiers, depr=depr, **kwargs) | |
758 | 759 | |
759 | 760 | def all_forms(self, label_filter=None, variant='1', **kwargs): |
760 | 761 | forms = [] |
... | ... | @@ -762,8 +763,8 @@ class Inflection(Model): |
762 | 763 | for cell in self.cells(variant=variant): |
763 | 764 | forms += cell.forms( |
764 | 765 | base_endings=base_endings, root=self.root, |
765 | - lip_qualifiers=self.qualifiers.all(), lip_index=self.index, | |
766 | - **kwargs) | |
766 | + inflection_qualifiers=self.qualifiers.all(), | |
767 | + inflection_index=self.index, **kwargs) | |
767 | 768 | return forms |
768 | 769 | |
769 | 770 | def editable_vocabularies(self, user): |
... | ... | @@ -781,11 +782,12 @@ class Inflection(Model): |
781 | 782 | ) |
782 | 783 | |
783 | 784 | @staticmethod |
784 | - def filter_visible(lips, user): | |
785 | + def filter_visible(inflections, user): | |
785 | 786 | vocabs = Vocabulary.visible_vocabularies(user) |
786 | 787 | if not user.is_authenticated(): |
787 | - lips = lips.exclude(lexeme__status__in=Lexeme.HIDDEN_STATUSES) | |
788 | - return lips.filter(lexeme__vocabularies__in=vocabs).distinct() | |
788 | + inflections = inflections.exclude( | |
789 | + lexeme__status__in=Lexeme.HIDDEN_STATUSES) | |
790 | + return inflections.filter(lexeme__vocabularies__in=vocabs).distinct() | |
789 | 791 | |
790 | 792 | class Meta: |
791 | 793 | db_table = 'odmieniasie' |
... | ... |
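
In dictionary/models.py, Lexeme.lip_data() becomes inflection_data() and Inflection.inflection_table() becomes table() (Lexeme keeps its own inflection_table(), so the shorter name removes the overlap), while the returned data keeps its shape. A self-contained sketch restating what inflection_data() computes, with (pattern_name, gender_symbol) pairs standing in for Inflection rows:

    def join_inflection_data(rows):
        # Mirrors Lexeme.inflection_data(): collapse consecutive duplicates and
        # join with '/'. `rows` is a list of (pattern_name, gender_symbol) pairs.
        patterns, genders = [], []
        for pattern_name, gender_symbol in rows:
            if not patterns or pattern_name != patterns[-1]:
                patterns.append(pattern_name)
            if gender_symbol and (not genders or gender_symbol != genders[-1]):
                genders.append(gender_symbol)
        return {'patterns': '/'.join(patterns), 'genders': '/'.join(genders)}

    assert join_inflection_data([('P1', 'm1'), ('P1', 'm1'), ('P2', 'f')]) == \
        {'patterns': 'P1/P2', 'genders': 'm1/f'}
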
dictionary/static/css/inflection_table.css
dictionary/static/css/lexeme_view.css
... | ... | @@ -4,7 +4,7 @@ |
4 | 4 | padding: 0; |
5 | 5 | } |
6 | 6 | |
7 | -.lip-pronunciation-field { | |
7 | +.inflection-pronunciation-field { | |
8 | 8 | width: 90px; |
9 | 9 | } |
10 | 10 | |
... | ... | @@ -130,11 +130,6 @@ table#prompter-list .cv ul li { |
130 | 130 | height: 30px; |
131 | 131 | } |
132 | 132 | |
133 | -.ellipsis-icon { | |
134 | - display: block; | |
135 | - width: 16px; | |
136 | -} | |
137 | - | |
138 | 133 | tr.search-by-form-row:nth-child(odd) { |
139 | 134 | background: #bbb; |
140 | 135 | } |
... | ... |
dictionary/static/js/lexeme-edit.js
... | ... | @@ -73,7 +73,7 @@ $.extend(edit, { |
73 | 73 | |
74 | 74 | init_form_widgets: function() { |
75 | 75 | "use strict"; |
76 | - $(document).on('click', '.lip-row span.remove', function () { | |
76 | + $(document).on('click', '.inflection-row span.remove', function () { | |
77 | 77 | var li = $(this).closest('li'); |
78 | 78 | var name = $('input', li)[0].name; |
79 | 79 | var gender = li.find('.gender option:selected').text(); |
... | ... | @@ -91,7 +91,7 @@ $.extend(edit, { |
91 | 91 | } |
92 | 92 | } |
93 | 93 | } |
94 | - if (name.substring(0, 7) !== 'lip_add') | |
94 | + if (name.substring(0, 7) !== 'inf_add') | |
95 | 95 | deleted.push(name.split('-')[0]); |
96 | 96 | li.remove(); |
97 | 97 | edit.show_changed(); |
... | ... | @@ -105,7 +105,8 @@ $.extend(edit, { |
105 | 105 | common.init_selection(new_row.find('.gender')); |
106 | 106 | var pattern_list = $('#pattern-list'); |
107 | 107 | pattern_list.append(new_row); |
108 | - edit.qualifier_select(pattern_list.find('.lip-qualifiers').last()); | |
108 | + edit.qualifier_select( | |
109 | + pattern_list.find('.inflection-qualifiers').last()); | |
109 | 110 | edit.show_changed(); |
110 | 111 | }); |
111 | 112 | |
... | ... | @@ -132,9 +133,9 @@ $.extend(edit, { |
132 | 133 | }); |
133 | 134 | |
134 | 135 | var update_preview = function () { |
135 | - var lip_row = $(this).closest('.lip-row'); | |
136 | - if (lip_row.hasClass('lip-row-active')) { | |
137 | - set_active_lip_row.call(lip_row); | |
136 | + var inflection_row = $(this).closest('.inflection-row'); | |
137 | + if (inflection_row.hasClass('inflection-row-active')) { | |
138 | + set_active_inflection_row.call(inflection_row); | |
138 | 139 | } |
139 | 140 | }; |
140 | 141 | $(document).on( |
... | ... | @@ -144,7 +145,7 @@ $.extend(edit, { |
144 | 145 | $(document).on( |
145 | 146 | 'change', '#' + edit.form_id + ' select', update_preview); |
146 | 147 | |
147 | - $(document).on('click', '.lip-row', set_active_lip_row); | |
148 | + $(document).on('click', '.inflection-row', set_active_inflection_row); | |
148 | 149 | |
149 | 150 | $(document).on('click', '.lexeme-edit-delete', delete_lexeme); |
150 | 151 | |
... | ... | @@ -319,7 +320,7 @@ $.extend(edit, { |
319 | 320 | } |
320 | 321 | }); |
321 | 322 | //$('#pattern-list').disableSelection(); |
322 | - $('.lip-row', '#pattern-list').first().click(); | |
323 | + $('.inflection-row', '#pattern-list').first().click(); | |
323 | 324 | $('#id_vocabularies').multiSelect({ |
324 | 325 | noneSelectedText: gettext("Choose dictionaries"), |
325 | 326 | selectedText: gettext("# dictionaries"), |
... | ... | @@ -336,7 +337,7 @@ $.extend(edit, { |
336 | 337 | toggle_specialist(show); |
337 | 338 | } |
338 | 339 | }); |
339 | - edit.qualifier_select($('select.lip-qualifiers')); | |
340 | + edit.qualifier_select($('select.inflection-qualifiers')); | |
340 | 341 | init_classification_forms(); |
341 | 342 | init_extra_attr_widgets(); |
342 | 343 | $('button.prompter').button(); |
... | ... | @@ -682,7 +683,7 @@ function before_save() { |
682 | 683 | "use strict"; |
683 | 684 | // sprawdzić wzór z rodzajem |
684 | 685 | var ok = true; |
685 | - $('.lip-row').each(function () { | |
686 | + $('.inflection-row').each(function () { | |
686 | 687 | var gender = $(this).find('.gender').val(); |
687 | 688 | var pattern = $(this).find('.pattern').val(); |
688 | 689 | if (gender) { |
... | ... | @@ -705,7 +706,7 @@ function get_new_row_html(id) { |
705 | 706 | "use strict"; |
706 | 707 | var new_row_html = $.ajaxJSON({ |
707 | 708 | method: 'get', |
708 | - url: $dj.ajax_new_lip_row, | |
709 | + url: $dj.ajax_new_inflection_row, | |
709 | 710 | data: {lexeme_id: id, pos_id: get_pos(), num: new_row_counter}, |
710 | 711 | async: false // w zasadzie się tak nie powinno robić |
711 | 712 | }).html; |
... | ... | @@ -713,10 +714,11 @@ function get_new_row_html(id) { |
713 | 714 | return new_row_html; |
714 | 715 | } |
715 | 716 | |
716 | -var set_active_lip_row = function () { | |
717 | +var set_active_inflection_row = function () { | |
717 | 718 | "use strict"; |
718 | - $('.lip-row-active').removeClass('lip-row-active ui-state-active'); | |
719 | - $(this).addClass('lip-row-active ui-state-active'); | |
719 | + $('.inflection-row-active').removeClass( | |
720 | + 'inflection-row-active ui-state-active'); | |
721 | + $(this).addClass('inflection-row-active ui-state-active'); | |
720 | 722 | // tabelka |
721 | 723 | var data = { |
722 | 724 | lexeme_id: slickgrid.get_id(), |
... | ... | @@ -725,7 +727,7 @@ var set_active_lip_row = function () { |
725 | 727 | pattern: $('input.pattern', this).value(), |
726 | 728 | gender: $('.gender', this).value(), |
727 | 729 | attr_data: get_attr_data(), |
728 | - lip_id: $('input.pattern', this)[0].name.split('-')[0] | |
730 | + inflection_id: $('input.pattern', this)[0].name.split('-')[0] | |
729 | 731 | }; |
730 | 732 | $.ajaxJSON({ |
731 | 733 | method: 'get', |
... | ... | @@ -797,9 +799,9 @@ function reload_prompter(arg) { |
797 | 799 | // zaลadowaฤ dane do promptera |
798 | 800 | var elem; |
799 | 801 | if (arg.button) |
800 | - elem = $(arg.button).closest('.lip-row'); | |
802 | + elem = $(arg.button).closest('.inflection-row'); | |
801 | 803 | else |
802 | - elem = $('.lip-row-active'); | |
804 | + elem = $('.inflection-row-active'); | |
803 | 805 | var gender = elem.find('.gender').val(); |
804 | 806 | var id = slickgrid.get_id(); |
805 | 807 | var cvs = $('#id_cl' + $dj.commonness + '-values').val(); |
... | ... | @@ -844,7 +846,7 @@ var set_active_prompter_row = function () { |
844 | 846 | pattern: $('.pattern', this).text(), |
845 | 847 | gender: $('.gender-id', this).text(), |
846 | 848 | attr_data: get_attr_data(), |
847 | - lip_id: 'lip_add' | |
849 | + inflection_id: 'inf_add' | |
848 | 850 | }; |
849 | 851 | $.ajaxJSON({ |
850 | 852 | method: 'get', |
... | ... | @@ -864,7 +866,7 @@ function input_prompter_pattern() { |
864 | 866 | if (active.length > 0) { |
865 | 867 | $('#prompter-dialog').dialog('close'); |
866 | 868 | pattern = active.find('.pattern').text(); |
867 | - $('.lip-row-active').find('input.pattern').val(pattern); | |
869 | + $('.inflection-row-active').find('input.pattern').val(pattern); | |
868 | 870 | edit.show_changed(); |
869 | 871 | } |
870 | 872 | } |
... | ... | @@ -918,10 +920,12 @@ var check_pos = function () { |
918 | 920 | var pos = get_pos(); |
919 | 921 | if (!pos) |
920 | 922 | return; |
921 | - var bad_lips = false, good_genders = [], confirmed, had_gender; | |
922 | - var lip_row_elems = $('.lip-row'), cr_row_elems = $('.cr-row'); | |
923 | - if (lip_row_elems.length > 0) { | |
924 | - had_gender = $(lip_row_elems[0]).find('select.gender').length > 0; | |
923 | + var bad_inflections = false, good_genders = [], confirmed, had_gender; | |
924 | + var inflection_row_elems = $('.inflection-row'), | |
925 | + cr_row_elems = $('.cr-row'); | |
926 | + if (inflection_row_elems.length > 0) { | |
927 | + had_gender = $(inflection_row_elems[0]) | |
928 | + .find('select.gender').length > 0; | |
925 | 929 | var pattern = $(this).find('input.pattern').val(); |
926 | 930 | var answer = $.ajaxJSON({ |
927 | 931 | method: 'get', |
... | ... | @@ -930,20 +934,20 @@ var check_pos = function () { |
930 | 934 | async: false |
931 | 935 | }).answer; |
932 | 936 | if (answer !== 'yes') |
933 | - bad_lips = true; | |
937 | + bad_inflections = true; | |
934 | 938 | } |
935 | - if (!bad_lips && had_gender) { | |
936 | - good_genders = lip_row_elems.map(function(i, elem) { | |
939 | + if (!bad_inflections && had_gender) { | |
940 | + good_genders = inflection_row_elems.map(function(i, elem) { | |
937 | 941 | return $(elem).find('.gender').val(); |
938 | 942 | }).toArray(); |
939 | 943 | } |
940 | 944 | var stale_attrs = check_attrs(good_genders); |
941 | 945 | var stale_classifications = check_classifications(); |
942 | - if (bad_lips || cr_row_elems.length > 0 || | |
946 | + if (bad_inflections || cr_row_elems.length > 0 || | |
943 | 947 | stale_attrs.length > 0 || stale_classifications.length > 0) { |
944 | - var lips_text = '', cr_text = ''; | |
945 | - if (bad_lips > 0) { | |
946 | - lips_text = gettext("all inflections") + '\n'; | |
948 | + var inflections_text = '', cr_text = ''; | |
949 | + if (bad_inflections > 0) { | |
950 | + inflections_text = gettext("all inflections") + '\n'; | |
947 | 951 | } |
948 | 952 | if (cr_row_elems.length > 0) { |
949 | 953 | cr_text += gettext("all cross-references") + '\n'; |
... | ... | @@ -959,28 +963,29 @@ var check_pos = function () { |
959 | 963 | join_classifications(stale_classifications) + '\n'; |
960 | 964 | } |
961 | 965 | confirmed = window.confirm( |
962 | - gettext("Those will be removed:") + '\n' + lips_text + cr_text + | |
963 | - attr_text + clas_text + gettext("Do you want to continue?")); | |
966 | + gettext("Those will be removed:") + | |
967 | + '\n' + inflections_text + cr_text + | |
968 | + attr_text + clas_text + gettext("Do you want to continue?")); | |
964 | 969 | if (!confirmed) { |
965 | 970 | common.revert_selection($(this)); |
966 | 971 | } |
967 | 972 | } |
968 | - if (confirmed || !(bad_lips || cr_row_elems.length > 0 || | |
973 | + if (confirmed || !(bad_inflections || cr_row_elems.length > 0 || | |
969 | 974 | stale_attrs.length > 0 || stale_classifications.length > 0)) { |
970 | 975 | common.confirm_selection($(this)); |
971 | 976 | var reload_preview = false; |
972 | - if (bad_lips) { | |
973 | - lip_row_elems.each(function () { | |
977 | + if (bad_inflections) { | |
978 | + inflection_row_elems.each(function () { | |
974 | 979 | var li = $(this); |
975 | 980 | // copypasta... |
976 | 981 | var name = li.find('input')[0].name; |
977 | - if (name.substring(0, 7) !== 'lip_add') | |
982 | + if (name.substring(0, 7) !== 'inf_add') | |
978 | 983 | deleted.push(name.split('-')[0]); |
979 | 984 | li.remove(); |
980 | 985 | reload_preview = true; |
981 | 986 | }); |
982 | 987 | } |
983 | - lip_row_elems.find('.lip-it').val($dj.inflection_types[pos]); | |
988 | + inflection_row_elems.find('.inflection-it').val($dj.inflection_types[pos]); | |
984 | 989 | if (reload_preview) |
985 | 990 | $('#table-preview').html(''); |
986 | 991 | cr_row_elems.each(function () { |
... | ... |
dictionary/static/js/lexeme-view.js
dictionary/templates/inflection_tables.html
dictionary/templates/lexeme_edit_form.html
... | ... | @@ -84,7 +84,7 @@ |
84 | 84 | {% endif %} |
85 | 85 | </p> |
86 | 86 | <ul id="pattern-list" {{ editable|yesno:'class="editable",'|safe }}> |
87 | - {% for lip_form, ro_qualifiers in lip_forms %} | |
87 | + {% for inflection_form, ro_qualifiers in inflection_forms %} | |
88 | 88 | {% include 'lexeme_edit_form_row.html' %} |
89 | 89 | {% endfor %} |
90 | 90 | </ul> |
... | ... | @@ -155,7 +155,7 @@ |
155 | 155 | {% endif %} |
156 | 156 | </span> |
157 | 157 | <span class="gender"> |
158 | - {{ cr.to_lexeme.lip_data.genders }} | |
158 | + {{ cr.to_lexeme.inflection_data.genders }} | |
159 | 159 | {{ cr.to_lexeme|attribute:"aspekt" }} |
160 | 160 | </span> |
161 | 161 | </li> |
... | ... |
dictionary/templates/lexeme_edit_form_row.html
1 | -<li class="lip-row ui-state-default ui-corner-all"> | |
1 | +<li class="inflection-row ui-state-default ui-corner-all"> | |
2 | 2 | {% if editable %} |
3 | 3 | <span class="remove ui-icon ui-icon-closethick"></span> |
4 | 4 | <span class="arrows ui-icon ui-icon-arrowthick-2-n-s"></span> |
5 | 5 | {% endif %} |
6 | - {% for field in lip_form %} | |
6 | + {% for field in inflection_form %} | |
7 | 7 | {% if field.name != 'qualifiers' and field.name != 'pronunciation' %} |
8 | - <span class="lip-field"> | |
8 | + <span class="inflection-field"> | |
9 | 9 | {{ field.label_tag }} {{ field.as_widget }} |
10 | 10 | </span> |
11 | 11 | {% endif %} |
12 | 12 | {% endfor %} |
13 | 13 | {% if editable %} |
14 | 14 | <button type="button" class="prompter" |
15 | - id="{{ lip_form.prefix }}-prompter">...</button> | |
15 | + id="{{ inflection_form.prefix }}-prompter">...</button> | |
16 | 16 | {% endif %} |
17 | 17 | <p> |
18 | 18 | {% if ro_qualifiers %} |
19 | 19 | {{ owner.id }}: {{ ro_qualifiers|join:", " }}; |
20 | 20 | {% endif %} |
21 | - {{ lip_form.qualifiers.label_tag }} {{ lip_form.qualifiers.as_widget }} | |
22 | - {{ lip_form.pronunciation.label_tag }} {{ lip_form.pronunciation.as_widget }} | |
21 | + {{ inflection_form.qualifiers.label_tag }} | |
22 | + {{ inflection_form.qualifiers.as_widget }} | |
23 | + {{ inflection_form.pronunciation.label_tag }} | |
24 | + {{ inflection_form.pronunciation.as_widget }} | |
23 | 25 | </p> |
24 | 26 | </li> |
... | ... |
dictionary/templates/prompter_list.html
1 | -{% for lip, cvs, prefix, suffix in lips %} | |
1 | +{% for inflection, cvs, prefix, suffix in inflections %} | |
2 | 2 | <tr class="prompter-row"> |
3 | 3 | <td class="entry">{{ prefix }}<span class="suffix">{{ suffix }}</span> |
4 | 4 | </td> |
5 | - {% if lip.gender %} | |
6 | - <td class="gender">{{ lip.gender.symbol }}</td> | |
7 | - <td class="gender-id">{{ lip.gender_id }}</td> | |
5 | + {% if inflection.gender %} | |
6 | + <td class="gender">{{ inflection.gender.symbol }}</td> | |
7 | + <td class="gender-id">{{ inflection.gender_id }}</td> | |
8 | 8 | {% endif %} |
9 | 9 | <td class="cv"> |
10 | 10 | <ul> |
... | ... | @@ -15,6 +15,6 @@ |
15 | 15 | {% endfor %} |
16 | 16 | </ul> |
17 | 17 | </td> |
18 | - <td class="pattern">{{ lip.pattern.name }}</td> | |
18 | + <td class="pattern">{{ inflection.pattern.name }}</td> | |
19 | 19 | </tr> |
20 | 20 | {% endfor %} |
... | ... |
dictionary/urls.py
... | ... | @@ -25,7 +25,7 @@ urlpatterns += patterns( |
25 | 25 | url(r'^ajax/odm-forms/$', 'odm_forms'), |
26 | 26 | url(r'^ajax/lexeme-edit-form/$', 'lexeme_edit_form'), |
27 | 27 | url(r'^ajax/update-lexeme/$', 'update_lexeme'), |
28 | - url(r'^ajax/new-lip-row/$', 'new_lip_edit_row'), | |
28 | + url(r'^ajax/new-inflection-row/$', 'new_inflection_edit_row'), | |
29 | 29 | url(r'^ajax/new-cr-row/$', 'new_cross_reference_row'), |
30 | 30 | url(r'^ajax/delete-lexeme/$', 'delete_lexeme'), |
31 | 31 | url(r'^ajax/check-pos/$', 'check_pos'), |
... | ... |
dictionary/views.py
... | ... | @@ -138,7 +138,7 @@ def lexeme_view(request): |
138 | 138 | 'ajax_get_filters': reverse('get_filters'), |
139 | 139 | 'ajax_delete_filter': reverse('delete_filter'), |
140 | 140 | 'ajax_table_preview': reverse('table_preview'), |
141 | - 'ajax_new_lip_row': reverse('new_lip_edit_row'), | |
141 | + 'ajax_new_inflection_row': reverse('new_inflection_edit_row'), | |
142 | 142 | 'ajax_new_cr_row': reverse('new_cross_reference_row'), |
143 | 143 | 'ajax_delete_lexeme': reverse('delete_lexeme'), |
144 | 144 | 'ajax_prompter_list': reverse('prompter_list'), |
... | ... |
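
The endpoint rename spans three layers: the URL name in dictionary/urls.py, the key that lexeme_view() hands to the page in dictionary/views.py, and the $dj.ajax_new_inflection_row lookup in lexeme-edit.js. A minimal sketch of that contract, with a plain dict standing in for the reverse() calls (the paths are illustrative, derived from the URL patterns above):

    # Illustrative excerpt of the JS-config mapping built in lexeme_view();
    # in the real view each value comes from Django's reverse().
    js_urls = {
        'ajax_new_inflection_row': '/ajax/new-inflection-row/',  # was 'ajax_new_lip_row'
        'ajax_new_cr_row': '/ajax/new-cr-row/',
    }

    # lexeme-edit.js reads the renamed key as $dj.ajax_new_inflection_row.
    assert 'ajax_new_inflection_row' in js_urls
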
dictionary/wsjp.py
... | ... | @@ -51,8 +51,8 @@ ASPEKT = LexemeAttribute.objects.get(name=u'aspekt') |
51 | 51 | WLASC = LexemeAttribute.objects.get(name=u'właściwy') |
52 | 52 | |
53 | 53 | |
54 | -def get_charfl(lip): | |
55 | - l = lip.lexeme | |
54 | +def get_charfl(inflection): | |
55 | + l = inflection.lexeme | |
56 | 56 | pos = l.part_of_speech_id |
57 | 57 | if pos == 'adj': |
58 | 58 | zloz = l.attribute_value(ZLOZ).value == u'obecna' |
... | ... | @@ -68,7 +68,7 @@ def get_charfl(lip): |
68 | 68 | elif pos == 'pred': |
69 | 69 | return 'qndk' |
70 | 70 | elif pos == 'subst': |
71 | - return lip.gender.symbol | |
71 | + return inflection.gender.symbol | |
72 | 72 | elif pos == 'v': |
73 | 73 | wlasc = l.attribute_value(WLASC).value |
74 | 74 | return ('q' if wlasc == 'Q' else '') + l.attribute_value(ASPEKT).value |
... | ... | @@ -88,14 +88,14 @@ def make_data(entry): |
88 | 88 | nonrefl_ids, sie_ids, sobie_ids = ( |
89 | 89 | ', '.join(str(pk) for pk in refls) |
90 | 90 | for refls in (nonrefls, sie_refls, sobie_refls)) |
91 | - lips = Inflection.objects.filter(lexeme__entry=entry) | |
92 | - if not lips: | |
91 | + inflections = Inflection.objects.filter(lexeme__entry=entry) | |
92 | + if not inflections: | |
93 | 93 | return [] |
94 | 94 | subqueries = [] |
95 | 95 | params = [] |
96 | - for lip in lips: | |
97 | - charfl = get_charfl(lip) | |
98 | - pattern = lip.pattern | |
96 | + for inflection in inflections: | |
97 | + charfl = get_charfl(inflection) | |
98 | + pattern = inflection.pattern | |
99 | 99 | fnuni = pattern.endings.filter( |
100 | 100 | base_form_label__symbol='pl:gen:fchar').exists() |
101 | 101 | funi = pattern.endings.filter( |
... | ... | @@ -118,7 +118,7 @@ def make_data(entry): |
118 | 118 | (l.pos not in ('v', 'pact') or |
119 | 119 | refl.attribute_value_id in (%s))''' % nonrefl_ids, |
120 | 120 | }, |
121 | - [charfl, lip.id, '1'] | |
121 | + [charfl, inflection.id, '1'] | |
122 | 122 | ), |
123 | 123 |         # czasowniki sięiczne: |
124 | 124 | ( |
... | ... | @@ -129,7 +129,7 @@ def make_data(entry): |
129 | 129 | 'leks_clause': '''(l.pos in ('v', 'pact') and |
130 | 130 | refl.attribute_value_id in (%s))''' % sie_ids, |
131 | 131 | }, |
132 | - [charfl, lip.id, 's'] | |
132 | + [charfl, inflection.id, 's'] | |
133 | 133 | ), |
134 | 134 | # czasowniki sobieiczne: |
135 | 135 | ( |
... | ... | @@ -140,7 +140,7 @@ def make_data(entry): |
140 | 140 | 'leks_clause': '''(l.pos in ('v', 'pact') and |
141 | 141 | refl.attribute_value_id in (%s))''' % sobie_ids, |
142 | 142 | }, |
143 | - [charfl, lip.id, 's1'] | |
143 | + [charfl, inflection.id, 's1'] | |
144 | 144 | ), |
145 | 145 | # czasowniki zanegowane: |
146 | 146 | ( |
... | ... | @@ -151,7 +151,7 @@ def make_data(entry): |
151 | 151 | 'leks_clause': '''l.pos='v' and |
152 | 152 | refl.attribute_value_id in (%s)''' % nonrefl_ids, |
153 | 153 | }, |
154 | - [charfl, lip.id, 'n'] | |
154 | + [charfl, inflection.id, 'n'] | |
155 | 155 | ), |
156 | 156 |         # czasowniki sięiczne zanegowane: |
157 | 157 | ( |
... | ... | @@ -162,7 +162,7 @@ def make_data(entry): |
162 | 162 | 'leks_clause': '''(l.pos='v' and |
163 | 163 | refl.attribute_value_id in (%s))''' % sie_ids, |
164 | 164 | }, |
165 | - [charfl, lip.id, 'ns'] | |
165 | + [charfl, inflection.id, 'ns'] | |
166 | 166 | ), |
167 | 167 | # czasowniki sobieiczne zanegowane: |
168 | 168 | ( |
... | ... | @@ -173,7 +173,7 @@ def make_data(entry): |
173 | 173 | 'leks_clause': '''(l.pos='v' and |
174 | 174 | refl.attribute_value_id in (%s))''' % sobie_ids, |
175 | 175 | }, |
176 | - [charfl, lip.id, 'ns1'] | |
176 | + [charfl, inflection.id, 'ns1'] | |
177 | 177 | ), |
178 | 178 |         # wymagające gniazdowania: adjcom, advcom i ppas |
179 | 179 | ( |
... | ... | @@ -185,7 +185,7 @@ def make_data(entry): |
185 | 185 | (l.pos != 'ppas' or refl.attribute_value_id in (%s))''' |
186 | 186 | % nonrefl_ids, |
187 | 187 | }, |
188 | - [charfl, lip.lexeme_id] | |
188 | + [charfl, inflection.lexeme_id] | |
189 | 189 | ), |
190 | 190 |         # imiesłowy bierne czasowników sięicznych: |
191 | 191 | ( |
... | ... | @@ -195,7 +195,7 @@ def make_data(entry): |
195 | 195 | 'main_clause': '''(typods ='ppasver' and l.pos ='ppas' and |
196 | 196 | refl.attribute_value_id in (%s))''' % sie_ids, |
197 | 197 | }, |
198 | - [charfl, lip.lexeme_id] | |
198 | + [charfl, inflection.lexeme_id] | |
199 | 199 | ), |
200 | 200 |         # imiesłowy bierne czasowników sobieicznych: |
201 | 201 | ( |
... | ... | @@ -205,7 +205,7 @@ def make_data(entry): |
205 | 205 | 'main_clause': '''(typods ='ppasver' and l.pos ='ppas' and |
206 | 206 | refl.attribute_value_id in (%s))''' % sobie_ids, |
207 | 207 | }, |
208 | - [charfl, lip.lexeme_id] | |
208 | + [charfl, inflection.lexeme_id] | |
209 | 209 | ), |
210 | 210 | ] |
211 | 211 | subqueries.extend( |
... | ... |
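
For orientation in the renamed make_data() above: each subquery is parametrised with the charfl value, an inflection id (or lexeme id for the nesting and ppas cases), and a short variant code. The mapping below is an editorial reading of those code strings and the surrounding comments, not an API defined by the project.

    # Variant codes passed as the last parameter of the subqueries in make_data()
    # above.  The code strings are copied from the parameter lists; the
    # descriptions paraphrase the adjacent comments and are an interpretation,
    # not project documentation.
    VARIANT_CODES = {
        '1': 'basic (non-reflexive) forms',
        's': 'verbs with "sie"',
        's1': 'verbs with "sobie"',
        'n': 'negated verbs',
        'ns': 'negated verbs with "sie"',
        'ns1': 'negated verbs with "sobie"',
    }


    def describe_params(params):
        charfl, row_id, code = params
        return '%s, row %s: %s' % (charfl, row_id,
                                   VARIANT_CODES.get(code, 'unknown code'))


    print(describe_params(['m1', 42, 'ns']))
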
history/lexeme_history.py
... | ... | @@ -57,7 +57,7 @@ lexeme_attribute_order = [ |
57 | 57 |     # _(u'odsyłacz'), |
58 | 58 | ] |
59 | 59 | |
60 | -lip_attribute_order = [ | |
60 | +inflection_attribute_order = [ | |
61 | 61 | _(u'index'), |
62 | 62 | _(u'inflection characteristic'), |
63 | 63 | _(u'gender'), |
... | ... | @@ -96,13 +96,13 @@ def get_lexeme_attr(attr, lexeme): |
96 | 96 | return '' |
97 | 97 | |
98 | 98 | |
99 | -def get_lip_attr(attr, lip): | |
99 | +def get_inflection_attr(attr, inflection): | |
100 | 100 | if attr == 'oind': |
101 | - return lip.index | |
101 | + return inflection.index | |
102 | 102 | elif attr == 'gender_id': |
103 | - return lip.gender.symbol if lip.gender else '' | |
103 | + return inflection.gender.symbol if inflection.gender else '' | |
104 | 104 | elif attr == 'w_id': |
105 | - return lip.pattern.name | |
105 | + return inflection.pattern.name | |
106 | 106 | |
107 | 107 | |
108 | 108 | def prepare_value(column, value): |
... | ... | @@ -139,7 +139,7 @@ def prepare_value(column, value): |
139 | 139 | return prepared |
140 | 140 | |
141 | 141 | |
142 | -def lip_header(gender, pattern): | |
142 | +def inflection_header(gender, pattern): | |
143 | 143 | if gender: |
144 | 144 | return u'%s/%s ' % (gender.symbol, pattern.name) |
145 | 145 | else: |
... | ... | @@ -148,13 +148,13 @@ def lip_header(gender, pattern): |
148 | 148 | |
149 | 149 | def transaction_table(transaction_data): |
150 | 150 | transaction_dict = {} |
151 | - lips = GroupDict() | |
151 | + inflections = GroupDict() | |
152 | 152 | extra_attributes = {} |
153 | 153 | classifications = {} |
154 | 154 | qualifiers = [] |
155 | 155 | vocabs = [] |
156 | 156 | crs = {} |
157 | - lip_qualifiers = {} | |
157 | + inflection_qualifiers = {} | |
158 | 158 | deleted = False |
159 | 159 | for item1 in transaction_data: |
160 | 160 | table = item1.table_name |
... | ... | @@ -175,9 +175,9 @@ def transaction_table(transaction_data): |
175 | 175 | if table not in ('odmieniasie', 'kwalifikatory_odmieniasiow'): |
176 | 176 | transaction_dict[attr] = before_after |
177 | 177 | elif table == 'odmieniasie': |
178 | - if item1.row_id not in lips: | |
179 | - lips[item1.row_id] = {} | |
180 | - lips[item1.row_id][attr] = before_after | |
178 | + if item1.row_id not in inflections: | |
179 | + inflections[item1.row_id] = {} | |
180 | + inflections[item1.row_id][attr] = before_after | |
181 | 181 | if column == 'attribute_value_id': |
182 | 182 | if before: |
183 | 183 | value, attr = before |
... | ... | @@ -207,10 +207,10 @@ def transaction_table(transaction_data): |
207 | 207 | crs[item1.row_id] = {} |
208 | 208 | crs[item1.row_id][column] = before_after |
209 | 209 | if table == 'kwalifikatory_odmieniasiow': |
210 | - if item1.row_id not in lip_qualifiers: | |
211 | - lip_qualifiers[item1.row_id] = {} | |
210 | + if item1.row_id not in inflection_qualifiers: | |
211 | + inflection_qualifiers[item1.row_id] = {} | |
212 | 212 |             if column: # stare DELETE nie będą widoczne |
213 | - lip_qualifiers[item1.row_id][column] = before_after | |
213 | + inflection_qualifiers[item1.row_id][column] = before_after | |
214 | 214 | if deleted: |
215 | 215 | return deleted_lexeme_table(transaction_data[0].lexeme) |
216 | 216 | rows = [] |
... | ... | @@ -235,7 +235,7 @@ def transaction_table(transaction_data): |
235 | 235 | try: |
236 | 236 | if cr_data['l_id_do'][i] is not None: |
237 | 237 | l = Lexeme.all_objects.get(id=int(cr_data['l_id_do'][i])) |
238 | - genders = l.lip_data()['genders'] | |
238 | + genders = l.inflection_data()['genders'] | |
239 | 239 | cr_type = CrossReferenceType.objects.get( |
240 | 240 | pk=cr_data['typods_id'][i]) |
241 | 241 | prepared = ' '.join((cr_type.symbol, unicode(l), genders)) |
... | ... | @@ -245,38 +245,40 @@ def transaction_table(transaction_data): |
245 | 245 | prepared = None |
246 | 246 | before_after.append(prepared) |
247 | 247 | rows.append((attr, tuple(before_after))) |
248 | - lip_dict = GroupDict() | |
249 | - for lip_id, lip_data in lips.iteritems(): | |
250 | - for attr in lip_attribute_order: | |
251 | - if attr in lip_data: | |
252 | - lip_dict.add(lip_id, (attr, lip_data[attr])) | |
253 | - for q_data in lip_qualifiers.itervalues(): | |
248 | + inflection_dict = GroupDict() | |
249 | + for inflection_id, inflection_data in inflections.iteritems(): | |
250 | + for attr in inflection_attribute_order: | |
251 | + if attr in inflection_data: | |
252 | + inflection_dict.add( | |
253 | + inflection_id, (attr, inflection_data[attr])) | |
254 | + for q_data in inflection_qualifiers.itervalues(): | |
254 | 255 | if q_data: # stare DELETE... |
255 | 256 | attr = _(u'qualifier') |
256 | - lip_data = q_data.get( | |
257 | + inflection_data = q_data.get( | |
257 | 258 | 'inflection_id', q_data['lexemeinflectionpattern_id']) |
258 | - lip_id = int(lip_data[0] or lip_data[1]) | |
259 | - lip_dict.add(lip_id, (attr, q_data['qualifier_id'])) | |
260 | - lip_tables = [] | |
261 | - for lip_id, lip_data in lip_dict.iteritems(): | |
262 | - lip = Inflection.all_objects.filter(pk=lip_id) | |
263 | - if lip: | |
264 | - lip = lip[0] | |
265 | - header = lip_header(lip.gender, lip.pattern) | |
259 | + inflection_id = int(inflection_data[0] or inflection_data[1]) | |
260 | + inflection_dict.add(inflection_id, (attr, q_data['qualifier_id'])) | |
261 | + inflection_tables = [] | |
262 | + for inflection_id, inflection_data in inflection_dict.iteritems(): | |
263 | + inflection = Inflection.all_objects.filter(pk=inflection_id) | |
264 | + if inflection: | |
265 | + inflection = inflection[0] | |
266 | + header = inflection_header(inflection.gender, inflection.pattern) | |
266 | 267 | else: |
267 | 268 | records = History.objects.filter( |
268 | - operation='DELETE', table_name='odmieniasie', row_id=lip_id) | |
269 | + operation='DELETE', table_name='odmieniasie', | |
270 | + row_id=inflection_id) | |
269 | 271 | try: |
270 | 272 | gender_id = records.get(column_name='gender_id').old_value |
271 | 273 | pattern_id = records.get(column_name='w_id').old_value |
272 | 274 | gender = Gender.objects.get(id=gender_id) |
273 | 275 | pattern = Pattern.all_objects.get(id=pattern_id) |
274 | - header = lip_header(gender, pattern) | |
276 | + header = inflection_header(gender, pattern) | |
275 | 277 | except ObjectDoesNotExist: # stare DELETE... |
276 | 278 | header = '' |
277 | 279 | header += force_unicode(_(u'(deleted)')) |
278 | - lip_tables.append((header, lip_data)) | |
279 | - return rows, lip_tables | |
280 | + inflection_tables.append((header, inflection_data)) | |
281 | + return rows, inflection_tables | |
280 | 282 | |
281 | 283 | |
282 | 284 | def deleted_lexeme_table(lexeme): |
... | ... | @@ -302,34 +304,35 @@ def deleted_lexeme_table(lexeme): |
302 | 304 | rows.append((attr, ( |
303 | 305 | ' '.join([ |
304 | 306 | cr.type.symbol, cr.to_lexeme.entry, |
305 | - cr.to_lexeme.lip_data()['genders']]), | |
307 | + cr.to_lexeme.inflection_data()['genders']]), | |
306 | 308 | None))) |
307 | 309 | for av in lexeme.lexemeattributevalue_set.all(): |
308 | 310 | attr = av.attribute.name |
309 | 311 | rows.append((attr, (av.value or av.display_value, None))) |
310 | - lip_tables = [] | |
311 | - for lip in lexeme.inflection_set.all(): | |
312 | - lip_data = [] | |
312 | + inflection_tables = [] | |
313 | + for inflection in lexeme.inflection_set.all(): | |
314 | + inflection_data = [] | |
313 | 315 | for table, column, attr in attribute_translation_list: |
314 | 316 | if table == 'odmieniasie' and column != 'charfl': |
315 | - lip_data.append((attr, (get_lip_attr(column, lip), None))) | |
316 | - for q in lip.qualifiers.all(): | |
317 | + inflection_data.append( | |
318 | + (attr, (get_inflection_attr(column, inflection), None))) | |
319 | + for q in inflection.qualifiers.all(): | |
317 | 320 | attr = _(u'qualifier') |
318 | - lip_data.append((attr, (q.label, None))) | |
319 | - header = lip_header(lip.gender, lip.pattern) | |
320 | - lip_tables.append((header, lip_data)) | |
321 | - return rows, lip_tables | |
321 | + inflection_data.append((attr, (q.label, None))) | |
322 | + header = inflection_header(inflection.gender, inflection.pattern) | |
323 | + inflection_tables.append((header, inflection_data)) | |
324 | + return rows, inflection_tables | |
322 | 325 | |
323 | 326 | |
324 | 327 | def lexeme_table(transaction_data, last_tb): |
325 | - rows, lip_tables = transaction_table(transaction_data) | |
328 | + rows, inflection_tables = transaction_table(transaction_data) | |
326 | 329 | try: |
327 | 330 | lexeme = transaction_data[0].lexeme |
328 | 331 | except Lexeme.DoesNotExist: |
329 | 332 |         lexeme = _(u'(deleted)') # aktualnie niemożliwe |
330 | 333 | return { |
331 | 334 | 'rows': rows, |
332 | - 'lip_tables': lip_tables, | |
335 | + 'inflection_tables': inflection_tables, | |
333 | 336 | 'user': transaction_data[0].user, |
334 | 337 | 'date': last_tb, |
335 | 338 | 'lexeme': lexeme, |
... | ... |
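
transaction_table() above buckets history rows for the odmieniasie table per inflection id and then emits one (header, rows) table per inflection. A rough, self-contained sketch of that grouping step follows; plain tuples and a defaultdict stand in for the History records and the project's GroupDict helper, so the details are assumptions.

    # Illustrative sketch of the per-inflection grouping in transaction_table()
    # above: changes are bucketed by inflection id, then ordered by the
    # inflection_attribute_order list before rendering.
    from collections import defaultdict


    def group_inflection_changes(history_rows, attribute_order):
        # history_rows: iterable of (inflection_id, attr, before, after)
        changes = defaultdict(dict)
        for inflection_id, attr, before, after in history_rows:
            changes[inflection_id][attr] = (before, after)
        tables = []
        for inflection_id, attrs in sorted(changes.items()):
            ordered = [(attr, attrs[attr])
                       for attr in attribute_order if attr in attrs]
            tables.append((inflection_id, ordered))
        return tables


    rows = [
        (7, 'pattern', 'P1', 'P2'),
        (7, 'gender', 'm1', 'm2'),
        (9, 'index', 1, 2),
    ]
    print(group_inflection_changes(rows, ['index', 'gender', 'pattern']))
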
history/static/css/history.css
history/templates/lexeme_history_table.html
... | ... | @@ -6,10 +6,10 @@ |
6 | 6 | <th>{% trans 'after' %}</th> |
7 | 7 | </tr> |
8 | 8 | {% include "history_row.html" with rows=table.rows %} |
9 | - {% for header, lip_rows in table.lip_tables %} | |
9 | + {% for header, inflection_rows in table.inflection_tables %} | |
10 | 10 | <tr> |
11 | - <th colspan="3" class="lip-header">{% trans 'inflection' %} {{ header }}:</th> | |
11 | + <th colspan="3" class="inflection-header">{% trans 'inflection' %} {{ header }}:</th> | |
12 | 12 | </tr> |
13 | - {% include "history_row.html" with rows=lip_rows %} | |
13 | + {% include "history_row.html" with rows=inflection_rows %} | |
14 | 14 | {% endfor %} |
15 | 15 | </table> |
... | ... |
patterns/ajax_pattern_view.py
... | ... | @@ -103,7 +103,7 @@ def pattern_preview(request, id, reader=False): |
103 | 103 | user = request.user |
104 | 104 | if not request.user.is_authenticated() and not pattern.is_public(): |
105 | 105 | raise AjaxError('access denied') |
106 | - lips = Inflection.filter_visible(pattern.inflection_set, user) | |
106 | + inflections = Inflection.filter_visible(pattern.inflection_set, user) | |
107 | 107 | detailed_counts = [] |
108 | 108 | pattern_filter_rule = { |
109 | 109 | 'field': 'pattern_name', |
... | ... | @@ -121,7 +121,7 @@ def pattern_preview(request, id, reader=False): |
121 | 121 | |
122 | 122 | if pattern.type.inflection_type_id == 'subst': |
123 | 123 | for gender in Gender.objects.all(): |
124 | - gender_count = lexeme_count(lips.filter(gender=gender)) | |
124 | + gender_count = lexeme_count(inflections.filter(gender=gender)) | |
125 | 125 | if gender_count > 0: |
126 | 126 | example = pattern.get_example(gender) |
127 | 127 | if example: |
... | ... | @@ -147,7 +147,7 @@ def pattern_preview(request, id, reader=False): |
147 | 147 | return { |
148 | 148 | 'pattern': pattern, |
149 | 149 | 'root': first_root, |
150 | - 'lexeme_count': lexeme_count(lips), | |
150 | + 'lexeme_count': lexeme_count(inflections), | |
151 | 151 | 'detailed_counts': detailed_counts, |
152 | 152 | 'pattern_filter_url': make_filter_url([pattern_filter_rule]), |
153 | 153 | } |
... | ... |
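
pattern_preview() above builds its per-gender rows by filtering the visible inflections by gender and counting lexemes for each one. A detached sketch of that counting step; lexeme_count() counting distinct lexemes is an inference from its name, not something shown in this diff.

    # Rough sketch of the per-gender counting in pattern_preview() above, with
    # (lexeme_id, gender_symbol) pairs standing in for the Inflection queryset.
    # Counting distinct lexemes per gender is an assumption.
    def detailed_gender_counts(inflections):
        by_gender = {}
        for lexeme_id, gender in inflections:
            by_gender.setdefault(gender, set()).add(lexeme_id)
        return dict((gender, len(lexemes))
                    for gender, lexemes in by_gender.items())


    print(detailed_gender_counts([(1, 'm1'), (1, 'm1'), (2, 'f'), (3, 'f')]))
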
patterns/models.py
... | ... | @@ -145,10 +145,10 @@ class Pattern(Model): |
145 | 145 | pattern=self, gender=gender) |
146 | 146 | if examples: |
147 | 147 | lexeme = examples.get().lexeme |
148 | - example_lips = lexeme.inflection_set.filter( | |
148 | + example_inflections = lexeme.inflection_set.filter( | |
149 | 149 | pattern=self, gender=gender) |
150 | - if example_lips: | |
151 | - return lexeme, example_lips[0].root | |
150 | + if example_inflections: | |
151 | + return lexeme, example_inflections[0].root | |
152 | 152 | else: |
153 | 153 | return self.get_example(gender, refresh=True) |
154 | 154 | |
... | ... |
patterns/static/css/pattern_view.css
... | ... | @@ -18,9 +18,4 @@ table.detailed-counts td, table.detailed-counts th { |
18 | 18 | table.detailed-counts th.left-header { |
19 | 19 | border-right-style: solid; |
20 | 20 | text-align: left; |
21 | -} | |
22 | - | |
23 | -.ellipsis-icon { | |
24 | - display: block; | |
25 | - width: 16px; | |
26 | 21 | } |
27 | 22 | \ No newline at end of file |
... | ... |
tables/models.py
... | ... | @@ -174,11 +174,11 @@ class TableTemplate(Model): |
174 | 174 | return self.name |
175 | 175 | |
176 | 176 | |
177 | -def combine_qualifiers(lip_qualifiers, e_qualifiers): | |
178 | - if not lip_qualifiers: | |
179 | - return e_qualifiers | |
177 | +def combine_qualifiers(inflection_qualifiers, ending_qualifiers): | |
178 | + if not inflection_qualifiers: | |
179 | + return ending_qualifiers | |
180 | 180 | qualifiers = set() |
181 | - for q in list(lip_qualifiers) + list(e_qualifiers): | |
181 | + for q in list(inflection_qualifiers) + list(ending_qualifiers): | |
182 | 182 | if q.exclusion_class: |
183 | 183 | excluded = set(q.exclusion_class.qualifier_set.all()) |
184 | 184 | qualifiers -= excluded |
... | ... | @@ -201,7 +201,7 @@ class Cell(Model): |
201 | 201 | return set() |
202 | 202 | |
203 | 203 | def forms(self, base_endings=None, separator=u'', root=u'', |
204 | - lip_qualifiers=None, lip_index=0, qualifiers=None, | |
204 | + inflection_qualifiers=None, inflection_index=0, qualifiers=None, | |
205 | 205 | edit_view=False, span=False, depr=None, cell_qualifier=False): |
206 | 206 | if qualifiers: |
207 | 207 | qualifiers_set = set(qualifiers) |
... | ... | @@ -212,11 +212,11 @@ class Cell(Model): |
212 | 212 | else: |
213 | 213 | return set(quals) & qualifiers_set |
214 | 214 | |
215 | - if lip_qualifiers and not edit_view: | |
215 | + if inflection_qualifiers and not edit_view: | |
216 | 216 | # l_qual = filter_quals(lexeme_qualifiers) |
217 | - lip_qual = filter_quals(lip_qualifiers) | |
217 | + inflection_qual = filter_quals(inflection_qualifiers) | |
218 | 218 | else: |
219 | - lip_qual = set() | |
219 | + inflection_qual = set() | |
220 | 220 | endings = base_endings[self.base_form_label] |
221 | 221 | if span: |
222 | 222 | form_template = ( |
... | ... | @@ -242,11 +242,11 @@ class Cell(Model): |
242 | 242 | % self.get_qualifier()) |
243 | 243 | forms = [ |
244 | 244 | ( |
245 | - (self.get_index(), lip_index, ending.index), | |
245 | + (self.get_index(), inflection_index, ending.index), | |
246 | 246 | (form_template % ( |
247 | 247 | self.prefix, root, separator, ending.string, self.suffix)), |
248 | 248 | combine_qualifiers( |
249 | - lip_qual, filter_quals(ending.qualifiers.all())), | |
249 | + inflection_qual, filter_quals(ending.qualifiers.all())), | |
250 | 250 | ) |
251 | 251 | for ending in endings |
252 | 252 | ] |
... | ... |
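
combine_qualifiers() above merges inflection-level and ending-level qualifiers while honouring exclusion classes; the visible hunk stops right after the eviction step, so adding the current qualifier back and returning the set are filled in below as assumptions. The plain classes are stand-ins for the project's Django models.

    # Self-contained sketch of the exclusion-class merge in combine_qualifiers()
    # above.  ExclusionClass/Qualifier are stand-ins for the Django models, and
    # the final add()/return are assumed, since the hunk ends at the
    # "qualifiers -= excluded" step.
    class ExclusionClass(object):
        def __init__(self):
            self.members = set()


    class Qualifier(object):
        def __init__(self, label, exclusion_class=None):
            self.label = label
            self.exclusion_class = exclusion_class
            if exclusion_class is not None:
                exclusion_class.members.add(self)

        def __repr__(self):
            return self.label


    def combine(inflection_qualifiers, ending_qualifiers):
        if not inflection_qualifiers:
            return set(ending_qualifiers)
        qualifiers = set()
        for q in list(inflection_qualifiers) + list(ending_qualifiers):
            if q.exclusion_class:
                qualifiers -= q.exclusion_class.members  # evict conflicting quals
            qualifiers.add(q)
        return qualifiers


    style = ExclusionClass()
    daw = Qualifier('daw.', style)   # same exclusion class as pot.
    pot = Qualifier('pot.', style)
    reg = Qualifier('reg.')          # no exclusion class
    print(combine({daw, reg}, [pot]))  # pot. evicts daw.; reg. survives
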
tables/templates/inflection_table.html
... | ... | @@ -4,7 +4,7 @@ |
4 | 4 | <caption> |
5 | 5 | {{ gender.symbol }}: |
6 | 6 | {% if pronunciations %} |
7 | - <span class="lip-pronunciation">[{{ pronunciations|join:"/"|safe }}]{% if gender_qualifiers %};{% endif %}</span> | |
7 | + <span class="inflection-pronunciation">[{{ pronunciations|join:"/"|safe }}]{% if gender_qualifiers %};{% endif %}</span> | |
8 | 8 | {% endif %} |
9 | 9 | <span class="qualifiers">{{ gender_qualifiers|join:" " }}</span> |
10 | 10 | {% include "pattern_list.html" %} |
... | ... |
tables/views.py
... | ... | @@ -9,7 +9,7 @@ from patterns.models import Pattern |
9 | 9 | |
10 | 10 | |
11 | 11 | @render_ajax(template='table_preview.html', method='get') |
12 | -def table_preview(request, lexeme_id, lip_id, pattern, attr_data=None, | |
12 | +def table_preview(request, lexeme_id, inflection_id, pattern, attr_data=None, | |
13 | 13 | gender=None, entry=None, pos=None): |
14 | 14 | lexeme = Lexeme.all_objects.get(pk=lexeme_id) |
15 | 15 | if not lexeme.perm(request.user, 'view'): |
... | ... | @@ -28,15 +28,15 @@ def table_preview(request, lexeme_id, lip_id, pattern, attr_data=None, |
28 | 28 | id__in=attr_data) |
29 | 29 | else: |
30 | 30 | attr_vals = None |
31 | - if lip_id.startswith('lip_add'): | |
32 | - lip = Inflection(lexeme=lexeme, index=0) | |
31 | + if inflection_id.startswith('inf_add'): | |
32 | + inflection = Inflection(lexeme=lexeme, index=0) | |
33 | 33 | else: |
34 | - lip = Inflection.objects.get(pk=int(lip_id[3:])) | |
35 | - lip.pattern = pattern | |
36 | - lip.gender = gender | |
37 | - lip.root = lip.get_root() | |
34 | + inflection = Inflection.objects.get(pk=int(inflection_id[3:])) | |
35 | + inflection.pattern = pattern | |
36 | + inflection.gender = gender | |
37 | + inflection.root = inflection.get_root() | |
38 | 38 | qualifiers = Qualifier.visible_qualifiers(request.user) |
39 | - table = lip.inflection_table( | |
39 | + table = inflection.table( | |
40 | 40 | '0', separated=True, qualifiers=qualifiers, edit_view=True, |
41 | 41 | attr_vals=attr_vals, pos=part_of_speech) |
42 | 42 | prepare_table(table) |
... | ... |
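
table_preview() above now expects the client to send row ids of the form 'inf<pk>' for saved inflections and ids starting with 'inf_add' for rows not yet saved; int(inflection_id[3:]) hard-codes the three-character prefix. A minimal sketch of that parsing contract follows; the helper name and the exact id strings are inferred from the view, so verify them against the client-side JavaScript.

    # Hypothetical parser mirroring the row-id convention assumed by
    # table_preview(): 'inf<pk>' -> existing Inflection, 'inf_add...' -> new,
    # unsaved row.  Inferred from the view above, not an API of the project.
    def parse_inflection_row_id(row_id):
        if row_id.startswith('inf_add'):
            return None                 # new row, no primary key yet
        if row_id.startswith('inf'):
            return int(row_id[3:])      # primary key of an existing Inflection
        raise ValueError('unexpected row id: %r' % (row_id,))


    assert parse_inflection_row_id('inf_add1') is None
    assert parse_inflection_row_id('inf42') == 42
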