Commit 4d466b16eca613eb60e120b875cbde10bd07fab7
Parent: c13a529a

Semantics for nouns: fixes for payments, validation, and others.

Showing 11 changed files with 131 additions and 72 deletions.
accounts/models.py
@@ -279,6 +279,9 @@ class RealizedSemantics(Model):
     # wykonane ramki (wypelniane dla semantyka)
     made_frames = PositiveIntegerField(db_column='wykonane_ramki',
                                        default=0)
+    # wspoldzielone ramki (wypelniane dla semantyka)
+    related_frames = PositiveIntegerField(db_column='wspoldzielone_ramki',
+                                          default=0)
     # poprawione ramki (wypelniane dla supersemantyka)
     corr_frames = PositiveIntegerField(db_column='poprawione_ramki',
                                        default=0)
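The new column records, per payment entry, how many of the frames credited to a semanticist were shared from related entries ("wspoldzielone ramki" = shared frames) rather than authored for the entry itself. A minimal sketch of such a record (field values are hypothetical; the required entry and status references are omitted for brevity):

    from accounts.models import RealizedSemantics

    payment = RealizedSemantics(cash=40.0,         # total payment for this entry
                                made_frames=3,     # frames authored for the entry itself
                                related_frames=2,  # frames shared from related entries
                                bonus=False)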
dictionary/ajax_lemma_status.py
@@ -117,10 +117,10 @@ def lemma_status_change(request, status_id, lemma_id):
        not lemma_obj.status.next_statuses.filter(pk=new_status.pk).exists()):
         raise AjaxError('wrong change')
 
-    actual_semantic_frames = SemanticFrame.objects.none()
+    visible_semantic_frames = SemanticFrame.objects.none()
     next_status = False
     if(new_status):
-        actual_semantic_frames = backup_lemma_and_get_frames(lemma_obj)
+        visible_semantic_frames = backup_lemma_and_get_frames(lemma_obj)
 
     if(new_status and new_status.priority > lemma_obj.status.priority):
         next_status = True
@@ -223,8 +223,10 @@ def lemma_status_change(request, status_id, lemma_id):
           and next_status):
         ### naliczanie oplat za gotowosc semantyczna
         frame_value = 12.0
-        update_sem_stats_ready_s(lemma_obj.entry_obj, actual_semantic_frames,
-                                 lemma_obj.semanticist, new_status, frame_value)
+        related_frame_value = 2.0
+        update_sem_stats_ready_s(lemma_obj.entry_obj, visible_semantic_frames,
+                                 lemma_obj.semanticist, new_status, frame_value,
+                                 related_frame_value)
         add_new_frames_to_phraseologic_propositions(lemma_obj)
         changed = True
     # zmiana statusu hasla na sprawdzone semantycznie
@@ -236,8 +238,8 @@ def lemma_status_change(request, status_id, lemma_id):
         part_bonus = 2.0
         connection_bonus = 0.1
         ### naliczanie oplat za sprawdzenie i bonusow
-        update_sem_stats_conf_s(entry=lemma_obj.entry_obj,
-                                semantic_frames=actual_semantic_frames,
+        update_sem_stats_conf_s(entry=lemma_obj.entry_obj,
+                                checked_sem_frames_backup=visible_semantic_frames,
                                 semanticist=lemma_obj.semanticist,
                                 supersemanticist=request.user,
                                 status=new_status,
@@ -273,7 +275,7 @@ def lemma_status_change(request, status_id, lemma_id):
                                      lemma=lemma_obj,
                                      status=new_status)
         status_change.save()
-        status_change.semantic_frames.add(*actual_semantic_frames.all())
+        status_change.semantic_frames.add(*visible_semantic_frames.all())
         lemma_obj.status_history.add(status_change)
 
     if new_status:
@@ -472,45 +474,59 @@ def update_lemma_stats_conf_f(lemma, phraseologist, superphraseologist, status,
     phraseologist.user_stats.phraseology_real_history.add(phraseologist_real_lemma)
 
 ####################### semantics #############################
-def update_sem_stats_ready_s(entry, semantic_frames, semanticist, status, frame_value):
-    actual_frames_count = semantic_frames.count()
-    sem_dict = {'made_frames': actual_frames_count,
-                'cash': frame_value*float(actual_frames_count)}
+def update_sem_stats_ready_s(entry, visible_semantic_frames, semanticist, status,
+                             frame_value, related_frame_value):
+    actual_frames = entry.actual_frames()
+    actual_frames_count = actual_frames.count()
 
-    realized_semantics = RealizedSemantics(entry=entry, cash=sem_dict['cash'],
-                                           made_frames=sem_dict['made_frames'],
+    related_frames = entry.related_frames()
+    related_frames_count = related_frames.count()
+
+    cash = frame_value*float(actual_frames_count) + related_frame_value*float(related_frames_count)
+
+
+    realized_semantics = RealizedSemantics(entry=entry, cash=cash,
+                                           made_frames=actual_frames_count,
+                                           related_frames=related_frames_count,
                                            status=status, bonus=False)
     realized_semantics.save()
-    realized_semantics.frames.add(*semantic_frames.all())
+    realized_semantics.frames.add(*visible_semantic_frames.all())
     semanticist.user_stats.semantics_real_history.add(realized_semantics)
 
-def update_sem_stats_conf_s(entry, semantic_frames, semanticist, supersemanticist, status,
-                            checked_frame_value, corrected_frame_value,
-                            bonus_factor, part_bonus_factor, connection_bonus):
+def update_sem_stats_conf_s(entry, checked_sem_frames_backup, semanticist, supersemanticist, status,
+                            checked_frame_value, corrected_frame_value,
+                            bonus_factor, part_bonus_factor, connection_bonus):
     ready_statuses = Lemma_Status.objects.filter(type__sym_name='ready_s')
     q_ready_statuses = [Q(status=ready_status) for ready_status in ready_statuses.all()]
 
     ready_semantics = RealizedSemantics.objects.filter(reduce(operator.or_, q_ready_statuses))
-    ready_sem_frames= ready_semantics.get(entry=entry).frames
-    checked_sem_frames = semantic_frames
-    ready_to_checked_diffs = get_frames_differences(ready_sem_frames.all(), checked_sem_frames.all())
-    checked_to_ready_diffs = get_frames_differences(checked_sem_frames.all(), ready_sem_frames.all())
+    ready_sem_visible_frames = ready_semantics.get(entry=entry).frames
+    ready_sem_actual_frames = entry.filter_local(ready_sem_visible_frames)
+    ready_sem_related_frames = entry.filter_related(ready_sem_visible_frames)
+
+    checked_sem_actual_frames = entry.actual_frames()
+    checked_sem_related_frames = entry.related_frames()
+
+    actual_ready_to_checked_diffs = get_frames_differences(ready_sem_actual_frames.all(), checked_sem_actual_frames.all())
+    actual_checked_to_ready_diffs = get_frames_differences(checked_sem_actual_frames.all(), ready_sem_actual_frames.all())
+
+    visible_ready_to_checked_diffs = get_frames_differences(ready_sem_visible_frames.all(), checked_sem_frames_backup.all())
 
-    connections_amount = count_connections(ready_to_checked_diffs)
-    sem_cash = (bonus_factor*float(len(ready_to_checked_diffs['matching_frames'])) +
-                part_bonus_factor*float(len(ready_to_checked_diffs['part_matching_frames'])) +
+    connections_amount = count_connections(entry, visible_ready_to_checked_diffs)
+    sem_cash = (bonus_factor*float(len(actual_ready_to_checked_diffs['matching_frames'])) +
+                part_bonus_factor*float(len(actual_ready_to_checked_diffs['part_matching_frames'])) +
                 connection_bonus*float(connections_amount))
-    sem_dict = {'same_frames': len(ready_to_checked_diffs['matching_frames']),
-                'part_same_frames': len(ready_to_checked_diffs['part_matching_frames']),
-                'wrong_frames': len(ready_to_checked_diffs['missing_frames']),
+    sem_dict = {'same_frames': len(actual_ready_to_checked_diffs['matching_frames']),
+                'part_same_frames': len(actual_ready_to_checked_diffs['part_matching_frames']),
+                'wrong_frames': len(actual_ready_to_checked_diffs['missing_frames']),
                 'added_connections': connections_amount,
                 'cash': sem_cash}
 
-    supersem_cash = (float(len(checked_to_ready_diffs['missing_frames'])+len(checked_to_ready_diffs['part_matching_frames']))*corrected_frame_value +
-                     float(len(ready_to_checked_diffs['matching_frames']))*checked_frame_value)
-    supersem_dict = {'same_frames': len(checked_to_ready_diffs['matching_frames']),
-                     'part_same_frames': len(checked_to_ready_diffs['part_matching_frames']),
-                     'redo_frames': len(checked_to_ready_diffs['missing_frames']),
+    supersem_cash = (float(len(actual_checked_to_ready_diffs['missing_frames'])+len(actual_checked_to_ready_diffs['part_matching_frames']))*corrected_frame_value +
+                     float(len(actual_ready_to_checked_diffs['matching_frames']))*checked_frame_value)
+    supersem_dict = {'same_frames': len(actual_checked_to_ready_diffs['matching_frames']),
+                     'part_same_frames': len(actual_checked_to_ready_diffs['part_matching_frames']),
+                     'redo_frames': len(actual_checked_to_ready_diffs['missing_frames']),
                      'cash': supersem_cash}
 
     supersem_real_semantics = RealizedSemantics(entry=entry,
@@ -521,7 +537,7 @@ def update_sem_stats_conf_s(entry, semantic_frames, semanticist, supersemanticis
                                                 status=status,
                                                 bonus=False)
     supersem_real_semantics.save()
-    supersem_real_semantics.frames.add(*semantic_frames.all())
+    supersem_real_semantics.frames.add(*checked_sem_frames_backup.all())
     supersemanticist.user_stats.semantics_real_history.add(supersem_real_semantics)
 
     sem_real_semantics = RealizedSemantics(entry=entry,
@@ -533,15 +549,16 @@ def update_sem_stats_conf_s(entry, semantic_frames, semanticist, supersemanticis
                                            status=status,
                                            bonus=True)
     sem_real_semantics.save()
-    sem_real_semantics.frames.add(*semantic_frames.all())
+    sem_real_semantics.frames.add(*checked_sem_frames_backup.all())
    semanticist.user_stats.semantics_real_history.add(sem_real_semantics)
 
-def count_connections(differences):
-    amount = 0
+def count_connections(entry, differences):
+    amount = 0
+    schemata = entry.actual_schemata()
     for frame in differences['matching_frames']:
-        amount += frame.connected_schemata().count()
+        amount += frame.connected_schemata().filter(pk__in=schemata).count()
     for frame in differences['part_matching_frames']:
-        amount += frame.connected_schemata().count()
+        amount += frame.connected_schemata().filter(pk__in=schemata).count()
     return amount
 
 def remove_semantic_payments(entry):
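The payment for semantic readiness now distinguishes frames authored for the entry itself (paid at frame_value) from visible frames shared with related entries (paid at the lower related_frame_value); the checking and bonus amounts are computed from differences over the entry's own frames, and connection bonuses only count connections to the entry's own schemata. A worked example of the readiness payment, with hypothetical frame counts:

    frame_value = 12.0          # per frame authored for this entry (value from the diff)
    related_frame_value = 2.0   # per frame shared from a related entry (value from the diff)

    actual_frames_count = 3     # hypothetical: frames belonging to the entry itself
    related_frames_count = 2    # hypothetical: visible frames shared from related entries

    cash = frame_value * float(actual_frames_count) + related_frame_value * float(related_frames_count)
    assert cash == 40.0         # 12.0*3 + 2.0*2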
dictionary/ajax_lemma_view.py
@@ -830,12 +830,24 @@ def disrelate_entries(request, lemma_id, preview_lemma_id):
     error, lemma, preview_lemma = check_if_selected_and_get(lemma_id, preview_lemma_id)
     if not error:
         error = check_if_has_rights_to_relate(lemma, request.user)
+    if not error:
+        error = check_if_share_sematic_frames(lemma, preview_lemma)
     if error:
         raise AjaxError(error)
     else:
         cancel_entries_relation(request, lemma, preview_lemma)
     return {}
 
+def check_if_share_sematic_frames(lemma, preview_lemma):
+    error = ''
+    lemma_visible_frames = lemma.entry_obj.visible_frames()
+    preview_visible_frames = preview_lemma.entry_obj.visible_frames()
+
+    if (lemma_visible_frames.filter(pk__in=preview_visible_frames).exists() or
+        preview_visible_frames.filter(pk__in=lemma_visible_frames).exists()):
+        error = 'shared frames'
+    return error
+
 def cancel_entries_relation(request, lemma, preview_lemma):
     lemma_entry, preview_lemma_entry = get_entries(lemma, preview_lemma)
     lemma_entry.rel_entries.remove(preview_lemma_entry)
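Cancelling the relation between two entries is now refused when their visible semantic frames overlap, since disrelating them would leave shared frames dangling; the view reports this as the 'shared frames' error handled in lemma-view.js below. A minimal sketch of the same intersection test (hypothetical helper, not part of the commit):

    def entries_share_frames(entry_a, entry_b):
        # True when the two entries have at least one visible semantic frame in common.
        return entry_a.visible_frames().filter(pk__in=entry_b.visible_frames()).exists()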
dictionary/ajax_user_stats.py
@@ -239,6 +239,9 @@ def get_semantics_stats(user):
     ncorr_frames = RealizedSemantics.objects.filter(user_stats__user=user).aggregate(Sum('ncorr_frames'))['ncorr_frames__sum']
     if ncorr_frames == None:
         ncorr_frames = 0
+    related_frames = RealizedSemantics.objects.filter(user_stats__user=user).aggregate(Sum('related_frames'))['related_frames__sum']
+    if related_frames == None:
+        related_frames = 0
     made_frames = RealizedSemantics.objects.filter(user_stats__user=user).aggregate(Sum('made_frames'))['made_frames__sum']
     if made_frames == None:
         made_frames = 0
@@ -257,6 +260,7 @@ def get_semantics_stats(user):
             'corr_frames': corr_frames,
             'part_corr_frames': part_corr_frames,
             'checked_frames': ncorr_frames+corr_frames+part_corr_frames,
+            'related_frames': related_frames,
             'made_frames': made_frames,
             'efficacy': round(efficacy, 2),
             'added_connections' : added_connections}
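The user statistics now also sum the related_frames counter across a user's RealizedSemantics records. Sum() returns None for a user with no records, hence the explicit None checks above; an equivalent sketch for the new counter, written with an or-default:

    from django.db.models import Sum

    related_frames = (RealizedSemantics.objects.filter(user_stats__user=user)
                      .aggregate(Sum('related_frames'))['related_frames__sum']) or 0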
dictionary/models.py
@@ -1369,7 +1369,7 @@ class AspectRelationsGroup(Model):
         return ','.join(str_ls)
 
 class Entry(Model):
-    name = CharField(max_length=64, db_column='nazwa') # usunieto , unique=True
+    name = CharField(max_length=64, db_column='nazwa')
     # czesc mowy
     pos = ForeignKey('POS', db_column='czesc_mowy', related_name='entries')
     # powiazane lematy
@@ -1395,23 +1395,45 @@ class Entry(Model):
             ('change_semantics', u'Może edytować semantykę.'),
             ('view_semantics', u'Może oglądać semantykę.'),
         )
-
+
+    def related_frames(self):
+        visible = self.visible_frames()
+        actual = self.actual_frames()
+        return visible.exclude(pk__in=actual)
+
+    def visible_frames(self):
+        frames = []
+        for frame in self.all_frames():
+            for lu in frame.lexical_units.all():
+                if self.meanings.filter(pk=lu.pk).exists():
+                    frames.append(frame.pk)
+                    break
+        return get_model('semantics', 'SemanticFrame').objects.filter(pk__in=frames)
+
+    def all_frames(self):
+        frames = self.actual_frames()
+        for entry in self.rel_entries.all():
+            new_frames = entry.actual_frames()
+            frames |= new_frames
+        return get_model('semantics', 'SemanticFrame').objects.filter(pk__in=frames)
+
     def actual_frames(self):
-        # frame_ids = []
-        # lexical_units = self.meanings.order_by('sense')
-        # for lexical_unit in lexical_units:
-        #     frame_ids.extend([f.id for f in lexical_unit.actual_frames()])
-        # return get_model('semantics', 'SemanticFrame').objects.filter(id__in=list(set(frame_ids)))
         return self.semantic_frames.filter(next__isnull=True, removed=False)
+
+    def actual_schemata(self):
+        return self.lemmas.get(old=False).frames.all()
+
+    def filter_local(self, frames):
+        return frames.filter(pk__in=self.semantic_frames.all())
+
+    def filter_related(self, frames):
+        return frames.exclude(pk__in=self.semantic_frames.all())
 
     def matching_connections(self, schema, position, phrase_type):
 
         matching_connections = []
 
-        frames = self.actual_frames()
-        for entry in self.rel_entries.all():
-            new_frames = entry.actual_frames()
-            frames |= new_frames
+        frames = self.visible_frames()
 
         for frame in frames:
             for compl in frame.complements.all():
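The Entry model now separates four views of an entry's frames: actual_frames() are the frames attached directly to the entry, all_frames() adds the actual frames of every related entry, visible_frames() keeps only those of all_frames() whose lexical units belong to this entry's meanings, and related_frames() is the visible remainder that the entry does not own. A small sketch of the relationships this implies, for a hypothetical entry object:

    own = entry.actual_frames()        # frames stored on this entry
    visible = entry.visible_frames()   # frames relevant to this entry's meanings (own + shared)
    shared = entry.related_frames()    # visible frames owned by related entries

    # related_frames() is defined as visible_frames() minus actual_frames()
    assert set(shared.values_list('pk', flat=True)) == (
        set(visible.values_list('pk', flat=True)) - set(own.values_list('pk', flat=True)))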
dictionary/saving.py
@@ -9,12 +9,8 @@ from wordnet.models import LexicalUnit
 def get_semantic_operations(lemma, schemata_conversions):
     connections = []
     operations = []
-    # frames = lemma.entry_obj.actual_frames()
 
-    frames = lemma.entry_obj.actual_frames()
-    for entry in lemma.entry_obj.rel_entries.all():
-        new_frames = entry.actual_frames()
-        frames |= new_frames
+    frames = lemma.entry_obj.visible_frames()
 
     for conv in schemata_conversions:
         schema_operations = get_reconnect_operations_and_extend_connections(frames,
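For reference, the removed loop and its replacement side by side (a sketch assembled from the diff above); the difference is that visible_frames() also drops frames whose lexical units do not belong to this entry's meanings:

    # Before: union of the entry's own frames and those of all related entries.
    frames = lemma.entry_obj.actual_frames()
    for entry in lemma.entry_obj.rel_entries.all():
        frames |= entry.actual_frames()

    # After: the same union, narrowed to frames tied to this entry's meanings.
    frames = lemma.entry_obj.visible_frames()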
dictionary/static/js/lemma-view.js
@@ -3567,13 +3567,15 @@ function restore_lemma() {
         HideProgressAnimation();
         error_alert('Nie zaznaczono hasła w zakładce "Schematy" lub "Podglądzie hasła".');
         return false;
-      }
-      else if (result == 'can not edit') {
+      } else if (result == 'can not edit') {
         HideProgressAnimation();
         error_alert('Brak uprawnień do edycji hasła.');
         return false;
-      }
-      else {
+      } else if (result == 'shared frames') {
+        HideProgressAnimation();
+        error_alert('Nie można anulować: Hasła współdzielą ramy semantyczne.');
+        return false;
+      } else {
         HideProgressAnimation();
         return true;
       }
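The new branch handles the 'shared frames' error raised by check_if_share_sematic_frames above; its alert text translates to "Cannot cancel: the entries share semantic frames." (the existing messages mean "No entry selected in the 'Schemata' tab or the entry preview." and "No permission to edit the entry.").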
dictionary/templates/sel_user_stats.html
@@ -123,12 +123,13 @@
 <table class='PaymentsTable'>
   <tr>
     <td class='EmptyCell' colspan=1></td>
-    <td class='ColumnHeader' colspan=7>Semantycy:</td>
+    <td class='ColumnHeader' colspan=8>Semantycy:</td>
     <td class='ColumnHeader' colspan=3>Supersemantycy:</td>
   </tr>
   <tr>
     <td class='ColumnHeader'>Kwota za wykonaną pracę:</td>
     <td class='ColumnHeader'>Bonus:</td>
+    <td class='ColumnHeader'>Współdzielone ramy:</td>
     <td class='ColumnHeader'>Wykonane ramy:</td>
     <td class='ColumnHeader'>Poprawnie wykonane ramy:</td>
     <td class='ColumnHeader'>Częściowo poprawnie wykonane ramy:</td>
@@ -142,6 +143,7 @@
   <tr>
     <td>{{semantics_work_stats.earned_cash}} zł</td>
     <td>{{semantics_work_stats.bonus_cash}} zł</td>
+    <td>{{semantics_work_stats.related_frames}}</td>
     <td>{{semantics_work_stats.made_frames}}</td>
     <td>{{semantics_work_stats.prop_frames}}</td>
     <td>{{semantics_work_stats.part_prop_frames}}</td>
semantics/management/commands/find_hanging_connections.py
@@ -16,7 +16,7 @@ class Command(BaseCommand):
 def find_hanging_connections():
     lemmas = Lemma.objects.filter(old=False).order_by('entry_obj__name')
     for lemma in lemmas:
-        frames = lemma.entry_obj.actual_frames()
+        frames = lemma.entry_obj.visible_frames()
         for frame in frames:
             for compl in frame.complements.all():
                 for real in compl.realizations.all():
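The consistency check now walks visible frames (own plus shared), so hanging connections in frames contributed by related entries are reported as well. As a standard Django management command it would be invoked as "python manage.py find_hanging_connections".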
semantics/validation.py
@@ -8,15 +8,15 @@ from semantics.utils import get_structural_matching_frame
 
 def validate_frames(lemma_id):
     lemma = Lemma.objects.get(id=lemma_id)
-    actual_frames = lemma.entry_obj.actual_frames()
+    visible_frames = lemma.entry_obj.visible_frames()
     error_msg = u''
-    for frame in actual_frames.all():
-        error_msg = frame_valid(lemma, frame, actual_frames)
+    for frame in visible_frames.all():
+        error_msg = frame_valid(lemma, frame, visible_frames)
         if error_msg:
             break
     return error_msg
 
-def frame_valid(lemma, frame, actual_frames):
+def frame_valid(lemma, frame, frames):
     error_msg = ''
     complements = frame.complements.all()
     if not arguments_exists(complements):
@@ -33,7 +33,7 @@ def frame_valid(lemma, frame, actual_frames):
         error_msg = u'Semantyka: Rama semantyczna %d zawiera argumenty bez zdefiniowanych preferencji selekcyjnych.' % frame.id
     elif not examples_added(frame):
         error_msg = u'Semantyka: Rama semantyczna %d nie ma dopiętych przykładów.' % frame.id
-    elif duplicates_exists(frame, actual_frames):
+    elif duplicates_exists(frame, frames):
         error_msg = u'Semantyka: Rama semantyczna %d posiada duplikaty.' % frame.id
     elif not schemas_reflex_agreed(lemma, frame):
         error_msg = u'Semantyka: Rama semantyczna %d ma dopięte elementy o niezgodnej zwrotności.' % frame.id
@@ -107,8 +107,12 @@ def examples_added(frame):
             return True
     return False
 
-def duplicates_exists(frame, actual_frames):
-    frames_to_check = actual_frames.exclude(id=frame.id)
+def duplicates_exists(frame, frames):
+    # frazeologicznych ram nie sprawdzamy
+    if frame.complements.filter(roles__role='Lemma').exists():
+        return False
+
+    frames_to_check = frames.exclude(id=frame.id)
     if get_structural_matching_frame(frames_to_check, frame):
         return True
     return False
@@ -236,7 +240,7 @@ def validate_schemas(lemma_id):
     return error_msg
 
 def all_schemas_used(lemma):
-    frames = lemma.entry_obj.actual_frames()
+    frames = lemma.entry_obj.visible_frames()
     schemas = lemma.frames
     for schema in schemas.all():
         if not schema_is_bad(lemma, schema) and not schema_used(schema, frames):
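Validation now runs over the entry's visible frames, so shared frames are checked as well, and the duplicate test exempts phraseological frames, per the new comment "frazeologicznych ram nie sprawdzamy" ("we do not check phraseological frames"). A minimal sketch of that guard, for a hypothetical frame object:

    def is_phraseologic(frame):
        # A frame is treated as phraseological when any complement carries the 'Lemma' role;
        # duplicates_exists() returns False for such frames before comparing structures.
        return frame.complements.filter(roles__role='Lemma').exists()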
semantics/views.py
@@ -92,10 +92,7 @@ def create_frames_context(lemma_id, user):
     frames_dict = {}
     frame_units = {}
 
-    frames = lemma.entry_obj.actual_frames()
-    for entry in connected:
-        new_frames = entry.actual_frames()
-        frames |= new_frames
+    frames = lemma.entry_obj.all_frames()
 
     for frame in frames:
         alternations[frame.id] = {}
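The semantics view builds its frame context from all_frames(), i.e. every current frame of the entry and of its related entries, while payments and validation use the narrower visible_frames(); the editing view therefore still shows frames whose lexical units are not among the entry's meanings. A small sketch of the containment this implies, for a hypothetical lemma object:

    shown = lemma.entry_obj.all_frames()        # what the semantics view displays
    counted = lemma.entry_obj.visible_frames()  # what payments and validation operate on

    assert set(counted.values_list('pk', flat=True)) <= set(shown.values_list('pk', flat=True))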