diff --git a/INSTALL_PL b/INSTALL_PL
index cbe27c5..1db79ba 100644
--- a/INSTALL_PL
+++ b/INSTALL_PL
@@ -1,23 +1,23 @@
 Instrukcja przeznaczona jest dla systemu operacyjnego Ubuntu.
 
 Zainstaluj pipa:
->> apt-get update
->> apt-get -y install python-pip
+>> sudo apt-get update
+>> sudo apt-get -y install python-pip
 
 Zainstaluj Django w wersji 1.4.8:
->> pip install Django==1.4.8
+>> sudo pip install Django==1.4.8
 
 Zainstaluj Django south:
->> apt-get install python-django-south
+>> sudo apt-get install python-django-south
 
 Zainstaluj Django extensions:
->> apt-get install python-django-extensions
+>> sudo apt-get install python-django-extensions
 
 Zainstaluj Django registration w wersji 0.8:
 >> sudo pip install django-registration==0.8
 
 Zainstaluj pythonowy moduł lxml:
->> apt-get install python-lxml
+>> sudo apt-get install python-lxml
 
 Zainstaluj Postgresa:
 >> sudo apt-get update
@@ -30,12 +30,12 @@ Stwórz pustą bazę danych dla Slowala:
 >> createdb slowal -E UTF8 -T template0 -l pl_PL.utf8
 Jeśli locale pl_PL.utf8 nie istnieje dodatkowo należy uruchomić komendy:
 >> sudo locale-gen pl_PL.utf8
->> service postgresql restart
+>> sudo service postgresql restart
 
 Załaduj dump bazy danych poleceniem (zrzut bazy umieszczony jest w archiwum INSTALL_PACK.zip, jeśli archiwum nie dołączono do niniejszej paczki sprawdź na stronie http://zil.ipipan.waw.pl/Slowal):
 >> psql slowal < obraz_bazy.db
 Zainstaluj gita:
->> apt-get install git
+>> sudo apt-get install git
 
 Sklonuj repozytorium gitowe z GitLaba:
 >> git clone http://git.nlp.ipipan.waw.pl/walenty/Slowal.git
@@ -69,10 +69,10 @@ Zgraj pliki statyczne do dedykowanego katalogu poleceniem:
 >> python manage.py collectstatic
 
 Zainstaluj Apacha:
->> apt-get install apache2
+>> sudo apt-get install apache2
 
 Zainstaluj mod-wsgi:
->> apt-get install libapache2-mod-wsgi
+>> sudo apt-get install libapache2-mod-wsgi
 
 Utwórz plik slowal.wsgi odpowiednio definiując w nim ścieżki do plików statycznych. Przykładowa treść pliku poniżej:
 --------------------------------------------
@@ -115,7 +115,7 @@ Skonfiguruj apacha dodając plik konfiguracyjny (np.
o nazwie slowal.conf) do fo -------------------------------------------- Uruchom stronę poleceniem: ->> a2ensite slowal.conf +>> sudo a2ensite slowal.conf Zrestartuj apacha: >> sudo service apache2 restart diff --git a/accounts/management/commands/get_payments_data.py b/accounts/management/commands/get_payments_data.py new file mode 100644 index 0000000..d637bfa --- /dev/null +++ b/accounts/management/commands/get_payments_data.py @@ -0,0 +1,312 @@ +# -*- coding:utf-8 -*- + +import codecs +import datetime + +from django.contrib.auth.models import User +from django.core.management.base import BaseCommand +from django.db.models import Sum + +from accounts.models import RealizedLemma, RealizedPhraseology, RealizedPhraseologyBinding, \ + RealizedSemantics +from dictionary.ajax_user_stats import get_used_bindings +from dictionary.models import Lemma + + +USERNAME = 'JakubS' +FUNCTION = 'Leksykograf' +POS = 'noun' +STARTDATE = datetime.datetime(2011, 1, 1, 00, 00) +ENDDATE = (datetime.datetime.now() - + datetime.timedelta(days=1)).replace(hour=23, minute=59, second=59) + + +class Command(BaseCommand): + args = 'none' + + def handle(self, **options): + get_payments_data(FUNCTION) + + +def get_payments_data(function): + start = STARTDATE.strftime('%Y%m%d') + end = ENDDATE.strftime('%Y%m%d') + payments_path = 'data/work_%s_%s_%s-%s.csv' % (USERNAME, FUNCTION, start, end) + payments_file = codecs.open(payments_path, 'wt', 'utf-8') + user = User.objects.get(username=USERNAME) + + if function == 'Semantyk': + work_stats = write_semantic_stats(payments_file, user, POS) + elif function == 'Superfrazeolog': + work_stats = write_superphraseologic_stats(payments_file, user, POS) + elif function == 'Frazeolog': + work_stats = write_phraseologic_stats(payments_file, user, POS) + elif function == 'Leksykograf': + work_stats = write_lexicographic_stats(payments_file, user, POS) + elif function == 'Superleksykograf': + work_stats = write_superlexicographic_stats(payments_file, user, POS) + total_earned_cash = work_stats['earned_cash'] + if total_earned_cash > 0.0: + payments_file.write(u'\n%s\t%.2f\n' % (user.username, + total_earned_cash)) + payments_file.close() + + +def write_superlexicographic_stats(payments_file, user, pos): + real_lemmas = RealizedLemma.objects.filter(user_stats__user=user, + lemma__entry_obj__pos__tag=pos, + date__gte=STARTDATE, + date__lte=ENDDATE, + status__type__sym_name='checked', + bonus=False) + + earned_cash = real_lemmas.aggregate(Sum('cash'))['cash__sum'] + if earned_cash == None: + earned_cash = 0.0 + + payments_file.write(u'Sprawdzone:\n') + for done_lemma in real_lemmas.order_by('date'): + payments_file.write(u'%s\t%.2f\t%s\n' % (done_lemma.lemma.entry_obj.name, + done_lemma.cash, + done_lemma.date.strftime('%Y%m%d'))) + + lex_work_stats = {'earned_cash': round(earned_cash, 2)} + return lex_work_stats + + +def write_semantic_stats(payments_file, user, pos): + + real_semantics = RealizedSemantics.objects.filter(user_stats__user=user, + date__gte=STARTDATE, + date__lte=ENDDATE, + entry__pos__tag=pos) + + earned_cash = real_semantics.filter(user_stats__user=user).aggregate(Sum('cash'))['cash__sum'] + if earned_cash == None: + earned_cash = 0.0 + + bonus_cash = real_semantics.filter(user_stats__user=user, + bonus=True).aggregate(Sum('cash'))['cash__sum'] + if bonus_cash == None: + bonus_cash = 0.0 + prop_frames = real_semantics.filter(user_stats__user=user).aggregate(Sum('prop_frames'))[ + 'prop_frames__sum'] + if prop_frames == None: + prop_frames = 0 + part_prop_frames = 
real_semantics.filter(user_stats__user=user).aggregate(Sum('part_prop_frames'))[ + 'part_prop_frames__sum'] + if part_prop_frames == None: + part_prop_frames = 0 + wrong_frames = real_semantics.filter(user_stats__user=user).aggregate(Sum('wrong_frames'))[ + 'wrong_frames__sum'] + if wrong_frames == None: + wrong_frames = 0 + corr_frames = real_semantics.filter(user_stats__user=user).aggregate(Sum('corr_frames'))[ + 'corr_frames__sum'] + if corr_frames == None: + corr_frames = 0 + part_corr_frames = real_semantics.filter(user_stats__user=user).aggregate(Sum('part_corr_frames'))[ + 'part_corr_frames__sum'] + if part_corr_frames == None: + part_corr_frames = 0 + ncorr_frames = real_semantics.filter(user_stats__user=user).aggregate(Sum('ncorr_frames'))[ + 'ncorr_frames__sum'] + if ncorr_frames == None: + ncorr_frames = 0 + made_frames = real_semantics.filter(user_stats__user=user).aggregate(Sum('made_frames'))[ + 'made_frames__sum'] + if made_frames == None: + made_frames = 0 + added_connections = real_semantics.filter(user_stats__user=user).aggregate(Sum('added_connections'))[ + 'added_connections__sum'] + if added_connections == None: + added_connections = 0 + efficacy = 0.0 + if prop_frames + wrong_frames > 0: + efficacy = float(prop_frames) / float(prop_frames + wrong_frames) * 100.0 + + payments_file.write(u'Wykonane:\n') + done_semantics = real_semantics.filter(bonus=False).order_by('date') + for done_sem in done_semantics: + done_cash = done_sem.cash + try: + done_bonus = real_semantics.get(bonus=True, entry=done_sem.entry).cash + done_cash += done_bonus + except RealizedSemantics.DoesNotExist: + pass + payments_file.write(u'%s\t%.2f\t%s\n' % (done_sem.entry.name, + done_cash, + done_sem.date.strftime('%Y%m%d'))) + + sem_work_stats = {'earned_cash': round(earned_cash, 2), + 'bonus_cash': round(bonus_cash, 2), + 'prop_frames': prop_frames, + 'part_prop_frames': part_prop_frames, + 'wrong_frames': wrong_frames, + 'corr_frames': corr_frames, + 'part_corr_frames': part_corr_frames, + 'checked_frames': ncorr_frames + corr_frames + part_corr_frames, + 'made_frames': made_frames, + 'efficacy': round(efficacy, 2), + 'added_connections': added_connections} + return sem_work_stats + + +def write_superphraseologic_stats(payments_file, user, pos): + added_bindings = RealizedPhraseologyBinding.objects.filter(user_stats__user=user, + date__gte=STARTDATE, + date__lte=ENDDATE) + used_bindings = get_used_bindings(added_bindings) + + checked_phraseology = RealizedPhraseology.objects.filter(user_stats__user=user, + date__gte=STARTDATE, + date__lte=ENDDATE, + bonus=False, + status__type__sym_name='checked_f', + lemma__entry_obj__pos__tag=pos) + + earned_cash_frames = checked_phraseology.aggregate(Sum('cash'))['cash__sum'] + if earned_cash_frames == None: + earned_cash_frames = 0.0 + earned_cash_bindings = used_bindings.aggregate(Sum('cash'))['cash__sum'] + if earned_cash_bindings == None: + earned_cash_bindings = 0.0 + earned_cash = earned_cash_frames + earned_cash_bindings + + phraseologic_empty_frame_value = 1.0 + empty_value = 0.0 + + payments_file.write(u'Sprawdzone:\n') + checked_phraseology = checked_phraseology.order_by('date') + for checked_phr in checked_phraseology: + cash = checked_phr.cash + if cash == 0.0: + cash = phraseologic_empty_frame_value + empty_value += phraseologic_empty_frame_value + payments_file.write(u'%s\t%.2f\t%s\n' % (checked_phr.lemma.entry_obj.name, + cash, + checked_phr.date.strftime('%Y%m%d'))) + earned_cash += empty_value + + payments_file.write(u'\n\nDodane powiazania 
frazeologiczne:\n') + for binding in used_bindings.order_by('date'): + payments_file.write(u'%s\t%.2f\t%s\n' % (binding.binded_entry.name, + binding.cash, + binding.date.strftime('%Y%m%d'))) + + + phraseology_work_stats = {'earned_cash': round(earned_cash, 2), + 'added_bindings': added_bindings.count(), + 'used_bindings': used_bindings.count()} + return phraseology_work_stats + + +def write_phraseologic_stats(payments_file, user, pos): + added_bindings = RealizedPhraseologyBinding.objects.filter(user_stats__user=user, + date__gte=STARTDATE, + date__lte=ENDDATE) + used_bindings = get_used_bindings(added_bindings) + + checked_and_done_phraseology = RealizedPhraseology.objects.filter(user_stats__user=user, + date__gte=STARTDATE, + date__lte=ENDDATE, + lemma__entry_obj__pos__tag=pos) + + done_phraseology = checked_and_done_phraseology.filter(status__type__sym_name='ready_f', + bonus=False) + + earned_cash_frames = done_phraseology.aggregate(Sum('cash'))['cash__sum'] + if earned_cash_frames == None: + earned_cash_frames = 0.0 + earned_cash_bindings = used_bindings.aggregate(Sum('cash'))['cash__sum'] + if earned_cash_bindings == None: + earned_cash_bindings = 0.0 + earned_cash = earned_cash_frames + earned_cash_bindings + + bonus_cash = checked_and_done_phraseology.filter(bonus=True).aggregate(Sum('cash'))['cash__sum'] + if bonus_cash == None: + bonus_cash = 0.0 + earned_cash += bonus_cash + + phraseologic_empty_frame_value = 1.0 + empty_value = 0.0 + + payments_file.write(u'Wykonane:\n') + for done_phr in done_phraseology.order_by('date'): + cash = done_phr.cash + if cash == 0.0: + cash = phraseologic_empty_frame_value + empty_value += phraseologic_empty_frame_value + try: + done_bonus = checked_and_done_phraseology.get(bonus=True, lemma__entry_obj=done_phr.lemma.entry_obj).cash + cash += done_bonus + except RealizedPhraseology.DoesNotExist: + pass + payments_file.write(u'%s\t%.2f\t%s\n' % (done_phr.lemma.entry_obj.name, + cash, + done_phr.date.strftime('%Y%m%d'))) + + payments_file.write(u'\n\nDodane powiazania frazeologiczne:\n') + for binding in used_bindings.order_by('date'): + payments_file.write(u'%s\t%.2f\t%s\n' % (binding.binded_entry.name, + binding.cash, + binding.date.strftime('%Y%m%d'))) + + earned_cash += empty_value + + phraseology_work_stats = {'earned_cash': round(earned_cash, 2), + 'added_bindings': added_bindings.count(), + 'used_bindings': used_bindings.count(),} + return phraseology_work_stats + + +def write_lexicographic_stats(payments_file, user, pos): + + real_lemmas = RealizedLemma.objects.filter(user_stats__user=user, + lemma__entry_obj__pos__tag=pos, + date__gte=STARTDATE, + date__lte=ENDDATE) + + earned_cash = real_lemmas.filter(status__type__sym_name='ready').aggregate(Sum('cash'))['cash__sum'] + if earned_cash == None: + earned_cash = 0.0 + + lemmas_to_erase_cash = 0.0 + lemmas_marked_to_erase = Lemma.objects.filter(owner=user, + old=False, + status__type__sym_name='erase', + entry_obj__pos__tag=pos) + + payments_file.write(u'Zaznaczone do usunięcia:\n') + for lemma in lemmas_marked_to_erase: + erase_date = lemma.status_history.order_by('-date')[0].date + if erase_date >= STARTDATE and erase_date <= ENDDATE: + payments_file.write(u'%s\t%.2f\t%s\n' % (lemma.entry_obj.name, + 1.0, + erase_date.strftime('%Y%m%d'))) + lemmas_to_erase_cash += 1.0 + earned_cash += lemmas_to_erase_cash + + bonus_cash = real_lemmas.filter(bonus=True).aggregate(Sum('cash'))['cash__sum'] + if bonus_cash == None: + bonus_cash = 0.0 + earned_cash += bonus_cash + + 
payments_file.write(u'\n\nWykonane:\n') + done_lemmas = real_lemmas.filter(bonus=False, + status__type__sym_name='ready').order_by('date') + for done_lemma in done_lemmas: + cash = done_lemma.cash + try: + bonus = real_lemmas.get(bonus=True, lemma__entry_obj=done_lemma.lemma.entry_obj).cash + cash += bonus + except RealizedLemma.DoesNotExist: + pass + payments_file.write(u'%s\t%.2f\t%s\n' % (done_lemma.lemma.entry_obj.name, + cash, + done_lemma.date.strftime('%Y%m%d'))) + + lex_work_stats = {'earned_cash': round(earned_cash, 2), + 'bonus_cash': round(bonus_cash, 2), + 'lemmas_to_erase_cash': round(lemmas_to_erase_cash, 2)} + return lex_work_stats diff --git a/accounts/models.py b/accounts/models.py index ff9dfa7..cb21b54 100644 --- a/accounts/models.py +++ b/accounts/models.py @@ -279,6 +279,9 @@ class RealizedSemantics(Model): # wykonane ramki (wypelniane dla semantyka) made_frames = PositiveIntegerField(db_column='wykonane_ramki', default=0) + # wspoldzielone ramki (wypelniane dla semantyka) + related_frames = PositiveIntegerField(db_column='wspoldzielone_ramki', + default=0) # poprawione ramki (wypelniane dla supersemantyka) corr_frames = PositiveIntegerField(db_column='poprawione_ramki', default=0) diff --git a/dictionary/ajax_lemma_status.py b/dictionary/ajax_lemma_status.py index 2a85574..1b3530a 100644 --- a/dictionary/ajax_lemma_status.py +++ b/dictionary/ajax_lemma_status.py @@ -117,10 +117,10 @@ def lemma_status_change(request, status_id, lemma_id): not lemma_obj.status.next_statuses.filter(pk=new_status.pk).exists()): raise AjaxError('wrong change') - actual_semantic_frames = SemanticFrame.objects.none() + visible_semantic_frames = SemanticFrame.objects.none() next_status = False if(new_status): - actual_semantic_frames = backup_lemma_and_get_frames(lemma_obj) + visible_semantic_frames = backup_lemma_and_get_frames(lemma_obj) if(new_status and new_status.priority > lemma_obj.status.priority): next_status = True @@ -223,8 +223,10 @@ def lemma_status_change(request, status_id, lemma_id): and next_status): ### naliczanie oplat za gotowosc semantyczna frame_value = 12.0 - update_sem_stats_ready_s(lemma_obj.entry_obj, actual_semantic_frames, - lemma_obj.semanticist, new_status, frame_value) + related_frame_value = 2.0 + update_sem_stats_ready_s(lemma_obj.entry_obj, visible_semantic_frames, + lemma_obj.semanticist, new_status, frame_value, + related_frame_value) add_new_frames_to_phraseologic_propositions(lemma_obj) changed = True # zmiana statusu hasla na sprawdzone semantycznie @@ -236,8 +238,8 @@ def lemma_status_change(request, status_id, lemma_id): part_bonus = 2.0 connection_bonus = 0.1 ### naliczanie oplat za sprawdzenie i bonusow - update_sem_stats_conf_s(entry=lemma_obj.entry_obj, - semantic_frames=actual_semantic_frames, + update_sem_stats_conf_s(entry=lemma_obj.entry_obj, + checked_sem_frames_backup=visible_semantic_frames, semanticist=lemma_obj.semanticist, supersemanticist=request.user, status=new_status, @@ -273,7 +275,7 @@ def lemma_status_change(request, status_id, lemma_id): lemma=lemma_obj, status=new_status) status_change.save() - status_change.semantic_frames.add(*actual_semantic_frames.all()) + status_change.semantic_frames.add(*visible_semantic_frames.all()) lemma_obj.status_history.add(status_change) if new_status: @@ -472,45 +474,59 @@ def update_lemma_stats_conf_f(lemma, phraseologist, superphraseologist, status, phraseologist.user_stats.phraseology_real_history.add(phraseologist_real_lemma) ####################### semantics ############################# -def 
update_sem_stats_ready_s(entry, semantic_frames, semanticist, status, frame_value): - actual_frames_count = semantic_frames.count() - sem_dict = {'made_frames': actual_frames_count, - 'cash': frame_value*float(actual_frames_count)} +def update_sem_stats_ready_s(entry, visible_semantic_frames, semanticist, status, + frame_value, related_frame_value): + actual_frames = entry.actual_frames() + actual_frames_count = actual_frames.count() - realized_semantics = RealizedSemantics(entry=entry, cash=sem_dict['cash'], - made_frames=sem_dict['made_frames'], + related_frames = entry.related_frames() + related_frames_count = related_frames.count() + + cash = frame_value*float(actual_frames_count) + related_frame_value*float(related_frames_count) + + + realized_semantics = RealizedSemantics(entry=entry, cash=cash, + made_frames=actual_frames_count, + related_frames=related_frames_count, status=status, bonus=False) realized_semantics.save() - realized_semantics.frames.add(*semantic_frames.all()) + realized_semantics.frames.add(*visible_semantic_frames.all()) semanticist.user_stats.semantics_real_history.add(realized_semantics) -def update_sem_stats_conf_s(entry, semantic_frames, semanticist, supersemanticist, status, - checked_frame_value, corrected_frame_value, - bonus_factor, part_bonus_factor, connection_bonus): +def update_sem_stats_conf_s(entry, checked_sem_frames_backup, semanticist, supersemanticist, status, + checked_frame_value, corrected_frame_value, + bonus_factor, part_bonus_factor, connection_bonus): ready_statuses = Lemma_Status.objects.filter(type__sym_name='ready_s') q_ready_statuses = [Q(status=ready_status) for ready_status in ready_statuses.all()] ready_semantics = RealizedSemantics.objects.filter(reduce(operator.or_, q_ready_statuses)) - ready_sem_frames= ready_semantics.get(entry=entry).frames - checked_sem_frames = semantic_frames - ready_to_checked_diffs = get_frames_differences(ready_sem_frames.all(), checked_sem_frames.all()) - checked_to_ready_diffs = get_frames_differences(checked_sem_frames.all(), ready_sem_frames.all()) + ready_sem_visible_frames = ready_semantics.get(entry=entry).frames + ready_sem_actual_frames = entry.filter_local(ready_sem_visible_frames) + ready_sem_related_frames = entry.filter_related(ready_sem_visible_frames) + + checked_sem_actual_frames = entry.actual_frames() + checked_sem_related_frames = entry.related_frames() + + actual_ready_to_checked_diffs = get_frames_differences(ready_sem_actual_frames.all(), checked_sem_actual_frames.all()) + actual_checked_to_ready_diffs = get_frames_differences(checked_sem_actual_frames.all(), ready_sem_actual_frames.all()) + + visible_ready_to_checked_diffs = get_frames_differences(ready_sem_visible_frames.all(), checked_sem_frames_backup.all()) - connections_amount = count_connections(ready_to_checked_diffs) - sem_cash = (bonus_factor*float(len(ready_to_checked_diffs['matching_frames'])) + - part_bonus_factor*float(len(ready_to_checked_diffs['part_matching_frames'])) + + connections_amount = count_connections(entry, visible_ready_to_checked_diffs) + sem_cash = (bonus_factor*float(len(actual_ready_to_checked_diffs['matching_frames'])) + + part_bonus_factor*float(len(actual_ready_to_checked_diffs['part_matching_frames'])) + connection_bonus*float(connections_amount)) - sem_dict = {'same_frames': len(ready_to_checked_diffs['matching_frames']), - 'part_same_frames': len(ready_to_checked_diffs['part_matching_frames']), - 'wrong_frames': len(ready_to_checked_diffs['missing_frames']), + sem_dict = {'same_frames': 
len(actual_ready_to_checked_diffs['matching_frames']), + 'part_same_frames': len(actual_ready_to_checked_diffs['part_matching_frames']), + 'wrong_frames': len(actual_ready_to_checked_diffs['missing_frames']), 'added_connections': connections_amount, 'cash': sem_cash} - supersem_cash = (float(len(checked_to_ready_diffs['missing_frames'])+len(checked_to_ready_diffs['part_matching_frames']))*corrected_frame_value + - float(len(ready_to_checked_diffs['matching_frames']))*checked_frame_value) - supersem_dict = {'same_frames': len(checked_to_ready_diffs['matching_frames']), - 'part_same_frames': len(checked_to_ready_diffs['part_matching_frames']), - 'redo_frames': len(checked_to_ready_diffs['missing_frames']), + supersem_cash = (float(len(actual_checked_to_ready_diffs['missing_frames'])+len(actual_checked_to_ready_diffs['part_matching_frames']))*corrected_frame_value + + float(len(actual_ready_to_checked_diffs['matching_frames']))*checked_frame_value) + supersem_dict = {'same_frames': len(actual_checked_to_ready_diffs['matching_frames']), + 'part_same_frames': len(actual_checked_to_ready_diffs['part_matching_frames']), + 'redo_frames': len(actual_checked_to_ready_diffs['missing_frames']), 'cash': supersem_cash} supersem_real_semantics = RealizedSemantics(entry=entry, @@ -521,7 +537,7 @@ def update_sem_stats_conf_s(entry, semantic_frames, semanticist, supersemanticis status=status, bonus=False) supersem_real_semantics.save() - supersem_real_semantics.frames.add(*semantic_frames.all()) + supersem_real_semantics.frames.add(*checked_sem_frames_backup.all()) supersemanticist.user_stats.semantics_real_history.add(supersem_real_semantics) sem_real_semantics = RealizedSemantics(entry=entry, @@ -533,15 +549,16 @@ def update_sem_stats_conf_s(entry, semantic_frames, semanticist, supersemanticis status=status, bonus=True) sem_real_semantics.save() - sem_real_semantics.frames.add(*semantic_frames.all()) + sem_real_semantics.frames.add(*checked_sem_frames_backup.all()) semanticist.user_stats.semantics_real_history.add(sem_real_semantics) -def count_connections(differences): - amount = 0 +def count_connections(entry, differences): + amount = 0 + schemata = entry.actual_schemata() for frame in differences['matching_frames']: - amount += frame.connected_schemata().count() + amount += frame.connected_schemata().filter(pk__in=schemata).count() for frame in differences['part_matching_frames']: - amount += frame.connected_schemata().count() + amount += frame.connected_schemata().filter(pk__in=schemata).count() return amount def remove_semantic_payments(entry): diff --git a/dictionary/ajax_lemma_view.py b/dictionary/ajax_lemma_view.py index 5ec4b8f..b472270 100644 --- a/dictionary/ajax_lemma_view.py +++ b/dictionary/ajax_lemma_view.py @@ -798,6 +798,8 @@ def relate_entries(request, lemma_id, preview_lemma_id): error, lemma, preview_lemma = check_if_selected_and_get(lemma_id, preview_lemma_id) if not error: error = check_if_can_relate(lemma, preview_lemma) + if not error: + error = check_if_has_rights_to_relate(lemma, request.user) if error: raise AjaxError(error) else: @@ -810,20 +812,42 @@ def check_if_can_relate(lemma, preview_lemma): error = 'same lemma id' elif lemma.entry_obj.pos.tag == preview_lemma.entry_obj.pos.tag: error = 'same part of speech' - return error + return error + +def check_if_has_rights_to_relate(lemma, user): + error = '' + if not user_can_modify(lemma, user): + error = 'can not edit' + return error def add_entries_relation(lemma, preview_lemma): lemma_entry, preview_lemma_entry = 
get_entries(lemma, preview_lemma) lemma_entry.rel_entries.add(preview_lemma_entry) preview_lemma_entry.rel_entries.add(lemma_entry) - + @ajax(method='post') def disrelate_entries(request, lemma_id, preview_lemma_id): error, lemma, preview_lemma = check_if_selected_and_get(lemma_id, preview_lemma_id) if not error: + error = check_if_has_rights_to_relate(lemma, request.user) + if not error: + error = check_if_share_sematic_frames(lemma, preview_lemma) + if error: + raise AjaxError(error) + else: cancel_entries_relation(request, lemma, preview_lemma) return {} +def check_if_share_sematic_frames(lemma, preview_lemma): + error = '' + lemma_visible_frames = lemma.entry_obj.visible_frames() + preview_visible_frames = preview_lemma.entry_obj.visible_frames() + + if (lemma_visible_frames.filter(pk__in=preview_visible_frames).exists() or + preview_visible_frames.filter(pk__in=lemma_visible_frames).exists()): + error = 'shared frames' + return error + def cancel_entries_relation(request, lemma, preview_lemma): lemma_entry, preview_lemma_entry = get_entries(lemma, preview_lemma) lemma_entry.rel_entries.remove(preview_lemma_entry) diff --git a/dictionary/ajax_user_stats.py b/dictionary/ajax_user_stats.py index 6028f66..aa9a345 100644 --- a/dictionary/ajax_user_stats.py +++ b/dictionary/ajax_user_stats.py @@ -239,6 +239,9 @@ def get_semantics_stats(user): ncorr_frames = RealizedSemantics.objects.filter(user_stats__user=user).aggregate(Sum('ncorr_frames'))['ncorr_frames__sum'] if ncorr_frames == None: ncorr_frames = 0 + related_frames = RealizedSemantics.objects.filter(user_stats__user=user).aggregate(Sum('related_frames'))['related_frames__sum'] + if related_frames == None: + related_frames = 0 made_frames = RealizedSemantics.objects.filter(user_stats__user=user).aggregate(Sum('made_frames'))['made_frames__sum'] if made_frames == None: made_frames = 0 @@ -257,6 +260,7 @@ def get_semantics_stats(user): 'corr_frames': corr_frames, 'part_corr_frames': part_corr_frames, 'checked_frames': ncorr_frames+corr_frames+part_corr_frames, + 'related_frames': related_frames, 'made_frames': made_frames, 'efficacy': round(efficacy, 2), 'added_connections' : added_connections} diff --git a/dictionary/management/commands/create_TEI_walenty.py b/dictionary/management/commands/create_TEI_walenty.py index 3b89749..a6fe739 100644 --- a/dictionary/management/commands/create_TEI_walenty.py +++ b/dictionary/management/commands/create_TEI_walenty.py @@ -5,38 +5,56 @@ import os import tarfile from django.core.management.base import BaseCommand +from optparse import make_option from dictionary.models import Lemma, Frame_Opinion_Value, \ - get_ready_statuses + get_statuses from dictionary.teixml import createteixml, write_phrase_types_expansions_in_TEI from settings import WALENTY_PATH class Command(BaseCommand): args = '<dict dict ...>' help = 'Get Walenty in TEI format.' 
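    # A minimal usage sketch for the options added below (example values only,
    # not part of the patch): '--min_status' must name an existing
    # LemmaStatusType.sym_name ('ready' and 'checked' both appear elsewhere in
    # this diff), '--start_date' takes the YYYY-MM-DD format from the help
    # text, and positional args still select vocabularies:
    #
    #   python manage.py create_TEI_walenty
    #   python manage.py create_TEI_walenty --min_status=checked --start_date=2017-01-01
    #   python manage.py create_TEI_walenty --min_status=checked some_vocabulary
    #
    # ('some_vocabulary' is a placeholder vocabulary name.)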
+ option_list = BaseCommand.option_list + ( + make_option('--min_status', + action='store', + type='string', + dest='min_status_type', + default='ready', + help='Minimum lemma status.'), + make_option('--start_date', + action='store', + type='string', + dest='start_date', + default='all', + help='Status change start date (format: YYYY-MM-DD).'), + + ) def handle(self, *args, **options): try: now = datetime.datetime.now().strftime('%Y%m%d') - vocab_names = list(args) vocab_names.sort() - if vocab_names: - filename_base = '%s_%s_%s' % ('walenty', '+'.join(vocab_names), now) - else: - filename_base = '%s_%s' % ('walenty', now) + + filename_base = self.create_filename_base(vocab_names, options, now) base_path = os.path.join(WALENTY_PATH, filename_base) outpath = base_path + '.xml' - ready_statuses = get_ready_statuses() + statuses = get_statuses(options['min_status_type']) lemmas = Lemma.objects.filter(old=False) if vocab_names: lemmas = lemmas.filter(vocabulary__name__in=vocab_names) - ready_lemmas = lemmas.filter(status__in=ready_statuses).order_by('entry_obj__name') + lemmas = lemmas.filter(status__in=statuses) + if options['start_date'] != 'all': + lemmas = self.filter_lemmas_by_status_change(lemmas, statuses, options['start_date']) + lemmas = lemmas.order_by('entry_obj__name') + + self.print_statistics(lemmas) frame_opinion_values = Frame_Opinion_Value.objects.all() - createteixml(outpath, ready_lemmas, frame_opinion_values) + createteixml(outpath, lemmas, frame_opinion_values) archive = tarfile.open(base_path + '-TEI.tar.gz', 'w:gz') phrase_types_expand_path = os.path.join(WALENTY_PATH, @@ -50,3 +68,49 @@ class Command(BaseCommand): archive.close() os.remove(outpath) os.remove(phrase_types_expand_path) + + def create_filename_base(self, vocab_names, options, now): + start_date = '' + if options['start_date'] != 'all': + start_date = '-' + options['start_date'].replace('-', '') + + vocab_names_str = '' + if vocab_names: + vocab_names_str = '-' + '+'.join(vocab_names) + + min_status = '' + if options['min_status_type'] != 'ready': + min_status = '-' + options['min_status_type'] + + filename_base = 'walenty%s%s%s_%s' % (min_status, vocab_names_str, + start_date, now) + return filename_base + + + def filter_lemmas_by_status_change(self, lemmas, statuses, start_date_str): + start_date = self.parse_date(start_date_str) + filtered_lemmas_pks = [] + for lemma in lemmas: + if lemma.status_history.filter(status=statuses[0], date__gte=start_date).exists(): + filtered_lemmas_pks.append(lemma.pk) + return lemmas.filter(pk__in=filtered_lemmas_pks) + + def parse_date(self, date_str): + date_parts = date_str.split('-') + year = int(date_parts[0]) + month = int(date_parts[1].lstrip('0')) + day = int(date_parts[2].lstrip('0')) + date = datetime.datetime(year, month, day, 00, 00) + return date + + def print_statistics(self, lemmas): + count = {'frames': 0, + 'arguments': 0} + for lemma in lemmas: + frames = lemma.entry_obj.actual_frames() + count['frames'] += frames.count() + for frame in frames.all(): + count['arguments'] += frame.complements.count() + print (u'Lemmas:\t%d' % lemmas.count()) + print (u'Frames:\t%d' % count['frames']) + print (u'Arguments:\t%d' % count['arguments']) diff --git a/dictionary/management/commands/get_examples.py b/dictionary/management/commands/get_examples.py index 6bbcb0d..b3da6b1 100644 --- a/dictionary/management/commands/get_examples.py +++ b/dictionary/management/commands/get_examples.py @@ -6,7 +6,8 @@ import os from django.core.management.base import BaseCommand -from 
dictionary.models import Lemma, get_ready_statuses +from dictionary.models import Lemma, NKJP_Example, get_ready_statuses, get_checked_statuses +from semantics.models import LexicalUnitExamples from settings import PROJECT_PATH BASE_PATH = os.path.join(PROJECT_PATH, 'data') @@ -28,8 +29,13 @@ class Command(BaseCommand): get_examples() def get_examples(): - ready_statuses = get_ready_statuses() - write_detailed_examples(ready_statuses) + write_example_sentences('semantyczne-S_sprawdzone-20170811.txt', ['(S) sprawdzone'], True) + write_example_sentences('wszystkie-S_sprawdzone-20170811.txt', ['(S) sprawdzone'], False) + + checked_names = [checked.status for checked in get_checked_statuses()] + write_example_sentences('wszystkie-sprawdzone-20170811.txt', checked_names, False) + # ready_statuses = get_ready_statuses() + # write_detailed_examples(ready_statuses) # write_examples(ready_statuses) def write_detailed_examples(statuses): @@ -76,4 +82,51 @@ def write_examples(statuses): examples_file.write('\t\t--> %s\n' % example.sentence) examples_file.write('\n\n') finally: - examples_file.close() + examples_file.close() + + +def write_example_sentences(filename, statuses, semantic): + try: + examples_file = codecs.open(os.path.join(BASE_PATH, filename), 'wt', 'utf-8') + for lemma in Lemma.objects.filter(old=False, entry_obj__pos__tag='verb').filter(status__status__in=statuses).order_by('entry_obj__name'): + print lemma + wrong_examples = lemma.nkjp_examples.filter(opinion__opinion=u'zły') + not_wanted_semantic_examples = get_not_needed_semantic_examples(lemma) + wanted_semantic_examples = get_wanted_semantic_examples(lemma) + + for example in lemma.nkjp_examples.filter(source__sym_name__in=['NKJP300M', 'NKJP1800M']): + if (lemma.frame_opinions.filter(frame=example.frame, value__value__in=[u'archaiczny', + u'zły']).exists()): + continue + + if semantic: + if (wanted_semantic_examples.filter(pk=example.pk).exists() and + not wrong_examples.filter(pk=example.pk).exists()): + examples_file.write(u'%s\n' % example.sentence) + else: + if (not not_wanted_semantic_examples.filter(pk=example.pk).exists() and + not wrong_examples.filter(pk=example.pk).exists()): + examples_file.write(u'%s\n' % example.sentence) + + finally: + examples_file.close() + + +def get_not_needed_semantic_examples(lemma): + not_needed_ids = [] + not_needed_frames = lemma.entry_obj.actual_frames().filter(opinion__value__in=[u'archaiczna', u'zła']) + for frame in not_needed_frames: + for lu in frame.lexical_units.all(): + for luex in LexicalUnitExamples.objects.filter(lexical_unit=lu): + not_needed_ids.append(luex.example.id) + return NKJP_Example.objects.filter(id__in=not_needed_ids) + + +def get_wanted_semantic_examples(lemma): + needed_ids = [] + needed_frames = lemma.entry_obj.actual_frames().exclude(opinion__value__in=[u'archaiczna', u'zła']) + for frame in needed_frames: + for lu in frame.lexical_units.all(): + for luex in LexicalUnitExamples.objects.filter(lexical_unit=lu): + needed_ids.append(luex.example.id) + return NKJP_Example.objects.filter(id__in=needed_ids) diff --git a/dictionary/models.py b/dictionary/models.py index f8a2b79..ec801ae 100644 --- a/dictionary/models.py +++ b/dictionary/models.py @@ -100,6 +100,10 @@ def get_checked_statuses(): def get_ready_statuses(): ready_type = LemmaStatusType.objects.get(sym_name='ready') return Lemma_Status.objects.filter(type__priority__gte=ready_type.priority).distinct() + +def get_statuses(min_status_type): + min_type = LemmaStatusType.objects.get(sym_name=min_status_type) + 
return Lemma_Status.objects.filter(type__priority__gte=min_type.priority).distinct() class LemmaStatusType(Model): @@ -1365,7 +1369,7 @@ class AspectRelationsGroup(Model): return ','.join(str_ls) class Entry(Model): - name = CharField(max_length=64, db_column='nazwa') # usunieto , unique=True + name = CharField(max_length=64, db_column='nazwa') # czesc mowy pos = ForeignKey('POS', db_column='czesc_mowy', related_name='entries') # powiazane lematy @@ -1391,18 +1395,50 @@ class Entry(Model): ('change_semantics', u'Może edytować semantykę.'), ('view_semantics', u'Może oglądać semantykę.'), ) - + + def related_frames(self): + visible = self.visible_frames() + actual = self.actual_frames() + return visible.exclude(pk__in=actual) + + def visible_frames(self): + frames = [] + act_frames = self.actual_frames() + for frame in self.all_frames(): + if act_frames.filter(pk=frame.pk).exists(): + frames.append(frame.pk) + else: + for lu in frame.lexical_units.all(): + if self.meanings.filter(pk=lu.pk).exists(): + frames.append(frame.pk) + break + return get_model('semantics', 'SemanticFrame').objects.filter(pk__in=frames) + + def all_frames(self): + frames = self.actual_frames() + for entry in self.rel_entries.all(): + new_frames = entry.actual_frames() + frames |= new_frames + return get_model('semantics', 'SemanticFrame').objects.filter(pk__in=frames) + def actual_frames(self): - # frame_ids = [] - # lexical_units = self.meanings.order_by('sense') - # for lexical_unit in lexical_units: - # frame_ids.extend([f.id for f in lexical_unit.actual_frames()]) - # return get_model('semantics', 'SemanticFrame').objects.filter(id__in=list(set(frame_ids))) return self.semantic_frames.filter(next__isnull=True, removed=False) + + def actual_schemata(self): + return self.lemmas.get(old=False).frames.all() + + def filter_local(self, frames): + return frames.filter(pk__in=self.semantic_frames.all()) + + def filter_related(self, frames): + return frames.exclude(pk__in=self.semantic_frames.all()) def matching_connections(self, schema, position, phrase_type): - frames = self.actual_frames() + matching_connections = [] + + frames = self.visible_frames() + for frame in frames: for compl in frame.complements.all(): matching_realizations = compl.realizations.filter(frame=schema, diff --git a/dictionary/saving.py b/dictionary/saving.py index 495f479..baae225 100644 --- a/dictionary/saving.py +++ b/dictionary/saving.py @@ -9,14 +9,17 @@ from wordnet.models import LexicalUnit def get_semantic_operations(lemma, schemata_conversions): connections = [] operations = [] - frames = lemma.entry_obj.actual_frames() + + frames = lemma.entry_obj.visible_frames() + for conv in schemata_conversions: schema_operations = get_reconnect_operations_and_extend_connections(frames, connections, conv['obj'], conv['js']) operations.extend(schema_operations) - operations.extend(get_disconnect_operations(frames, connections)) + operations.extend(get_disconnect_operations(lemma, frames, connections)) + return operations def get_reconnect_operations_and_extend_connections(frames, connections, schema, js_schema): @@ -75,20 +78,31 @@ def create_phrase_type_ref(schema, position, phrase_type, alternation): def create_operation(operation, arg_ref, phrase_type_ref): return {'operation': operation, 'arg': arg_ref, 'connect': phrase_type_ref} -def get_disconnect_operations(frames, connections): +def get_disconnect_operations(lemma, frames, connections): operations = [] + shared_schemata_ids = get_shared_schemata_ids(lemma) for frame in frames: for compl in 
frame.complements.all(): conn_dict = next((conn_dict for conn_dict in connections if conn_dict['compl'] == compl.id), None) for real in compl.realizations.all(): - if not conn_dict or not real.id in conn_dict['realizations']: - phrase_type_ref = create_phrase_type_ref(real.frame, real.position, - real.argument, real.alternation) - arg_ref = create_argument_ref(frame, compl) - operations.append(create_operation('disconnect', arg_ref, phrase_type_ref)) + if real.frame.id not in shared_schemata_ids: + if not conn_dict or not real.id in conn_dict['realizations']: + phrase_type_ref = create_phrase_type_ref(real.frame, real.position, + real.argument, real.alternation) + arg_ref = create_argument_ref(frame, compl) + operations.append(create_operation('disconnect', arg_ref, phrase_type_ref)) return operations +def get_shared_schemata_ids(lemma): + print lemma + ids = [f.id for f in lemma.frames.all()] + print ids + for connected in lemma.entry_obj.rel_entries.all(): + ids += [f.id for f in connected.actual_lemma().frames.all()] + print ids + return ids + def update_connections(lemma_id, reconnect_operations, user): modify_frames(lemma_id, reconnect_operations, user) @@ -114,4 +128,3 @@ def disconnect_example_operation(example_dict, example_obj): def reconnect_examples(lemma, operations): update_meanings(lemma.id, operations) - \ No newline at end of file diff --git a/dictionary/static/js/lemma-view.js b/dictionary/static/js/lemma-view.js index 69fd59d..e52ca22 100644 --- a/dictionary/static/js/lemma-view.js +++ b/dictionary/static/js/lemma-view.js @@ -205,41 +205,6 @@ function load_content(id) { loadSchemataAndExamples(); createSplitter('framesSplit','new-frame-tables', 'tabs'); - /*if(window.can_modify) - { - addSyntacticFramesPerm = user_has_perm('dictionary.add_syntactic_frames'); - addPhraseologicFramesPerm = user_has_perm('dictionary.add_phraseologic_frames'); - - if(addSyntacticFramesPerm || addPhraseologicFramesPerm) { - $(document).unbind('keydown'); - $(document).bind('keydown', 'shift+s', saveHandle); - $(document).bind('keydown', 'shift+z', function(evt){backOneModification(); return false; }); - $(document).bind('keydown', 'shift+y', function(evt){forwardOneModification(); return false; }); - $(document).bind('keydown', 'shift+a', function(evt){addElement(); return false; }); - $(document).bind('keydown', 'shift+r', function(evt){removeElement(); return false; }); - $(document).bind('keydown', 'shift+d', function(evt){duplicateElement(); return false; }); - $(document).bind('keydown', 'shift+c', function(evt){copyElement(); return false; }); - $(document).bind('keydown', 'shift+v', function(evt){pasteElement(); return false; }); - $(document).bind('keydown', 'shift+w', function(evt){validateSchemata(); return false; }); - if(addSyntacticFramesPerm) { - $(document).bind('keydown', 'shift+x', function(evt){cutElement(); return false; }); - $(document).bind('keydown', 'shift+m', function(evt){reserveLemma(); return false; }); - } - if(addPhraseologicFramesPerm) { - $(document).bind('keydown', 'shift+l', function(evt){addPhraseologicFrame(); return false; }); - $(document).bind('keydown', 'shift+b', function(evt){openAssignPhraseologicFrameDialog(); return false; }); - } - } - } - else - { - $(document).unbind('keydown'); - $.get(ajax_user_has_perm, {perm: 'dictionary.own_lemmas'}, function(result) { - if(result['has_perm']) { - $(document).bind('keydown', 'shift+m', function(evt){reserveLemma(); return false; }); - } - }); - }*/ if(document.getElementById("lemma_example_show")) { @@ 
-3527,8 +3492,11 @@ function restore_lemma() { return false; } - function relateEntries() - { + function relateEntries() { + if(semanticsChanged() || window.change) { + error_alert('Przed dodaniem relacji hasło musi zostać zapisane.'); + return false; + } if (confirm('Czy jesteś pewien, że chcesz powiazać hasło z zakładki "Schematy" z hasłem wybranym w zakładce "Podgląd hasła"?')) { ShowProgressAnimation(); $.ajaxJSON({ @@ -3541,6 +3509,7 @@ function restore_lemma() { callback: function(result) { $("button#prev_disrelate_entries").css("visibility", "visible"); + reloadFrames(window.lemma_id); HideProgressAnimation(); }, error_callback: function(xhr, status, error) { @@ -3562,9 +3531,13 @@ function restore_lemma() { HideProgressAnimation(); error_alert('Nie można powiązywać haseł reprezentujących tę samą część mowy.'); return false; - } - else - { + } + else if (result == 'can not edit') { + HideProgressAnimation(); + error_alert('Brak uprawnień do edycji hasła.'); + return false; + } + else { HideProgressAnimation(); return true; } @@ -3574,8 +3547,11 @@ function restore_lemma() { } } - function disrelateEntries() - { + function disrelateEntries() { + if(semanticsChanged() || window.change) { + error_alert('Przed usunięciem relacji hasło musi zostać zapisane.'); + return false; + } if (confirm('Czy jesteś pewien, że chcesz anulować relację hasła wybranego w zakładce "Schematy" z hasłem wybranym w zakładce "Podgląd hasła"?')) { ShowProgressAnimation(); $.ajaxJSON({ @@ -3585,8 +3561,9 @@ function restore_lemma() { lemma_id: window.lemma_id, preview_lemma_id: window.prev_lemma_id }, - callback: function(result) { + callback: function(result) { $("button#prev_disrelate_entries").css("visibility", "hidden"); + reloadFrames(window.lemma_id); HideProgressAnimation(); }, error_callback: function(xhr, status, error) { @@ -3594,8 +3571,22 @@ function restore_lemma() { error_alert(status + ': ' + error); }, bad_data_callback: function(result) { - HideProgressAnimation(); - return true; + if (result == 'lemma not selected') { + HideProgressAnimation(); + error_alert('Nie zaznaczono hasła w zakładce "Schematy" lub "Podglądzie hasła".'); + return false; + } else if (result == 'can not edit') { + HideProgressAnimation(); + error_alert('Brak uprawnień do edycji hasła.'); + return false; + } else if (result == 'shared frames') { + HideProgressAnimation(); + error_alert('Nie można anulować: Hasła współdzielą ramy semantyczne.'); + return false; + } else { + HideProgressAnimation(); + return true; + } }, }); return false; diff --git a/dictionary/templates/lemma_preview.html b/dictionary/templates/lemma_preview.html index ffdd06f..45280b2 100644 --- a/dictionary/templates/lemma_preview.html +++ b/dictionary/templates/lemma_preview.html @@ -164,11 +164,13 @@ function unselectPrevTd(id) <div id="prev_frames_modif"> - {% if perms.dictionary.add_syntactic_frames or perms.dictionary.add_phraseologic_frames %} - <div> - <button type="button" id="prev_copy" style="width:120px">Kopiuj</button> - </div> - {% if perms.dictionary.add_syntactic_frames %} + {% if perms.dictionary.add_syntactic_frames or perms.dictionary.add_phraseologic_frames or perms.dictionary.add_semantic_frames%} + {% if perms.dictionary.add_syntactic_frames or perms.dictionary.add_phraseologic_frames %} + <div> + <button type="button" id="prev_copy" style="width:120px">Kopiuj</button> + </div> + {% endif %} + {% if perms.dictionary.add_syntactic_frames or perms.dictionary.add_semantic_frames %} <div> <button type="button" id="prev_relate_entries" 
style="width:120px">Powiąż hasła</button> <button type="button" id="prev_disrelate_entries" style="width:140px; visibility:hidden;">Anuluj powiązanie</button> diff --git a/dictionary/templates/lemma_view.html b/dictionary/templates/lemma_view.html index c928760..7b5f87c 100644 --- a/dictionary/templates/lemma_view.html +++ b/dictionary/templates/lemma_view.html @@ -44,7 +44,7 @@ <span class="ui-icon ui-icon-columns">pokaż/ukryj</span> </button> {% endif %} - {% if perms.dictionary.change_lemmas %} + {% if perms.dictionary.change_lemmas or perms.dictionary.change_semantics %} <button id="reset-similar-button" title="ukryj podobne"> <span class="ui-icon ui-icon-closethick">ukryj podobne</span> </button> @@ -63,7 +63,6 @@ <ul> <li id="refresh_frames"><a href="#new_frames">{% trans "Schematy" %} [<span id="new-frames-count"></span>]</a></li> <li><a href="#semantics">{% trans "Semantyka" %} [<span id="semantic-frames-count"></span>]</a></li> - <!-- li><a href="#old_frames">{% trans "Stare schematy" %}</a></li --> {% if perms.dictionary.add_notes %} <li><a href="#notes">{% trans "Notatki" %} [<span id="lemma-notes-count"></span>]</a></li> {% endif %} @@ -73,6 +72,7 @@ <li><a href="#change_ctrl">{% trans "Kontrola zmian" %}</a></li> <li><a href="#status">{% trans "Status" %}</a></li> {% elif perms.dictionary.change_semantics %} + <li><a href="#preview_lemma">{% trans "Podgląd hasła" %}</a></li> <li><a href="#status">{% trans "Status" %}</a></li> {% endif %} <li id="lemma_desc" style="float:right;"></li> @@ -95,6 +95,8 @@ <div id="status"> </div> {% elif perms.dictionary.change_semantics %} + <div id="preview_lemma"> + </div> <div id="status"> </div> {% endif %} diff --git a/dictionary/templates/sel_user_stats.html b/dictionary/templates/sel_user_stats.html index 0e80e21..927b884 100644 --- a/dictionary/templates/sel_user_stats.html +++ b/dictionary/templates/sel_user_stats.html @@ -123,12 +123,13 @@ <table class='PaymentsTable'> <tr> <td class='EmptyCell' colspan=1></td> - <td class='ColumnHeader' colspan=7>Semantycy:</td> + <td class='ColumnHeader' colspan=8>Semantycy:</td> <td class='ColumnHeader' colspan=3>Supersemantycy:</td> </tr> <tr> <td class='ColumnHeader'>Kwota za wykonaną pracę:</td> <td class='ColumnHeader'>Bonus:</td> + <td class='ColumnHeader'>Współdzielone ramy:</td> <td class='ColumnHeader'>Wykonane ramy:</td> <td class='ColumnHeader'>Poprawnie wykonane ramy:</td> <td class='ColumnHeader'>Częściowo poprawnie wykonane ramy:</td> @@ -142,6 +143,7 @@ <tr> <td>{{semantics_work_stats.earned_cash}} zł</td> <td>{{semantics_work_stats.bonus_cash}} zł</td> + <td>{{semantics_work_stats.related_frames}}</td> <td>{{semantics_work_stats.made_frames}}</td> <td>{{semantics_work_stats.prop_frames}}</td> <td>{{semantics_work_stats.part_prop_frames}}</td> diff --git a/semantics/management/commands/find_hanging_connections.py b/semantics/management/commands/find_hanging_connections.py index 25a8b02..a645a58 100644 --- a/semantics/management/commands/find_hanging_connections.py +++ b/semantics/management/commands/find_hanging_connections.py @@ -16,7 +16,7 @@ class Command(BaseCommand): def find_hanging_connections(): lemmas = Lemma.objects.filter(old=False).order_by('entry_obj__name') for lemma in lemmas: - frames = lemma.entry_obj.actual_frames() + frames = lemma.entry_obj.visible_frames() for frame in frames: for compl in frame.complements.all(): for real in compl.realizations.all(): diff --git a/semantics/phraseology_generator.py b/semantics/phraseology_generator.py index c47b6e5..e05bc25 100644 
--- a/semantics/phraseology_generator.py +++ b/semantics/phraseology_generator.py @@ -124,7 +124,7 @@ def get_nps(cases, number, nouns, atr): filtered = [] for option in options: (orth, tag) = option - if u':' + case in tag: + if u':' + case in tag or u'.' + case in tag: filtered.append(option) options_temp += filtered else: diff --git a/semantics/static/js/semantics_connections.js b/semantics/static/js/semantics_connections.js index c6fb428..ebd8335 100644 --- a/semantics/static/js/semantics_connections.js +++ b/semantics/static/js/semantics_connections.js @@ -1,6 +1,15 @@ var connected = {}; // dictionaries of connections and disconnections between frames and schemas var connected_reverse = {}; +function getConnected(frame_id) { /* TODO */ + return []; +} + +function removeFrameConnections(frame_id) { /* TODO */ + return; +} + + function memorizeConnections(arguments_connected, frames_connection){ connected = arguments_connected; connected_reverse = frames_connection; diff --git a/semantics/static/js/semantics_frames.js b/semantics/static/js/semantics_frames.js index 65c02ce..a61b8e5 100644 --- a/semantics/static/js/semantics_frames.js +++ b/semantics/static/js/semantics_frames.js @@ -4,23 +4,6 @@ var free_complement_id = -1; var free_frame_id = -1; var free_preference_id = -1; var semantic_opinion_vals = []; -var connected_entries = [] - - -function getConnected(frames_display) { - var i, j; - for (i = 0; i < frames_display.length; i++) { - lexical_units_num.push(frames_display[i].lexical_units); - lexical_units_frames[i] = []; - - var frames = frames_display[i].frames; - for (j = 0; j < frames.length; j++) { - frame_content[frames[j].frame_id] = frames[j]; - frame_localization[frames[j].frame_id] = {"units": i, "position": j}; - lexical_units_frames[i].push(frames[j].frame_id); - } - } -} function selectedFrame() { return "frame_" + highlighted_id + "_"; @@ -74,6 +57,7 @@ function getDisplay(visibility, checkboxes) { display = ''; var i, j; for ( i = 0; i < lexical_units_num.length; i++) { + if (lexical_units_frames[i].length > 0){ var visible = frame_content[lexical_units_frames[i][0]].visible; if (visible == visibility) { display += '<div id="lus_' + i + '_">'; @@ -121,11 +105,25 @@ function getDisplay(visibility, checkboxes) { display += '</div>'; display += '</div>'; - } + } + } } return display; } +function reloadFrames(lid){ + $.getJSON(ajax_frames, {lemma_id: lid}, function(data){ + lexical_units_frames = []; + getFrames(data.frames_display); + memorizeConnections(data.connections.connected, + data.connections.connected_reverse); + alternationCounts(data.alternations); + displayFrames(); + $("#semantic-frames-count").empty(); + $("#semantic-frames-count").append(data.frames_count); + }); +} + function displayFrames(){ $("#frames").html(getDisplay(true, false)); } @@ -204,7 +202,7 @@ function newFrame(units) { } lexical_units_num.push(units_list); - frame_content[free_frame_id] = {colspan: "1", rowspan: "1", status: "brak", frame_id: "" + x, display: {preferences:[[]], roles:[]}, lemma: {include: false}}; + frame_content[free_frame_id] = {colspan: "1", rowspan: "1", status: "brak", frame_id: "" + x, display: {preferences:[[]], roles:[]}, lemma: {include: false}, local:true, visible:true}; frames_operations.push({operation: "create_frame", meanings: units_list, id: x}); free_frame_id = free_frame_id - 1; } diff --git a/semantics/static/js/semantics_lexical_units.js b/semantics/static/js/semantics_lexical_units.js index ea4e33e..070febd 100644 --- 
a/semantics/static/js/semantics_lexical_units.js +++ b/semantics/static/js/semantics_lexical_units.js @@ -4,6 +4,7 @@ var free_luid = -1; // these ids will be temporarely given to new le var free_sense; // what sense should be given to new lexical unit var lexical_units_frames = []; var lexical_units_num = []; +var part_of_speech; // store lexical units from database function memorizeLexicalUnits(input_lexical_units) { @@ -18,8 +19,10 @@ function memorizeLexicalUnits(input_lexical_units) { function basicLexicalUnitsData(info){ base = info.base; free_sense = info.sense; + part_of_speech = info.pos; } + // create new lexical_unit function createLexicalUnit(refl, glossa, relation, to) { @@ -34,7 +37,7 @@ function createLexicalUnit(refl, glossa, relation, to) { refl_text = ""; } - var lu = {base: base + refl_text, glossa: "" + glossa, definition: "", id: free_luid, luid: -1, refl: refl, glossa: glossa, pos: "czasownik", sense: free_sense, relation: relation, to: to, location: ""}; + var lu = {base: base + refl_text, glossa: "" + glossa, definition: "", id: free_luid, luid: -1, refl: refl, glossa: glossa, pos: part_of_speech, sense: free_sense, relation: relation, to: to, location: ""}; var operation = {operation: 'add_unit', unit:lu}; lexical_units.push(lu); lexical_unit_examples[free_luid] = [] @@ -225,18 +228,20 @@ function getMeaningsSelectionForFrame(frame_id) { sid_alt = rows[j].split('_'); var sch = "schema_" + sid_alt[0] + "_"; var k; - for (k = 0; k < schemas_content[sch].display.arguments[0].length; k++) { - var proper = schemas_content[sch].display.arguments[0][k].csv_id + "alt_" + sid_alt[1] + "_"; - if (connected[lem].indexOf(proper) != -1) { - if (schemas_content[sch].display.arguments[0][k].vrb != null && - schemas_content[sch].display.arguments[0][k].vrb.length > 0) { - pre = pre.concat(schemas_content[sch].display.arguments[0][k].lex); - vrb = schemas_content[sch].display.arguments[0][k].vrb; - } else { - options.push(schemas_content[sch].display.arguments[0][k].lex); - } - } - } + if (typeof(schemas_content[sch]) != 'undefined'){ + for (k = 0; k < schemas_content[sch].display.arguments[0].length; k++) { + var proper = schemas_content[sch].display.arguments[0][k].csv_id + "alt_" + sid_alt[1] + "_"; + if (connected[lem].indexOf(proper) != -1) { + if (schemas_content[sch].display.arguments[0][k].vrb != null && + schemas_content[sch].display.arguments[0][k].vrb.length > 0) { + pre = pre.concat(schemas_content[sch].display.arguments[0][k].lex); + vrb = schemas_content[sch].display.arguments[0][k].vrb; + } else { + options.push(schemas_content[sch].display.arguments[0][k].lex); + } + } + } + } if (vrb.length == 0) { var lex = {lemma: [base], pre: pre, args: options}; if (hasRefl(sch)) { @@ -441,7 +446,7 @@ function getLexicalUnit(luid) { function addPhraseologicalUnit(mwe, glossa, relation, to) { - var lu = {base: mwe, glossa: "" + glossa, definition: "", id: free_luid, luid: -1, refl: "false", glossa: glossa, pos: "czasownik", sense: "A", relation: relation, to: to, location: ""}; + var lu = {base: mwe, glossa: "" + glossa, definition: "", id: free_luid, luid: -1, refl: "false", glossa: glossa, pos: part_of_speech, sense: "A", relation: relation, to: to, location: ""}; var operation = {operation: 'add_unit', unit:lu}; lexical_units.push(lu); lexical_unit_examples[free_luid] = []; @@ -451,7 +456,7 @@ function addPhraseologicalUnit(mwe, glossa, relation, to) { } function unlockPhraseologicalUnit(mwe) { - var lu = {base: mwe.lu.split('-')[0], glossa: "", definition: "", id: mwe.id, 
luid: mwe.luid, refl: false, glossa: "", pos: "czasownik", sense: mwe.lu.split('-')[1], relation: 2, to: -1, location: ""}; + var lu = {base: mwe.lu.split('-')[0], glossa: "", definition: "", id: mwe.id, luid: mwe.luid, refl: false, glossa: "", pos: part_of_speech, sense: mwe.lu.split('-')[1], relation: 2, to: -1, location: ""}; lexical_units.push(lu); return mwe.id; } diff --git a/semantics/static/js/semantics_view.js b/semantics/static/js/semantics_view.js index fd3261b..ae89dbe 100644 --- a/semantics/static/js/semantics_view.js +++ b/semantics/static/js/semantics_view.js @@ -56,7 +56,7 @@ function changeSynsetInput() { } function openMeaningsMenu() { - if(window.lemma_id != getActualLemmaId(window.lemma_id)){ + if(window.lemma_id != getActualLemmaId(window.lemma_id)){ alertOldSchemas(); } else { @@ -185,7 +185,7 @@ function displayMeanings() { e.preventDefault(); if (parseInt(f.relation) != 2) { $.prompt.removeState('state12'); - $.prompt.addState('state12', {title: 'Znaczenia', html: getFormContent(f) + getRelation(f) + "w stosunku do:<br />" + getSynsets(f.context, "czasownik"), buttons: {Wstecz: -1, Anuluj: 0, Zatwierdź: 1}, focus: 1, submit: submitSynsetSelection}, 'state11'); + $.prompt.addState('state12', {title: 'Znaczenia', html: getFormContent(f) + getRelation(f) + "w stosunku do:<br />" + getSynsets(f.context, part_of_speech), buttons: {Wstecz: -1, Anuluj: 0, Zatwierdź: 1}, focus: 1, submit: submitSynsetSelection}, 'state11'); $.prompt.goToState('state12'); } else { createLexicalUnit(f.refl, f.glossa, f.relation, -1); @@ -207,12 +207,16 @@ function displayMeanings() { var getFormContent = function(f) { - var result = '<label>Zwrotny <input type="checkbox" name="refl" value="true" disabled'; + var result = ''; + if (part_of_speech == 'czasownik') { + result += '<label>Zwrotny <input type="checkbox" name="refl" value="true" disabled'; if (f.refl == 'true') { result += ' checked'; - } - result += '></label><br /><label>Glossa <input type="text" name="glossa" value="' + f.glossa + '" disabled></label><br />'; - return result; + } + result += '></label><br />'; + } + result += '<label>Glossa <input type="text" name="glossa" value="' + f.glossa + '" disabled></label><br />'; + return result; }; var getRelation = @@ -262,6 +266,16 @@ function displayMeanings() { } }; + var addMeaningsHtml = + function() { + var result = ''; + if (part_of_speech == 'czasownik') { + result += '<label>Zwrotny <input type="checkbox" name="refl" value="true"></label><br />'; + } + result += '<label>Glossa <input type="text" name="glossa" value=""></label><br />'; + return result; + }; + var display_meanings = { state0: { title: 'Znaczenia', @@ -272,8 +286,7 @@ function displayMeanings() { }, state1: { title: 'Dodawanie znaczenia', - html: '<label>Zwrotny <input type="checkbox" name="refl" value="true"></label><br />'+ - '<label>Glossa <input type="text" name="glossa" value=""></label><br />', + html: addMeaningsHtml(), buttons: { "Anuluj": -1, "Potwierdź": 1 }, focus: 1, submit:function(e,v,m,f){ @@ -339,7 +352,13 @@ function displayMeanings() { if (change == true) { alertSemantics(); } else { - $.prompt(display_meanings); + $.prompt(display_meanings, + {close:function(e){ + window.units_operations = []; + $.getJSON(ajax_units, {lemma_id: window.lemma_id}, function(data){ + memorizeLexicalUnits(data.lexical_units); + basicLexicalUnitsData(data.informations); + })}}); } } @@ -529,7 +548,7 @@ function changeLexicalUnits() { { title: 'Znaczenia', html: '<label>Glossa <input type="text" name="glossa" value="' 
@@ -529,7 +548,7 @@ function changeLexicalUnits() {
       {
         title: 'Znaczenia',
         html: '<label>Glossa <input type="text" name="glossa" value="' + gloss + '" disabled></label><br />' +
-              getRelation(f) + "w stosunku do:<br />" + getSynsets(f.context, "czasownik"),
+              getRelation(f) + "w stosunku do:<br />" + getSynsets(f.context, part_of_speech),
         buttons: {Wstecz: -1, Anuluj: 0, Zatwierdź: 1},
         focus: 1,
         submit: addPhraseology
@@ -818,7 +837,7 @@ function removeFromFrame() {
 
   // highlighted_id = "26"
   removeFrame(highlighted_id);
-  
+
   frameClick("frame_" + highlighted_id + "_");
 
   displayFrames();
@@ -936,7 +955,10 @@ function frameClick(clicked_id) {
   } else {
     if (clicked_id.split('_').length == 3) { // frame click
       var frame_id = clicked_id.split('_')[1];
-      var local = frame_content[frame_id].local;
+      var local = true;
+      if (frame_content[frame_id] != null){
+        local = frame_content[frame_id].local;
+      }
 
       if (highlighted_id != "") {
         deselect();
@@ -972,7 +994,10 @@ function frameClick(clicked_id) {
       }
     } else if (clicked_id.split('_')[4] == 'lemma') { // part of lemma click
       frame_id = clicked_id.split('_')[1]
-      var local = frame_content[frame_id].local;
+      var local = true;
+      if (frame_content[frame_id] != null){
+        local = frame_content[frame_id].local;
+      }
 
       if (highlighted_id != "") {
         deselect();
@@ -1010,7 +1035,10 @@ function frameClick(clicked_id) {
       }
     } else { // argument click
       frame_id = clicked_id.split('_')[1]
-      var local = frame_content[frame_id].local;
+      var local = true;
+      if (frame_content[frame_id] != null){
+        local = frame_content[frame_id].local;
+      }
 
       if (highlighted_id != "") {
         deselect();
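All three frameClick branches now guard against a missing frame_content entry and default to treating the frame as local; entries can be absent because create_frames_context() (in semantics/views.py below) now also emits frames taken over from connected entries. A behavioral sketch of the guard, written in Python for brevity and assuming frame_content is a plain id-to-metadata map:
--------------------------------------------
# Sketch of the JS guard above, under the assumption that frame_content
# maps frame ids to dicts like {'local': bool, ...} and may lack entries
# for imported frames.
def is_local_frame(frame_content, frame_id):
    entry = frame_content.get(frame_id)
    # Missing entry: fall back to True, mirroring "var local = true;" above.
    return True if entry is None else entry['local']

# Usage sketch:
print(is_local_frame({'7': {'local': False}}, '7'))  # False
print(is_local_frame({}, '7'))                       # True (defaulted)
--------------------------------------------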
diff --git a/semantics/validation.py b/semantics/validation.py
index 641565e..73e5309 100644
--- a/semantics/validation.py
+++ b/semantics/validation.py
@@ -8,15 +8,15 @@ from semantics.utils import get_structural_matching_frame
 
 def validate_frames(lemma_id):
     lemma = Lemma.objects.get(id=lemma_id)
-    actual_frames = lemma.entry_obj.actual_frames()
+    visible_frames = lemma.entry_obj.visible_frames()
     error_msg = u''
-    for frame in actual_frames.all():
-        error_msg = frame_valid(lemma, frame, actual_frames)
+    for frame in visible_frames.all():
+        error_msg = frame_valid(lemma, frame, visible_frames)
         if error_msg:
             break
     return error_msg
 
-def frame_valid(lemma, frame, actual_frames):
+def frame_valid(lemma, frame, frames):
     error_msg = ''
     complements = frame.complements.all()
     if not arguments_exists(complements):
@@ -31,9 +31,9 @@ def frame_valid(lemma, frame, actual_frames):
         error_msg = u'Semantyka: Rama semantyczna %d zawiera argumenty, które nie są powiązane z żadnym schematem.' % frame.id
     elif not preferences_selected(complements):
         error_msg = u'Semantyka: Rama semantyczna %d zawiera argumenty bez zdefiniowanych preferencji selekcyjnych.' % frame.id
-    elif not examples_added(frame):
+    elif not examples_added(lemma, frame):
         error_msg = u'Semantyka: Rama semantyczna %d nie ma dopiętych przykładów.' % frame.id
-    elif duplicates_exists(frame, actual_frames):
+    elif duplicates_exists(frame, frames):
         error_msg = u'Semantyka: Rama semantyczna %d posiada duplikaty.' % frame.id
     elif not schemas_reflex_agreed(lemma, frame):
         error_msg = u'Semantyka: Rama semantyczna %d ma dopięte elementy o niezgodnej zwrotności.' % frame.id
@@ -101,14 +101,20 @@ def preference_valid(complement):
             return True
     return False
 
-def examples_added(frame):
+def examples_added(lemma, frame):
+    local_examples = lemma.nkjp_examples.all()
     for lexical_unit in frame.lexical_units.all():
-        if LexicalUnitExamples.objects.filter(lexical_unit=lexical_unit).exists():
+        if LexicalUnitExamples.objects.filter(lexical_unit=lexical_unit,
+                                              example__in=local_examples).exists():
             return True
     return False
 
-def duplicates_exists(frame, actual_frames):
-    frames_to_check = actual_frames.exclude(id=frame.id)
+def duplicates_exists(frame, frames):
+    # phraseological frames are not checked
+    if frame.complements.filter(roles__role='Lemma').exists():
+        return False
+
+    frames_to_check = frames.exclude(id=frame.id)
     if get_structural_matching_frame(frames_to_check, frame):
         return True
     return False
@@ -236,7 +242,7 @@ def validate_schemas(lemma_id):
     return error_msg
 
 def all_schemas_used(lemma):
-    frames = lemma.entry_obj.actual_frames()
+    frames = lemma.entry_obj.visible_frames()
     schemas = lemma.frames
     for schema in schemas.all():
         if not schema_is_bad(lemma, schema) and not schema_used(schema, frames):
@@ -282,4 +288,3 @@ def hanging_meaning(lexical_unit):
     if lexical_unit.luid < 0 and not lexical_unit.actual_frames().exists():
         return True
     return False
-    
\ No newline at end of file
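Two behavioral changes in this file are easy to miss: examples_added() now only accepts examples drawn from the lemma's own NKJP examples, and duplicates_exists() skips phraseological frames (those realizing a 'Lemma' role). A minimal usage sketch of the validation entry point; the lemma id is hypothetical:
--------------------------------------------
# Usage sketch: validate_frames() returns an empty string on success or the
# first (Polish) error message, e.g.
# u'Semantyka: Rama semantyczna 7 posiada duplikaty.'
from semantics.validation import validate_frames

error_msg = validate_frames(lemma_id=12345)  # hypothetical id
if error_msg:
    print(error_msg)
--------------------------------------------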
diff --git a/semantics/views.py b/semantics/views.py
index b88f923..ca22d97 100644
--- a/semantics/views.py
+++ b/semantics/views.py
@@ -92,10 +92,7 @@ def create_frames_context(lemma_id, user):
     frames_dict = {}
     frame_units = {}
 
-    frames = lemma.entry_obj.actual_frames()
-    for entry in connected:
-        new_frames = entry.actual_frames()
-        frames |= new_frames
+    frames = lemma.entry_obj.all_frames()
 
     for frame in frames:
         alternations[frame.id] = {}
@@ -129,11 +126,13 @@ def create_frames_context(lemma_id, user):
             lemma_entry = lemma.entry_obj
             if (lu_entry.name, lu_entry.pos.tag) == (lemma_entry.name, lemma_entry.pos.tag):
                 frame_display["visible"] = True
+
         for frame in type_frames[t]:
             frame_entry = frame.entry
             lemma_entry = lemma.entry_obj
             if (frame_entry.name, frame_entry.pos.tag) == (lemma_entry.name, lemma_entry.pos.tag):
                 frame_display["local"] = True
+                frame_display["visible"] = True
             else:
                 frame_display["local"] = False
 
@@ -223,6 +222,8 @@ def create_frames_context(lemma_id, user):
             else:
                 lemma_info = {"include": False}
             frame_display["frames"].append({"frame_id": str(frame.id), "colspan": str(max(len(frame_roles), 1)), "rowspan": str(frame_preferences_rowspan), "status": status, "display": display, "lemma": lemma_info})
+
+            schemata_ids = [f.id for f in lemma.frames.all()]
 
             for complement, complement_class in zip(frame_complements, frame_ids):
                 if complement_class not in complement_arguments:
@@ -230,29 +231,40 @@ def create_frames_context(lemma_id, user):
 
                 for schema_position in complement.realizations.all():
                     schema = schema_position.frame
-                    position = schema_position.position
-                    argument = schema_position.argument
-                    alternation = schema_position.alternation
-                    realization_id = u'schema_' + str(schema.id) + u'_pos_' + str(position.id) + '_arg_' + str(argument.id) + '_' + 'alt_' + str(alternation) + '_'
-                    complement_arguments[complement_class].append(realization_id)
-                    if realization_id not in arguments_frame_connected:
-                        arguments_frame_connected[realization_id] = []
-                    arguments_frame_connected[realization_id].append('frame_' + str(frame.id) + '_')
-                    if schema.id in alternations[frame.id]:
-                        alternations[frame.id][schema.id] = max(alternations[frame.id][schema.id], alternation)
-                    else:
-                        alternations[frame.id][schema.id] = alternation
-                    # alternations[frame.id] = {}
+                    if schema.id in schemata_ids:
+                        position = schema_position.position
+                        argument = schema_position.argument
+                        alternation = schema_position.alternation
+                        realization_id = u'schema_' + str(schema.id) + u'_pos_' + str(position.id) + '_arg_' + str(argument.id) + '_' + 'alt_' + str(alternation) + '_'
+                        complement_arguments[complement_class].append(realization_id)
+                        if realization_id not in arguments_frame_connected:
+                            arguments_frame_connected[realization_id] = []
+                        arguments_frame_connected[realization_id].append('frame_' + str(frame.id) + '_')
+                        if schema.id in alternations[frame.id]:
+                            alternations[frame.id][schema.id] = max(alternations[frame.id][schema.id], alternation)
+                        else:
+                            alternations[frame.id][schema.id] = alternation
+                        # alternations[frame.id] = {}
 
         frames_display.append(frame_display)
 
 #    ala["ma"] = "kot"
+
+    frames_count_local = 0
+    frames_count_imported = 0
+    for frame in frames_display:
+        if frame['visible']:
+            if frame['local']:
+                frames_count_local += 1
+            else:
+                frames_count_imported += 1
+    frames_count = str(frames_count_local) + "+" + str(frames_count_imported)
 
     context = {
         'frames_display': frames_display,
         'connections': {'connected': complement_arguments, 'connected_reverse': arguments_frame_connected},
-        'frames_count': lemma.entry_obj.actual_frames().count(),
+        'frames_count': frames_count,
         'alternations': alternations
     }
 
@@ -288,12 +300,23 @@ def ajax_units(request, lemma_id):
 
 def create_units_context(lemma_id):
     lemma = Lemma.objects.get(id=lemma_id)
+    pos_en = lemma.entry_obj.pos.tag
+    pos = 'brak'
+    if pos_en == 'adj':
+        pos = 'przymiotnik'
+    elif pos_en == 'noun':
+        pos = 'rzeczownik'
+    elif pos_en == 'adv':
+        pos = 'przysłówek'
+    elif pos_en == 'verb':
+        pos = 'czasownik'
     lexical_units = lemma.entry_obj.meanings.order_by('base', 'sense')
 #    lexical_units = LexicalUnit.objects.filter(Q(base__startswith=lemma.entry + u' ', pos="czasownik")|Q(base__contains=u' '+lemma.entry+u' ', pos="czasownik")|Q(base__endswith=u' '+lemma.entry, pos="czasownik")|Q(base=lemma.entry, pos="czasownik")).order_by('base', 'sense')
 
     context = {
         'lexical_units': [{"id": lu.id, "luid": lu.luid, "base": lu.base, "sense": lu.sense, "pos": lu.pos, "glossa": lu.glossa, "definition": lu.definition, "location": location(lu)} for lu in lexical_units],
-        'informations': {'base': lemma.entry, 'sense': max(['A'] + [chr(ord(lu.sense) + 1) for lu in lexical_units.filter(luid=-1)])}, # TODO: 2 different free senses for with/whthout 'się'
+        'informations': {'base': lemma.entry, 'sense': max(['A'] + [chr(ord(lu.sense) + 1) for lu in lexical_units.filter(luid=-1)]), # TODO: 2 different free senses for with/without 'się'
+                         'pos': pos}
     }
 
     return context
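With this change frames_count is no longer a plain integer but a string of the form "<local>+<imported>", counting only visible frames. A self-contained sketch of that computation, assuming the frames_display entries built above always carry 'visible' and 'local' flags:
--------------------------------------------
# Sketch of the new frames_count label: visible frames are split into
# local and imported ones and rendered e.g. as "2+1".
def frames_count_label(frames_display):
    local = sum(1 for f in frames_display if f['visible'] and f['local'])
    imported = sum(1 for f in frames_display if f['visible'] and not f['local'])
    return '%d+%d' % (local, imported)

# Usage sketch:
print(frames_count_label([{'visible': True, 'local': True},
                          {'visible': True, 'local': False},
                          {'visible': False, 'local': True}]))  # "1+1"
--------------------------------------------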