Commit 71ad2c03b8010f91302c3684dad4aad1e3df9264

Authored by Tomasz Bartosiak
2 parents 6df6f35d fa726329

Merge branch 'master' into tomek

Conflicts:
	semantics/static/js/semantics_roles.js
	semantics/views.py
Showing 55 changed files with 2253 additions and 2441 deletions

Too many changes to show.

To preserve performance, only 24 of the 55 changed files are displayed.

... ... @@ -61,6 +61,8 @@ Dodaj w głównym folderze projektu plik konfiguracyjny database_data.py oraz zd
61 61 Zainstaluj moduł psycopg2:
62 62 >> sudo apt-get install python-psycopg2
63 63  
  64 +Zainstaluj Morfeusza2 zgodnie z instrukcjami na stronie http://sgjp.pl/morfeusz/dopobrania.html.
  65 +
64 66 Zgraj pliki statyczne do dedykowanego katalogu poleceniem:
65 67 >> python manage.py collectstatic
66 68  
... ... @@ -74,8 +76,8 @@ Utwórz plik slowal.wsgi odpowiednio definiując w nim ścieżki do plików stat
74 76 --------------------------------------------
75 77 import os, sys
76 78  
77   -sys.path.append('/home/zil/static')
78   -sys.path.append('/home/zil/static/Slowal')
  79 +sys.path.append('/home/zil/Slowal')
  80 +sys.path.append('/home/zil')
79 81 os.environ['DJANGO_SETTINGS_MODULE'] = 'Slowal.settings'
80 82  
81 83 import django.core.handlers.wsgi
... ... @@ -83,7 +85,7 @@ import django.core.handlers.wsgi
83 85 application = django.core.handlers.wsgi.WSGIHandler()
84 86 --------------------------------------------
85 87  
86   -Skonfiguruj apacha dodając plik konfiguracyjny (np. o nazwie slowal.conf) do folderu sites-available apacha (domyślnie /etc/apache2/sites-enabled/), ścieżka WSGIScriptAlias musi wskazywać na plik slowal.wsgi. Przykładowy plik konfiguracyjny poniżej:
  88 +Skonfiguruj apacha dodając plik konfiguracyjny (np. o nazwie slowal.conf) do folderu sites-available apacha (domyślnie /etc/apache2/sites-available/), ścieżka WSGIScriptAlias musi wskazywać na plik slowal.wsgi. Przykładowy plik konfiguracyjny poniżej:
87 89 --------------------------------------------
88 90 <VirtualHost *:80>
89 91 ServerAdmin bartek.niton@gmail.com
... ... @@ -116,8 +118,6 @@ Uruchom stronę poleceniem:
116 118 Zrestartuj apacha:
117 119 >> sudo service apache2 restart
118 120  
119   -Zainstaluj Morfeusza2 zgodnie z instrukcjami na stronie http://sgjp.pl/morfeusz/dopobrania.html.
120   -
121 121 Ustaw w crontabie cykliczne uruchamianie komend create_walenty i count_positions_occurrences:
122 122 1 0 * * 5 python /home/zil/Slowal/manage.py create_walenty
123 123 0 1 * * * python /home/zil/Slowal/manage.py count_positions_occurrences
... ...
accounts/models.py
... ... @@ -43,7 +43,7 @@ class UserStats(Model):
43 43 # oplaty za prace leksykograficzne
44 44 bind_phraseology_frames_history = ManyToManyField('RealizedPhraseologyBinding', db_table='powiazania_frazeologiczne',
45 45 blank=True, null=True, related_name='user_stats')
46   - # !NOWE! oplaty za prace semantyczne
  46 + # oplaty za prace semantyczne
47 47 semantics_real_history = ManyToManyField('RealizedSemantics', db_table='prace_semantyczne',
48 48 blank=True, null=True, related_name='user_stats')
49 49 # kwota uiszczona
... ... @@ -272,7 +272,6 @@ def get_anon_profile():
272 272 def filtering_mode(user):
273 273 return user.usersettings.filter_search
274 274  
275   -# !NOWE!
276 275 class RealizedSemantics(Model):
277 276 """Model representing realized semantic work."""
278 277 # wykonane haslo
... ... @@ -288,15 +287,24 @@ class RealizedSemantics(Model):
288 287 # wlasciwie wykonane ramki (wypelniane dla semantyka)
289 288 prop_frames = PositiveIntegerField(db_column='poprawne_ramki',
290 289 default=0)
  290 + # czesciowo wlasciwie wykonane ramki (wypelniane dla semantyka) !NOWE
  291 + part_prop_frames = PositiveIntegerField(db_column='czesciowo_poprawne_ramki',
  292 + default=0)
291 293 # niewlasciwie wykonane ramki (wypelniane dla semantyka)
292 294 wrong_frames = PositiveIntegerField(db_column='niepoprawne_ramki',
293 295 default=0)
  296 + # dodane powiazania miedzy ramami i schematami !NOWE
  297 + added_connections = PositiveIntegerField(db_column='dodane_powiazania',
  298 + default=0)
294 299 # wykonane ramki (wypelniane dla semantyka)
295 300 made_frames = PositiveIntegerField(db_column='wykonane_ramki',
296 301 default=0)
297 302 # poprawione ramki (wypelniane dla supersemantyka)
298 303 corr_frames = PositiveIntegerField(db_column='poprawione_ramki',
299 304 default=0)
  305 + # czesciowo poprawione ramki (wypelniane dla supersemantyka) !NOWE
  306 + part_corr_frames = PositiveIntegerField(db_column='czesciowo_poprawione_ramki',
  307 + default=0)
300 308 # ramki niepoprawiane (wypelniane dla supersemantyka)
301 309 ncorr_frames = PositiveIntegerField(db_column='niepoprawione_ramki',
302 310 default=0)
... ...
dictionary/ajax_jqgrid.py
... ... @@ -11,26 +11,7 @@ def default_sort_rules():
11 11 'semanticist': { 'priority': None, 'sort_order': 'desc'},
12 12 'vocabulary': { 'priority': None, 'sort_order': 'desc'},
13 13 'status': { 'priority': None, 'sort_order': 'desc'}}
14   -
15   -def default_filter_rules():
16   - return { 'pos': None,
17   - 'owner': None,
18   - 'phraseologist': None,
19   - 'semanticist': None,
20   - 'vocabulary': None,
21   - 'status': None,
22   - 'example_source': None,
23   - 'approver': None,
24   - 'reflex': None,
25   - 'negativity': None,
26   - 'predicativity': None,
27   - 'aspect': None,
28   - 'argument': '.*',
29   - 'position': '.*',
30   - 'frame_opinion' : None,
31   - 'sender': None,
32   - 'frame_phraseologic': False}
33   -
  14 +
34 15 class JqGridAjax(object):
35 16 model = None
36 17 search_field = None
... ...
dictionary/ajax_lemma_status.py
1 1 # -*- coding: utf-8 -*-
2 2  
3   -#Copyright (c) 2012, Bartłomiej Nitoń
4   -#All rights reserved.
5   -
6   -#Redistribution and use in source and binary forms, with or without modification, are permitted provided
7   -#that the following conditions are met:
8   -
9   -# Redistributions of source code must retain the above copyright notice, this list of conditions and
10   -# the following disclaimer.
11   -# Redistributions in binary form must reproduce the above copyright notice, this list of conditions
12   -# and the following disclaimer in the documentation and/or other materials provided with the distribution.
13   -
14   -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
15   -# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
16   -# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
17   -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
18   -# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
19   -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
20   -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21   -# POSSIBILITY OF SUCH DAMAGE.
22   -
23 3 import operator
24 4  
25 5 from django.db.models import Q
... ... @@ -36,9 +16,7 @@ from semantics.utils import get_frames_differences
36 16  
37 17 @render('lemma_status.html')
38 18 @ajax(method='get', encode_result=False)
39   -def get_lemma_status(request, id):
40   - if request.session.has_key('lemma_from_note_id') and request.session['lemma_from_note_id']:
41   - id = request.session['lemma_from_note_id']
  19 +def get_lemma_status(request, id):
42 20 selected_lemma = Lemma.objects.get(id=id)
43 21 abort_status = None
44 22 next_statuses = []
... ... @@ -254,10 +232,20 @@ def lemma_status_change(request, status_id, lemma_id):
254 232 checked_frame_value = 0.0
255 233 corrected_frame_value = 0.0
256 234 bonus = 4.0
  235 + part_bonus = 2.0
  236 + connection_bonus = 0.1
257 237 ### naliczanie oplat za sprawdzenie i bonusow
258   - update_sem_stats_conf_s(lemma_obj.entry_obj, actual_semantic_frames,
259   - lemma_obj.semanticist, request.user, new_status,
260   - checked_frame_value, corrected_frame_value, bonus)
  238 + update_sem_stats_conf_s(entry=lemma_obj.entry_obj,
  239 + semantic_frames=actual_semantic_frames,
  240 + semanticist=lemma_obj.semanticist,
  241 + supersemanticist=request.user,
  242 + status=new_status,
  243 + checked_frame_value=checked_frame_value,
  244 + corrected_frame_value=corrected_frame_value,
  245 + bonus_factor=bonus,
  246 + part_bonus_factor=part_bonus,
  247 + connection_bonus=connection_bonus)
  248 +
261 249 add_new_frames_to_phraseologic_propositions(lemma_obj)
262 250 changed = True
263 251 # zmiana statusu na w obrobce semantycznej
... ... @@ -496,7 +484,8 @@ def update_sem_stats_ready_s(entry, semantic_frames, semanticist, status, frame_
496 484 semanticist.user_stats.semantics_real_history.add(realized_semantics)
497 485  
498 486 def update_sem_stats_conf_s(entry, semantic_frames, semanticist, supersemanticist, status,
499   - checked_frame_value, corrected_frame_value, bonus_factor):
  487 + checked_frame_value, corrected_frame_value,
  488 + bonus_factor, part_bonus_factor, connection_bonus):
500 489 ready_statuses = Lemma_Status.objects.filter(type__sym_name='ready_s')
501 490 q_ready_statuses = [Q(status=ready_status) for ready_status in ready_statuses.all()]
502 491  
... ... @@ -505,17 +494,28 @@ def update_sem_stats_conf_s(entry, semantic_frames, semanticist, supersemanticis
505 494 checked_sem_frames = semantic_frames
506 495 ready_to_checked_diffs = get_frames_differences(ready_sem_frames.all(), checked_sem_frames.all())
507 496 checked_to_ready_diffs = get_frames_differences(checked_sem_frames.all(), ready_sem_frames.all())
508   - sem_dict = {'same_frames': len(ready_to_checked_diffs['matching_frames']),
  497 +
  498 + connections_amount = count_connections(ready_to_checked_diffs)
  499 + sem_cash = (bonus_factor*float(len(ready_to_checked_diffs['matching_frames'])) +
  500 + part_bonus_factor*float(len(ready_to_checked_diffs['part_matching_frames'])) +
  501 + connection_bonus*float(connections_amount))
  502 + sem_dict = {'same_frames': len(ready_to_checked_diffs['matching_frames']),
  503 + 'part_same_frames': len(ready_to_checked_diffs['part_matching_frames']),
509 504 'wrong_frames': len(ready_to_checked_diffs['missing_frames']),
510   - 'cash': bonus_factor*float(len(ready_to_checked_diffs['matching_frames']))}
  505 + 'added_connections': connections_amount,
  506 + 'cash': sem_cash}
  507 +
  508 + supersem_cash = (float(len(checked_to_ready_diffs['missing_frames'])+len(checked_to_ready_diffs['part_matching_frames']))*corrected_frame_value +
  509 + float(len(ready_to_checked_diffs['matching_frames']))*checked_frame_value)
511 510 supersem_dict = {'same_frames': len(checked_to_ready_diffs['matching_frames']),
  511 + 'part_same_frames': len(checked_to_ready_diffs['part_matching_frames']),
512 512 'redo_frames': len(checked_to_ready_diffs['missing_frames']),
513   - 'cash': (float(len(checked_to_ready_diffs['missing_frames']))*corrected_frame_value+
514   - float(len(ready_to_checked_diffs['matching_frames']))*checked_frame_value)}
  513 + 'cash': supersem_cash}
515 514  
516 515 supersem_real_semantics = RealizedSemantics(entry=entry,
517 516 cash=supersem_dict['cash'],
518 517 corr_frames=supersem_dict['redo_frames'],
  518 + part_corr_frames=supersem_dict['part_same_frames'],
519 519 ncorr_frames=supersem_dict['same_frames'],
520 520 status=status,
521 521 bonus=False)
... ... @@ -526,12 +526,22 @@ def update_sem_stats_conf_s(entry, semantic_frames, semanticist, supersemanticis
526 526 sem_real_semantics = RealizedSemantics(entry=entry,
527 527 cash=sem_dict['cash'],
528 528 prop_frames=sem_dict['same_frames'],
  529 + part_prop_frames=sem_dict['part_same_frames'],
529 530 wrong_frames=sem_dict['wrong_frames'],
  531 + added_connections=sem_dict['added_connections'],
530 532 status=status,
531 533 bonus=True)
532 534 sem_real_semantics.save()
533 535 sem_real_semantics.frames.add(*semantic_frames.all())
534 536 semanticist.user_stats.semantics_real_history.add(sem_real_semantics)
535   -
  537 +
  538 +def count_connections(differences):
  539 + amount = 0
  540 + for frame in differences['matching_frames']:
  541 + amount += frame.connected_schemata().count()
  542 + for frame in differences['part_matching_frames']:
  543 + amount += frame.connected_schemata().count()
  544 + return amount
  545 +
536 546 def remove_semantic_payments(entry):
537 547 RealizedSemantics.objects.filter(entry=entry).delete()
... ...
dictionary/ajax_lemma_view.py
1 1 # -*- coding: utf-8 -*-
2 2  
3   -#Copyright (c) 2012, Bartłomiej Nitoń
4   -#All rights reserved.
5   -
6   -#Redistribution and use in source and binary forms, with or without modification, are permitted provided
7   -#that the following conditions are met:
8   -
9   -# Redistributions of source code must retain the above copyright notice, this list of conditions and
10   -# the following disclaimer.
11   -# Redistributions in binary form must reproduce the above copyright notice, this list of conditions
12   -# and the following disclaimer in the documentation and/or other materials provided with the distribution.
13   -
14   -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
15   -# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
16   -# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
17   -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
18   -# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
19   -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
20   -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21   -# POSSIBILITY OF SUCH DAMAGE.
22   -
23 3 import math
24 4 import copy
25 5 import re
... ... @@ -32,7 +12,11 @@ from django.contrib.auth.models import User, Group
32 12 from django.core import serializers
33 13  
34 14 from common.js_to_obj import frameObjToSerializableDict, jsArgToObj, jsFrameToObj, jsPosToObj
  15 +from dictionary.common_func import escape_regex
35 16 from dictionary.convert_frames import frame_conversion
  17 +from dictionary.filtering import default_filter_rules, prepare_filter_form, \
  18 + save_lemma_filters_and_get_schemata_filter_setup, \
  19 + schemata_filter_options
36 20 from dictionary.models import Vocabulary, Lemma, Lemma_Status, Frame_Opinion, \
37 21 Frame_Opinion_Value, Frame, NKJP_Example, NKJP_ArgSelection, \
38 22 NKJP_Source, NKJP_Opinion, Position, \
... ... @@ -44,9 +28,9 @@ from dictionary.models import Vocabulary, Lemma, Lemma_Status, Frame_Opinion, \
44 28 sorted_default_frame_char_vals, XcpExample, \
45 29 POS, get_frame_char_and_its_value, get_frame_char_by_type_and_value_pk, \
46 30 sortFrameChars, sortArguments, sortPositions, \
47   - get_or_create_position, get_phraseologic_frames_only, pos_compatible
  31 + get_or_create_position, get_schemata_by_type, pos_compatible
48 32 from dictionary.forms import AddPositionForm, FrameForm, Pos_Cat_Form, \
49   - AddNkjpExampleForm, MessageForm, SortForm, FilterForm, \
  33 + AddNkjpExampleForm, MessageForm, SortForm, \
50 34 SimilarLemmasNewForm, ChangeUserFunctionForm, \
51 35 ExampleOpinionForm, \
52 36 FrameConversionForm, CreatePositionForm, AssignPhraseologicFrameForm
... ... @@ -56,13 +40,15 @@ from dictionary.saving import connect_example_operation, disconnect_all_examples
56 40  
57 41 from common.decorators import render, ajax, AjaxError
58 42 from common.util import triple_arg_poss
  43 +from dictionary.filtering import filter_lemmas
59 44 from dictionary.validation import find_similar_frames, get_all_test_missing_frames, get_aspect_rel_lemmas, \
60 45 get_wrong_aspect_frames, validate_B_frames, get_deriv_miss_frames_message, \
61 46 validate_phraseology_binded_frames, validate_rule_5, \
62 47 validate_examples_and_mark_errors, validate_schemas_and_mark_errors, \
  48 + validate_schemata_for_semantics_and_mark_errors, \
63 49 get_missing_aspects_msg, validate_same_positions_schemata
64 50  
65   -from semantics.models import LexicalUnitExamples
  51 +from semantics.models import LexicalUnitExamples, SemanticFrame
66 52  
67 53 from wordnet.models import LexicalUnit
68 54  
... ... @@ -77,13 +63,11 @@ from django.core.validators import email_re
77 63 from accounts.models import UserSettings, UserStats, RealizedPhraseologyBinding, \
78 64 can_modify_phraseology_only, get_anon_profile
79 65  
80   -from ajax_jqgrid import JqGridAjax, default_sort_rules, default_filter_rules
  66 +from ajax_jqgrid import JqGridAjax, default_sort_rules
81 67  
82 68 import locale
83 69 locale.setlocale(locale.LC_ALL, 'pl_PL.UTF-8')
84 70  
85   -import HTMLParser
86   -
87 71 DEFAULT_SAVE_PATH = os.path.join(PROJECT_PATH, 'tmp')
88 72  
89 73 def reverse(string):
... ... @@ -165,7 +149,7 @@ def prepareFrameTable(frame):
165 149  
166 150 def nkjpExamplesObjToJs(nkjp_examples, user, lemma):
167 151 example_dict_list = []
168   - lexical_units = lemma.entry_obj.lexical_units()
  152 + lexical_units = lemma.entry_obj.meanings.all()
169 153 for example in nkjp_examples:
170 154 frame = example.frame;
171 155 frame_table_id = 'frame_'+str(frame.id)+'_'
... ... @@ -178,18 +162,14 @@ def nkjpExamplesObjToJs(nkjp_examples, user, lemma):
178 162  
179 163 confirmed = True
180 164 approvers_count = lemma.entry_obj.pos.example_approvers_num
181   - #Configuration.objects.get(selected_conf=True).example_approvers_num
182 165 if example.source.confirmation_required and example.approvers.count() < approvers_count:
183 166 try:
184 167 example.approvers.get(username=user.username)
185 168 except:
186   - confirmed = False
187   -
188   - sentence = example.sentence.replace('\\', '\\\\').replace("\n", "\\n").replace("\r", "\\r").replace("\t", "\\t").replace("\"", "\\\"")
189   - comment = example.comment.replace('\\', '\\\\').replace("\n", "\\n").replace("\r", "\\r").replace("\t", "\\t").replace("\"", "\\\"")
190   - h = HTMLParser.HTMLParser()
191   - sentence = h.unescape(sentence)
192   - comment = h.unescape(comment)
  169 + confirmed = False
  170 +
  171 + sentence = example.sentence
  172 + comment = example.comment
193 173  
194 174 example_dict = { 'example_id' : example.id,
195 175 'frame_id' : frame_table_id,
... ... @@ -219,18 +199,19 @@ def nkjpLemmaExamplesObjToJs(nkjp_examples, user, lemma):
219 199 for example in nkjp_examples:
220 200 confirmed = True
221 201 approvers_count = lemma.entry_obj.pos.example_approvers_num
222   - #Configuration.objects.get(selected_conf=True).example_approvers_num
223 202 if example.source.confirmation_required and example.approvers.count() < approvers_count:
224 203 try:
225 204 example.approvers.get(username=user.username)
226 205 except:
227 206 confirmed = False
228 207  
229   - sentence = example.sentence.replace('\\', '\\\\').replace("\n", "\\n").replace("\r", "\\r").replace("\t", "\\t").replace("\"", "\\\"")
230   - comment = example.comment.replace('\\', '\\\\').replace("\n", "\\n").replace("\r", "\\r").replace("\t", "\\t").replace("\"", "\\\"")
231   - h = HTMLParser.HTMLParser()
232   - sentence = h.unescape(sentence)
233   - comment = h.unescape(comment)
  208 + sentence = example.sentence
  209 + comment = example.comment
  210 +# sentence = example.sentence.replace('\\', '\\\\').replace("\n", "\\n").replace("\r", "\\r").replace("\t", "\\t").replace("\"", "\\\"")
  211 +# comment = example.comment.replace('\\', '\\\\').replace("\n", "\\n").replace("\r", "\\r").replace("\t", "\\t").replace("\"", "\\\"")
  212 +# h = HTMLParser.HTMLParser()
  213 +# sentence = h.unescape(sentence)
  214 +# comment = h.unescape(comment)
234 215  
235 216 example_dict = { 'example_id' : example.id,
236 217 'frame_id' : '',
... ... @@ -249,8 +230,6 @@ def nkjpLemmaExamplesObjToJs(nkjp_examples, user, lemma):
249 230 @render('old_frames.html')
250 231 @ajax(method='get', encode_result=False)
251 232 def get_old_frames(request, id):
252   - if request.session.has_key('lemma_from_note_id') and request.session['lemma_from_note_id']:
253   - id = request.session['lemma_from_note_id']
254 233 selected_lemma = Lemma.objects.get(id=id)
255 234 old_frames = selected_lemma.old_frames
256 235 reflexed_frames = []
... ... @@ -299,13 +278,7 @@ def get_arg_id(request, text_rep):
299 278 @ajax(method='get', encode_result=False)
300 279 def get_lemma_preview(request, id, main_lemma_id):
301 280 selected_lemma = Lemma.objects.get(id=id)
302   - new_frames = selected_lemma.frames.order_by('text_rep')
303   -
304   - serialized_frames = []
305   - for frame in new_frames:
306   - serialized_frames.append(frameObjToSerializableDict(selected_lemma, frame))
307   - json_frames = json_encode(serialized_frames)
308   -
  281 +
309 282 frame_char_models = Frame_Char_Model.objects.order_by('priority')
310 283 frame_char_prior_model_vals = frame_char_models[0].frame_char_values.all()
311 284  
... ... @@ -315,51 +288,15 @@ def get_lemma_preview(request, id, main_lemma_id):
315 288  
316 289 json_frame_char_list = json_encode(frame_char_list)
317 290  
318   - # konwertowanie przykladow na zrozumiale przez java sript
319   - nkjp_examples = selected_lemma.nkjp_examples.all()
320   - nkjp_examples_js = nkjpExamplesObjToJs(nkjp_examples, request.user, selected_lemma)
321   -
322   - json_nkjp_examples = json_encode(nkjp_examples_js)
323   -
324 291 similarLemmasNewForm = SimilarLemmasNewForm(statuses=Lemma_Status.objects.order_by('priority'))
325 292  
326   - return {'serialized_frames': json_frames,
327   - 'frame_char_list': json_frame_char_list,
328   - 'nkjp_examples': json_nkjp_examples,
  293 + return {'frame_char_list': json_frame_char_list,
329 294 'selected_lemma': selected_lemma,
330 295 'similarLemmasNewForm': similarLemmasNewForm}
331 296  
332 297 @ajax(method='get')
333 298 def get_frame_filter_options(request):
334   - # pobieranie wartosci aspektu
335   - aspect_model = Frame_Char_Model.objects.get(model_name=u'ASPEKT')
336   - aspect_vals_objs = aspect_model.frame_char_values.order_by('-priority')
337   - aspect_str_list = [val.value for val in aspect_vals_objs]
338   -
339   - # pobieranie wartosci zwrotnosci
340   - reflex_model = Frame_Char_Model.objects.get(model_name=u'ZWROTNOŚĆ')
341   - reflex_vals_objs = reflex_model.frame_char_values.order_by('-priority')
342   - reflex_str_list = [val.value for val in reflex_vals_objs]
343   -
344   - # pobieranie wartosci negatywnosci
345   - neg_model = Frame_Char_Model.objects.get(model_name=u'NEGATYWNOŚĆ')
346   - neg_vals_objs = neg_model.frame_char_values.order_by('-priority')
347   - neg_str_list = [val.value for val in neg_vals_objs]
348   -
349   - # pobieranie wartosci predykatywnosci
350   - pred_model = Frame_Char_Model.objects.get(model_name=u'PREDYKATYWNOŚĆ')
351   - pred_vals_objs = pred_model.frame_char_values.order_by('-priority')
352   - pred_str_list = [val.value for val in pred_vals_objs]
353   -
354   - # pobieranie opinii o schemacie
355   - opinion_str_list = [val.value for val in Frame_Opinion_Value.objects.order_by('priority')]
356   -
357   - return {'reflex_options': reflex_str_list,
358   - 'aspect_options': aspect_str_list,
359   - 'neg_options': neg_str_list,
360   - 'pred_options': pred_str_list,
361   - 'opinion_options': opinion_str_list}
362   -
  299 + return schemata_filter_options()
363 300  
364 301 @render('lemma_desc.html')
365 302 @ajax(method='get', encode_result=False)
... ... @@ -376,44 +313,14 @@ def get_lemma_desc(request, id):
376 313 @render('new_frames.html')
377 314 @ajax(method='get', encode_result=False)
378 315 def get_new_frames(request, id):
379   - if request.session.has_key('lemma_from_note_id') and request.session['lemma_from_note_id']:
380   - id = request.session['lemma_from_note_id']
381   -
382   - selected_lemma = Lemma.objects.get(id=id)
383   - new_frames = selected_lemma.frames.order_by('text_rep')
384   -
385   -# sprawdz czy uzytkownik jest wlascicielem wybranego hasla
386   - can_modify = user_can_modify(selected_lemma, request.user)
387   -
388   - serialized_frames = []
389   - for frame in new_frames:
390   - serialized_frames.append(frameObjToSerializableDict(selected_lemma, frame, True))
391   - json_frames = json_encode(serialized_frames)
392   -
393   - # konwertowanie przykladow na zrozumiale przez java sript
394   - nkjp_examples = selected_lemma.nkjp_examples.order_by('source__priority',
395   - 'opinion__priority',
396   - 'sentence')
397   - nkjp_examples_js = nkjpExamplesObjToJs(nkjp_examples, request.user, selected_lemma)
398   -
399   - json_nkjp_examples = json_encode(nkjp_examples_js)
400   -
401   - add_nkjp_form = AddNkjpExampleForm()
402   -
403   - lemma_nkjp_examples = selected_lemma.lemma_nkjp_examples.order_by('source__priority',
404   - 'opinion__priority',
405   - 'sentence')
406   - lemma_nkjp_examples_js = nkjpLemmaExamplesObjToJs(lemma_nkjp_examples, request.user, selected_lemma)
407   - json_lemma_nkjp_examples = json_encode(lemma_nkjp_examples_js)
408   -
409   - return {'serialized_frames': json_frames,
410   - 'add_nkjp_form': add_nkjp_form,
411   - 'nkjp_examples': json_nkjp_examples,
412   - 'can_modify': can_modify,
413   - 'selected_lemma': selected_lemma,
414   - 'lemma_nkjp_examples': json_lemma_nkjp_examples,
415   - 'skladnica_examples': selected_lemma.skladnica_frames.exists(),
416   - 'xcp_examples': selected_lemma.entry_obj.xcp_examples.exists()}
  316 + selected_lemma = Lemma.objects.get(id=id)
  317 + can_modify = user_can_modify(selected_lemma, request.user)
  318 + add_nkjp_form = AddNkjpExampleForm()
  319 + return {'add_nkjp_form': add_nkjp_form,
  320 + 'can_modify': can_modify,
  321 + 'selected_lemma': selected_lemma,
  322 + 'skladnica_examples': selected_lemma.skladnica_frames.exists(),
  323 + 'xcp_examples': selected_lemma.entry_obj.xcp_examples.exists()}
417 324  
418 325  
419 326 @ajax(method='get', encode_result=True)
... ... @@ -427,72 +334,26 @@ def get_ctrl_preview(request, id):
427 334  
428 335 @render('lemma_examples.html')
429 336 @ajax(method='get', encode_result=False)
430   -def get_lemma_examples(request, id):
431   - if request.session.has_key('lemma_from_note_id') and request.session['lemma_from_note_id']:
432   - id = request.session['lemma_from_note_id']
  337 +def get_lemma_examples(request, id):
433 338 selected_lemma = Lemma.objects.get(id=id)
434   - lemma_nkjp_examples = selected_lemma.lemma_nkjp_examples.order_by('source__priority',
435   - 'opinion__priority',
436   - 'sentence')
437   - lemma_nkjp_examples_js = nkjpLemmaExamplesObjToJs(lemma_nkjp_examples, request.user, selected_lemma)
  339 +# lemma_nkjp_examples = selected_lemma.lemma_nkjp_examples.order_by('source__priority',
  340 +# 'opinion__priority',
  341 +# 'sentence')
  342 +# lemma_nkjp_examples_js = nkjpLemmaExamplesObjToJs(lemma_nkjp_examples, request.user, selected_lemma)
438 343 # sprawdz czy uzytkownik jest wlascicielem wybranego hasla
439 344 can_modify = user_can_modify(selected_lemma, request.user)
440   - json_lemma_nkjp_examples = json_encode(lemma_nkjp_examples_js)
  345 + #json_lemma_nkjp_examples = json_encode(lemma_nkjp_examples_js)
441 346 add_nkjp_form = AddNkjpExampleForm()
442 347  
443 348 return {'add_nkjp_form': add_nkjp_form,
444   - 'lemma_nkjp_examples': json_lemma_nkjp_examples,
  349 + #'lemma_nkjp_examples': json_lemma_nkjp_examples,
445 350 'can_modify': can_modify}
446 351  
447   -############################ semantics ###################################
448   -@ajax(method='get')
449   -def schema_got_assigned_semantics(request, lemma_id, schema_id):
450   - lemma = Lemma.objects.get(id=lemma_id)
451   -# sprawdz czy najnowsza wersja
452   -# if lemma.old:
453   -# raise AjaxError('old lemma')
454   - frames = lemma.entry_obj.actual_frames() # jeszcze po przykladach trzeba sprawdzac
455   - schema_examples = lemma.nkjp_examples.filter(frame__id=schema_id)
456   - for frame in frames.all():
457   - if complements_pinned(frame, schema_id) or examples_pinned(frame, schema_examples):
458   - return {'got_assigned_semantics': True}
459   - return {'got_assigned_semantics': False}
460   -
461   -def complements_pinned(frame, schema_id):
462   - if frame.complements.filter(realizations__frame__id=schema_id).exists():
463   - return True
464   - return False
465   -
466   -def examples_pinned(frame, schema_examples):
467   - for lexical_unit in frame.lexical_units.all():
468   - for example in schema_examples.all():
469   - if LexicalUnitExamples.objects.filter(lexical_unit=lexical_unit,
470   - example=example).exists():
471   - return True
472   - return False
473   -
474   -@ajax(method='get')
475   -def example_got_assigned_semantics(request, lemma_id, example_id):
476   - lemma = Lemma.objects.get(id=lemma_id)
477   -# sprawdz czy najnowsza wersja
478   -# if lemma.old:
479   -# raise AjaxError('old lemma')
480   - # TODO niech lexical units beda wybierane z uzyciem Entry jak juz Tomasz to zrobi
481   - lexical_units = LexicalUnit.objects.filter(Q(base = lemma.entry)|Q(base = lemma.entry + u' się'))
482   - for lu in lexical_units:
483   - if LexicalUnitExamples.objects.filter(example__id=example_id,
484   - lexical_unit=lu).exists():
485   - return {'got_assigned_semantics': True}
486   - return {'got_assigned_semantics': False}
487   -
488 352 ############################ lemma notes #################################
489 353  
490 354 @render('lemma_notes.html')
491 355 @ajax(method='get', encode_result=False)
492 356 def get_lemma_notes(request, id):
493   - if request.session.has_key('lemma_from_note_id') and request.session['lemma_from_note_id']:
494   - id = request.session['lemma_from_note_id']
495   - request.session['lemma_from_note_id'] = ''
496 357 selected_lemma = Lemma.objects.get(id=id)
497 358 add_note_form = MessageForm()
498 359 messages = selected_lemma.messages.filter(private=False).order_by('-time')
... ... @@ -672,9 +533,7 @@ def restore_lemma(request, change_id, lemma_id):
672 533  
673 534 @render('change_ctrl.html')
674 535 @ajax(method='get', encode_result=False)
675   -def get_change_ctrl(request, id):
676   - if request.session.has_key('lemma_from_note_id') and request.session['lemma_from_note_id']:
677   - id = request.session['lemma_from_note_id']
  536 +def get_change_ctrl(request, id):
678 537 selected_lemma = Lemma.objects.get(id=id)
679 538 old_versions = selected_lemma.old_versions.order_by('-time')
680 539 can_modify = (user_can_modify(selected_lemma, request.user) and
... ... @@ -844,176 +703,14 @@ def get_sort_order(request):
844 703  
845 704 @render('filter_form.html')
846 705 @ajax(method='get', encode_result=False)
847   -def filter_form(request):
848   - if request.session.has_key('lemma_preview') and request.session['lemma_preview']:
849   - if not request.session.has_key('filter_rules_lemma_preview'):
850   - request.session['filter_rules_lemma_preview'] = default_filter_rules()
851   - filter_rules = request.session['filter_rules_lemma_preview']
852   - else:
853   - if not request.session.has_key('filter_rules'):
854   - request.session['filter_rules'] = default_filter_rules()
855   - filter_rules = request.session['filter_rules']
856   -
857   - users = User.objects.none()
858   - phraseologists = User.objects.none()
859   - semanticists = User.objects.none()
860   - vocabularies = Vocabulary.objects.none()
861   - senders = User.objects.none()
862   - statuses = get_anon_profile().visible_statuses.all()
863   - can_confirm_example = False
864   - if request.user.is_authenticated():
865   - users = User.objects.filter(lemmas__old=False).distinct().order_by('username')
866   - phraseologists = User.objects.filter(phraseologist_lemmas__old=False).distinct().order_by('username')
867   - semanticists = User.objects.filter(semanticist_lemmas__old=False).distinct().order_by('username')
868   - vocabularies = request.user.visible_vocabularies.all()
869   - senders = User.objects.order_by('groups__group_settings__priority')
870   - statuses = Lemma_Status.objects.all()
871   - if request.user.has_perm('dictionary.confirm_example') or request.user.is_superuser:
872   - can_confirm_example = True
873   -
874   - form = FilterForm(users=users,
875   - phraseologists=phraseologists,
876   - semanticists=semanticists,
877   - vocabularies=vocabularies,
878   - senders=senders,
879   - statuses=statuses,
880   - sel_pos=filter_rules['pos'],
881   - sel_user=filter_rules['owner'],
882   - sel_phraseologist=filter_rules['phraseologist'],
883   - sel_semanticist=filter_rules['semanticist'],
884   - sel_vocabulary=filter_rules['vocabulary'],
885   - sel_status=filter_rules['status'],
886   - sel_reflex=filter_rules['reflex'],
887   - sel_negativity=filter_rules['negativity'],
888   - sel_predicativity=filter_rules['predicativity'],
889   - sel_aspect=filter_rules['aspect'],
890   - sel_has_argument=filter_rules['argument'],
891   - sel_has_position=filter_rules['position'],
892   - sel_frame_opinion=filter_rules['frame_opinion'],
893   - can_confirm_example = can_confirm_example,
894   - sel_example_source=filter_rules['example_source'],
895   - sel_approver=filter_rules['approver'],
896   - sel_sender=filter_rules['sender'],
897   - frame_phraseologic=filter_rules['frame_phraseologic'])
898   - return {'form': form}
  706 +def filter_form(request):
  707 + return prepare_filter_form(request)
899 708  
900 709 @ajax(method='post')
901 710 def filter_form_submit(request, form_data):
902   - filter_dict = dict((x['name'], x['value']) for x in form_data)
903   -
904   - if filter_dict['pos']:
905   - pos_obj = POS.objects.get(id=filter_dict['pos'])
906   - else:
907   - pos_obj = None
908   -
909   - if filter_dict['owner']:
910   - owner_obj = User.objects.get(id=filter_dict['owner'])
911   - else:
912   - owner_obj = None
913   -
914   - if filter_dict['phraseologist']:
915   - phraseologist_obj = User.objects.get(id=filter_dict['phraseologist'])
916   - else:
917   - phraseologist_obj = None
918   -
919   - if filter_dict['semanticist']:
920   - semanticist_obj = User.objects.get(id=filter_dict['semanticist'])
921   - else:
922   - semanticist_obj = None
923   -
924   - if filter_dict['vocabulary']:
925   - vocabulary_obj = Vocabulary.objects.get(name=filter_dict['vocabulary'])
926   - else:
927   - vocabulary_obj = None
928   -
929   - if filter_dict['status']:
930   - status_obj = Lemma_Status.objects.get(id=filter_dict['status'])
931   - else:
932   - status_obj = None
933   -
934   - if filter_dict['example_source']:
935   - nkjp_source_obj = NKJP_Source.objects.get(id=filter_dict['example_source'])
936   - else:
937   - nkjp_source_obj = None
938   -
939   - if filter_dict['approver']:
940   - approver_obj = User.objects.get(id=filter_dict['approver'])
941   - else:
942   - approver_obj = None
943   -
944   - if filter_dict['has_message_from']:
945   - try:
946   - sender_obj = User.objects.get(pk=filter_dict['has_message_from'])
947   - except User.DoesNotExist:
948   - sender_obj = None
949   - else:
950   - sender_obj = None
951   -
952   - reflex_obj, reflex_val = get_frame_char_and_its_value(filter_dict['reflex'], '*')
953   - negativity_obj, negativity_val = get_frame_char_and_its_value(filter_dict['negativity'], '*')
954   - aspect_obj, aspect_val = get_frame_char_and_its_value(filter_dict['aspect'], '*')
955   - pred_obj, pred_val = get_frame_char_and_its_value(filter_dict['predicativity'], '*')
956   -
957   - argument = filter_dict['has_argument']
958   - position = filter_dict['has_position']
959   -
960   - if filter_dict['frame_opinion']:
961   - frame_opinion_obj = Frame_Opinion_Value.objects.get(id=filter_dict['frame_opinion'])
962   - opinion_val = frame_opinion_obj.value
963   - else:
964   - frame_opinion_obj = None
965   - opinion_val = '*'
966   -
967   - if 'frame_phraseologic' in filter_dict:
968   - frame_phraseologic = filter_dict['frame_phraseologic']
969   - else:
970   - frame_phraseologic = False
971   -
972   - if request.session.has_key('lemma_preview') and request.session['lemma_preview']:
973   - request.session['filter_rules_lemma_preview'] = {'pos' : pos_obj,
974   - 'owner' : owner_obj,
975   - 'phraseologist' : phraseologist_obj,
976   - 'semanticist' : semanticist_obj,
977   - 'vocabulary' : vocabulary_obj,
978   - 'status' : status_obj,
979   - 'example_source' : nkjp_source_obj,
980   - 'approver' : approver_obj,
981   - 'reflex' : reflex_obj,
982   - 'negativity' : negativity_obj,
983   - 'predicativity' : pred_obj,
984   - 'aspect' : aspect_obj,
985   - 'argument' : argument,
986   - 'position' : position,
987   - 'frame_opinion' : frame_opinion_obj,
988   - 'sender' : sender_obj,
989   - 'frame_phraseologic' : frame_phraseologic}
990   - else:
991   - request.session['filter_rules'] = {'pos' : pos_obj,
992   - 'owner' : owner_obj,
993   - 'phraseologist' : phraseologist_obj,
994   - 'semanticist' : semanticist_obj,
995   - 'vocabulary' : vocabulary_obj,
996   - 'status' : status_obj,
997   - 'example_source' : nkjp_source_obj,
998   - 'approver' : approver_obj,
999   - 'reflex' : reflex_obj,
1000   - 'negativity' : negativity_obj,
1001   - 'predicativity' : pred_obj,
1002   - 'aspect' : aspect_obj,
1003   - 'argument' : argument,
1004   - 'position' : position,
1005   - 'frame_opinion' : frame_opinion_obj,
1006   - 'sender' : sender_obj,
1007   - 'frame_phraseologic' : frame_phraseologic}
1008   -
1009   - return {'filter_frames': filter_dict['filter_frames'],
1010   - 'reflex' : reflex_val,
1011   - 'negativity' : negativity_val,
1012   - 'predicativity': pred_val,
1013   - 'opinion' : opinion_val,
1014   - 'aspect' : aspect_val,
1015   - 'position' : filter_dict['has_position'],
1016   - 'argument' : filter_dict['has_argument']}
  711 + filter_dict = dict((x['name'], x['value']) for x in form_data)
  712 + schemata_filter_options = save_lemma_filters_and_get_schemata_filter_setup(request, filter_dict)
  713 + return schemata_filter_options
1017 714  
1018 715 @ajax(method='post')
1019 716 def save_columns(request, col_model, col_names, remap):
... ... @@ -1572,10 +1269,7 @@ def get_default_char_value(possibilities):
1572 1269 value = possibilities.get(default=True)
1573 1270 except Frame_Char_Value.DoesNotExist:
1574 1271 value = possibilities.all()[0]
1575   - return value
1576   -
1577   -def escape_regex(string):
1578   - return string.replace('(', '\(').replace(')', '\)').replace('{', '\{').replace('}', '\}').replace('[', '\[').replace(']', '\]')
  1272 + return value
1579 1273  
1580 1274 def regex_query(string):
1581 1275 q_query = []
... ... @@ -2180,7 +1874,7 @@ def get_frame_chars(request, frame_id):
2180 1874 @ajax(method='get', encode_result=False)
2181 1875 def xcp_example_propositions(request, frame, argument_ids, lemma_id):
2182 1876 propositions = []
2183   - lemma_obj = Lemma.objects.get(old=False, id=lemma_id)
  1877 + lemma_obj = Lemma.objects.get(id=lemma_id)
2184 1878 entry = lemma_obj.entry_obj
2185 1879 # TODO: zlikwidowac zaslepke na przyslowki
2186 1880 if entry.pos.tag != 'adv':
... ... @@ -2553,7 +2247,7 @@ def save_new_frames(request, data, id, examples, lemma_examples):
2553 2247 reconnect_examples_operations.remove(disconnect_example_operation(example, nkjp_example_obj))
2554 2248 except ValueError:
2555 2249 reconnect_examples_operations.append(connect_example_operation(example, nkjp_example_obj))
2556   - reconnect_examples(reconnect_examples_operations)
  2250 + reconnect_examples(new_lemma_ver, reconnect_examples_operations)
2557 2251  
2558 2252 # dodawanie przykladow nkjp do czasownika
2559 2253 for example in decoded_lemma_examples:
... ... @@ -2605,10 +2299,12 @@ def validate_new_frames(request, data, id, examples, lemma_examples,
2605 2299  
2606 2300 status_need_validation = False
2607 2301 status_need_examples_check = False
  2302 + status_need_semantic_check = False
2608 2303 try:
2609 2304 status_obj = Lemma_Status.objects.get(id=status_id)
2610 2305 status_need_validation = status_obj.validate
2611 2306 status_need_examples_check = status_obj.check_examples
  2307 + status_need_semantic_check = status_obj.check_semantics
2612 2308 except Lemma_Status.DoesNotExist:
2613 2309 status_obj = None
2614 2310  
... ... @@ -2620,6 +2316,8 @@ def validate_new_frames(request, data, id, examples, lemma_examples,
2620 2316 serialized_frames, error = validate_examples_and_mark_errors(old_object, status_obj, selected_frame_id)
2621 2317 elif status_need_validation or not status_obj:
2622 2318 serialized_frames, error = validate_schemas_and_mark_errors(old_object, status_obj, selected_frame_id)
  2319 + elif status_need_semantic_check:
  2320 + serialized_frames, error = validate_schemata_for_semantics_and_mark_errors(old_object, status_obj, selected_frame_id)
2623 2321  
2624 2322 if error:
2625 2323 return {'id' : '',
... ... @@ -2820,63 +2518,6 @@ def prepare_sort_rules(sort_rules):
2820 2518 prepared_sort_rules.append(rule['name'])
2821 2519 return prepared_sort_rules
2822 2520  
2823   -def pos_regex_frames(frames, string):
2824   - try:
2825   - alternative_queries = []
2826   - for alternative in string.split('|'):
2827   - possible_frames = frames
2828   - for conj in alternative.split('&'):
2829   - model_results = []
2830   - negation = False
2831   - conj = conj.strip()
2832   - if conj.startswith('!'):
2833   - conj = conj.lstrip('!')
2834   - negation = True
2835   - regex = ur'^%s$' % escape_regex(conj)
2836   - model_results = Position.objects.filter(frames__lemmas__old=False,
2837   - text_rep__regex=regex).distinct()
2838   - if model_results.exists():
2839   - if negation:
2840   - possible_frames = possible_frames.exclude(positions__in=model_results)
2841   - else:
2842   - possible_frames = possible_frames.filter(positions__in=model_results)
2843   - elif not model_results.exists() and not negation:
2844   - possible_frames = Frame.objects.none()
2845   - alternative_queries.append(Q(id__in=possible_frames))
2846   - frames = frames.filter(reduce(operator.or_, alternative_queries)).distinct()
2847   - except:
2848   - frames = Frame.objects.none()
2849   - return frames
2850   -
2851   -def arg_regex_frames(frames, string):
2852   - try:
2853   - alternative_queries = []
2854   - for alternative in string.split('|'):
2855   - possible_frames = frames
2856   - for conj in alternative.split('&'):
2857   - model_results = []
2858   - negation = False
2859   - conj = conj.strip()
2860   - if conj.startswith('!'):
2861   - conj = conj.lstrip('!')
2862   - negation = True
2863   - regex = ur'^%s$' % escape_regex(conj)
2864   - model_results = Argument.objects.filter(positions__frames__lemmas__old=False,
2865   - text_rep__regex=regex).distinct()
2866   - if model_results.exists():
2867   - if negation:
2868   - possible_frames = possible_frames.exclude(positions__arguments__in=model_results)
2869   - else:
2870   - possible_frames = possible_frames.filter(positions__arguments__in=model_results)
2871   - elif not model_results.exists() and not negation:
2872   - possible_frames = Frame.objects.none()
2873   - alternative_queries.append(Q(id__in=possible_frames))
2874   - frames = frames.filter(reduce(operator.or_, alternative_queries)).distinct()
2875   - except:
2876   - frames = Frame.objects.none()
2877   - return frames
2878   -
2879   -# @TODO to co tutaj jest prezentowane jest bardzo glupie
2880 2521 def get_lemma_query(prepared_sort_rules, filter_rules, lemma_query, user):
2881 2522 lemmas = Lemma.objects.none()
2882 2523 if user.is_authenticated():
... ... @@ -2897,66 +2538,8 @@ def get_lemma_query(prepared_sort_rules, filter_rules, lemma_query, user):
2897 2538 lemmas = lemmas.filter(reduce(operator.or_, q_vocab_list))
2898 2539 lemmas = lemmas.filter(reduce(operator.or_, q_status_list))
2899 2540  
2900   - ## filtrowanie
2901   - if filter_rules['owner']:
2902   - lemmas = lemmas.filter(owner=filter_rules['owner'])
2903   - if filter_rules['phraseologist']:
2904   - lemmas = lemmas.filter(phraseologist=filter_rules['phraseologist'])
2905   - if filter_rules['semanticist']:
2906   - lemmas = lemmas.filter(semanticist=filter_rules['semanticist'])
2907   - if filter_rules['vocabulary']:
2908   - lemmas = lemmas.filter(vocabulary=filter_rules['vocabulary'])
2909   - if filter_rules['status']:
2910   - lemmas = lemmas.filter(status=filter_rules['status'])
2911   - if filter_rules['frame_opinion']:
2912   - lemmas = lemmas.filter(frame_opinions__value=filter_rules['frame_opinion'])
2913   -
2914   - frames = Frame.objects.all()
2915   - if filter_rules['reflex']:
2916   - frames = frames.filter(characteristics=filter_rules['reflex'])
2917   - if filter_rules['negativity']:
2918   - frames = frames.filter(characteristics=filter_rules['negativity'])
2919   - if filter_rules['predicativity']:
2920   - frames = frames.filter(characteristics=filter_rules['predicativity'])
2921   - if filter_rules['aspect']:
2922   - frames = frames.filter(characteristics=filter_rules['aspect'])
2923   - if filter_rules['position'] and filter_rules['position'] != '.*':
2924   - frames = pos_regex_frames(frames, filter_rules['position'])
2925   - if filter_rules['argument'] and filter_rules['argument'] != '.*':
2926   - frames = arg_regex_frames(frames, filter_rules['argument'])
2927   - if filter_rules['frame_phraseologic']:
2928   - frames = get_phraseologic_frames_only(frames)
2929   -
2930   - if (filter_rules['reflex'] or filter_rules['negativity'] or
2931   - filter_rules['aspect'] or filter_rules['predicativity'] or
2932   - filter_rules['frame_phraseologic'] or filter_rules['frame_opinion'] or
2933   - (filter_rules['argument'] and filter_rules['argument'] != '.*') or
2934   - (filter_rules['position'] and filter_rules['position'] != '.*')):
2935   - if filter_rules['frame_opinion']:
2936   - lemmas = lemmas.filter(frame_opinions__frame__in=frames,
2937   - frame_opinions__value=filter_rules['frame_opinion']).distinct()
2938   - else:
2939   - lemmas = lemmas.filter(frames__in=frames).distinct()
2940   -
2941   - if filter_rules['sender']:
2942   - lemmas = lemmas.filter(messages__sender=filter_rules['sender'])
2943   -
2944   - if filter_rules['pos']:
2945   - lemmas = lemmas.filter(entry_obj__pos=filter_rules['pos'])
2946   -
2947   - if filter_rules['example_source']:
2948   - lemmas = lemmas.filter(Q(nkjp_examples__source=filter_rules['example_source']) &
2949   - Q(nkjp_examples__approved=False)).distinct()
2950   - napproved_examples = NKJP_Example.objects.filter(Q(source=filter_rules['example_source']) &
2951   - Q(approved=False) &
2952   - Q(lemmas__old=False) &
2953   - ~Q(approvers=user)).distinct()
2954   -
2955   - if filter_rules['approver']:
2956   - napproved_examples = napproved_examples.filter(approvers=filter_rules['approver'])
2957   - lemmas = lemmas.filter(nkjp_examples__in=napproved_examples)
2958   -
2959   - lemmas = lemmas.distinct()
  2541 + ## filtrowanie
  2542 + lemmas = filter_lemmas(lemmas, filter_rules, user)
2960 2543  
2961 2544 ## sortowanie
2962 2545 entrySortDefined = False
... ... @@ -3211,3 +2794,28 @@ def get_examples(request, lemma_id):
3211 2794 json_examples = json_encode(examples_js)
3212 2795 return {'examples': json_examples,
3213 2796 'can_modify': user_can_modify(lemma, request.user)}
  2797 +
@ajax(method='get')
def get_schemata_and_examples(request, lemma_id):
    """Return a lemma's schemata and its NKJP examples, JSON-encoded.

    The response carries three independently encoded payloads: the
    schema-level examples, the lemma-level examples and the serialized
    schemata, plus a flag telling the client whether the current user
    may modify the lemma.
    """
    lemma = Lemma.objects.get(id=lemma_id)
    # Both example sets share the same presentation ordering.
    example_ordering = ('source__priority', 'opinion__priority', 'sentence')

    schema_examples = lemma.nkjp_examples.order_by(*example_ordering)
    json_examples = json_encode(
        nkjpExamplesObjToJs(schema_examples, request.user, lemma))

    lemma_level_examples = lemma.lemma_nkjp_examples.order_by(*example_ordering)
    json_lemma_examples = json_encode(
        nkjpLemmaExamplesObjToJs(lemma_level_examples, request.user, lemma))

    serialized_schemata = []
    for schema in lemma.frames.order_by('text_rep'):
        serialized_schemata.append(frameObjToSerializableDict(lemma, schema, True))
    json_schemata = json_encode(serialized_schemata)

    return {'examples': json_examples,
            'lemma_examples': json_lemma_examples,
            'schemata': json_schemata,
            'can_modify': user_can_modify(lemma, request.user)}
... ...
dictionary/ajax_user_stats.py
... ... @@ -22,8 +22,10 @@
22 22  
23 23 """Module covering functions used in user statistics views"""
24 24  
  25 +import operator
  26 +
25 27 from django.contrib.auth.models import User
26   -from django.db.models import Count, Sum
  28 +from django.db.models import Count, Sum, Q
27 29  
28 30 from common.decorators import render, ajax
29 31 from dictionary.models import Lemma, Lemma_Status
... ... @@ -86,8 +88,8 @@ def get_user_stats(request, user_name):
86 88 'all_semantic_owned_lemmas_count': all_semantic_owned_lemmas.count(),
87 89 'all_semantic_owned_frames_count': all_semantic_owned_frames_count,
88 90 'earned_cash': total_earned_cash,
89   - 'paid_cash' : round(user.user_stats.paid_cash, 2),
90   - 'surcharge' : round(user.user_stats.paid_cash-total_earned_cash, 2),
  91 + 'paid_cash': round(user.user_stats.paid_cash, 2),
  92 + 'surcharge': round(user.user_stats.paid_cash-total_earned_cash, 2),
91 93 'lex_work_stats': lex_work_stats,
92 94 'phraseology_work_stats': phraseology_work_stats,
93 95 'semantics_work_stats': semantics_work_stats}
... ... @@ -212,22 +214,23 @@ def get_phraseology_stats(user):
212 214 return phraseology_work_stats
213 215  
214 216 def get_used_bindings(added_bindings):
215   - used_bindings = added_bindings
  217 + unused_bindings = []
216 218 for added_binding in added_bindings.all():
217 219 binded_entry = added_binding.binded_entry
218 220 act_binded_lemma = binded_entry.lemmas.get(old=False)
219 221 if act_binded_lemma.status.type.sym_name == 'erase':
220   - used_bindings = used_bindings.exclude(pk=added_binding.pk)
  222 + unused_bindings.append(added_binding.pk)
221 223 else:
222 224 added_frame = added_binding.phraseologic_frame
223   - act_lemma_phras_frames = act_binded_lemma.frames.annotate(positions_count=Count('positions'))\
224   - .filter(phraseologic=True,
225   - positions_count=added_frame.positions.count())
  225 + act_lemma_phras_frames = act_binded_lemma.frames.filter(phraseologic=True)
  226 + act_lemma_phras_frames = act_lemma_phras_frames.annotate(positions_count=Count('positions'))
  227 + act_lemma_phras_frames = act_lemma_phras_frames.filter(positions_count=added_frame.positions.count())
226 228 for pos in added_frame.positions.all():
227 229 act_lemma_phras_frames = act_lemma_phras_frames.filter(positions__text_rep=pos.text_rep)
228   - if not act_lemma_phras_frames.exists():
229   - used_bindings = used_bindings.exclude(pk=added_binding.pk)
230   - return used_bindings
  230 + if not act_lemma_phras_frames.exists():
  231 + unused_bindings.append(added_binding.pk)
  232 + break
  233 + return added_bindings.exclude(pk__in=unused_bindings)
231 234  
232 235 def get_semantics_stats(user):
233 236 earned_cash = RealizedSemantics.objects.filter(user_stats__user=user).aggregate(Sum('cash'))['cash__sum']
... ... @@ -241,28 +244,40 @@ def get_semantics_stats(user):
241 244 prop_frames = RealizedSemantics.objects.filter(user_stats__user=user).aggregate(Sum('prop_frames'))['prop_frames__sum']
242 245 if prop_frames == None:
243 246 prop_frames = 0
  247 + part_prop_frames = RealizedSemantics.objects.filter(user_stats__user=user).aggregate(Sum('part_prop_frames'))['part_prop_frames__sum']
  248 + if part_prop_frames == None:
  249 + part_prop_frames = 0
244 250 wrong_frames = RealizedSemantics.objects.filter(user_stats__user=user).aggregate(Sum('wrong_frames'))['wrong_frames__sum']
245 251 if wrong_frames == None:
246 252 wrong_frames = 0
247 253 corr_frames = RealizedSemantics.objects.filter(user_stats__user=user).aggregate(Sum('corr_frames'))['corr_frames__sum']
248 254 if corr_frames == None:
249   - corr_frames = 0
  255 + corr_frames = 0
  256 + part_corr_frames = RealizedSemantics.objects.filter(user_stats__user=user).aggregate(Sum('part_corr_frames'))['part_corr_frames__sum']
  257 + if part_corr_frames == None:
  258 + part_corr_frames = 0
250 259 ncorr_frames = RealizedSemantics.objects.filter(user_stats__user=user).aggregate(Sum('ncorr_frames'))['ncorr_frames__sum']
251 260 if ncorr_frames == None:
252 261 ncorr_frames = 0
253 262 made_frames = RealizedSemantics.objects.filter(user_stats__user=user).aggregate(Sum('made_frames'))['made_frames__sum']
254 263 if made_frames == None:
255   - made_frames = 0
  264 + made_frames = 0
  265 + added_connections = RealizedSemantics.objects.filter(user_stats__user=user).aggregate(Sum('added_connections'))['added_connections__sum']
  266 + if added_connections == None:
  267 + added_connections = 0
256 268 efficacy = 0.0
257 269 if prop_frames+wrong_frames > 0:
258 270 efficacy = float(prop_frames)/float(prop_frames+wrong_frames)*100.0
259 271  
260   - sem_work_stats = {'earned_cash' : round(earned_cash, 2),
261   - 'bonus_cash' : round(bonus_cash, 2),
262   - 'prop_frames' : prop_frames,
263   - 'wrong_frames' : wrong_frames,
264   - 'corr_frames' : corr_frames,
265   - 'checked_frames': ncorr_frames+corr_frames,
266   - 'made_frames' : made_frames,
267   - 'efficacy' : round(efficacy, 2)}
  272 + sem_work_stats = {'earned_cash': round(earned_cash, 2),
  273 + 'bonus_cash': round(bonus_cash, 2),
  274 + 'prop_frames': prop_frames,
  275 + 'part_prop_frames': part_prop_frames,
  276 + 'wrong_frames': wrong_frames,
  277 + 'corr_frames': corr_frames,
  278 + 'part_corr_frames': part_corr_frames,
  279 + 'checked_frames': ncorr_frames+corr_frames+part_corr_frames,
  280 + 'made_frames': made_frames,
  281 + 'efficacy': round(efficacy, 2),
  282 + 'added_connections' : added_connections}
268 283 return sem_work_stats
... ...
dictionary/ajax_vocabulary_management.py
... ... @@ -155,6 +155,8 @@ def create_text_walenty(file_name, lemmas, vocabularies, frame_opinions,
155 155 pred_val=pred_val,
156 156 aspect_val=aspect_val).order_by('text_rep')
157 157 for frame in matching_frames:
  158 + if not lemma.phraseology_ready() and frame.phraseologic:
  159 + continue
158 160 if not frame_opinions or founded_frame_opinions.filter(frame=frame).exists():
159 161 text_rep = frame.get_position_spaced_text_rep()
160 162 if add_frame_opinions:
... ...
dictionary/common_func.py
1 1 #-*- coding:utf-8 -*-
2 2  
3   -#Copyright (c) 2012, Bartłomiej Nitoń
4   -#All rights reserved.
5   -
6   -#Redistribution and use in source and binary forms, with or without modification, are permitted provided
7   -#that the following conditions are met:
8   -
9   -# Redistributions of source code must retain the above copyright notice, this list of conditions and
10   -# the following disclaimer.
11   -# Redistributions in binary form must reproduce the above copyright notice, this list of conditions
12   -# and the following disclaimer in the documentation and/or other materials provided with the distribution.
13   -
14   -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
15   -# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
16   -# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
17   -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
18   -# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
19   -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
20   -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21   -# POSSIBILITY OF SUCH DAMAGE.
22   -
23 3 '''
24 4 Common functions used in Slowal application.
25 5 '''
... ... @@ -131,4 +111,6 @@ def have_same_positions_structure(pos1, pos2):
131 111 if(pos1_args == pos2_args and pos1_cats==pos2_cats):
132 112 same_structure = True
133 113 return same_structure
134   -
  114 +
def escape_regex(string):
    """Backslash-escape only the grouping metacharacters ()[]{} in *string*.

    Other regex metacharacters ('.', '*', '|', ...) are deliberately left
    active so callers can still supply wildcard patterns; ``re.escape``
    would be too aggressive for that use case.
    """
    # Single pass over the string instead of six chained str.replace()
    # calls; also avoids the fragile non-raw '\(' escape sequences of the
    # original (a SyntaxWarning on modern Python).
    return ''.join('\\' + char if char in '(){}[]' else char
                   for char in string)
... ...
dictionary/filtering.py 0 → 100644
  1 +# -*- coding: utf-8 -*-
  2 +
  3 +import operator
  4 +
  5 +from django.contrib.auth.models import User
  6 +from django.db.models import Q
  7 +
  8 +from accounts.models import get_anon_profile
  9 +from dictionary.common_func import escape_regex
  10 +from dictionary.forms import FilterForm
  11 +from dictionary.models import Argument, Frame, Frame_Char_Model, Frame_Opinion_Value, \
  12 + Lemma, Lemma_Status, NKJP_Example, NKJP_Source, POS, \
  13 + Position, Vocabulary, \
  14 + get_frame_char_and_its_value, get_schemata_by_type
  15 +from semantics.forms import GeneralSelPrefForm, RelationalSelPrefForm, RoleForm, \
  16 + SynsetSelPrefForm
  17 +from semantics.models import Complement, FrameOpinion, RelationalSelectivePreference, \
  18 + SemanticFrame
  19 +from wordnet.models import LexicalUnit
  20 +
def schemata_filter_options():
    """Collect the option lists used by the schema filtering panel.

    Returns a dict of option groups; every list consists of
    ``{'name': ..., 'value': ...}`` entries and starts with a ``'*'``
    wildcard entry.
    """
    def char_options(model_name):
        # Values of one frame characteristic, highest priority first,
        # behind the '*' wildcard entry.
        model = Frame_Char_Model.objects.get(model_name=model_name)
        options = [{'name': '*', 'value': '*'}]
        for val in model.frame_char_values.order_by('-priority'):
            options.append({'name': val.value, 'value': val.value})
        return options

    # Schema opinions, ordered by priority, behind the '*' wildcard.
    opinion_options = [{'name': '*', 'value': '*'}]
    for val in Frame_Opinion_Value.objects.order_by('priority'):
        opinion_options.append({'name': val.value, 'value': val.value})

    schema_type_options = [{'name': '*', 'value': '*'},
                           {'name': 'normalny', 'value': 'normal'},
                           {'name': 'frazeologiczny', 'value': 'phraseologic'}]

    return {'schema_type_options': schema_type_options,
            'reflex_options': char_options(u'ZWROTNOŚĆ'),
            'aspect_options': char_options(u'ASPEKT'),
            'neg_options': char_options(u'NEGATYWNOŚĆ'),
            'pred_options': char_options(u'PREDYKATYWNOŚĆ'),
            'opinion_options': opinion_options}
  60 +
def all_filter_rules_loaded(rules):
    """Return True iff *rules* has exactly the keys of the default lemma
    filter rules (no missing and no extra entries).

    Used to detect stale session data after the rule schema changes.
    """
    # Direct boolean comparison replaces the redundant
    # ``if cond: return False / return True`` pattern.
    return set(default_filter_rules().keys()) == set(rules)
  65 +
def default_filter_rules():
    """Return a fresh dict of lemma-filter defaults.

    Regex-style filters default to the match-anything pattern ``'.*'``,
    object-valued filters to ``None`` ("not selected"), and the semantic
    argument alternatives to an empty list.
    """
    rules = {}
    # Regex-style text filters.
    for regex_key in ('lemma', 'argument', 'position'):
        rules[regex_key] = '.*'
    # Object-valued filters: nothing selected.
    for obj_key in ('pos', 'contains_phraseology', 'owner', 'phraseologist',
                    'semanticist', 'vocabulary', 'status', 'example_source',
                    'approver', 'reflex', 'negativity', 'predicativity',
                    'aspect', 'schema_opinion', 'sender', 'schema_type',
                    'frame_opinion'):
        rules[obj_key] = None
    # Semantic-argument filter: list of alternatives, initially empty.
    rules['sem_arguments'] = []
    return rules
  88 +
def prepare_filter_form(request):
    """Build the lemma FilterForm from the rules stored in the session.

    Separate rule sets are kept for the lemma preview
    ('filter_rules_lemma_preview') and the main lemma view
    ('filter_rules'); whichever applies is created on demand with the
    defaults. Returns the form plus the semantic-argument sub-forms.
    """
    in_preview = (request.session.has_key('lemma_preview')
                  and request.session['lemma_preview'])
    rules_key = 'filter_rules_lemma_preview' if in_preview else 'filter_rules'
    if not request.session.has_key(rules_key):
        request.session[rules_key] = default_filter_rules()
    filter_rules = request.session[rules_key]

    # Anonymous users get empty choice sets and only the statuses the
    # anonymous profile is allowed to see.
    users = User.objects.none()
    phraseologists = User.objects.none()
    semanticists = User.objects.none()
    vocabularies = Vocabulary.objects.none()
    senders = User.objects.none()
    statuses = get_anon_profile().visible_statuses.all()
    can_confirm_example = False
    if request.user.is_authenticated():
        users = User.objects.filter(lemmas__old=False).distinct().order_by('username')
        phraseologists = User.objects.filter(phraseologist_lemmas__old=False).distinct().order_by('username')
        semanticists = User.objects.filter(semanticist_lemmas__old=False).distinct().order_by('username')
        vocabularies = request.user.visible_vocabularies.all()
        senders = User.objects.order_by('groups__group_settings__priority')
        statuses = Lemma_Status.objects.all()
        can_confirm_example = (request.user.is_superuser or
                               request.user.has_perm('dictionary.confirm_example'))

    form = FilterForm(users=users,
                      phraseologists=phraseologists,
                      semanticists=semanticists,
                      vocabularies=vocabularies,
                      senders=senders,
                      statuses=statuses,
                      lemma=filter_rules['lemma'],
                      sel_pos=filter_rules['pos'],
                      contains_phraseology=filter_rules['contains_phraseology'],
                      sel_user=filter_rules['owner'],
                      sel_phraseologist=filter_rules['phraseologist'],
                      sel_semanticist=filter_rules['semanticist'],
                      sel_vocabulary=filter_rules['vocabulary'],
                      sel_status=filter_rules['status'],
                      sel_reflex=filter_rules['reflex'],
                      sel_negativity=filter_rules['negativity'],
                      sel_predicativity=filter_rules['predicativity'],
                      sel_aspect=filter_rules['aspect'],
                      sel_has_argument=filter_rules['argument'],
                      sel_has_position=filter_rules['position'],
                      sel_schema_opinion=filter_rules['schema_opinion'],
                      can_confirm_example=can_confirm_example,
                      sel_example_source=filter_rules['example_source'],
                      sel_approver=filter_rules['approver'],
                      sel_sender=filter_rules['sender'],
                      sel_schema_type=filter_rules['schema_type'],
                      sel_frame_opinion=filter_rules['frame_opinion'])
    return {'form': form,
            'sem_args_forms': sem_args_to_forms(filter_rules['sem_arguments'])}
  145 +
def sem_args_to_forms(sem_arguments):
    """Convert stored semantic-argument filter alternatives into forms.

    Produces a flat list of RoleForm instances with the literal string
    ``'or'`` inserted between consecutive alternatives.
    """
    forms = []
    for index, alternative in enumerate(sem_arguments):
        if index:  # separator before every alternative except the first
            forms.append('or')
        for arg in alternative:
            forms.append(RoleForm(negation=arg['negation'],
                                  sel_role=arg['role'],
                                  sel_attribute=arg['attribute'],
                                  sel_preferences=get_sel_prefs_as_forms(arg)))
    return forms
  160 +
def get_sel_prefs_as_forms(arg):
    """Collect all selectional-preference forms for one argument entry.

    General, synset and relational preferences are gathered in that
    fixed order; empty groups contribute nothing.
    """
    forms = []
    for prefs, to_forms in ((arg['general_prefs'], general_prefs_to_forms),
                            (arg['synset_prefs'], synset_prefs_to_forms),
                            (arg['relational_prefs'], relational_prefs_to_forms)):
        if prefs:
            forms.extend(to_forms(prefs))
    return forms
  170 +
def general_prefs_to_forms(prefs):
    """Wrap each general selectional preference in a GeneralSelPrefForm.

    Replaces the manual append loop with a list comprehension (same result,
    idiomatic form).
    """
    return [GeneralSelPrefForm(sel_preference=pref) for pref in prefs]
  176 +
def synset_prefs_to_forms(prefs):
    """Wrap each synset selectional preference in a SynsetSelPrefForm.

    Replaces the manual append loop with a list comprehension (same result,
    idiomatic form).
    """
    return [SynsetSelPrefForm(sel_preference=pref) for pref in prefs]
  182 +
def relational_prefs_to_forms(prefs):
    """Wrap each relational preference dict (keys: 'relation', 'role',
    'attribute') in a RelationalSelPrefForm.

    Replaces the manual append loop with a list comprehension (same result,
    idiomatic form).
    """
    return [RelationalSelPrefForm(sel_relation=pref['relation'],
                                  sel_role=pref['role'],
                                  sel_attribute=pref['attribute'])
            for pref in prefs]
  190 +
def save_lemma_filters_and_get_schemata_filter_setup(request, filter_dict):
    """Resolve raw filter values to model objects, store them as the
    session's active lemma filter rules, and return the schema-filter setup.

    Rules are stored under 'filter_rules_lemma_preview' when the session is
    in lemma preview mode, under 'filter_rules' otherwise.  The returned
    dict carries the values needed to (re)initialize schema filtering.

    Fixes applied: the 20-line filter-rules dict was previously duplicated
    verbatim in both session branches (now built once); repeated
    get-object-or-None boilerplate factored into a helper; deprecated
    ``has_key`` replaced with ``session.get``.
    """
    def resolve(model, field, value):
        # Map a truthy raw filter value to its model instance; empty -> None.
        if not value:
            return None
        return model.objects.get(**{field: value})

    pos_obj = resolve(POS, 'id', filter_dict['pos'])
    owner_obj = resolve(User, 'id', filter_dict['owner'])
    phraseologist_obj = resolve(User, 'id', filter_dict['phraseologist'])
    semanticist_obj = resolve(User, 'id', filter_dict['semanticist'])
    vocabulary_obj = resolve(Vocabulary, 'name', filter_dict['vocabulary'])
    status_obj = resolve(Lemma_Status, 'id', filter_dict['status'])
    nkjp_source_obj = resolve(NKJP_Source, 'id', filter_dict['example_source'])
    approver_obj = resolve(User, 'id', filter_dict['approver'])

    # The message sender may no longer exist; a dangling id means no filter.
    if filter_dict['has_message_from']:
        try:
            sender_obj = User.objects.get(pk=filter_dict['has_message_from'])
        except User.DoesNotExist:
            sender_obj = None
    else:
        sender_obj = None

    reflex_obj, reflex_val = get_frame_char_and_its_value(filter_dict['reflex'], '*')
    negativity_obj, negativity_val = get_frame_char_and_its_value(filter_dict['negativity'], '*')
    aspect_obj, aspect_val = get_frame_char_and_its_value(filter_dict['aspect'], '*')
    pred_obj, pred_val = get_frame_char_and_its_value(filter_dict['predicativity'], '*')

    if filter_dict['schema_opinion']:
        schema_opinion_obj = Frame_Opinion_Value.objects.get(id=filter_dict['schema_opinion'])
        opinion_val = schema_opinion_obj.value
    else:
        schema_opinion_obj = None
        opinion_val = '*'

    # Absent key -> None; a present-but-empty value is kept as-is.
    schema_type = filter_dict.get('schema_type')

    if filter_dict.get('frame_opinion'):
        frame_opinion = FrameOpinion.objects.get(id=filter_dict['frame_opinion'])
    else:
        frame_opinion = None

    # Drop empty alternatives left over from the form.
    sem_arguments = [constraints for constraints in filter_dict['sem_arguments']
                     if constraints != []]

    filter_rules = {'pos'                 : pos_obj,
                    'contains_phraseology': filter_dict['contains_phraseology'],
                    'owner'               : owner_obj,
                    'phraseologist'       : phraseologist_obj,
                    'semanticist'         : semanticist_obj,
                    'vocabulary'          : vocabulary_obj,
                    'status'              : status_obj,
                    'example_source'      : nkjp_source_obj,
                    'approver'            : approver_obj,
                    'reflex'              : reflex_obj,
                    'negativity'          : negativity_obj,
                    'predicativity'       : pred_obj,
                    'aspect'              : aspect_obj,
                    'argument'            : filter_dict['has_argument'],
                    'position'            : filter_dict['has_position'],
                    'lemma'               : filter_dict['lemma'],
                    'schema_opinion'      : schema_opinion_obj,
                    'sender'              : sender_obj,
                    'schema_type'         : schema_type,
                    'frame_opinion'       : frame_opinion,
                    'sem_arguments'       : sem_arguments}

    if request.session.get('lemma_preview'):
        request.session['filter_rules_lemma_preview'] = filter_rules
    else:
        request.session['filter_rules'] = filter_rules

    return {'filter_frames': filter_dict['filter_frames'],
            'schema_type'  : schema_type,
            'reflex'       : reflex_val,
            'negativity'   : negativity_val,
            'predicativity': pred_val,
            'opinion'      : opinion_val,
            'aspect'       : aspect_val,
            'position'     : filter_dict['has_position'],
            'argument'     : filter_dict['has_argument']}
  318 +
def filter_lemmas(lemmas, filter_rules, user):
    """Narrow a lemma queryset by lemma-level properties, syntactic
    schemata and semantic frames, in that order.
    """
    narrowed = filter_by_lemma_properties(lemmas, filter_rules, user)
    narrowed = filter_by_schemata(narrowed, filter_rules)
    return filter_by_frames(narrowed, filter_rules)
  324 +
def filter_by_lemma_properties(lemmas, filter_rules, user):
    """Narrow the lemma queryset by entry-level criteria.

    Each rule is applied only when set: ownership roles, vocabulary,
    status, schema opinion, lemma-name pattern, message sender, POS,
    phraseology, and example source/approver.  Returns a distinct queryset.
    """
    if filter_rules['owner']:
        lemmas = lemmas.filter(owner=filter_rules['owner'])
    if filter_rules['phraseologist']:
        lemmas = lemmas.filter(phraseologist=filter_rules['phraseologist'])
    if filter_rules['semanticist']:
        lemmas = lemmas.filter(semanticist=filter_rules['semanticist'])
    if filter_rules['vocabulary']:
        lemmas = lemmas.filter(vocabulary=filter_rules['vocabulary'])
    if filter_rules['status']:
        lemmas = lemmas.filter(status=filter_rules['status'])
    if filter_rules['schema_opinion']:
        lemmas = lemmas.filter(frame_opinions__value=filter_rules['schema_opinion'])
    # '.*' is the form's match-everything default, so it is skipped.
    if filter_rules['lemma'] and filter_rules['lemma'] != '.*':
        lemmas = lemma_regex_filter(lemmas, filter_rules['lemma'])
    if filter_rules['sender']:
        lemmas = lemmas.filter(messages__sender=filter_rules['sender'])
    if filter_rules['pos']:
        lemmas = lemmas.filter(entry_obj__pos=filter_rules['pos'])
    if filter_rules['contains_phraseology']:
        # 'yes' keeps only lemmas with a phraseologic frame; any other
        # truthy value excludes them.
        phraseologic_lemmas = lemmas.filter(frames__phraseologic=True)
        if filter_rules['contains_phraseology'] == 'yes':
            lemmas = phraseologic_lemmas
        else:
            lemmas = lemmas.exclude(pk__in=phraseologic_lemmas)
    if filter_rules['example_source']:
        # Keep lemmas that still have unapproved examples from the source,
        # excluding examples this user has already approved.
        lemmas = lemmas.filter(Q(nkjp_examples__source=filter_rules['example_source']) &
                               Q(nkjp_examples__approved=False)).distinct()
        napproved_examples = NKJP_Example.objects.filter(Q(source=filter_rules['example_source']) &
                                                         Q(approved=False) &
                                                         Q(lemmas__old=False) &
                                                         ~Q(approvers=user)).distinct()

        if filter_rules['approver']:
            napproved_examples = napproved_examples.filter(approvers=filter_rules['approver'])
        lemmas = lemmas.filter(nkjp_examples__in=napproved_examples)
    lemmas = lemmas.distinct()
    return lemmas
  363 +
def lemma_regex_filter(lemmas, string):
    """Filter lemmas by an entry-name pattern expression.

    Grammar: '|' separates alternatives, '&' separates conjuncts within an
    alternative, and a leading '!' negates a conjunct.  Each conjunct is
    matched as a full (anchored) regex against non-old lemma entry names.
    """
    try:
        alternative_queries = []
        for alternative in string.split('|'):
            possible_lemmas = lemmas
            for conj in alternative.split('&'):
                model_results = []
                negation = False
                conj = conj.strip()
                if conj.startswith('!'):
                    conj = conj.lstrip('!')
                    negation = True
                # Anchor the user pattern so it must match the whole entry name.
                regex = ur'^%s$' % escape_regex(conj)
                model_results = Lemma.objects.filter(old=False,
                                                     entry_obj__name__regex=regex).distinct()
                if model_results.exists():
                    if negation:
                        possible_lemmas = possible_lemmas.exclude(pk__in=model_results)
                    else:
                        possible_lemmas = possible_lemmas.filter(pk__in=model_results)
                elif not model_results.exists() and not negation:
                    # A positive conjunct with no matches empties this alternative.
                    possible_lemmas = Lemma.objects.none()
            alternative_queries.append(Q(id__in=possible_lemmas))
        lemmas = lemmas.filter(reduce(operator.or_, alternative_queries)).distinct()
    except:
        # Malformed patterns (or empty input, which makes reduce() raise)
        # deliberately fall back to an empty result.
        lemmas = Lemma.objects.none()
    return lemmas
  391 +
def filter_by_schemata(lemmas, filter_rules):
    """Narrow lemmas to those having at least one schema (Frame) that
    satisfies every set schema-level rule.

    Lemmas are only re-filtered when at least one schema rule is actually
    active; otherwise the queryset is returned unchanged.
    """
    schemata = Frame.objects
    if filter_rules['reflex']:
        schemata = schemata.filter(characteristics=filter_rules['reflex'])
    if filter_rules['negativity']:
        schemata = schemata.filter(characteristics=filter_rules['negativity'])
    if filter_rules['predicativity']:
        schemata = schemata.filter(characteristics=filter_rules['predicativity'])
    if filter_rules['aspect']:
        schemata = schemata.filter(characteristics=filter_rules['aspect'])
    # '.*' is the match-everything default for pattern rules.
    if filter_rules['position'] and filter_rules['position'] != '.*':
        schemata = pos_regex_frames(schemata, filter_rules['position'])
    if filter_rules['argument'] and filter_rules['argument'] != '.*':
        schemata = arg_regex_frames(schemata, filter_rules['argument'])
    if filter_rules['schema_type']:
        schemata = get_schemata_by_type(filter_rules['schema_type'], schemata)

    # This condition mirrors the rule list above: only touch the lemma
    # queryset when some schema rule was applied.
    if (filter_rules['reflex'] or filter_rules['negativity'] or
        filter_rules['aspect'] or filter_rules['predicativity'] or
        filter_rules['schema_type'] or filter_rules['schema_opinion'] or
        (filter_rules['argument'] and filter_rules['argument'] != '.*') or
        (filter_rules['position'] and filter_rules['position'] != '.*')):
        if filter_rules['schema_opinion']:
            # Tie the schema match to the required opinion on that schema.
            lemmas = lemmas.filter(frame_opinions__frame__in=schemata.all(),
                                   frame_opinions__value=filter_rules['schema_opinion'])
        else:
            lemmas = lemmas.filter(frames__in=schemata.all())
        lemmas = lemmas.distinct()
    return lemmas
  421 +
def pos_regex_frames(frames, string):
    """Filter frames by a position (text representation) pattern.

    Same grammar as lemma_regex_filter: '|' for alternatives, '&' for
    conjuncts, leading '!' for negation; each conjunct matches a whole
    position text_rep.
    """
    try:
        alternative_queries = []
        for alternative in string.split('|'):
            possible_frames = frames
            for conj in alternative.split('&'):
                model_results = []
                negation = False
                conj = conj.strip()
                if conj.startswith('!'):
                    conj = conj.lstrip('!')
                    negation = True
                # Anchor the user pattern so it must match the whole text_rep.
                regex = ur'^%s$' % escape_regex(conj)
                model_results = Position.objects.filter(frames__lemmas__old=False,
                                                        text_rep__regex=regex).distinct()
                if model_results.exists():
                    if negation:
                        possible_frames = possible_frames.exclude(positions__in=model_results)
                    else:
                        possible_frames = possible_frames.filter(positions__in=model_results)
                elif not model_results.exists() and not negation:
                    # A positive conjunct with no matches empties this alternative.
                    possible_frames = Frame.objects.none()
            alternative_queries.append(Q(id__in=possible_frames))
        frames = frames.filter(reduce(operator.or_, alternative_queries)).distinct()
    except:
        # Malformed patterns deliberately fall back to an empty result.
        frames = Frame.objects.none()
    return frames
  449 +
def arg_regex_frames(frames, string):
    """Filter frames by an argument (phrase type) pattern.

    Same grammar as lemma_regex_filter: '|' for alternatives, '&' for
    conjuncts, leading '!' for negation; each conjunct matches a whole
    argument text_rep.
    """
    try:
        alternative_queries = []
        for alternative in string.split('|'):
            possible_frames = frames
            for conj in alternative.split('&'):
                model_results = []
                negation = False
                conj = conj.strip()
                if conj.startswith('!'):
                    conj = conj.lstrip('!')
                    negation = True
                # Anchor the user pattern so it must match the whole text_rep.
                regex = ur'^%s$' % escape_regex(conj)
                model_results = Argument.objects.filter(positions__frames__lemmas__old=False,
                                                        text_rep__regex=regex).distinct()
                if model_results.exists():
                    if negation:
                        possible_frames = possible_frames.exclude(positions__arguments__in=model_results)
                    else:
                        possible_frames = possible_frames.filter(positions__arguments__in=model_results)
                elif not model_results.exists() and not negation:
                    # A positive conjunct with no matches empties this alternative.
                    possible_frames = Frame.objects.none()
            alternative_queries.append(Q(id__in=possible_frames))
        frames = frames.filter(reduce(operator.or_, alternative_queries)).distinct()
    except:
        # Malformed patterns deliberately fall back to an empty result.
        frames = Frame.objects.none()
    return frames
  477 +
def filter_by_frames(lemmas, filter_rules):
    """Restrict lemmas to those whose entry has a current (non-removed,
    newest-version) semantic frame matching the frame-level rules.
    """
    opinion_rule = filter_rules['frame_opinion']
    args_rule = filter_rules['sem_arguments']
    frames = SemanticFrame.objects.filter(next__isnull=True, removed=False)
    if opinion_rule:
        frames = frames.filter(opinion=opinion_rule)
    if args_rule:
        frames = get_frames_by_args_rule(frames, args_rule)
    # Only re-filter lemmas if some frame rule was actually active.
    if opinion_rule or args_rule:
        lemmas = lemmas.filter(entry_obj__meanings__frames__in=frames).distinct()
    return lemmas
  487 +
def get_frames_by_args_rule(frames, args_filter_rule):
    """Return frames that satisfy at least one alternative of the
    semantic-argument filter rules.
    """
    matching_ids = set()
    for alternative in args_filter_rule:
        ids = get_matching_frames(frames, alternative).values_list('id', flat=True)
        matching_ids.update(ids)
    return frames.filter(id__in=list(matching_ids))
  494 +
def get_matching_frames(frames, arguments_rules):
    """Successively narrow frames by each argument rule: a rule either
    requires (default) or forbids ('negation') a matching complement.
    """
    for rule in arguments_rules:
        complements = matching_complements(rule)
        if rule['negation']:
            frames = frames.exclude(complements__in=complements)
        else:
            frames = frames.filter(complements__in=complements)
    return frames
  502 +
def matching_complements(filter_rules):
    """Return complements satisfying the role/attribute and selectional
    preference rules of a single semantic-argument filter.
    """
    role = filter_rules['role']
    attribute = filter_rules['attribute']
    general = filter_rules['general_prefs']
    synsets = filter_rules['synset_prefs']
    relational = filter_rules['relational_prefs']

    complements = Complement.objects
    if role:
        complements = complements.filter(roles=role)
    if attribute:
        complements = complements.filter(roles=attribute)
    if general or synsets or relational:
        # Any preference rule requires the complement to carry preferences.
        complements = complements.filter(selective_preference__isnull=False)
    if general:
        complements = filter_by_general_prefs(complements, general)
    if synsets:
        complements = filter_by_synset_prefs(complements, synsets)
    if relational:
        complements = filter_by_relational_prefs(complements, relational)
    return complements.all()
  518 +
def filter_by_general_prefs(complements, prefs):
    """Keep complements whose selectional preference contains every
    distinct, non-empty general preference in prefs.
    """
    complements = complements.exclude(selective_preference__generals=None)
    for pref in set(prefs):
        if not pref:
            continue
        complements = complements.filter(selective_preference__generals=pref)
    return complements
  525 +
def filter_by_synset_prefs(complements, prefs):
    """Keep complements whose selectional preference contains the synset
    of every lexical unit named as 'base-sense' in prefs.

    Fix: split on the LAST hyphen via rsplit — the lexical-unit base may
    itself contain hyphens (e.g. u'kogel-mogel-1'), which the previous
    split('-')[0]/[1] parsing mishandled.  Any unresolvable preference
    (bad format, unknown lexical unit) empties the result, preserving the
    original strictness.
    """
    complements = complements.exclude(selective_preference__synsets=None)
    for pref in list(set(prefs)):
        if pref:
            try:
                base, sense = pref.rsplit('-', 1)
                synset = LexicalUnit.objects.get(base=base, sense=sense).synset
                complements = complements.filter(selective_preference__synsets=synset)
            except:
                # Deliberate best-effort fallback: unmatched preference
                # yields no complements at all.
                complements = Complement.objects.none()
    return complements
  539 +
def filter_by_relational_prefs(complements, prefs):
    """Keep complements whose selectional preference includes a relational
    preference matching each rule in prefs.

    Each rule is a dict with 'relation', 'role' and 'attribute' keys;
    rules with all three empty are ignored.
    """
    complements = complements.exclude(selective_preference__relations=None)
    for pref in prefs:
        if pref['relation'] or pref['role'] or pref['attribute']:
            relational_prefs = RelationalSelectivePreference.objects
            if pref['relation']:
                relational_prefs = relational_prefs.filter(relation=pref['relation'])
            if pref['role'] or pref['attribute']:
                # Constrain the target ('to') complement of the relation
                # by the requested role/attribute.
                to_complements = Complement.objects
                if pref['role']:
                    to_complements = to_complements.filter(roles=pref['role'])
                if pref['attribute']:
                    to_complements = to_complements.filter(roles=pref['attribute'])
                relational_prefs = relational_prefs.filter(to__in=to_complements.all()).distinct()
            complements = complements.filter(selective_preference__relations__in=relational_prefs).distinct()
    return complements
  556 +
0 557 \ No newline at end of file
... ...
dictionary/forms.py
1 1 #-*- coding:utf-8 -*-
2 2  
3   -#Copyright (c) 2012, Bartłomiej Nitoń
4   -#All rights reserved.
5   -
6   -#Redistribution and use in source and binary forms, with or without modification, are permitted provided
7   -#that the following conditions are met:
8   -
9   -# Redistributions of source code must retain the above copyright notice, this list of conditions and
10   -# the following disclaimer.
11   -# Redistributions in binary form must reproduce the above copyright notice, this list of conditions
12   -# and the following disclaimer in the documentation and/or other materials provided with the distribution.
13   -
14   -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
15   -# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
16   -# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
17   -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
18   -# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
19   -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
20   -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21   -# POSSIBILITY OF SUCH DAMAGE.
22   -
23 3 import datetime
24 4  
25 5 from django.contrib.auth.forms import UserCreationForm
... ... @@ -28,6 +8,7 @@ from django.db.models import Q
28 8 from django.forms import *
29 9  
30 10 from dictionary.models import *
  11 +from semantics.models import FrameOpinion
31 12 #Frame, Argument_Model, AttrValueSelectionMode, \
32 13 # AttrValuesSeparator, Atribute_Value, PositionCategory, \
33 14 # Argument, Atribute_Model, ArgRealOpinion, Frame_Characteristic, \
... ... @@ -529,9 +510,14 @@ class MessageForm(ModelForm):
529 510 exclude = ('sender', 'lemma', 'new', 'recipient')
530 511  
531 512  
532   -############################ sorting, filtering
  513 +############################ sorting, filtering
  514 +
533 515 class FilterForm(Form):
  516 + # Lemma filters
  517 + lemma = forms.CharField(label=u'Lemat', required=False)
534 518 pos = ModelChoiceField(label=u'Część mowy', queryset=POS.objects.none(), required=False)
  519 + contains_phraseology = ChoiceField(choices=[('', '---------'), ('yes', 'zawiera'), ('no', 'nie zawiera')],
  520 + label=u'Frazeologia', required=False)
535 521 owner = ModelChoiceField(label=u'Właściciel', queryset=User.objects.none(), required=False)
536 522 phraseologist = ModelChoiceField(label=u'Frazeolog', queryset=User.objects.none(), required=False)
537 523 semanticist = ModelChoiceField(label=u'Semantyk', queryset=User.objects.none(), required=False)
... ... @@ -544,7 +530,10 @@ class FilterForm(Form):
544 530 approver = ModelChoiceField(label=u'Zatwierdzający przykład', queryset=User.objects.filter(Q(groups__permissions__codename='confirm_example') |
545 531 Q(is_superuser=True)).distinct().order_by('username'),
546 532 required=False)
547   - reflex = ModelChoiceField(label=u'Zwrotność',
  533 + # Schema filters
  534 + schema_type = ChoiceField(choices=[('', '---------'), ('normal', 'normalny'), ('phraseologic', 'frazeologiczny')],
  535 + label=u'Typ schematu', required=False)
  536 + reflex = ModelChoiceField(label=u'Zwrotność',
548 537 queryset=Frame_Characteristic.objects.filter(type=u'ZWROTNOŚĆ').order_by('value__priority'),
549 538 required=False)
550 539 negativity = ModelChoiceField(label=u'Negatywność',
... ... @@ -558,28 +547,34 @@ class FilterForm(Form):
558 547 required=False)
559 548 has_argument = forms.CharField(label=u'Zawiera typ frazy', required=False)
560 549 has_position = forms.CharField(label=u'Zawiera pozycję', required=False)
561   - frame_opinion = ModelChoiceField(label=u'Opinia o schemacie', queryset=Frame_Opinion_Value.objects.all(),
562   - required=False)
563   - frame_phraseologic = forms.BooleanField(label=u'Schemat frazeologiczny', initial=False,
564   - required=False)
  550 + schema_opinion = ModelChoiceField(label=u'Opinia o schemacie', queryset=Frame_Opinion_Value.objects.all(),
  551 + required=False)
565 552 filter_frames = forms.BooleanField(label=u'Odfiltruj niepasujące schematy', initial=False,
566 553 required=False)
  554 + # Frame filters
  555 + frame_opinion = ModelChoiceField(label=u'Opinia o ramie', queryset=FrameOpinion.objects.order_by('priority'),
  556 + required=False)
567 557  
568 558 def __init__(self, users, phraseologists, semanticists, vocabularies, statuses, senders,
569   - sel_pos=None, sel_user=None, sel_phraseologist=None, sel_semanticist=None,
570   - sel_vocabulary=None, sel_status=None, frame_phraseologic=False,#sel_old_property=None,
  559 + lemma='.*', sel_pos=None, contains_phraseology=None,
  560 + sel_user=None, sel_phraseologist=None, sel_semanticist=None,
  561 + sel_vocabulary=None, sel_status=None, sel_schema_type=None,
571 562 sel_reflex=None, sel_negativity=None, sel_predicativity=None,
572   - sel_aspect=None, sel_has_argument='.*', sel_has_position='.*', #sel_has_frame='.*',
573   - sel_frame_opinion=None, can_confirm_example=False, sel_example_source=None,
574   - sel_approver=None, sel_sender=None, *args, **kwargs):
  563 + sel_aspect=None, sel_has_argument='.*', sel_has_position='.*',
  564 + sel_schema_opinion=None, can_confirm_example=False, sel_example_source=None,
  565 + sel_approver=None, sel_sender=None,
  566 + sel_frame_opinion=None, *args, **kwargs):
575 567 super(FilterForm, self).__init__(*args, **kwargs)
  568 +
576 569 self.fields['pos'].queryset = POS.objects.exclude(tag='unk')
577 570 self.fields['owner'].queryset = users
578 571 self.fields['phraseologist'].queryset = phraseologists
579 572 self.fields['semanticist'].queryset = semanticists
580 573 self.fields['vocabulary'].queryset = vocabularies
581 574 self.fields['status'].queryset = statuses
  575 + self.fields['lemma'].initial = lemma
582 576 self.fields['pos'].initial = sel_pos
  577 + self.fields['contains_phraseology'].initial = contains_phraseology
583 578 self.fields['owner'].initial = sel_user
584 579 self.fields['phraseologist'].initial = sel_phraseologist
585 580 self.fields['semanticist'].initial = sel_semanticist
... ... @@ -593,23 +588,20 @@ class FilterForm(Form):
593 588 self.fields['example_source'].initial = None
594 589 self.fields['approver'].widget = self.fields['approver'].hidden_widget()
595 590 self.fields['approver'].initial = None
596   -# self.fields['has_old_frames_property'].initial = sel_old_property
597 591 self.fields['reflex'].initial = sel_reflex
598 592 self.fields['negativity'].initial = sel_negativity
599 593 self.fields['predicativity'].initial = sel_predicativity
600 594 self.fields['aspect'].initial = sel_aspect
601 595 self.fields['has_argument'].initial = sel_has_argument
602 596 self.fields['has_position'].initial = sel_has_position
603   - #self.fields['has_frame'].initial = sel_has_frame
604   - self.fields['frame_opinion'].initial = sel_frame_opinion
  597 + self.fields['schema_opinion'].initial = sel_schema_opinion
605 598 self.fields['has_message_from'].initial = sel_sender
606 599 self.fields['has_message_from'].queryset = senders
607 600  
608   - self.fields['frame_phraseologic'].initial = frame_phraseologic
  601 + self.fields['schema_type'].initial = sel_schema_type
  602 + self.fields['frame_opinion'].initial = sel_frame_opinion
609 603  
610 604 self.hide_unused_fields()
611   -
612   - #self.fields['has_frame'].widget = self.fields['has_frame'].hidden_widget()
613 605  
614 606 def hide_unused_fields(self):
615 607 for field_name in self.fields:
... ...
dictionary/management/commands/approve_examples.py 0 → 100644
  1 +#-*- coding:utf-8 -*-
  2 +
  3 +from django.core.management.base import BaseCommand
  4 +
  5 +from dictionary.models import NKJP_Example
  6 +
  7 +class Command(BaseCommand):
  8 + args = 'none'
  9 + help = ""
  10 +
  11 + def handle(self, **options):
  12 + approve_examples()
  13 +
  14 +def approve_examples():
  15 + for example in NKJP_Example.objects.filter(approved=False):
  16 + if example.approvers.count() > 0:
  17 + example.approved = True
  18 + example.save()
  19 + print example
  20 +
0 21 \ No newline at end of file
... ...
dictionary/management/commands/create_TEI_walenty.py
1 1 #-*- coding:utf-8 -*-
2 2  
3   -#Copyright (c) 2015, Bartłomiej Nitoń
4   -#All rights reserved.
5   -
6   -#Redistribution and use in source and binary forms, with or without modification, are permitted provided
7   -#that the following conditions are met:
8   -
9   -# Redistributions of source code must retain the above copyright notice, this list of conditions and
10   -# the following disclaimer.
11   -# Redistributions in binary form must reproduce the above copyright notice, this list of conditions
12   -# and the following disclaimer in the documentation and/or other materials provided with the distribution.
13   -
14   -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
15   -# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
16   -# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
17   -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
18   -# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
19   -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
20   -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21   -# POSSIBILITY OF SUCH DAMAGE.
22   -
23 3 import datetime
24 4 import os
25 5  
... ...
dictionary/management/commands/get_examples.py
1 1 #-*- coding:utf-8 -*-
2 2  
3 3 import codecs
4   -import operator
  4 +import datetime
5 5 import os
6   -import re
7   -from subprocess import call
8   -from tempfile import mkdtemp, mkstemp
9 6  
10 7 from django.core.management.base import BaseCommand
11   -from django.utils.encoding import smart_str
12   -from django.db.models import Q
13 8  
14   -#import corpus2
15   -from common.morfeusz import analyse
16   -
17   -from dictionary.models import Argument, Lemma
  9 +from dictionary.models import Lemma, get_ready_statuses
18 10 from settings import PROJECT_PATH
19 11  
20 12 BASE_PATH = os.path.join(PROJECT_PATH, 'data')
21   -#['gotowe', 'sprawdzone', 'tymczasowy']
22   -STATUSES_LS = [u'zalążkowe', u'gotowe', u'sprawdzone',
23   - u'(F) w obróbce', u'(F) gotowe', u'(F) sprawdzone',
24   - u'(S) w obróbce', u'(S) gotowe', u'(S) sprawdzone']
25   -
26   -NOUN_TAGS = ['subst', 'ger']
27   -
28   -#VERBTAGLIST = ['fin', 'praet', 'bedzie', 'inf', 'imps', 'impt',
29   -# 'winien', 'pred']
30   -#ADJTAGLIST = ['adj', 'pact', 'ppas']
31   -#INTERPTAGLIST = ['interp']
32   -#NUMERALTAGLIST = ['num', 'numcol']
33   -
34   -XCES_HEADER = """<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE cesAna SYSTEM 'xcesAnaIPI.dtd'><cesAna type="pre_morph" version="WROC-1.0" xmlns:xlink="http://www.w3.org/1999/xlink">
35   -<chunkList xml:base="text.xml">
36   -"""
37   -XCES_FOOTER = """</chunkList>
38   -</cesAna>
39   -"""
40   -
41   -WCRFT_CONFIG = 'nkjp_s2.ini'
42   -
43   -LABELS = ('haslo',
44   - 'status hasla',
45   - 'identyfikator schematu',
46   - 'schemat',
47   - 'opinia o schemacie',
48   - 'przyklad',
49   - 'otagowany przyklad',
50   - 'fragmenty przykladu',
51   - 'opinia o przykladzie',
52   - 'zrodlo przykladu',
53   - 'wybor argumentow')
54 13  
55   -
56   -ARG_TYPES_BY_PRIORITY = ['fixed',
57   - # frazy przyimkowe
58   - 'preplexnp', 'comprepnp', 'prepnp', 'prepncp', 'prepadjp',
59   - # frazy rzeczownikowe
60   - 'lexnp', 'np',
61   - # frazy rzeczownikowo-zdaniowe
62   - 'ncp', 'cp',
63   - # adjp
64   - 'adjp',
65   - # bezokoliczniki
66   - 'infp',
67   - # refl
68   - 'refl',
69   - # xp
70   - 'xp',
71   - # advp
72   - 'advp',
73   - # nonch
74   - 'nonch',
75   - # lemma - nie jest sortowane chyba, bo dodawane na innym etapie niz reszta argumentow
76   - 'lemma',
77   - # xp
78   - 'xp'
79   - ]
  14 +LABELS = (u'hasło',
  15 + u'status hasła',
  16 + u'identyfikator schematu',
  17 + u'schemat',
  18 + u'opinia o schemacie',
  19 + u'przykład',
  20 + u'opinia o przykładzie',
  21 + u'zródło przykładu',
  22 + u'wybór typów fraz')
80 23  
81 24 class Command(BaseCommand):
82 25 help = 'Get pinned examples from Slowal.'
83 26  
84 27 def handle(self, **options):
85   - get_examples()
86   -
87   -def write_examples(q_statuses):
88   - try:
89   - examples_file = codecs.open(os.path.join(BASE_PATH,
90   - 'examples_gotowe_plus.txt'), 'wt', 'utf-8')
91   - for lemma in Lemma.objects.filter(old=False).filter(reduce(operator.or_, q_statuses)).order_by('entry').all():
92   - print lemma
93   - examples_file.write(lemma.entry+'\n')
94   - for frame in lemma.frames.order_by('text_rep').all():
95   - if lemma.frame_opinions.get(frame=frame).value.value != u'zła':
96   - examples_file.write('\t%s\n' % frame.text_rep)
97   - for example in lemma.nkjp_examples.filter(frame=frame):
98   - examples_file.write('\t\t--> %s\n' % example.sentence)
99   - examples_file.write('\n\n')
100   - finally:
101   - examples_file.close()
102   -
103   -def write_xces_opening(outfile):
104   - outfile.write(XCES_HEADER)
105   -
106   -def write_xces_closing(outfile):
107   - outfile.write(XCES_FOOTER)
  28 + get_examples()
  29 +
  30 +def get_examples():
  31 + ready_statuses = get_ready_statuses()
  32 + write_detailed_examples(ready_statuses)
  33 + # write_examples(ready_statuses)
108 34  
109   -def write_paragraph(what, outfile):
110   - if len(what) > 0 and not what.isspace():
111   - outfile.write(u'<chunk type="p" id="p1">')
112   - outfile.write(what)
113   - outfile.write(u'</chunk>\n')
114   -
115   -def sentence_to_xces(sentence):
  35 +def write_detailed_examples(statuses):
116 36 try:
117   - tmp_folder = mkdtemp()
118   - os.chdir(tmp_folder)
119   - tmp_file, tmpfilename = mkstemp(dir=tmp_folder)
120   - os.close(tmp_file)
121   - outfile = codecs.open(tmpfilename, 'wt', 'utf-8')
122   - write_xces_opening(outfile)
123   - write_paragraph(sentence, outfile)
124   - write_xces_closing(outfile)
125   - finally:
126   - outfile.close()
127   - return tmpfilename
128   -
129   -def chunks(rdr):
130   - """Yields subsequent paragraphs from a reader."""
131   - while True:
132   - chunk = rdr.get_next_chunk()
133   - if not chunk:
134   - break
135   - yield chunk
136   -
137   -#def tag_sentence(tagged_sentence_path):
138   -# sentences_count = 0
139   -# tagged_sentence_chunks = []
140   -# tagset = corpus2.get_named_tagset('nkjp')
141   -# rdr = corpus2.TokenReader.create_path_reader('xces', tagset, tagged_sentence_path)
142   -# for chunk in chunks(rdr):
143   -# for sent in chunk.sentences():
144   -# sentences_count += 1
145   -# for tok in sent.tokens():
146   -# prefered_lexeme = tok.get_preferred_lexeme(tagset)
147   -# base_form = prefered_lexeme.lemma_utf8().decode('utf-8')
148   -# orth_form = tok.orth_utf8().decode('utf-8')
149   -# tags = tagset.tag_to_string(prefered_lexeme.tag())
150   -# sentence_chunk = u'%s[%s>%s]' % (orth_form, base_form, tags)
151   -# tagged_sentence_chunks.append(sentence_chunk)
152   -# tagged_sentence = ' '.join(tagged_sentence_chunks)
153   -# if sentences_count > 1:
154   -# pass
155   -# return tagged_sentence
156   -
157   -#def get_tagged_sentence(sentence):
158   -# tagged_sentence = 'Error!'
159   -# try:
160   -# tmp_folder = mkdtemp()
161   -# os.chdir(tmp_folder)
162   -# xces_file, xces_path = mkstemp(dir=tmp_folder)
163   -# os.close(xces_file)
164   -# tagged_sentence_file, tagged_sentence_path = mkstemp(dir=tmp_folder)
165   -# os.close(tagged_sentence_file)
166   -# xces_file = codecs.open(xces_path, 'wt', 'utf-8')
167   -# write_xces_opening(xces_file)
168   -# write_paragraph(sentence, xces_file)
169   -# write_xces_closing(xces_file)
170   -# xces_file.close()
171   -# try:
172   -# call(['wcrft', WCRFT_CONFIG, xces_path, '-O', tagged_sentence_path, '-C', '-i', 'premorph'])
173   -# tagged_sentence = tag_sentence(tagged_sentence_path)
174   -# except:
175   -# print 'Tagging failed.'
176   -# finally:
177   -# xces_file.close()
178   -# os.remove(xces_path)
179   -# os.remove(tagged_sentence_path)
180   -# return tagged_sentence
181   -
182   -def write_detailed_examples(q_statuses):
183   - try:
184   - examples_file = codecs.open(os.path.join(BASE_PATH,
185   - 'detailed_examples_20150616.csv'), 'wt', 'utf-8')
186   - examples_file.write(u'%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % LABELS)
187   - for lemma in Lemma.objects.filter(old=False).filter(reduce(operator.or_, q_statuses)).order_by('entry').all():
  37 + lemmas = Lemma.objects.filter(old=False)
  38 + lemmas = lemmas.filter(status__in=statuses)
  39 + now = datetime.datetime.now().strftime('%Y%m%d')
  40 + examples_file = codecs.open(os.path.join(BASE_PATH, 'detailed_examples_%s.csv' % now), 'wt', 'utf-8')
  41 + examples_file.write(u'%s\n' % u'\t'.join(LABELS))
  42 + for lemma in lemmas.order_by('entry_obj__name'):
188 43 print lemma
189   - lemma_entry = lemma.entry
  44 + lemma_entry = lemma.entry_obj.name
190 45 lemma_status = lemma.status.status
191 46 for frame in lemma.frames.order_by('text_rep').all():
192   - frame_text_rep = frame.text_rep
  47 + if not lemma.phraseology_ready() and frame.phraseologic:
  48 + continue
193 49 frame_opinion = lemma.frame_opinions.filter(frame=frame).all()[0].value
194 50 for example in lemma.nkjp_examples.filter(frame=frame):
195   - sentence = example.sentence.replace('\n', ' ').replace('\r', '').replace('\t', ' ')
196   - #tagged_sentence = get_tagged_sentence(sentence) mozna wlaczyc w razie czego
197   - tagged_sentence = ''
198   - example_opinion = example.opinion.opinion
199   - example_source = example.source.source
  51 + sentence = example.sentence.replace('\n', ' ').replace('\r', '').replace('\t', ' ')
200 52 arguments_selection = u'%s' % u' + '.join([u'%s' % selection.__unicode__() for selection in example.arguments.all()])
201   - examples_file.write(u'%s\t%s\t%d\t%s\t%s\t%s\t%s\t\t%s\t%s\t%s\n' % (lemma_entry,
202   - lemma_status,
203   - frame.id,
204   - frame_text_rep,
205   - frame_opinion,
206   - sentence,
207   - tagged_sentence,
208   - example_opinion,
209   - example_source,
210   - arguments_selection))
  53 + examples_file.write(u'%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\n' % (lemma_entry,
  54 + lemma_status,
  55 + frame.id,
  56 + frame.get_position_spaced_text_rep(),
  57 + frame_opinion,
  58 + sentence,
  59 + example.opinion.opinion,
  60 + example.source.source,
  61 + arguments_selection))
211 62 finally:
212 63 examples_file.close()
213   -
214   -def get_arguments(arguments_selection):
215   - arguments = []
216   - positions = arguments_selection.split('+')
217   - for position in positions:
218   - category = ''
219   - position = position.strip().lstrip('[').rstrip(']')
220   - if position.startswith('subj'):
221   - category = 'subj'
222   - elif position.startswith('obj'):
223   - category = 'obj'
224   - selection = re.findall(ur'<.*?>', position)[0]
225   - for arg in selection.lstrip('<').rstrip('>').split(';'):
226   - if category:
227   - arguments.append(u'%s:%s' % (category, arg))
228   - else:
229   - arguments.append(arg)
230   - arguments = sort_arguments(arguments)
231   - return arguments
232   -
233   -def sort_arguments(arguments):
234   - sorted_arguments = []
235   - for type in ARG_TYPES_BY_PRIORITY:
236   - for arg in arguments:
237   - (arg_type, attributes, category) = arg_from_text_rep(arg)
238   - if arg_type == type:
239   - sorted_arguments.append(arg)
240   - return sorted_arguments
241   -
242   -def arg_from_text_rep(argument):
243   - attributes = []
244   - category = ''
245   - if ':' in argument:
246   - arg_split = argument.split(':')
247   - category = arg_split[0]
248   - argument = arg_split[1]
249   - arg_parts = argument.split('(')
250   - arg_type = arg_parts[0]
251   - if len(arg_parts) > 1:
252   - attributes = arg_parts[1].rstrip(')').replace("'", "").split(',')
253   - return arg_type, attributes, category
254   -
255   -def tokenize_sentence(sentence):
256   - token_idx = 0
257   - tokens = []
258   - chunks = sentence.split('] ')
259   - for chunk in chunks:
260   - if chunk.startswith('[[['):
261   - token = {'idx': token_idx,
262   - 'orth': '[',
263   - 'base': '[',
264   - 'tags': ['interp'],
265   - 'argument': '',
266   - 'argument_start': -1,
267   - 'argument_end': -1,
268   - 'occupied': False}
269   - elif chunk.startswith('>'):
270   - token = {'idx': token_idx,
271   - 'orth': '>',
272   - 'base': '>',
273   - 'tags': ['interp'],
274   - 'argument': '',
275   - 'argument_start': -1,
276   - 'argument_end': -1,
277   - 'occupied': False}
278   - else:
279   - chunk_parts = chunk.split('[')
280   - (base, tags) = (chunk_parts[1].split('>'))#rstrip(']').)
281   - orth = chunk_parts[0].lower()
282   - token = {'idx': token_idx,
283   - 'orth': orth,
284   - 'base': base,
285   - 'tags': tags.split(':'),
286   - 'argument': '',
287   - 'argument_start': -1,
288   - 'argument_end': -1,
289   - 'occupied': False}
290   - tokens.append(token)
291   - token_idx += 1
292   - return tokens
293   -
294   -def case_conversion(case, category):
295   - if case == 'instr':
296   - case = 'inst'
297   - elif case == 'part':
298   - case = u'gen|acc'
299   - elif case == 'str' and (category == 'subj' or not category):
300   - case = 'nom'
301   - elif case == 'str' and category == 'obj':
302   - case = 'acc'
303   - return case
304   -
305   -def number_conversion(number):
306   - if number == '_':
307   - number = ''
308   - return number
309   -
310   -def aspect_conversion(aspect):
311   - if aspect == '_':
312   - aspect = ''
313   - return aspect
314   -
315   -def phrase_type_conversion(phrase_type):
316   - if phrase_type == u'że':
317   - phrase_type = u'że|iż'
318   - elif phrase_type == u'żeby':
319   - phrase_type = u'żeby|aby|by|iżby|ażeby'
320   - elif phrase_type == u'żeby2':
321   - phrase_type = u'że|iż|żeby' # !!! nie wiem co ma być pod żeby2
322   - elif phrase_type == u'int':
323   - phrase_type = u'kiedy|jak|czy' # !!! nie wiem co ma być pod int
324   - elif phrase_type == u'jakby':
325   - phrase_type = u'jakby|jak gdyby'
326   - return phrase_type
327 64  
328   -def complex_prep_lemma_conversion(lemma):
329   - if lemma == u'powodu':
330   - lemma = u'powód'
331   - elif lemma == u'sprawie':
332   - lemma = u'sprawa'
333   - elif lemma == u'kwestii':
334   - lemma = u'kwestia'
335   - elif lemma == u'roli':
336   - lemma = u'rola'
337   - elif lemma == u'okolicach':
338   - lemma = u'okolica'
339   - elif lemma == u'czasie':
340   - lemma = u'czas'
341   - elif lemma == u'stronie':
342   - lemma = u'strona'
343   - elif lemma == u'początku':
344   - lemma = u'początek'
345   - return lemma
346   -
347   -def proper_case(token, case):
348   - possible_cases = [case]
349   - proper_case = False
350   - if '|' in case:
351   - possible_cases = case.split('|')
352   - if len(set(token['tags']) & set(possible_cases)) == 1:
353   - proper_case = True
354   - return proper_case
355   -
356   -def get_matching_token(tokens, orth='', base='', case='',
357   - number='', phrase_type='', aspect='',
358   - degree='', pos=''):
359   -# print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
360   -# print 'orth: %s, base: %s, case: %s, number: %s, pos: %s' % (orth,
361   -# base,
362   -# case,
363   -# number,
364   -# pos)
365   - matching_token = None
366   - for token in tokens:
367   - match = True
368   - if token['occupied']:
369   - continue
370   - if orth and not token['orth'] == orth:
371   - match = False
372   - if base and not token['base'] == base:
373   - match = False
374   - if case and not proper_case(token, case):
375   - match = False
376   - if number and not number in token['tags']:
377   - match = False
378   - if aspect and not aspect in token['tags']:
379   - match = False
380   - if degree and not degree in token['tags']:
381   - match = False
382   - if pos and not pos in token['tags']:
383   - match = False
384   - if match:
385   - matching_token = token
386   - break
387   - return matching_token
388   -
389   -def fill_token_data(token, argument, start_idx, end_idx):
390   - token['argument'] = argument
391   - token['argument_start'] = start_idx
392   - token['argument_end'] = end_idx
393   -
394   -def mark_fixed(tokens, argument, tresc):
395   - tresc_idx = 0
396   - tresc_orths = tresc.split()
397   - tresc_start = -1
398   - tresc_end = -1
399   - for token in tokens:
400   - if token['occupied']:
401   - continue
402   - if token['orth'] == tresc_orths[tresc_idx]:
403   - tresc_idx += 1
404   - if tresc_start == -1:
405   - tresc_start = tokens.index(token)
406   - else:
407   - tresc_idx = 0
408   - tresc_start = -1
409   - if tresc_idx == len(tresc_orths):
410   - tresc_end = tokens.index(token)
411   - break
412   - for token in tokens[tresc_start:tresc_end+1]:
413   - fill_token_data(token, argument, tresc_start, tresc_end)
414   - token['occupied'] = True
415   -
416   -def mark_preplexnp(tokens, argument, preposition, case, number, lemma):
417   - preposition_token = get_matching_token(tokens, orth='', base=preposition,
418   - case=case, number='', pos='prep') # !! case nie powinien być zgodny z lematem??
419   - start_idx = tokens.index(preposition_token)
420   - lemma_token = get_matching_token(tokens[start_idx:], orth='', base=lemma,
421   - case=case, number=number, pos='subst')
422   - end_idx = tokens.index(lemma_token)
423   - fill_token_data(preposition_token, argument, start_idx, end_idx)
424   - fill_token_data(lemma_token, argument, start_idx, end_idx)
425   - for token in tokens[start_idx:end_idx+1]:
426   - token['occupied'] = True
427   -
428   -def mark_comprepnp(tokens, argument, preposition, lemma):
429   - if preposition == u'co' and lemma == u'do':
430   - preposition_token = get_matching_token(tokens, orth='co', base='',
431   - case='', number='', pos='subst') # !! czy pos nie powinien byc subst
432   - start_idx = tokens.index(preposition_token)
433   - lemma_token = get_matching_token(tokens[start_idx:], orth='do', base='',
434   - case='', number='', pos='prep')
435   - end_idx = tokens.index(lemma_token)
436   - else:
437   - preposition_token = get_matching_token(tokens, orth='', base=preposition,
438   - case='', number='', pos='prep') # !! case nie powinien być zgodny z lematem??
439   - start_idx = tokens.index(preposition_token)
440   - lemma_base = complex_prep_lemma_conversion(lemma)
441   - lemma_token = get_matching_token(tokens[start_idx:], orth='', base=lemma_base,
442   - case='', number='', pos='subst')
443   - end_idx = tokens.index(lemma_token)
444   - noun_token = get_matching_token(tokens[end_idx+1:], orth='', base='',
445   - case='', number='', pos='subst') # za proste, glupoty wychodza
446   - end_idx = tokens.index(noun_token)
447   - fill_token_data(preposition_token, argument, start_idx, end_idx)
448   - fill_token_data(lemma_token, argument, start_idx, end_idx)
449   - fill_token_data(noun_token, argument, start_idx, end_idx)
450   - for token in tokens[start_idx:end_idx+1]:
451   - token['occupied'] = True
452   -
453   -def mark_prepnp(tokens, argument, preposition, case):
454   - preposition_token = get_matching_token(tokens, orth='', base=preposition,
455   - case=case, number='', pos='prep') # !! case nie powinien być zgodny z lematem??
456   - start_idx = tokens.index(preposition_token)
457   - noun_token = get_matching_token(tokens[start_idx:], orth='', base='',
458   - case=case, number='', pos='subst')
459   - end_idx = tokens.index(noun_token)
460   - fill_token_data(preposition_token, argument, start_idx, end_idx)
461   - fill_token_data(noun_token, argument, start_idx, end_idx)
462   - for token in tokens[start_idx:end_idx+1]:
463   - token['occupied'] = True
464   -
465   -def mark_phrase(tokens, start_idx, argument, phrase_type):
466   - for phrase in phrase_type.split('|'):
467   - phrase_parts = phrase.split()
468   - if len(phrase_parts) > 1:
469   - phrase_token1 = get_matching_token(tokens[start_idx+1:], orth='', base=phrase_parts[0],
470   - case='', number='', pos='')
471   - if phrase_token1:
472   - phrase_start_idx = tokens.index(phrase_token1)
473   - phrase_token2 = get_matching_token(tokens[phrase_start_idx+1:], orth='', base=phrase_parts[1],
474   - case='', number='', pos='')
475   - if phrase_token1 and phrase_token2:
476   - phrase_end_idx = tokens.index(phrase_token2)
477   - fill_token_data(phrase_token1, argument, phrase_start_idx, phrase_end_idx)
478   - fill_token_data(phrase_token2, argument, phrase_start_idx, phrase_end_idx)
479   - break
480   - else:
481   - phrase_token = get_matching_token(tokens[start_idx+1:], base=phrase)
482   - if phrase_token:
483   - phrase_end_idx = tokens.index(phrase_token)
484   - phrase_start_idx = phrase_end_idx
485   - fill_token_data(phrase_token, argument, phrase_start_idx, phrase_end_idx)
486   - break
487   - return phrase_start_idx, phrase_end_idx
488   -
489   -def mark_prepncp(tokens, argument, preposition, case, phrase_type):
490   - preposition_token = get_matching_token(tokens, orth='', base=preposition,
491   - case=case, number='', pos='prep') # !! case nie powinien być zgodny z lematem??
492   - start_idx = tokens.index(preposition_token)
493   - noun_token = get_matching_token(tokens[start_idx:], orth='', base='',
494   - case=case, number='', pos='subst')
495   - end_idx = tokens.index(noun_token)
496   - xx, end_idx = mark_phrase(tokens, end_idx, argument, phrase_type)
497   - fill_token_data(preposition_token, argument, start_idx, end_idx)
498   - fill_token_data(noun_token, argument, start_idx, end_idx)
499   - for token in tokens[start_idx:end_idx+1]:
500   - token['occupied'] = True
501   -
502   -def mark_prepadjp(tokens, argument, preposition, case):
503   - preposition_token = get_matching_token(tokens, orth='', base=preposition,
504   - case=case, number='', pos='prep') # !! case nie powinien być zgodny z lematem??
505   - start_idx = tokens.index(preposition_token)
506   - adj_token = get_matching_token(tokens[start_idx:], orth='', base='',
507   - case=case, number='', pos='adj')
508   - end_idx = tokens.index(adj_token)
509   - fill_token_data(preposition_token, argument, start_idx, end_idx)
510   - fill_token_data(adj_token, argument, start_idx, end_idx)
511   - for token in tokens[start_idx:end_idx+1]:
512   - token['occupied'] = True
513   -
514   -def mark_lexnp(tokens, argument, case, number, lemma):
515   - lemma_token = get_matching_token(tokens, orth='', base=lemma,
516   - case=case, number=number, pos='subst')
517   - start_idx = tokens.index(lemma_token)
518   - end_idx = start_idx
519   - fill_token_data(lemma_token, argument, start_idx, end_idx)
520   - for token in tokens[start_idx:end_idx+1]:
521   - token['occupied'] = True
522   -
523   -def mark_np(tokens, argument, case):
524   - noun_token = get_matching_token(tokens, orth='', base='',
525   - case=case, number='', pos='subst')
526   - start_idx = tokens.index(noun_token)
527   - end_idx = start_idx
528   - fill_token_data(noun_token, argument, start_idx, end_idx)
529   - for token in tokens[start_idx:end_idx+1]:
530   - token['occupied'] = True
531   -
532   -def mark_ncp(tokens, argument, case, phrase_type):
533   - noun_token = get_matching_token(tokens, orth='', base='',
534   - case=case, number='', pos='subst')
535   - start_idx = tokens.index(noun_token)
536   - xx, end_idx = mark_phrase(tokens, start_idx, argument, phrase_type)
537   - fill_token_data(noun_token, argument, start_idx, end_idx)
538   - for token in tokens[start_idx:end_idx+1]:
539   - token['occupied'] = True
540   -
541   -def mark_cp(tokens, argument, phrase_type):
542   - start_idx, end_idx = mark_phrase(tokens, -1, argument, phrase_type)
543   - for token in tokens[start_idx:end_idx+1]:
544   - token['occupied'] = True
545   -
546   -def mark_adjp(tokens, argument, case):
547   - adj_token = get_matching_token(tokens, case=case, pos='adj')
548   - start_idx = tokens.index(adj_token)
549   - end_idx = start_idx
550   - fill_token_data(adj_token, argument, start_idx, end_idx)
551   - for token in tokens[start_idx:end_idx+1]:
552   - token['occupied'] = True
553   -
554   -def mark_infp(tokens, argument, aspect):
555   - inf_token = get_matching_token(tokens, orth='', base='',
556   - case='', number='', aspect=aspect, pos='inf')
557   - start_idx = tokens.index(inf_token)
558   - end_idx = start_idx
559   - fill_token_data(inf_token, argument, start_idx, end_idx)
560   - for token in tokens[start_idx:end_idx+1]:
561   - token['occupied'] = True
562   -
563   -def mark_lemma(tokens, argument, lemma, sie, aspect):
564   - lemma_token = get_matching_token(tokens, orth='', base=lemma,
565   - case='', number='', aspect=aspect,
566   - pos='')
567   - start_idx = tokens.index(lemma_token)
568   - if sie:
569   - sie_token = get_matching_token(tokens[start_idx:], orth='', base=u'się',
570   - case='', number='', pos='')
571   - end_idx = tokens.index(sie_token)
572   - fill_token_data(sie_token, argument, start_idx, end_idx)
573   - else:
574   - end_idx = start_idx
575   - fill_token_data(lemma_token, argument, start_idx, end_idx)
576   -
577   - for token in tokens[start_idx:end_idx+1]:
578   - token['occupied'] = True
579   -
580   -def mark_nonch(tokens, argument, nonch):
581   - for pronoun in nonch.split('|'):
582   - pronoun_parts = pronoun.split()
583   - if len(pronoun_parts) > 1:
584   - matched_tokens = []
585   - parts_matched = True
586   - pronoun_start_idx = 0
587   - for pronoun_part in pronoun_parts:
588   - pronoun_token = get_matching_token(tokens[pronoun_start_idx+1:], orth='', base=pronoun_part,
589   - case='', number='', pos='')
590   - if pronoun_token:
591   - pronoun_start_idx = tokens.index(pronoun_token)
592   - matched_tokens.append(pronoun_token)
593   - else:
594   - parts_matched = False
595   - break
596   - if parts_matched:
597   - start_idx = tokens.index(matched_tokens[0])
598   - end_idx = tokens.index(matched_tokens[-1])
599   - for token in matched_tokens:
600   - fill_token_data(token, argument, start_idx, end_idx)
601   - break
602   - else:
603   - pronoun_token = get_matching_token(tokens, orth='', base=pronoun,
604   - case='', number='', pos='')
605   - if pronoun_token:
606   - start_idx = tokens.index(pronoun_token)
607   - end_idx = start_idx
608   - fill_token_data(pronoun_token, argument, start_idx, end_idx)
609   - break
610   - for token in tokens[start_idx:end_idx+1]:
611   - token['occupied'] = True
612   -
613   -def mark_advp(tokens, argument, advp_type):
614   - if advp_type == 'pron':
615   - possible_bases = ['tak', 'jak']
616   - for base in possible_bases:
617   - advp_token = get_matching_token(tokens, base=base, pos='adv')
618   - if advp_token:
619   - break
620   - elif advp_type == 'misc':
621   - possible_degrees = ['com', 'sup']
622   - for degree in possible_degrees:
623   - advp_token = get_matching_token(tokens, degree=degree, pos='adv')
624   - if advp_token:
625   - break
626   - start_idx = tokens.index(advp_token)
627   - end_idx = start_idx
628   - fill_token_data(advp_token, argument, start_idx, end_idx)
629   - for token in tokens[start_idx:end_idx+1]:
630   - token['occupied'] = True
631   -
632   -def count_occupied(tokens):
633   - occupied_tokens = [token for token in tokens if token['occupied']]
634   - return len(occupied_tokens)
635   -
636   -def mark_arg_in_sentence(argument, sentence_tokens):
637   - (arg_type, attributes, category) = arg_from_text_rep(argument)
638   - if arg_type == 'fixed':
639   - mark_fixed(sentence_tokens, argument, attributes[0])
640   - elif arg_type == 'preplexnp':
641   - preposition = attributes[0]
642   - case = case_conversion(attributes[1], category)
643   - number = number_conversion(attributes[2])
644   - lemma = attributes[3]
645   - mark_preplexnp(sentence_tokens, argument, preposition, case, number, lemma)
646   - elif arg_type == 'comprepnp':
647   - complex_preposition_parts = attributes[0].split()
648   - preposition = complex_preposition_parts[0]
649   - lemma = complex_preposition_parts[1]
650   - mark_comprepnp(sentence_tokens, argument, preposition, lemma)
651   - elif arg_type == 'prepnp':
652   - preposition = attributes[0]
653   - case = case_conversion(attributes[1], category)
654   - mark_prepnp(sentence_tokens, argument, preposition, case)
655   - elif arg_type == 'prepncp':
656   - preposition = attributes[0]
657   - case = case_conversion(attributes[1], category)
658   - phrase_type = phrase_type_conversion(attributes[2])
659   - mark_prepncp(sentence_tokens, argument, preposition, case, phrase_type)
660   - elif arg_type == 'prepadjp':
661   - preposition = attributes[0]
662   - case = case_conversion(attributes[1], category)
663   - mark_prepadjp(sentence_tokens, argument, preposition, case)
664   - elif arg_type == 'lexnp':
665   - case = case_conversion(attributes[0], category)
666   - number = number_conversion(attributes[1])
667   - lemma = attributes[2]
668   - mark_lexnp(sentence_tokens, argument, case, number, lemma)
669   - elif arg_type == 'np':
670   - case = case_conversion(attributes[0], category)
671   - mark_np(sentence_tokens, argument, case)
672   - elif arg_type == 'ncp':
673   - case = case_conversion(attributes[0], category)
674   - phrase_type = phrase_type_conversion(attributes[1])
675   - mark_ncp(sentence_tokens, argument, case, phrase_type)
676   - elif arg_type == 'cp':
677   - phrase_type = phrase_type_conversion(attributes[0])
678   - mark_cp(sentence_tokens, argument, phrase_type)
679   - elif arg_type == 'adjp':
680   - case = case_conversion(attributes[0], category)
681   - mark_adjp(sentence_tokens, argument, case)
682   - elif arg_type == 'infp':
683   - aspect = aspect_conversion(attributes[0])
684   - mark_infp(sentence_tokens, argument, aspect)
685   - elif arg_type == u'nonch':
686   - nonch = u'co|coś|nic|to|to samo co'
687   - mark_nonch(sentence_tokens, argument, nonch)
688   - elif arg_type == 'lemma':
689   - lemma = attributes[0]
690   - sie = attributes[1]
691   - aspect = aspect_conversion(attributes[2])
692   - mark_lemma(sentence_tokens, argument, lemma, sie, aspect)
693   - elif arg_type == 'advp':
694   - advp_type = attributes[0]
695   - mark_advp(sentence_tokens, argument, advp_type)
696   -# elif arg_type == 'xp':
697   -# argument_obj = Argument.objects.get(text_rep=argument)
698   -# realizations = [realization.argument.text_rep for realization in argument_obj.realizations.all()]
699   -# start_occupacy = count_occupied(sentence_tokens)
700   -# for realization in sort_arguments(realizations):
701   -# mark_arg_in_sentence(realization, sentence_tokens)
702   -# if count_occupied(sentence_tokens) > start_occupacy:
703   -# break
704   -
705   -
706   -def cut_sentence_chunks(sentence_tokens):
707   - endpoint = -1
708   - ignore = False
709   - sentence_chunks = []
710   - for token in sentence_tokens:
711   - if token['argument'] and not ignore:
712   - orths = [tok['orth'] for tok in sentence_tokens[token['argument_start']:token['argument_end']+1] if tok['argument']]
713   - arg_realization = u'%s (%s)' % (u' '.join(orths), token['argument'])
714   - endpoint = token['argument_end']
715   - sentence_chunks.append(arg_realization)
716   - ignore = True
717   - if token['idx'] == endpoint:
718   - ignore = False
719   - return u' '.join(sentence_chunks)
720   -
721   -def get_sentence_chunk(arguments, sentence_tokens):
722   - for arg in arguments:
723   - mark_arg_in_sentence(arg, sentence_tokens)
724   - return cut_sentence_chunks(sentence_tokens)
725   -
726   -def create_lemma_argument(lemma_entry, frame_text_rep):
727   - frame_parts = frame_text_rep.split(':')
728   - sie = frame_parts[0]
729   - aspect = frame_parts[2]
730   - frame_structure = frame_parts[3]
731   - if not sie and u'refl' in frame_structure:
732   - sie = u'się'
733   - argument = u'lemma(%s,%s,%s)' % (lemma_entry, sie, aspect)
734   - return argument
735   -
736   -def get_arguments_coverage():
737   - try:
738   - first_line = True
  65 +def write_examples(statuses):
  66 + try:
739 67 examples_file = codecs.open(os.path.join(BASE_PATH,
740   - 'detailed_examples_v2.csv'), 'rt', 'utf-8')
741   - output_file = codecs.open(os.path.join(BASE_PATH,
742   - 'detailed_examples_cover_v2.csv'), 'wt', 'utf-8')
743   - output_file.write(u'%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % LABELS)
744   - for line in examples_file:
745   - if first_line:
746   - first_line = False
747   - continue
748   - if 'Error!!!' in line:
749   - continue
750   - line = line.strip()
751   - example_data = line.split('\t')
752   - lemma_entry = example_data[0]
753   - lemma_status = example_data[1]
754   - frame_text_rep = example_data[2]
755   - frame_opinion = example_data[3]
756   - sentence = example_data[4]
757   - tagged_sentence = example_data[5]
758   - example_opinion = example_data[6]
759   - example_source = example_data[7]
760   - arguments_selection = example_data[8]
761   - if not tagged_sentence:
762   - sentence_chunk = u'Error!!! Błąd tagowania.'
763   - else:
764   -# print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
765   -# print sentence
766   - lemma_argument = create_lemma_argument(lemma_entry, frame_text_rep)
767   - arguments = [lemma_argument]
768   - arguments.extend(get_arguments(arguments_selection))
769   - sentence_tokens = tokenize_sentence(tagged_sentence)
770   - try:
771   - sentence_chunk = get_sentence_chunk(arguments, sentence_tokens)
772   - except:
773   - sentence_chunk = u'Error!!! Nie dopasowano wszystkich argumentów.'
774   - output_file.write(u'%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (lemma_entry,
775   - lemma_status,
776   - frame_text_rep,
777   - frame_opinion,
778   - sentence,
779   - tagged_sentence,
780   - sentence_chunk,
781   - example_opinion,
782   - example_source,
783   - arguments_selection))
  68 + 'examples_gotowe_plus.txt'), 'wt', 'utf-8')
  69 + for lemma in Lemma.objects.filter(old=False).filter(status__in=statuses).order_by('entry').all():
  70 + print lemma
  71 + examples_file.write(lemma.entry+'\n')
  72 + for frame in lemma.frames.order_by('text_rep').all():
  73 + if lemma.frame_opinions.get(frame=frame).value.value != u'zła':
  74 + examples_file.write('\t%s\n' % frame.text_rep)
  75 + for example in lemma.nkjp_examples.filter(frame=frame):
  76 + examples_file.write('\t\t--> %s\n' % example.sentence)
  77 + examples_file.write('\n\n')
784 78 finally:
785   - examples_file.close()
786   - output_file.close()
787   -
788   -def get_examples():
789   - q_statuses = []
790   - for status in STATUSES_LS:
791   - q_statuses.append(Q(status__status=status))
792   - write_detailed_examples(q_statuses)
793   -# write_examples(q_statuses)
794   -# get_arguments_coverage()
795   -
796 79 \ No newline at end of file
  80 + examples_file.close()
... ...
dictionary/models.py
1 1 #-*- coding:utf-8 -*-
2 2  
3   -#Copyright (c) 2012, Bartłomiej Nitoń
4   -#All rights reserved.
5   -
6   -#Redistribution and use in source and binary forms, with or without modification, are permitted provided
7   -#that the following conditions are met:
8   -
9   -# Redistributions of source code must retain the above copyright notice, this list of conditions and
10   -# the following disclaimer.
11   -# Redistributions in binary form must reproduce the above copyright notice, this list of conditions
12   -# and the following disclaimer in the documentation and/or other materials provided with the distribution.
13   -
14   -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
15   -# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
16   -# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
17   -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
18   -# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
19   -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
20   -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21   -# POSSIBILITY OF SUCH DAMAGE.
22   -
23 3 from django.contrib.auth.models import User
24 4 from django.db.models import *
25 5  
26   -from wordnet.models import LexicalUnit
27   -
28 6 class Configuration(Model):
29 7 name = CharField(max_length=16, primary_key=True, unique=True, db_column='nazwa_konfiguracji')
30 8 selected_conf = BooleanField(db_column='wybrana_konfiguracja', default=False)
... ... @@ -250,6 +228,20 @@ class Lemma(Model):
250 228 except Frame_Opinion.DoesNotExist:
251 229 pass
252 230 return frame_opinion_name
  231 +
  232 + def phraseology_ready(self):
  233 + actual_status = self.status
  234 + ready_f_status = Lemma_Status.objects.get(type__sym_name='ready_f')
  235 + if actual_status.priority >= ready_f_status.priority:
  236 + return True
  237 + return False
  238 +
  239 + def semantics_ready(self):
  240 + actual_status = self.status
  241 + ready_s_status = Lemma_Status.objects.get(type__sym_name='ready_s')
  242 + if actual_status.priority >= ready_s_status.priority:
  243 + return True
  244 + return False
253 245  
254 246 class Meta:
255 247 db_table = 'hasla'
... ... @@ -439,15 +431,22 @@ def positions_to_frame(positions, reflex, negativity, predicativity, aspect):
439 431 if frame_obj.has_phraseologic_arguments():
440 432 frame_obj.phraseologic = True
441 433 frame_obj.save()
442   - return frame_obj
  434 + return frame_obj
  435 +
  436 +def get_schemata_by_type(sch_type, schemata_query):
  437 + if sch_type == 'normal':
  438 + schemata_query = get_normal_schemata_only(schemata_query)
  439 + elif sch_type == 'phraseologic':
  440 + schemata_query = get_phraseologic_schemata_only(schemata_query)
  441 + return schemata_query
443 442  
444   -def get_phraseologic_frames_only(frames_query):
445   - frames_query = frames_query.filter(phraseologic=True)
446   -# phraseologic_arg_models = Argument_Model.objects.filter(phraseologic=True)
447   -# phraseologic_arg_models_names = [arg_model.arg_model_name for arg_model in phraseologic_arg_models.all()]
448   -# frames_query = frames_query.filter(Q(positions__arguments__type__in=phraseologic_arg_models_names) |
449   -# Q(positions__arguments__atributes__values__argument__type__in=phraseologic_arg_models_names))
450   - return frames_query
def get_normal_schemata_only(schemata_query):
    """Return the queryset narrowed to non-phraseologic schemata only."""
    # Return the filtered queryset directly; rebinding the parameter first
    # added nothing.
    return schemata_query.filter(phraseologic=False)
  446 +
def get_phraseologic_schemata_only(schemata_query):
    """Return the queryset narrowed to phraseologic schemata only."""
    # Return the filtered queryset directly; rebinding the parameter first
    # added nothing.
    return schemata_query.filter(phraseologic=True)
451 450  
452 451  
453 452 class NKJP_Example(Model):
... ... @@ -745,13 +744,6 @@ class Argument(Model):
745 744 break
746 745 return is_fully_lexicalized
747 746  
748   - class Meta:
749   - permissions = (
750   - ('view_realization', u'Może oglądać realizacje argumentów.'),
751   - ('create_realization', u'Może kreować realizacje argumentów.'),
752   - ('view_arg_stats', u'Może oglądać statystyki argumentów.'),
753   - )
754   -
755 747 def __unicode__(self):
756 748 return '%s' % (self.text_rep)
757 749  
... ... @@ -764,7 +756,21 @@ class Argument(Model):
764 756 for value in attr.values.filter(type__sym_name=u'parameter'):
765 757 if value.parameter.type.realization_only:
766 758 return True
767   - return False
  759 + return False
  760 +
    def main_phrase_type(self):
        """Return the argument representing this argument's main phrase type.

        If the argument has no 'KATEGORIA' attribute, or that attribute has
        no values, the argument itself is already the main phrase type and
        is returned unchanged. Otherwise the generic argument written as
        ``type(selection_mode_name)`` is looked up and returned.
        NOTE(review): assumes such a generic argument exists in the
        database; Argument.objects.get raises otherwise — confirm.
        """
        category_attrs = self.atributes.filter(type=u'KATEGORIA')
        if not category_attrs.exists() or category_attrs.all()[0].values.count() == 0: # xp without subtypes
            return self
        else:
            return Argument.objects.get(text_rep=u'%s(%s)' % (self.type, category_attrs.all()[0].selection_mode.name))
  767 +
    class Meta:
        # Custom per-model permissions (labels kept in Polish as they are
        # shown to users):
        #   view_realization   - may view argument realizations
        #   create_realization - may create argument realizations
        #   view_arg_stats     - may view argument statistics
        permissions = (
            ('view_realization', u'Może oglądać realizacje argumentów.'),
            ('create_realization', u'Może kreować realizacje argumentów.'),
            ('view_arg_stats', u'Może oglądać statystyki argumentów.'),
        )
768 774  
769 775 def sort_arguments(arguments):
770 776 return sortArguments(arguments)
... ... @@ -1382,12 +1388,9 @@ class Entry(Model):
1382 1388 ('view_semantics', u'Może oglądać semantykę.'),
1383 1389 )
1384 1390  
1385   - def lexical_units(self):
1386   - return LexicalUnit.objects.filter(Q(base = self.name)|Q(base = self.name + u' się'))
1387   -
1388 1391 def actual_frames(self):
1389 1392 frame_ids = []
1390   - lexical_units = self.lexical_units().order_by('sense')
  1393 + lexical_units = self.meanings.order_by('sense')
1391 1394 for lexical_unit in lexical_units:
1392 1395 frame_ids.extend([f.id for f in lexical_unit.actual_frames()])
1393 1396 return get_model('semantics', 'SemanticFrame').objects.filter(id__in=list(set(frame_ids)))
... ... @@ -1406,6 +1409,9 @@ class Entry(Model):
1406 1409 'realizations': realizations_ids})
1407 1410 return matching_connections
1408 1411  
    def actual_lemma(self):
        """Return the current (non-archival) lemma attached to this entry.

        Assumes exactly one related lemma has old=False; otherwise the
        usual Lemma.DoesNotExist / MultipleObjectsReturned propagate.
        """
        return self.lemmas.get(old=False)
  1414 +
    def __unicode__(self):
        """Display the entry by its name."""
        return self.name
1411 1417  
... ...
dictionary/saving.py
... ... @@ -94,7 +94,7 @@ def update_connections(lemma_id, reconnect_operations, user):
94 94  
95 95 def disconnect_all_examples_operations(lemma):
96 96 operations = []
97   - lex_units = lemma.entry_obj.lexical_units().all()
  97 + lex_units = lemma.entry_obj.meanings.all()
98 98 for lu in lex_units:
99 99 lu_examples = LexicalUnitExamples.objects.filter(lexical_unit=lu)
100 100 for lu_ex in lu_examples:
... ... @@ -112,6 +112,6 @@ def disconnect_example_operation(example_dict, example_obj):
112 112 lu = LexicalUnit.objects.get(id=example_dict['lexical_unit'])
113 113 return {'operation': 'remove_example', 'unit': lu.id, 'example': example_obj.id}
114 114  
115   -def reconnect_examples(operations):
116   - update_meanings(operations)
def reconnect_examples(lemma, operations):
    """Replay example (re)connection operations for the given lemma.

    Delegates to update_meanings with the lemma's id and the list of
    operation dicts produced by the disconnect_* helpers in this module.
    """
    update_meanings(lemma.id, operations)
117 117  
118 118 \ No newline at end of file
... ...
dictionary/static/css/frame_table.css
... ... @@ -80,8 +80,7 @@ table.ActiveFrameTable td.ColumnCategory, table.InactiveFrameTable td.ColumnCate
80 80 }
81 81  
82 82 table.ActiveFrameTable td[selected=selected], table.InactiveFrameTable td[selected=selected] {
83   - position: relative;
84   - box-shadow: 0px 0px 0px 4px grey;
  83 + border-width: 5px;
85 84 }
86 85  
87 86 table.ActiveFrameTable .Opinion, table.InactiveFrameTable .Opinion {
... ...
dictionary/static/css/lemmas_filtering.css 0 → 100644
/* Horizontal-rule separator styles for the lemma filtering panel. */

/* Heavy double rule between major filter sections. */
hr.filtersSeparator {
    border-top: medium double #333;
}

/* Dashed rule separating argument-related filter rows. */
hr.argSeparator {
    border-top: 1px dashed #8c8b8b;
}

/* Thin solid rule separating alternatives. */
hr.alterSeparator {
    border-top: 1px solid #8c8b8b;
}
... ...
dictionary/static/js/lemma-view.js
... ... @@ -22,25 +22,6 @@ var nkjp_source_tab = ax_nkjp_source_vals;
22 22 // te wartosci maja zasieg na wszystkie hasla
23 23 window.schemas = new Array();
24 24 var lemma_id = -1;
25   - var aspect_vals = [];
26   - var reflex_vals = [];
27   - var neg_vals = [];
28   - var pred_vals = [];
29   - var opinion_vals = [];
30   - var filter_aspect_val = '*';
31   - var filter_reflex_val = '*';
32   - var filter_neg_val = '*';
33   - var filter_pred_val = '*';
34   - var filter_opinion_val = '*';
35   - var filter_position_val = '.*';
36   - var filter_argument_val = '.*';
37   - var prev_filter_aspect_val = '*';
38   - var prev_filter_reflex_val = '*';
39   - var prev_filter_neg_val = '*';
40   - var prev_filter_pred_val = '*';
41   - var prev_filter_opinion_val = '*';
42   - var prev_filter_position_val = '.*';
43   - var prev_filter_argument_val = '.*';
44 25 var prev_lemma_id = -1;
45 26  
46 27 // te wartosci trzeba czyscic przy ladowaniu innego hasla
... ... @@ -68,7 +49,7 @@ var nkjp_source_tab = ax_nkjp_source_vals;
68 49 var lemma_entry = '';
69 50 var prev_lemma_entry = '';
70 51 var selected_notes_row_id = -1;
71   - var can_modify = false;
  52 + //var can_modify = false;
72 53  
73 54 ////////////////////////////////////////////////////////////////
74 55  
... ... @@ -84,33 +65,6 @@ function resetLemmaVersions() {
84 65 window.nkjp_lemma_examples);
85 66 frames_modif.push(lemma_version);
86 67 }
87   -
88   -function initiateFrameFilters()
89   -{
90   - $.ajaxJSON({
91   - method: 'get',
92   - url: ajax_get_frame_filter_options,
93   - data: {
94   - //message_id: message_id,
95   - },
96   - callback: function(result) {
97   - window.aspect_vals = ['*'];
98   - window.reflex_vals = ['*'];
99   - window.neg_vals = ['*'];
100   - window.pred_vals = ['*'];
101   - window.opinion_vals = ['*'];
102   - $.merge(window.aspect_vals, result['aspect_options']);
103   - $.merge(window.reflex_vals, result['reflex_options']);
104   - $.merge(window.neg_vals, result['neg_options']);
105   - $.merge(window.pred_vals, result['pred_options']);
106   - $.merge(window.opinion_vals, result['opinion_options']);
107   - },
108   -
109   - error_callback: function(xhr, status, error) {
110   - error_alert(status + ': ' + error);
111   - },
112   - });
113   -}
114 68  
115 69 function argsToRemove(example, elementToRemoveId)
116 70 {
... ... @@ -211,198 +165,7 @@ function addPinnedExamplesDialog() {
211 165  
212 166 ///////////////////////////////////////////////////////////////
213 167  
214   -function filter_update(id)
215   -{
216   - if(id == 'frame_filter')
217   - {
218   - window.filter_aspect_val = $('#frame_filter #aspect_filter').val();
219   - window.filter_reflex_val = $('#frame_filter #reflex_filter').val();
220   - window.filter_neg_val = $('#frame_filter #neg_filter').val();
221   - window.filter_pred_val = $('#frame_filter #pred_filter').val();
222   - window.filter_opinion_val = $('#frame_filter #opinion_filter').val();
223   - window.filter_position_val = $('#frame_filter #position_filter').val();
224   - window.filter_argument_val = $('#frame_filter #argument_filter').val();
225   - }
226   - else if(id == 'prev_frame_filter')
227   - {
228   - window.prev_filter_aspect_val = $('#prev_frame_filter #aspect_filter').val();
229   - window.prev_filter_reflex_val = $('#prev_frame_filter #reflex_filter').val();
230   - window.prev_filter_neg_val = $('#prev_frame_filter #neg_filter').val();
231   - window.prev_filter_pred_val = $('#prev_frame_filter #pred_filter').val();
232   - window.prev_filter_opinion_val = $('#prev_frame_filter #opinion_filter').val();
233   - window.prev_filter_position_val = $('#prev_frame_filter #position_filter').val();
234   - window.prev_filter_argument_val = $('#prev_frame_filter #argument_filter').val();
235   - }
236   -}
237   -
238   -
239   -function draw_frames_filter(id)
240   -{
241   - var frame_filter = document.getElementById(id);
242   -
243   - p = document.createElement('p');
244   - text = document.createTextNode("Aspekt: ");
245   - p.appendChild(text);
246   - var select = document.createElement('select');
247   - select.setAttribute('id', 'aspect_filter');
248   - select.setAttribute('name', 'ASPEKT');
249   - p.appendChild(select);
250   - for(var i=0; i<aspect_vals.length; i++)
251   - {
252   - var option = document.createElement('option');
253   - option.setAttribute('value', aspect_vals[i]);
254   - option.appendChild(document.createTextNode(aspect_vals[i]));
255   - select.appendChild(option);
256   - }
257   - frame_filter.appendChild(p);
258   -
259   - p = document.createElement('p');
260   - text = document.createTextNode("Zwrotność: ");
261   - p.appendChild(text);
262   - select = document.createElement('select');
263   - select.setAttribute('id', 'reflex_filter');
264   - select.setAttribute('name', 'ZWROTNOSC');
265   - p.appendChild(select);
266   - for(var i=0; i<reflex_vals.length; i++)
267   - {
268   - var option = document.createElement('option');
269   - option.setAttribute('value', reflex_vals[i]);
270   - option.appendChild(document.createTextNode(reflex_vals[i]));
271   - select.appendChild(option);
272   - }
273   - frame_filter.appendChild(p);
274   -
275   - p = document.createElement('p');
276   - text = document.createTextNode("Negatywność: ");
277   - p.appendChild(text);
278   - select = document.createElement('select');
279   - select.setAttribute('id', 'neg_filter');
280   - select.setAttribute('name', 'NEGATYWNOŚĆ');
281   - p.appendChild(select);
282   - for(var i=0; i<neg_vals.length; i++)
283   - {
284   - var option = document.createElement('option');
285   - option.setAttribute('value', neg_vals[i]);
286   - option.appendChild(document.createTextNode(neg_vals[i]));
287   - select.appendChild(option);
288   - }
289   - frame_filter.appendChild(p);
290   -
291   - p = document.createElement('p');
292   - text = document.createTextNode("Predykatywność: ");
293   - p.appendChild(text);
294   - select = document.createElement('select');
295   - select.setAttribute('id', 'pred_filter');
296   - select.setAttribute('name', 'PREDYKATYWNOŚĆ');
297   - p.appendChild(select);
298   - for(var i=0; i<pred_vals.length; i++)
299   - {
300   - var option = document.createElement('option');
301   - option.setAttribute('value', pred_vals[i]);
302   - option.appendChild(document.createTextNode(pred_vals[i]));
303   - select.appendChild(option);
304   - }
305   - frame_filter.appendChild(p);
306   -
307   - p = document.createElement('p');
308   - text = document.createTextNode("Opinia: ");
309   - p.appendChild(text);
310   - select = document.createElement('select');
311   - select.setAttribute('id', 'opinion_filter');
312   - select.setAttribute('name', 'OPINIA');
313   - p.appendChild(select);
314   - for(var i=0; i<opinion_vals.length; i++)
315   - {
316   - var option = document.createElement('option');
317   - option.setAttribute('value', opinion_vals[i]);
318   - option.appendChild(document.createTextNode(opinion_vals[i]));
319   - select.appendChild(option);
320   - }
321   - frame_filter.appendChild(p);
322   -
323   - p = document.createElement('p');
324   - text = document.createTextNode("Typ frazy: ");
325   - p.appendChild(text);
326   - select = document.createElement('input');
327   - select.setAttribute('id', 'argument_filter');
328   - select.setAttribute('name', 'ARGUMENT');
329   - p.appendChild(select);
330   - frame_filter.appendChild(p);
331   -
332   - p = document.createElement('p');
333   - text = document.createTextNode("Pozycja: ");
334   - p.appendChild(text);
335   - select = document.createElement('input');
336   - select.setAttribute('id', 'position_filter');
337   - select.setAttribute('name', 'POZYCJA');
338   - p.appendChild(select);
339   - frame_filter.appendChild(p);
340   -
341   - if(id == 'frame_filter')
342   - {
343   - $('#frame_filter #aspect_filter').val(window.filter_aspect_val);
344   - $('#frame_filter #reflex_filter').val(window.filter_reflex_val);
345   - $('#frame_filter #neg_filter').val(window.filter_neg_val);
346   - $('#frame_filter #pred_filter').val(window.filter_pred_val);
347   - $('#frame_filter #opinion_filter').val(window.filter_opinion_val);
348   - $('#frame_filter #position_filter').val(window.filter_position_val);
349   - $('#frame_filter #argument_filter').val(window.filter_argument_val);
350   - }
351   - else if(id == 'prev_frame_filter')
352   - {
353   - $('#prev_frame_filter #aspect_filter').val(window.prev_filter_aspect_val);
354   - $('#prev_frame_filter #reflex_filter').val(window.prev_filter_reflex_val);
355   - $('#prev_frame_filter #neg_filter').val(window.prev_filter_neg_val);
356   - $('#prev_frame_filter #pred_filter').val(window.prev_filter_pred_val);
357   - $('#prev_frame_filter #opinion_filter').val(window.prev_filter_opinion_val);
358   - $('#prev_frame_filter #position_filter').val(window.prev_filter_position_val);
359   - $('#prev_frame_filter #argument_filter').val(window.prev_filter_argument_val);
360   - }
361   -
362   - //attach autocomplete
363   - $('#' + id + ' #argument_filter').autocomplete({
364   - // triggers when selection performed
365   - select: function(event, ui){
366   - filter_update(id);
367   - },
368   - //define callback to format results
369   - source: function(req, add){
370   - //pass request to server
371   - $.getJSON(ajax_argument_lookup, req, function(data) {
372   - //create array for response objects
373   - var suggestions = [];
374   - $.each(data['result'], function(i, val){
375   - suggestions.push(val[0]);
376   - });
377   - //pass array to callback
378   - add(suggestions);
379   - });
380   - },
381   - });
382   - $('#' + id + ' #position_filter').autocomplete({
383   - // triggers when selection performed
384   - select: function(event, ui){
385   - filter_update(id);
386   - },
387   - //define callback to format results
388   - source: function(req, add){
389   - //pass request to server
390   - $.getJSON(ajax_position_lookup, req, function(data) {
391   - //create array for response objects
392   - var suggestions = [];
393   - $.each(data['result'], function(i, val){
394   - suggestions.push(val[0]);
395   - });
396   - //pass array to callback
397   - add(suggestions);
398   - });
399   - },
400   - });
401   -}
402   -
403   -
404 168 function load_content(id) {
405   - ShowProgressAnimation();
406 169 $('#add-table-elem-dialog').dialog('close');
407 170 if(lemma_id != id)
408 171 $('#ready-note-dialog').dialog('close');
... ... @@ -410,15 +173,17 @@ function load_content(id) {
410 173  
411 174 if(window.activeLemmaPanel == 'preview_lemma')
412 175 {
  176 + ShowProgressAnimation();
413 177 $('#preview_lemma').load(ajax_lemma_preview, 'id='+id+'&main_lemma_id='+window.lemma_id, function(){
414 178 window.prev_lemma_id=id;
  179 + loadPrevSchemataAndExamples(true);
415 180 createSplitter('prevFramesSplit', 'prev-lemma-tables', 'prev_tabs');
416 181 areNewPreviewEntriesRelated();
417   - HideProgressAnimation();
418 182 });
419 183 }
420 184 else
421 185 {
  186 + ShowProgressAnimation();
422 187 // czyszczenie wartosci
423 188 window.elem_in_bucket = '';
424 189 window.selected_notes_row_id = -1;
... ... @@ -446,10 +211,13 @@ function load_content(id) {
446 211 window.notesNotSaved = false;
447 212 window.lemmaExNotSaved = false;
448 213  
449   - $('#new_frames').load(ajax_new_frames, 'id='+id, function(){
  214 + $('#new_frames').load(ajax_new_frames, 'id='+id, function(data){
450 215 window.lemma_id = id;
  216 +
  217 + loadSchemataAndExamples();
  218 +
451 219 createSplitter('framesSplit','new-frame-tables', 'tabs');
452   - if(window.can_modify)
  220 + /*if(window.can_modify)
453 221 {
454 222 addSyntacticFramesPerm = user_has_perm('dictionary.add_syntactic_frames');
455 223 addPhraseologicFramesPerm = user_has_perm('dictionary.add_phraseologic_frames');
... ... @@ -464,15 +232,7 @@ function load_content(id) {
464 232 $(document).bind('keydown', 'shift+d', function(evt){duplicateElement(); return false; });
465 233 $(document).bind('keydown', 'shift+c', function(evt){copyElement(); return false; });
466 234 $(document).bind('keydown', 'shift+v', function(evt){pasteElement(); return false; });
467   - $(document).bind('keydown', 'shift+w', function(evt){
468   - if(window.change)
469   - {
470   - error_alert('Przed walidacją/zmianą statusu hasło musi zostać zapisane.');
471   - return false;
472   - }
473   - validate_new_frames(false, false);
474   - return false;
475   - });
  235 + $(document).bind('keydown', 'shift+w', function(evt){validateSchemata(); return false; });
476 236 if(addSyntacticFramesPerm) {
477 237 $(document).bind('keydown', 'shift+x', function(evt){cutElement(); return false; });
478 238 $(document).bind('keydown', 'shift+m', function(evt){reserveLemma(); return false; });
... ... @@ -485,18 +245,14 @@ function load_content(id) {
485 245 }
486 246 else
487 247 {
488   - $(document).unbind('keydown')
  248 + $(document).unbind('keydown');
489 249 $.get(ajax_user_has_perm, {perm: 'dictionary.own_lemmas'}, function(result) {
490 250 if(result['has_perm']) {
491 251 $(document).bind('keydown', 'shift+m', function(evt){reserveLemma(); return false; });
492 252 }
493 253 });
494   - }
495   -
496   - window.frames_modif = new Array();
497   - window.frames_modif_idx = 0;
498   - var lemma_version = new Lemma_Version(window.schemas, window.nkjp_examples, window.nkjp_lemma_examples);
499   - frames_modif.push(lemma_version);
  254 + }*/
  255 +
500 256 if(document.getElementById("lemma_example_show"))
501 257 {
502 258 draw_nkjp_table(document.getElementById("lemma_example_show"), '', window.nkjp_lemma_examples, 'NkjpLemmaTableRow', 'nkjpLemma_')
... ... @@ -506,20 +262,10 @@ function load_content(id) {
506 262 addPinnedExamplesDialog();
507 263 $('#lemma_desc').load(ajax_get_lemma_desc, 'id='+id);
508 264 areNewPreviewEntriesRelated();
509   - HideProgressAnimation();
510 265 refresh_example_propositions();
511 266 });
512 267 $('#change_ctrl').load(ajax_change_ctrl, 'id='+id);
513 268 $('#semantics').load(ajax_semantics, 'id='+id);
514   - $('#examples').load(ajax_lemma_examples, 'id='+id, function(){
515   - window.frames_modif = new Array(); // UWAGA, przestawic do lemma-view
516   - window.frames_modif_idx = 0;
517   - var lemma_version = new Lemma_Version(window.schemas, window.nkjp_examples, window.nkjp_lemma_examples); // TO
518   - frames_modif.push(lemma_version);
519   - draw_nkjp_table(document.getElementById("lemma_example_show"), '', window.nkjp_lemma_examples, 'NkjpLemmaTableRow', 'nkjpLemma_')
520   - $("tr.NkjpLemmaTableRow").click(function(){
521   - selectLemmaNkjpTr(this.id)});
522   - });
523 269 $('#status').load(ajax_lemma_status, 'id='+id, function(){
524 270 $("#lemma-status-change button").click(validate_and_change_status);
525 271 $('#ready-note-dialog').dialog({ autoOpen: false,
... ... @@ -539,12 +285,74 @@ function load_content(id) {
539 285  
540 286 $('#preview_lemma').load(ajax_lemma_preview, 'id='+prevId+'&main_lemma_id='+id, function(){
541 287 window.prev_lemma_id=prevId;
  288 + loadPrevSchemataAndExamples(false);
542 289 createSplitter('prevFramesSplit', 'prev-lemma-tables', 'prev_tabs');
543 290 areNewPreviewEntriesRelated();
544 291 });
545 292 }
546 293 }
547 294  
function validateSchemata() {
    // Validation is only allowed on a saved lemma: refuse while there are
    // unsaved modifications, otherwise run the schema validator.
    if (!window.change) {
        validate_new_frames(false, false);
        return;
    }
    error_alert('Przed walidacją/zmianą statusu hasło musi zostać zapisane.');
    return false;
}
  302 +
// Fetch schemata and example sentences for the currently loaded lemma
// (window.lemma_id) via AJAX and redraw the schema tables, example
// lists and the counters in the tab headers.
function loadSchemataAndExamples() {
    $.ajaxJSON({
        method: 'get',
        url: ajax_get_schemata_and_examples,
        data: {
            lemma_id: window.lemma_id
        },

        callback: function(result) {
            // Deserialize the server payload into client-side objects.
            window.schemas = serializedObjToObj(result['schemata']);
            window.nkjp_examples = serializedNkjpToObj(result['examples']);
            window.nkjp_lemma_examples = serializedNkjpToObj(result['lemma_examples']);
            resetLemmaVersions();
            // Use the editable table style only when the server says the
            // current user may modify this lemma.
            var frame_class = 'InactiveFrameTable';
            if(result['can_modify']) {
                frame_class = 'ActiveFrameTable';
            }
            draw_filtered_frames(window.schemas, 'new-frame-tables', 'new-frame-table',
                                 'frame_filter', window.nkjp_examples, frame_class,
                                 window.lemma_entry, window.lemma_entry);
            // The progress animation is hidden only after the lemma-example
            // pane has also been loaded and drawn.
            $('#examples').load(ajax_lemma_examples, 'id='+window.lemma_id, function(){
                draw_nkjp_table(document.getElementById("lemma_example_show"), '',
                                window.nkjp_lemma_examples, 'NkjpLemmaTableRow', 'nkjpLemma_')
                $("tr.NkjpLemmaTableRow").click(function(){selectLemmaNkjpTr(this.id)});
                HideProgressAnimation();
            });
            // Refresh the schema / lemma-example counters shown in the UI.
            $("span#new-frames-count").empty();
            $("span#new-frames-count").append(window.schemas.length);
            $("span#lemma-examples-count").empty();
            $("span#lemma-examples-count").append(window.nkjp_lemma_examples.length);
        },
    });
}
  336 +
// Fetch schemata and examples for the lemma shown in the preview panel
// (window.prev_lemma_id) and redraw its schema table.
// hideWaitDialog: when true, hide the progress animation once drawn.
function loadPrevSchemataAndExamples(hideWaitDialog) {
    $.ajaxJSON({
        method: 'get',
        url: ajax_get_schemata_and_examples,
        data: {
            lemma_id: window.prev_lemma_id
        },
        callback: function(result) {
            window.prev_frames = serializedObjToObj(result['schemata']);
            window.prev_nkjp_examples = serializedNkjpToObj(result['examples']);
            // The preview table is always read-only ('InactiveFrameTable').
            draw_filtered_frames(window.prev_frames, 'prev-lemma-tables', 'prev-lemma-table', 'prev_frame_filter',
                                 window.prev_nkjp_examples, 'InactiveFrameTable', window.prev_lemma_entry);
            if(hideWaitDialog) {
                HideProgressAnimation();
            }
        },
    });
}
  355 +
548 356 // klasa reprezentujaca wersje hasla, do cofania i dodawania
549 357 function Lemma_Version(schemas, nkjp_examples, nkjp_lemma_examples)
550 358 {
... ... @@ -877,7 +685,7 @@ function needConfirmation(nkjpInstance) {
877 685 function unpin_nkjp_example(example_tabId)
878 686 {
879 687 if(example_tabId != -1 &&
880   - !checkIfSemChangedAndAlert())// && !exampleGotAssignedSemantics(example_tabId))
  688 + !checkIfSemChangedAndAlert())
881 689 {
882 690 example_id = example_tabId.replace('nkjp_', '');
883 691 for(var i=0; i<window.nkjp_examples.length; i++)
... ... @@ -1014,7 +822,7 @@ function getNkjpLemmaExampleInstance(nkjp_examples, example_id)
1014 822 }
1015 823  
1016 824 function remove_semantic_example(example_id) {
1017   - if(example_id != -1 && !checkIfSemChangedAndAlert())// && !exampleGotAssignedSemantics(example_id))
  825 + if(example_id != -1 && !checkIfSemChangedAndAlert())
1018 826 {
1019 827 example_id = example_id.replace('nkjp_', '');
1020 828 for(var i=0; i<nkjp_examples.length; i++)
... ... @@ -1074,9 +882,11 @@ function remove_example_from_lemma(lemma_id, example_id, examplesTabId) {
1074 882 function addFrameClickEvents(tableClass, tableId) {
1075 883 selector = 'table.'+tableClass+'#'+tableId+' td';
1076 884 if(tableId === 'new-frame-table') {
1077   - $(selector).dblclick(function(e){
1078   - e.stopPropagation();
1079   - openEditForm(this.id)});
  885 + if(tableClass === 'ActiveFrameTable') {
  886 + $(selector).dblclick(function(e){
  887 + e.stopPropagation();
  888 + openEditForm(this.id)});
  889 + }
1080 890 $(selector).click(function(e){
1081 891 e.stopPropagation();
1082 892 selectTd(this.id)});
... ... @@ -1106,17 +916,17 @@ function draw_frames(schemas, parent, table_name, nkjp_examples, table_class, le
1106 916 first = true;
1107 917 for(var j=0; j<schemas.length; j++)
1108 918 {
1109   - if(schemas[j].characteristics[3]==aspect_vals[k] && schemas[j].characteristics[0]==reflex_vals[i] &&
1110   - schemas[j].characteristics[1]==neg_vals[l] && schemas[j].characteristics[2]==pred_vals[m])
  919 + if(schemas[j].characteristics[3]==aspect_vals[k].value && schemas[j].characteristics[0]==reflex_vals[i].value &&
  920 + schemas[j].characteristics[1]==neg_vals[l].value && schemas[j].characteristics[2]==pred_vals[m].value)
1111 921 {
1112 922 if(first)
1113 923 {
1114 924 div = document.createElement('div');
1115 925 strong = document.createElement('strong');
1116   - if(reflex_vals[i])
1117   - strong.appendChild(document.createTextNode(lemma_entry+" "+reflex_vals[i]+" "+"("+neg_vals[l]+","+pred_vals[m]+","+aspect_vals[k]+"):"));
  926 + if(reflex_vals[i].value)
  927 + strong.appendChild(document.createTextNode(lemma_entry+" "+reflex_vals[i].name+" "+"("+neg_vals[l].name+","+pred_vals[m].name+","+aspect_vals[k].name+"):"));
1118 928 else
1119   - strong.appendChild(document.createTextNode(lemma_entry+" "+"("+neg_vals[l]+","+pred_vals[m]+","+aspect_vals[k]+"):"));
  929 + strong.appendChild(document.createTextNode(lemma_entry+" "+"("+neg_vals[l].name+","+pred_vals[m].name+","+aspect_vals[k].name+"):"));
1120 930 div.appendChild(strong);
1121 931 parent.appendChild(div);
1122 932 first = false;
... ... @@ -1526,11 +1336,7 @@ function can_add_position_category(lemma_id) {
1526 1336 }
1527 1337  
1528 1338 function openEditForm(id) {
1529   - if(window.can_modify && !checkIfSemChangedAndAlert()) {
1530   - /*if(schemaGotAssignedSemantics(id)) {
1531   - semanticsAssignedAlert();
1532   - }*/
1533   -
  1339 + if(!checkIfSemChangedAndAlert()) {
1534 1340 editedFrameInstance = getFrameInstance(id, window.schemas);
1535 1341 elemInstance = getElementInstance(id, window.schemas);
1536 1342 addSyntacticFramesPerm = user_has_perm('dictionary.add_syntactic_frames');
... ... @@ -2000,10 +1806,10 @@ function frame_form_submit() {
2000 1806 else {
2001 1807 edited_frame = getElementInstance(edited_id, schemas);
2002 1808 var old_edited_frame_id = edited_frame['element'].id;
2003   - //edited_frame['element'].id = new_elem_id; tuta zmienilem
2004   - edited_frame['element'].id = result['id'];
  1809 + edited_frame['element'].id = new_elem_id; //tutaj zmienilem
  1810 + //edited_frame['element'].id = result['id'];
2005 1811 edited_frame_id = edited_frame['element'].id;
2006   - //new_elem_id--;
  1812 + new_elem_id--;
2007 1813 edited_frame['element'].text_rep = result['text_rep'];
2008 1814 edited_frame['element'].characteristics = result['characteristics'];
2009 1815 edited_frame['element'].opinion = result['opinion'];
... ... @@ -3202,125 +3008,6 @@ function escape_regex(str)
3202 3008 split('}').join('\\}')
3203 3009 }
3204 3010  
3205   -function has_positions(frame, pos_term)
3206   -{
3207   - var alternatives = pos_term.split('|');
3208   - for(var h=0; h<alternatives.length; h++) {
3209   - var allConjsMatch = true;
3210   - var conjs = alternatives[h].split('&');
3211   -
3212   - for(var i=0; i<conjs.length; i++) {
3213   - try {
3214   - var matched_poss = [];
3215   - var conj = conjs[i].trim();
3216   - var regEx = conj;
3217   - if (regEx.substring(0, 1) == '!') {
3218   - regEx = regEx.substring(1);
3219   - }
3220   - var posRe = new RegExp('^'+escape_regex(regEx)+'$');
3221   - matched_poss = $.grep(frame.positions,
3222   - function(pos){
3223   - return pos.text_rep.match(posRe);
3224   - });
3225   - if((matched_poss.length > 0 && conj.startsWith('!')) ||
3226   - (matched_poss.length == 0 && !conj.startsWith('!'))) {
3227   - allConjsMatch = false;
3228   - break;
3229   - }
3230   - }
3231   - catch(e) {
3232   - allConjsMatch = false;
3233   - break;
3234   - }
3235   - }
3236   - if(allConjsMatch) {
3237   - return true;
3238   - }
3239   - }
3240   -
3241   - return false;
3242   -}
3243   -
3244   -function has_arguments(frame, arg_term) {
3245   - var alternatives = arg_term.split('|');
3246   - for(var h=0; h<alternatives.length; h++) {
3247   - var allConjsMatch = true;
3248   - var conjs = alternatives[h].split('&');
3249   - for(var i=0; i<conjs.length; i++) {
3250   - try {
3251   - var matched_args = [];
3252   - var conj = conjs[i].trim();
3253   - var regEx = conj;
3254   - if (regEx.substring(0, 1) == '!') {
3255   - regEx = regEx.substring(1);
3256   - }
3257   - var argRe = new RegExp('^'+escape_regex(regEx)+'$');
3258   -
3259   - for(var j=0; j<frame.positions.length; j++) {
3260   - matched_args = $.grep(frame.positions[j].arguments, function(arg) {
3261   - return arg.text_rep.match(argRe);
3262   - });
3263   - if(matched_args.length > 0) {
3264   - break;
3265   - }
3266   - }
3267   - if((matched_args.length > 0 && conj.startsWith('!')) ||
3268   - (matched_args.length == 0 && !conj.startsWith('!'))) {
3269   - allConjsMatch = false;
3270   - break;
3271   - }
3272   - }
3273   - catch(e) {
3274   - allConjsMatch = false;
3275   - break;
3276   - }
3277   - }
3278   - if(allConjsMatch){
3279   - return true;
3280   - }
3281   - }
3282   - return false;
3283   -}
3284   -
3285   -function filter_frames(schemas, filter_id)
3286   -{
3287   - var aspect_val = $('#'+filter_id+' #aspect_filter').val();
3288   - var reflex_val = $('#'+filter_id+' #reflex_filter').val();
3289   - var neg_val = $('#'+filter_id+' #neg_filter').val();
3290   - var pred_val = $('#'+filter_id+' #pred_filter').val();
3291   - var opinion_val = $('#'+filter_id+' #opinion_filter').val();
3292   - var position_val = $('#'+filter_id+' #position_filter').val().trim();
3293   - var argument_val = $('#'+filter_id+' #argument_filter').val().trim();
3294   - var filtered_frames = new Array();
3295   -
3296   - if(position_val == '.*')
3297   - position_val = ''
3298   - if(argument_val == '.*')
3299   - argument_val = ''
3300   -
3301   - for(var i=0; i<schemas.length; i++)
3302   - {
3303   - if((schemas[i].characteristics[3] == aspect_val || aspect_val == '*')
3304   - && (schemas[i].characteristics[0] == reflex_val || reflex_val == '*')
3305   - && (schemas[i].characteristics[1] == neg_val || neg_val == '*')
3306   - && (schemas[i].characteristics[2] == pred_val || pred_val == '*')
3307   - && (schemas[i].opinion == opinion_val || opinion_val == '*'))
3308   - {
3309   - frameMatch = false;
3310   - if(position_val)
3311   - frameMatch = has_positions(schemas[i], position_val)
3312   - if(argument_val && (frameMatch || !position_val))
3313   - {
3314   -
3315   - frameMatch = has_arguments(schemas[i], argument_val)
3316   - }
3317   - if(frameMatch || (!argument_val && !position_val))
3318   - filtered_frames.push(schemas[i]);
3319   - }
3320   - }
3321   - return filtered_frames;
3322   -}
3323   -
3324 3011 function draw_filtered_frames(schemas, parent_id, table_id, filter_id, nkjp_examples, table_class, lemma_entry)
3325 3012 {
3326 3013 var parent = document.getElementById(parent_id);
... ... @@ -3781,11 +3468,6 @@ function restore_lemma() {
3781 3468 var assignedExamples = [];
3782 3469 if(canModifyFrame(window.selected_id, window.schemas) &&
3783 3470 !checkIfSemChangedAndAlert()) {
3784   -
3785   - /*if(schemaGotAssignedSemantics(window.selected_id)) {
3786   - semanticsAssignedAlert();
3787   - }*/
3788   -
3789 3471 assignedExamples = gotAssignedExample(nkjp_examples, selected_id, true);
3790 3472 if(assignedExamples.length == 0) {
3791 3473 schemas = removeFrameElement(selected_id, schemas);
... ... @@ -3801,9 +3483,6 @@ function restore_lemma() {
3801 3483 function addElement() {
3802 3484 if(!checkIfSemChangedAndAlert() &&
3803 3485 (window.selected_id == -1 || canModifyFrame(window.selected_id, window.schemas))) {
3804   - /*if(schemaGotAssignedSemantics(window.selected_id)) {
3805   - semanticsAssignedAlert();
3806   - }*/
3807 3486 window.schemas = addFrameElementDialog(window.selected_id, window.schemas);
3808 3487 }
3809 3488 }
... ... @@ -3941,10 +3620,6 @@ function restore_lemma() {
3941 3620 if(window.elem_in_bucket && !checkIfSemChangedAndAlert() &&
3942 3621 (window.selected_id == -1 ||
3943 3622 canModifyFrame(window.selected_id, window.schemas))) {
3944   -
3945   - /*if(schemaGotAssignedSemantics(window.selected_id)) {
3946   - semanticsAssignedAlert();
3947   - }*/
3948 3623 pasteFrameElement(selected_id, elem_in_bucket, schemas);
3949 3624 }
3950 3625 }
... ... @@ -3975,12 +3650,6 @@ function restore_lemma() {
3975 3650 canModifyFrame(window.selected_id, window.schemas) &&
3976 3651 !checkIfSemChangedAndAlert())
3977 3652 {
3978   - /*if(getElementInstance(selected_id, schemas)['type'] != 'frame' &&
3979   - schemaGotAssignedSemantics(selected_id)) {
3980   - semanticsAssignedAlert();
3981   - return;
3982   - }*/
3983   -
3984 3653 elem_in_bucket = getElementInstance(selected_id, schemas);
3985 3654  
3986 3655 var parent_elem = getParentInstance(selected_id, schemas);
... ... @@ -4093,7 +3762,7 @@ function restore_lemma() {
4093 3762  
4094 3763 function delete_nkjp_example(example_id)
4095 3764 {
4096   - if(example_id != -1 && !checkIfSemChangedAndAlert())// && !exampleGotAssignedSemantics(example_id))
  3765 + if(example_id != -1 && !checkIfSemChangedAndAlert())
4097 3766 {
4098 3767 example_id = selected_example_id.replace('nkjp_', '');
4099 3768 for(var i=0; i<nkjp_examples.length; i++)
... ... @@ -4128,7 +3797,7 @@ function restore_lemma() {
4128 3797 function delete_all_nkjp_examples(frame_id)
4129 3798 {
4130 3799 if(canModifyFrame(frame_id, window.schemas) &&
4131   - !checkIfSemChangedAndAlert())// && !schemaGotAssignedSemantics(frame_id))
  3800 + !checkIfSemChangedAndAlert())
4132 3801 {
4133 3802 var new_example_tab = new Array();
4134 3803 for(var i=0; i<nkjp_examples.length; i++)
... ... @@ -4187,7 +3856,7 @@ function restore_lemma() {
4187 3856  
4188 3857 function modify_nkjp_example(example_id)
4189 3858 {
4190   - if(example_id != -1 && !checkIfSemChangedAndAlert())// && !exampleGotAssignedSemantics(example_id))
  3859 + if(example_id != -1 && !checkIfSemChangedAndAlert())
4191 3860 {
4192 3861 var example = '';
4193 3862 for(var i=0; i<window.nkjp_examples.length; i++)
... ...
dictionary/static/js/lemma_grid.js
... ... @@ -61,7 +61,6 @@ $(function(){
61 61 window['remap']? remap : undefined},
62 62  
63 63 gridComplete: function() {
64   - //grid.jqGrid('sortGrid', grid.jqGrid('getGridParam','sortname'), false, grid.jqGrid('getGridParam','sortorder'));
65 64 var lemma_id = window.lemma_id;
66 65 var lastSelectedId = window.lastSelectedId;
67 66 if(window.activeLemmaPanel == 'preview_lemma') {
... ... @@ -156,7 +155,7 @@ $(function(){
156 155 $('#search-panel-dialog').dialog( "option", "title", "Sortowanie haseł:" ).load(ajax_sort_form).dialog('open');
157 156 });
158 157 $("#filter-button").click(function(e){
159   - $('#search-panel-dialog').dialog( "option", "title", "Filtrowanie haseł:" ).load(ajax_filter_form).dialog('open');
  158 + $('#search-panel-dialog').empty().dialog( "option", "title", "Filtrowanie haseł:" ).load(ajax_filter_form).dialog('open');
160 159 });
161 160  
162 161 $("#show-columns-button").click(function(){
... ... @@ -199,128 +198,6 @@ function createSearchDialog() {
199 198 width: 'auto' });
200 199 }
201 200  
202   -function filter_form_submit() {
203   - this_form = $(this);
204   - form_data = this_form.serializeArray();
205   -
206   - var owner = '';
207   - var vocabulary = '';
208   - var status = '';
209   - var filter_frames = false;
210   -
211   - form_data = $.map(form_data, function(elem)
212   - {
213   - if (elem.name != 'owner' && elem.name != 'vocabulary' &&
214   - elem.name != 'status' &&
215   - elem.name != 'reflex' && elem.name != 'negativity' && elem.name != 'aspect' &&
216   - elem.name != 'has_argument' && elem.name != 'has_position' &&
217   - elem.name != 'approver' && elem.name != 'has_message_from' &&
218   - elem.name != 'filter_frames')
219   - return elem;
220   - else {
221   - if (elem.name == 'owner')
222   - owner = elem.value;
223   - else if (elem.name == 'vocabulary')
224   - vocabulary = elem.value;
225   - else if (elem.name == 'status')
226   - status = elem.value;
227   - else if (elem.name == 'has_message_from')
228   - has_message_from = elem.value;
229   - else if (elem.name == 'reflex')
230   - reflex = elem.value;
231   - else if (elem.name == 'negativity')
232   - negativity = elem.value;
233   - else if (elem.name == 'aspect')
234   - aspect = elem.value;
235   - else if (elem.name == 'has_argument')
236   - has_argument = elem.value;
237   - else if (elem.name == 'has_position')
238   - has_position = elem.value;
239   - else if (elem.name == 'approver')
240   - approver = elem.value;
241   - else if (elem.name == 'filter_frames')
242   - filter_frames = elem.value;
243   - }
244   - });
245   -
246   - form_data.push({name: 'owner', value: owner});
247   - form_data.push({name: 'vocabulary', value: vocabulary});
248   - form_data.push({name: 'status', value: status});
249   - form_data.push({name: 'has_message_from', value: has_message_from});
250   - form_data.push({name: 'reflex', value: reflex});
251   - form_data.push({name: 'negativity', value: negativity});
252   - form_data.push({name: 'aspect', value: aspect});
253   - form_data.push({name: 'has_argument', value: has_argument});
254   - form_data.push({name: 'has_position', value: has_position});
255   - form_data.push({name: 'approver', value: approver});
256   - form_data.push({name: 'filter_frames', value: filter_frames});
257   -
258   - act_lemma_id = window.prev_lemma_id;
259   - if(window.activeLemmaPanel != 'preview_lemma')
260   - act_lemma_id = window.lemma_id;
261   -
262   - form_data.push({name: 'lemma_id', value: act_lemma_id})
263   -
264   - $.ajaxJSON({
265   - method: 'post',
266   - url: ajax_filter_form_submit,
267   - data: {
268   - form_data: form_data
269   - },
270   -
271   - callback: function(result) {
272   - $('#search-panel-dialog').dialog('close');
273   - if(result['filter_frames'])
274   - {
275   - if(window.activeLemmaPanel == 'preview_lemma')
276   - {
277   - window.prev_filter_reflex_val = result['reflex'];
278   - window.prev_filter_neg_val = result['negativity'];
279   - window.prev_filter_pred_val = result['predicativity'];
280   - window.prev_filter_opinion_val = result['opinion'];
281   - window.prev_filter_aspect_val = result['aspect'];
282   - window.prev_filter_position_val = result['position'];
283   - window.prev_filter_argument_val = result['argument'];
284   - $('#prev_frame_filter #reflex_filter').val(result['reflex']);
285   - $('#prev_frame_filter #neg_filter').val(result['negativity']);
286   - $('#prev_frame_filter #pred_filter').val(result['predicativity']);
287   - $('#prev_frame_filter #opinion_filter').val(result['opinion']);
288   - $('#prev_frame_filter #aspect_filter').val(result['aspect']);
289   - $('#prev_frame_filter #argument_filter').val(result['argument']);
290   - $('#prev_frame_filter #position_filter').val(result['position']);
291   - $('#prev_frame_filter #argument_filter').trigger('change');
292   - }
293   - else
294   - {
295   - window.filter_reflex_val = result['reflex'];
296   - window.filter_neg_val = result['negativity'];
297   - window.filter_pred_val = result['predicativity'];
298   - window.filter_opinion_val = result['opinion'];
299   - window.filter_aspect_val = result['aspect'];
300   - window.filter_position_val = result['position'];
301   - window.filter_argument_val = result['argument'];
302   - $('#frame_filter #reflex_filter').val(result['reflex']);
303   - $('#frame_filter #neg_filter').val(result['negativity']);
304   - $('#frame_filter #pred_filter').val(result['predicativity']);
305   - $('#frame_filter #opinion_filter').val(result['opinion']);
306   - $('#frame_filter #aspect_filter').val(result['aspect']);
307   - $('#frame_filter #argument_filter').val(result['argument']);
308   - $('#frame_filter #position_filter').val(result['position']);
309   - $('#frame_filter #argument_filter').trigger('change');
310   - }
311   - }
312   - grid.trigger("reloadGrid");
313   - },
314   - error_callback: function(xhr, status, error) {
315   - error_alert(status + ': ' + error);
316   - },
317   - bad_data_callback: function(result) {
318   - return true;
319   - },
320   - });
321   - return false;
322   -}
323   -
324 201 function sort_form_submit() {
325 202 this_form = $(this);
326 203 form_data = this_form.serializeArray();
... ...
dictionary/static/js/lemmas_filtering.js 0 → 100644
function filter_form_submit() {
    // Submit handler for the lemma filtering form.
    //
    // Collects the serialized form fields, folds the repeated semantic-argument
    // sub-form fields ('negation', 'role', 'attribute', 'general_pref',
    // 'synset_pref', 'relational_pref*', separated into alternatives by 'or')
    // into structured descriptors, posts everything to the server, and on
    // success copies the returned schema filter settings into the active
    // schemata filter panel before reloading the grid.
    //
    // Fixed: this_form/form_data/act_lemma_id were implicit globals; stray
    // preference fields arriving before any 'negation' no longer crash.
    var this_form = $(this);
    var form_data = this_form.serializeArray();

    // Value of the 'filter_frames' checkbox (local; shadows the global
    // filter_frames() helper inside this function only).
    var filter_frames = false;
    var actSemArgument = {};            // semantic-argument descriptor being built
    var relationalSemPref = {'relation': '',
                             'role': '',
                             'attribute': ''};
    var semArgumentsAlternatives = [];  // 'or'-separated groups of descriptors
    var semArguments = [];              // descriptors of the current group

    form_data = $.map(form_data, function(elem)
    {
        // Plain fields pass through untouched; semantic-argument fields are
        // consumed here (returning undefined drops them) and re-added in
        // structured form below.
        if (elem.name != 'filter_frames' && !isPartOfSemArgFilter(elem)) {
            return elem;
        }
        if (elem.name == 'filter_frames') {
            filter_frames = elem.value;
        }
        else if (elem.name == 'negation') {
            // 'negation' opens a new semantic-argument descriptor; flush the
            // previous one first.
            if (!jQuery.isEmptyObject(actSemArgument)) {
                semArguments.push(actSemArgument);
            }
            actSemArgument = {'negation': elem.value,
                              'role': '',
                              'attribute': '',
                              'general_prefs': [],
                              'synset_prefs': [],
                              'relational_prefs': []};
        }
        else if (elem.name == 'or') {
            // 'or' closes the current group of descriptors.
            if (!jQuery.isEmptyObject(actSemArgument)) {
                semArguments.push(actSemArgument);
                actSemArgument = {};
            }
            semArgumentsAlternatives.push(semArguments);
            semArguments = [];
        }
        else if (elem.name == 'role' || elem.name == 'attribute') {
            actSemArgument[elem.name] = elem.value;
        }
        else if (elem.name == 'general_pref' || elem.name == 'synset_pref') {
            // Guard: ignore preference fields arriving before any 'negation'
            // has opened a descriptor (would otherwise throw on undefined).
            if (actSemArgument[elem.name + 's']) {
                actSemArgument[elem.name + 's'].push(elem.value);
            }
        }
        else if (elem.name.startsWith('relational_pref')) {
            // Relational preferences arrive as a relation/role/attribute
            // triple; the trailing 'attribute' field completes one entry.
            if (elem.name.endsWith('relation')) {
                relationalSemPref = {'relation': elem.value,
                                     'role': '',
                                     'attribute': ''};
            }
            else if (elem.name.endsWith('role')) {
                relationalSemPref['role'] = elem.value;
            }
            else if (elem.name.endsWith('attribute')) {
                relationalSemPref['attribute'] = elem.value;
                if (actSemArgument['relational_prefs']) {
                    actSemArgument['relational_prefs'].push(relationalSemPref);
                }
                relationalSemPref = {};
            }
        }
    });
    // Flush the trailing descriptor and group.
    if (!jQuery.isEmptyObject(actSemArgument)) {
        semArguments.push(actSemArgument);
    }
    if (semArguments.length > 0) {
        semArgumentsAlternatives.push(semArguments);
    }

    form_data.push({name: 'filter_frames', value: filter_frames});
    form_data.push({name: 'sem_arguments', value: semArgumentsAlternatives});

    // Filter relative to the lemma shown in the active panel.
    var act_lemma_id = window.prev_lemma_id;
    if (window.activeLemmaPanel != 'preview_lemma')
        act_lemma_id = window.lemma_id;

    form_data.push({name: 'lemma_id', value: act_lemma_id});

    $.ajaxJSON({
        method: 'post',
        url: ajax_filter_form_submit,
        data: {
            form_data: form_data
        },

        callback: function(result) {
            $('#search-panel-dialog').dialog('close');
            if (result['filter_frames']) {
                if (window.activeLemmaPanel == 'preview_lemma') {
                    _applyReturnedFrameFilters(result, '#prev_frame_filter', 'prev_filter_');
                }
                else {
                    _applyReturnedFrameFilters(result, '#frame_filter', 'filter_');
                }
            }
            grid.trigger("reloadGrid");
        },
        error_callback: function(xhr, status, error) {
            error_alert(status + ': ' + error);
        },
        bad_data_callback: function(result) {
            return true;
        },
    });
    return false;
}

// Copy the schema filter values returned by the server into the window-level
// filter state (window.<statePrefix>..._val) and into the widgets of the
// given panel, then fire the argument filter's 'change' handler so the
// schemata list is re-filtered.
function _applyReturnedFrameFilters(result, panelSelector, statePrefix) {
    var fields = [
        // [server result key, window state suffix, widget selector]
        ['schema_type',   'schema_type_val', '#schema_type_filter'],
        ['reflex',        'reflex_val',      '#reflex_filter'],
        ['negativity',    'neg_val',         '#neg_filter'],
        ['predicativity', 'pred_val',        '#pred_filter'],
        ['opinion',       'opinion_val',     '#opinion_filter'],
        ['aspect',        'aspect_val',      '#aspect_filter'],
        ['argument',      'argument_val',    '#argument_filter'],
        ['position',      'position_val',    '#position_filter']
    ];
    for (var i = 0; i < fields.length; i++) {
        window[statePrefix + fields[i][1]] = result[fields[i][0]];
        $(panelSelector + ' ' + fields[i][2]).val(result[fields[i][0]]);
    }
    $(panelSelector + ' #argument_filter').trigger('change');
}
  144 +
// True iff the form field belongs to the semantic-argument sub-form (and is
// therefore folded into structured descriptors by filter_form_submit()
// instead of being passed through verbatim).
function isPartOfSemArgFilter(field) {
    var semArgFieldNames = ['or', 'negation', 'role', 'attribute',
                            'general_pref', 'synset_pref'];
    if (semArgFieldNames.indexOf(field.name) !== -1) {
        return true;
    }
    return field.name.startsWith('relational_pref');
}
  154 +
// Append a fresh semantic-argument filter sub-form next to the clicked
// button and load its content from the server.
function addSemArgFilter(buttonElem) {
    var argsContainer = $(buttonElem).parent().parent();
    argsContainer.append('<p id="sem-argument"></p>');
    argsContainer.children().last().load(ajax_sem_arg_form);
}
  160 +
// Append an 'or' separator (a hidden field plus its visual marker) that
// starts a new alternative group of semantic-argument filters.
function addArgAlternative(buttonElem) {
    var argsContainer = $(buttonElem).parent().parent();
    var separatorHtml = '<div><hr class="alterSeparator"><input type="hidden" name="or" value="or"><strong>lub</strong> <button type="button" onclick="removeAlternative(this)">Usuń</button></div>';
    argsContainer.append(separatorHtml);
}
  165 +
// Remove the 'or' separator row that contains the clicked button.
function removeAlternative(buttonElem) {
    var separatorRow = $(buttonElem).parent();
    separatorRow.remove();
}
  169 +
// Remove the whole semantic-argument sub-form containing the clicked button.
function removeSemArgFilter(buttonElem) {
    var subForm = $(buttonElem).parent().parent();
    subForm.remove();
}
  173 +
// Append a selective-preference sub-form; which form gets loaded depends on
// the preference type currently chosen in the panel's type selector.
function addSelectivePreferenceFilter(buttonElem) {
    var prefsContainer = $(buttonElem).parent().parent();
    var chosenType = prefsContainer.find('#id_preference_type').first().val();
    prefsContainer.append('<p id="sel-preference"></p>');
    var slot = prefsContainer.children().last();
    if (chosenType === 'general') {
        slot.load(ajax_general_preference_form);
    }
    else if (chosenType === 'synset') {
        slot.load(ajax_synset_preference_form);
    }
    else if (chosenType === 'relation') {
        slot.load(ajax_relational_preference_form);
    }
}
  188 +
// Remove the selective-preference row containing the clicked button.
function removeSelPreferenceFilter(buttonElem) {
    var prefRow = $(buttonElem).parent();
    prefRow.remove();
}
... ...
dictionary/static/js/schemata_filtering.js 0 → 100644
// Schemata (frame) filter state, shared across the lemma view via window.*.
//
// The *_vals arrays hold option lists fetched from the server by
// initiateFrameFilters(). The filter_*_val variables hold the current
// selections of the main panel ('frame_filter'); the prev_filter_*_val ones
// those of the preview panel ('prev_frame_filter').
// '*' means "no constraint" for the select-based filters; '.*' means
// "no constraint" for the free-text position/argument filters.
var schema_type_vals = [];
var aspect_vals = [];
var reflex_vals = [];
var neg_vals = [];
var pred_vals = [];
var opinion_vals = [];
var filter_schema_type_val = '*';
var filter_aspect_val = '*';
var filter_reflex_val = '*';
var filter_neg_val = '*';
var filter_pred_val = '*';
var filter_opinion_val = '*';
var filter_position_val = '.*';
var filter_argument_val = '.*';
var prev_filter_schema_type_val = '*';
var prev_filter_aspect_val = '*';
var prev_filter_reflex_val = '*';
var prev_filter_neg_val = '*';
var prev_filter_pred_val = '*';
var prev_filter_opinion_val = '*';
var prev_filter_position_val = '.*';
var prev_filter_argument_val = '.*';
  23 +
// Reset the main ('frame_filter') schemata filter panel to its defaults —
// '*' (any) for the select filters, '.*' (match all) for the free-text
// filters — then re-trigger filtering via the argument filter's change
// handler.
function cancel_schemata_filtering() {
    window.filter_position_val = '.*';
    window.filter_argument_val = '.*';
    $('#frame_filter #argument_filter').val('.*');
    $('#frame_filter #position_filter').val('.*');
    var selectFilters = ['schema_type', 'aspect', 'reflex', 'neg', 'pred', 'opinion'];
    for (var i = 0; i < selectFilters.length; i++) {
        window['filter_' + selectFilters[i] + '_val'] = '*';
        $('#frame_filter #' + selectFilters[i] + '_filter').val('*');
    }
    $('#frame_filter #argument_filter').trigger('change');
}
  43 +
// Reset the preview ('prev_frame_filter') schemata filter panel to its
// defaults — '*' (any) for the select filters, '.*' (match all) for the
// free-text filters — then re-trigger filtering via the argument filter's
// change handler.
function cancel_prev_schemata_filtering() {
    window.prev_filter_position_val = '.*';
    window.prev_filter_argument_val = '.*';
    $('#prev_frame_filter #argument_filter').val('.*');
    $('#prev_frame_filter #position_filter').val('.*');
    var selectFilters = ['schema_type', 'aspect', 'reflex', 'neg', 'pred', 'opinion'];
    for (var i = 0; i < selectFilters.length; i++) {
        window['prev_filter_' + selectFilters[i] + '_val'] = '*';
        $('#prev_frame_filter #' + selectFilters[i] + '_filter').val('*');
    }
    $('#prev_frame_filter #argument_filter').trigger('change');
}
  63 +
// Fetch the allowed option lists for the schemata filter selects from the
// server and cache them in the module-level window.*_vals arrays.
function initiateFrameFilters() {
    $.ajaxJSON({
        method: 'get',
        url: ajax_get_frame_filter_options,
        data: {},
        callback: function(result) {
            // Maps cached global name -> key in the server response.
            var optionSources = {
                'schema_type_vals': 'schema_type_options',
                'aspect_vals': 'aspect_options',
                'reflex_vals': 'reflex_options',
                'neg_vals': 'neg_options',
                'pred_vals': 'pred_options',
                'opinion_vals': 'opinion_options'
            };
            for (var globalName in optionSources) {
                window[globalName] = result[optionSources[globalName]];
            }
        },

        error_callback: function(xhr, status, error) {
            error_alert(status + ': ' + error);
        },
    });
}
  85 +
// Sync the window-level filter state from the widgets of the given panel
// ('frame_filter' updates filter_*_val, 'prev_frame_filter' updates
// prev_filter_*_val). Unknown ids are ignored.
function filter_update(id) {
    var statePrefix = null;
    if (id == 'frame_filter') {
        statePrefix = 'filter_';
    }
    else if (id == 'prev_frame_filter') {
        statePrefix = 'prev_filter_';
    }
    if (statePrefix === null) {
        return;
    }
    var widgets = [
        // [window state suffix, widget element id]
        ['schema_type_val', 'schema_type_filter'],
        ['aspect_val', 'aspect_filter'],
        ['reflex_val', 'reflex_filter'],
        ['neg_val', 'neg_filter'],
        ['pred_val', 'pred_filter'],
        ['opinion_val', 'opinion_filter'],
        ['position_val', 'position_filter'],
        ['argument_val', 'argument_filter']
    ];
    for (var i = 0; i < widgets.length; i++) {
        window[statePrefix + widgets[i][0]] = $('#' + id + ' #' + widgets[i][1]).val();
    }
}
  110 +
// Build the schemata filter panel inside the container element `id`
// ('frame_filter' or 'prev_frame_filter'): six <select> filters, two
// autocompleted free-text filters, all restored to the current window-level
// filter state.
//
// Fixed: the original assigned p/text/select without `var` (implicit
// globals) and repeated the same widget-building stanza eight times;
// construction is now factored into helpers.
function draw_frames_filter(id) {
    var frame_filter = document.getElementById(id);

    _append_select_filter(frame_filter, 'Typ schematu: ', 'schema_type_filter', 'schema_type', schema_type_vals);
    _append_select_filter(frame_filter, 'Aspekt: ', 'aspect_filter', 'ASPEKT', aspect_vals);
    _append_select_filter(frame_filter, 'Zwrotność: ', 'reflex_filter', 'ZWROTNOSC', reflex_vals);
    _append_select_filter(frame_filter, 'Negatywność: ', 'neg_filter', 'NEGATYWNOŚĆ', neg_vals);
    _append_select_filter(frame_filter, 'Predykatywność: ', 'pred_filter', 'PREDYKATYWNOŚĆ', pred_vals);
    _append_select_filter(frame_filter, 'Opinia: ', 'opinion_filter', 'OPINIA', opinion_vals);
    _append_text_filter(frame_filter, 'Typ frazy: ', 'argument_filter', 'ARGUMENT');
    _append_text_filter(frame_filter, 'Pozycja: ', 'position_filter', 'POZYCJA');

    // Restore the panel's widgets from the matching window-level state.
    var statePrefix = null;
    if (id == 'frame_filter')
        statePrefix = 'filter_';
    else if (id == 'prev_frame_filter')
        statePrefix = 'prev_filter_';
    if (statePrefix) {
        $('#' + id + ' #schema_type_filter').val(window[statePrefix + 'schema_type_val']);
        $('#' + id + ' #aspect_filter').val(window[statePrefix + 'aspect_val']);
        $('#' + id + ' #reflex_filter').val(window[statePrefix + 'reflex_val']);
        $('#' + id + ' #neg_filter').val(window[statePrefix + 'neg_val']);
        $('#' + id + ' #pred_filter').val(window[statePrefix + 'pred_val']);
        $('#' + id + ' #opinion_filter').val(window[statePrefix + 'opinion_val']);
        $('#' + id + ' #position_filter').val(window[statePrefix + 'position_val']);
        $('#' + id + ' #argument_filter').val(window[statePrefix + 'argument_val']);
    }

    // Attach server-backed autocompletion to the free-text filters.
    _attach_lookup_autocomplete(id, 'argument_filter', ajax_argument_lookup);
    _attach_lookup_autocomplete(id, 'position_filter', ajax_position_lookup);
}

// Append a "<label><select>" paragraph with the given option list
// (objects with .value/.name) to `container`.
function _append_select_filter(container, label, select_id, select_name, options) {
    var p = document.createElement('p');
    p.appendChild(document.createTextNode(label));
    var select = document.createElement('select');
    select.setAttribute('id', select_id);
    select.setAttribute('name', select_name);
    for (var i = 0; i < options.length; i++) {
        var option = document.createElement('option');
        option.setAttribute('value', options[i].value);
        option.appendChild(document.createTextNode(options[i].name));
        select.appendChild(option);
    }
    p.appendChild(select);
    container.appendChild(p);
}

// Append a "<label><input>" paragraph (free-text filter) to `container`.
function _append_text_filter(container, label, input_id, input_name) {
    var p = document.createElement('p');
    p.appendChild(document.createTextNode(label));
    var input = document.createElement('input');
    input.setAttribute('id', input_id);
    input.setAttribute('name', input_name);
    p.appendChild(input);
    container.appendChild(p);
}

// Attach a jQuery UI autocomplete to input `input_id` inside panel `id`;
// suggestions come from `lookup_url`, and picking one syncs the panel's
// filter state via filter_update().
function _attach_lookup_autocomplete(id, input_id, lookup_url) {
    $('#' + id + ' #' + input_id).autocomplete({
        // triggers when a suggestion is selected
        select: function(event, ui) {
            filter_update(id);
        },
        // fetch matching suggestions from the server for the typed prefix
        source: function(req, add) {
            $.getJSON(lookup_url, req, function(data) {
                var suggestions = [];
                $.each(data['result'], function(i, val) {
                    suggestions.push(val[0]);
                });
                add(suggestions);
            });
        },
    });
}
  291 +
// Return the subset of `schemas` matching the filter widgets of panel
// `filter_id`.
//
// The select-based filters use '*' as "no constraint". Per the comparisons
// below, schema.characteristics holds [0]=reflexivity, [1]=negativity,
// [2]=predicativity, [3]=aspect. The free-text position/argument filters
// are '|'/'&'/'!' expressions evaluated by has_positions()/has_arguments();
// their default value '.*' disables them.
//
// Fixed: frameMatch was assigned without `var` (implicit global).
function filter_frames(schemas, filter_id) {
    var schema_type_val = $('#'+filter_id+' #schema_type_filter').val();
    var aspect_val = $('#'+filter_id+' #aspect_filter').val();
    var reflex_val = $('#'+filter_id+' #reflex_filter').val();
    var neg_val = $('#'+filter_id+' #neg_filter').val();
    var pred_val = $('#'+filter_id+' #pred_filter').val();
    var opinion_val = $('#'+filter_id+' #opinion_filter').val();
    var position_val = $('#'+filter_id+' #position_filter').val().trim();
    var argument_val = $('#'+filter_id+' #argument_filter').val().trim();
    var filtered_frames = [];

    // '.*' is the "match everything" default — treat as no constraint.
    if (position_val == '.*')
        position_val = '';
    if (argument_val == '.*')
        argument_val = '';

    for (var i = 0; i < schemas.length; i++) {
        if ((schemas[i].characteristics[3] == aspect_val || aspect_val == '*')
           && (schemas[i].characteristics[0] == reflex_val || reflex_val == '*')
           && (schemas[i].characteristics[1] == neg_val || neg_val == '*')
           && (schemas[i].characteristics[2] == pred_val || pred_val == '*')
           && (schemas[i].opinion == opinion_val || opinion_val == '*')
           && schema_type_valid(schemas[i], schema_type_val)) {
            var frameMatch = false;  // fixed: was an implicit global
            if (position_val)
                frameMatch = has_positions(schemas[i], position_val);
            // Only consult the argument filter when the position filter
            // passed (or is inactive).
            if (argument_val && (frameMatch || !position_val)) {
                frameMatch = has_arguments(schemas[i], argument_val);
            }
            if (frameMatch || (!argument_val && !position_val))
                filtered_frames.push(schemas[i]);
        }
    }
    return filtered_frames;
}
  331 +
// True iff the position expression `pos_term` matches `frame`.
//
// The expression is a '|'-separated list of alternatives, each a
// '&'-separated conjunction of patterns; a pattern prefixed with '!' must
// NOT occur among the schema's positions. Each pattern is matched anchored
// (via escape_regex()) against every position's text_rep.
function has_positions(frame, pos_term) {
    var alternatives = pos_term.split('|');
    for (var a = 0; a < alternatives.length; a++) {
        var conjuncts = alternatives[a].split('&');
        var satisfied = true;
        for (var c = 0; c < conjuncts.length; c++) {
            var term = conjuncts[c].trim();
            var negated = term.startsWith('!');
            var pattern = negated ? term.substring(1) : term;
            try {
                var termRe = new RegExp('^' + escape_regex(pattern) + '$');
                var hits = $.grep(frame.positions,
                                  function(pos) {
                                      return pos.text_rep.match(termRe);
                                  });
                // A negated pattern fails when found; a plain one fails
                // when absent.
                if ((hits.length > 0) === negated) {
                    satisfied = false;
                    break;
                }
            }
            catch (err) {
                // Malformed pattern: treat the whole alternative as failed.
                satisfied = false;
                break;
            }
        }
        if (satisfied) {
            return true;
        }
    }
    return false;
}
  369 +
// True iff the phrase-type expression `arg_term` matches `frame`.
//
// Same '|' / '&' / '!' syntax as has_positions(), but each pattern is
// matched (anchored, via escape_regex()) against the text_rep of the
// arguments inside each of the schema's positions.
function has_arguments(frame, arg_term) {
    var alternatives = arg_term.split('|');
    for (var a = 0; a < alternatives.length; a++) {
        var conjuncts = alternatives[a].split('&');
        var satisfied = true;
        for (var c = 0; c < conjuncts.length; c++) {
            var term = conjuncts[c].trim();
            var negated = term.startsWith('!');
            var pattern = negated ? term.substring(1) : term;
            try {
                var termRe = new RegExp('^' + escape_regex(pattern) + '$');
                var found = false;
                // Stop at the first position containing a matching argument.
                for (var p = 0; p < frame.positions.length && !found; p++) {
                    found = $.grep(frame.positions[p].arguments,
                                   function(arg) {
                                       return arg.text_rep.match(termRe);
                                   }).length > 0;
                }
                // A negated pattern fails when found; a plain one fails
                // when absent.
                if (found === negated) {
                    satisfied = false;
                    break;
                }
            }
            catch (err) {
                // Malformed pattern: treat the whole alternative as failed.
                satisfied = false;
                break;
            }
        }
        if (satisfied) {
            return true;
        }
    }
    return false;
}
  410 +
  411 +function schema_type_valid(schema, filter_option) {
  412 + if(filter_option == '*') return true;
  413 + else if(filter_option == 'normal' && !schema.is_phraseologic) return true;
  414 + else if(filter_option == 'phraseologic' && schema.is_phraseologic) return true;
  415 + else return false;
  416 +}
... ...
dictionary/static/js/semantics_coupling.js
1   -function schemaGotAssignedSemantics(element_id) {
2   - var semanticsAssigned = true;
3   - var id_map = parseId(element_id);
4   - var schema_id = id_map['frame_id'];
5   - if(schema_id < 0) {
6   - semanticsAssigned = false;
7   - }
8   - else {
9   - jQuery.ajax({
10   - type: 'get',
11   - url: ajax_schema_got_assigned_semantics,
12   - data: {lemma_id: window.lemma_id,
13   - schema_id: schema_id},
14   - success: function(result) {
15   - semanticsAssigned = result['got_assigned_semantics'];
16   - },
17   - async: false
18   - });
19   - }
20   - return semanticsAssigned;
21   -}
22   -
23 1 function semanticsAssignedAlert() {
24 2 error_alert('Działaj rozważnie, element jest wykorzystywany w ramach semantycznych.');
25 3 }
26 4  
27   -function exampleGotAssignedSemantics(example_tab_id)
28   -{
29   - var semanticsAssigned = true;
30   - var example_id = example_tab_id.replace('nkjp_', '');
31   - if (example_id < 0) {
32   - semanticsAssigned = false;
33   - }
34   - else {
35   - jQuery.ajax({
36   - type: 'get',
37   - url: ajax_example_got_assigned_semantics,
38   - data: {lemma_id: window.lemma_id,
39   - example_id: example_id},
40   - success: function(result) {
41   - semanticsAssigned = result['got_assigned_semantics'];
42   - },
43   - async: false
44   - });
45   - }
46   - return semanticsAssigned;
47   -}
48   -
49 5 function semanticsAssignedExampleAlert() {
50 6 error_alert('Działaj rozważnie, przykład jest wykorzystywany w ramach semantycznych.');
51 7 }
... ...
dictionary/teixml.py
1 1 #-*- coding:utf-8 -*-
2 2 from semantics.models import LexicalUnitExamples
3 3  
4   -#Copyright (c) 2015, Bartłomiej Nitoń
5   -#All rights reserved.
6   -
7   -#Redistribution and use in source and binary forms, with or without modification, are permitted provided
8   -#that the following conditions are met:
9   -
10   -# Redistributions of source code must retain the above copyright notice, this list of conditions and
11   -# the following disclaimer.
12   -# Redistributions in binary form must reproduce the above copyright notice, this list of conditions
13   -# and the following disclaimer in the documentation and/or other materials provided with the distribution.
14   -
15   -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
16   -# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
17   -# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
18   -# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
19   -# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20   -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
21   -# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22   -# POSSIBILITY OF SUCH DAMAGE.
23   -
24   -'''
25   -File with functions responsible for creating TEI xml.
26   -'''
27   -
28 4 import datetime
29   -import operator
30 5  
31 6 from lxml import etree
32 7 from xml.sax.saxutils import escape
33 8  
34   -from dictionary.models import Atribute_Model, Frame_Opinion_Value, Frame_Char_Model, \
35   - PositionCategory, Argument_Model, \
  9 +from dictionary.models import Atribute_Model, \
36 10 sortArguments, sortatributes, sortPositions, sort_positions
37 11  
38 12 XML_NAMESPACE = 'http://www.w3.org/XML/1998/namespace'
... ... @@ -41,7 +15,6 @@ XML_NAMESPACE = &#39;http://www.w3.org/XML/1998/namespace&#39;
41 15 def createteixml(outpath, lemmas, frame_opinion_values):
42 16 root = write_root()
43 17 write_header(root)
44   - #lemmas = lemmas.filter(entry=u'brnąć')
45 18 write_entries(root, lemmas, frame_opinion_values)
46 19 with open(outpath, 'w') as output_file:
47 20 output_file.write(etree.tostring(root, pretty_print=True,
... ... @@ -92,9 +65,10 @@ def write_entry(body_elem, lemma, frame_opinions, frame_opinion_values):
92 65  
93 66 write_syntactic_layer(entry_elem, lemma, frame_opinions, frame_opinion_values)
94 67 write_examples_layer(entry_elem, lemma)
95   - write_semantic_layer(entry_elem, lemma)
96   - write_meanings_layer(entry_elem, lemma)
97   - write_connections_layer(entry_elem, lemma)
  68 + if lemma.semantics_ready():
  69 + write_semantic_layer(entry_elem, lemma)
  70 + write_meanings_layer(entry_elem, lemma)
  71 + write_connections_layer(entry_elem, lemma)
98 72  
99 73 def write_syntactic_layer(entry_elem, lemma, frame_opinions, frame_opinion_values):
100 74 synt_layer_fs_elem = etree.SubElement(entry_elem, 'fs')
... ... @@ -113,18 +87,18 @@ def write_syntactic_layer(entry_elem, lemma, frame_opinions, frame_opinion_value
113 87 pred_val=pred_val,
114 88 aspect_val=aspect_val).order_by('text_rep')
115 89 for frame in matchingframes:
116   - if (not frame_opinion_values.exists() or
117   - frame_opinions.filter(frame=frame).exists()):
  90 + if not lemma.phraseology_ready() and frame.phraseologic:
  91 + continue
  92 + if (not frame_opinion_values.exists() or frame_opinions.filter(frame=frame).exists()):
118 93 write_schema(vColl_elem, frame, lemma)
119 94  
120 95 def write_schema(parent_elem, schema, lemma):
121   -
122 96 schema_xml_id = 'wal_%s.%s-sch' % (str(lemma.entry_obj.id), str(schema.id))
123 97  
124 98 schema_fs_elem = etree.SubElement(parent_elem, 'fs')
125 99 schema_fs_elem.attrib[etree.QName(XML_NAMESPACE, 'id')] = schema_xml_id
126 100 schema_fs_elem.attrib['type'] = 'schema'
127   - # opinia o ramce
  101 + # opinia o schemacie
128 102 try:
129 103 schema_opinion = lemma.frame_opinions.filter(frame=schema).all()[0].value.short
130 104 except IndexError:
... ... @@ -420,20 +394,24 @@ def write_examples_layer(parent_elem, lemma):
420 394 write_examples_feature(vColl_elem, lemma)
421 395  
422 396 def write_examples_feature(parent_elem, lemma):
423   - entry = lemma.entry_obj
424 397 for example in lemma.nkjp_examples.order_by('opinion__priority').all():
425   - write_example(parent_elem, entry, example)
  398 + if not lemma.phraseology_ready() and example.frame.phraseologic:
  399 + pass
  400 + else:
  401 + write_example(parent_elem, lemma, example)
426 402 for example in lemma.lemma_nkjp_examples.order_by('opinion__priority').all():
427   - write_example(parent_elem, entry, example)
  403 + write_example(parent_elem, lemma, example)
428 404  
429   -def write_example(parent_elem, entry, example):
  405 +def write_example(parent_elem, lemma, example):
  406 + entry = lemma.entry_obj
430 407 example_xml_id = u'wal_%s.%s-exm' % (str(entry.id), str(example.id))
431 408  
432 409 example_fs_elem = etree.SubElement(parent_elem, 'fs')
433 410 example_fs_elem.attrib[etree.QName(XML_NAMESPACE, 'id')] = example_xml_id
434 411 example_fs_elem.attrib['type'] = 'example'
435 412  
436   - get_and_write_meaning_link(example_fs_elem, entry, example)
  413 + if lemma.semantics_ready():
  414 + get_and_write_meaning_link(example_fs_elem, entry, example)
437 415 write_phrases_links(example_fs_elem, entry, example)
438 416  
439 417 sentence_f_elem = etree.SubElement(example_fs_elem, 'f')
... ... @@ -441,7 +419,6 @@ def write_example(parent_elem, entry, example):
441 419 sentence_content_elem = etree.SubElement(sentence_f_elem, 'string')
442 420 sentence_content_elem.text = escape(example.sentence)
443 421  
444   - # trzeba do zrodel dodac nazwy symboliczne
445 422 source_f_elem = etree.SubElement(example_fs_elem, 'f')
446 423 source_f_elem.attrib['name'] = 'source'
447 424 source_symbol_elem = etree.SubElement(source_f_elem, 'symbol')
... ... @@ -460,8 +437,9 @@ def write_example(parent_elem, entry, example):
460 437  
461 438 def get_and_write_meaning_link(parent_elem, entry, example):
462 439 try:
  440 + entry_lex_units = entry.meanings.all()
463 441 lex_unit_example = LexicalUnitExamples.objects.get(example=example,
464   - lexical_unit__base=entry.name)
  442 + lexical_unit__in=entry_lex_units)
465 443 meaning = lex_unit_example.lexical_unit
466 444 meaning_xml_id = u'#wal_%s.%s-mng' % (str(entry.id), str(meaning.id))
467 445  
... ... @@ -518,9 +496,19 @@ def write_frame_fs(parent_elem, entry, frame):
518 496 frame_fs_elem.attrib[etree.QName(XML_NAMESPACE, 'id')] = frame_xml_id
519 497 frame_fs_elem.attrib['type'] = 'frame'
520 498  
  499 + write_frame_opinion(frame_fs_elem, frame)
521 500 write_frame_meanings(frame_fs_elem, entry, frame)
522 501 write_frame_arguments(frame_fs_elem, entry, frame)
523 502  
  503 +def write_frame_opinion(parent_elem, frame):
  504 + frame_opinion = 'unk'
  505 + if frame.opinion_selected():
  506 + frame_opinion = frame.opinion.short
  507 + opinion_f_elem = etree.SubElement(parent_elem, 'f')
  508 + opinion_f_elem.attrib['name'] = 'opinion'
  509 + opinion_symbol = etree.SubElement(opinion_f_elem, 'symbol')
  510 + opinion_symbol.attrib['value'] = frame_opinion
  511 +
524 512 def write_frame_meanings(parent_elem, entry, frame):
525 513 meanings_f_elem = etree.SubElement(parent_elem, 'f')
526 514 meanings_f_elem.attrib['name'] = 'meanings'
... ... @@ -686,8 +674,7 @@ def write_meanings_layer(parent_elem, lemma):
686 674  
687 675 def write_meanings(parent_elem, lemma):
688 676 entry = lemma.entry_obj
689   - lex_units = entry.lexical_units()
690   - for lex_unit in lex_units.all():
  677 + for lex_unit in entry.meanings.all():
691 678 write_meaning(parent_elem, entry, lex_unit)
692 679  
693 680 def write_meaning(parent_elem, entry, lex_unit):
... ... @@ -733,39 +720,30 @@ def write_connections_layer(parent_elem, lemma):
733 720 def write_alternations(parent_elem, lemma):
734 721 entry = lemma.entry_obj
735 722 frames = entry.actual_frames()
736   -
737 723 for schema in lemma.frames.all():
738 724 for frame in frames:
739 725 matching_complements = frame.complements.filter(realizations__frame=schema).distinct()
740   - if matching_complements.filter(realizations__alternation=1).exists():
741   - alternation_fs_elem = etree.SubElement(parent_elem, 'fs')
742   - alternation_fs_elem.attrib['type'] = 'alternation'
743   -
744   - connections_f_elem = etree.SubElement(alternation_fs_elem, 'f')
745   - connections_f_elem.attrib['name'] = 'connections'
746   -
747   - vColl_elem = etree.SubElement(connections_f_elem, 'vColl')
748   - vColl_elem.attrib['org'] = 'set'
749   -
750   - for arg in frame.complements.all():
751   - alt_realizations = arg.realizations.filter(frame=schema, alternation=1)
752   - if alt_realizations.exists():
753   - write_connection(vColl_elem, entry, frame, arg, alt_realizations)
754   -
755   - if matching_complements.filter(realizations__alternation=2).exists():
756   - alternation_fs_elem = etree.SubElement(parent_elem, 'fs')
757   - alternation_fs_elem.attrib['type'] = 'alternation'
758   -
759   - connections_f_elem = etree.SubElement(alternation_fs_elem, 'f')
760   - connections_f_elem.attrib['name'] = 'connections'
761   -
762   - vColl_elem = etree.SubElement(connections_f_elem, 'vColl')
763   - vColl_elem.attrib['org'] = 'set'
764   -
765   - for arg in frame.complements.all():
766   - alt_realizations = arg.realizations.filter(frame=schema, alternation=2)
767   - if alt_realizations.exists():
768   - write_connection(vColl_elem, entry, frame, arg, alt_realizations)
  726 + write_alternation(parent_elem, entry, schema, frame, matching_complements, 1)
  727 + write_alternation(parent_elem, entry, schema, frame, matching_complements, 2)
  728 +
  729 +def write_alternation(parent_elem, entry, schema, frame, complements, alternation):
  730 + alternation_compls = complements.filter(realizations__alternation=alternation)
  731 + if alternation_compls.exists():
  732 + first_connection = True
  733 + for arg in alternation_compls.all():
  734 + alt_realizations = arg.realizations.filter(frame=schema, alternation=alternation)
  735 + if alt_realizations.exists():
  736 + if first_connection:
  737 + alternation_fs_elem = etree.SubElement(parent_elem, 'fs')
  738 + alternation_fs_elem.attrib['type'] = 'alternation'
  739 +
  740 + connections_f_elem = etree.SubElement(alternation_fs_elem, 'f')
  741 + connections_f_elem.attrib['name'] = 'connections'
  742 +
  743 + vColl_elem = etree.SubElement(connections_f_elem, 'vColl')
  744 + vColl_elem.attrib['org'] = 'set'
  745 + first_connection = False
  746 + write_connection(vColl_elem, entry, frame, arg, alt_realizations)
769 747  
770 748 def write_connection(parent_elem, entry, frame, arg, realizations):
771 749 connection_fs_elem = etree.SubElement(parent_elem, 'fs')
... ... @@ -794,158 +772,5 @@ def write_phrases(parent_elem, entry, realizations):
794 772 realization.position.id, realization.argument.id)
795 773 phrase_link_elem = etree.SubElement(vColl_elem, 'fs')
796 774 phrase_link_elem.attrib['sameAs'] = phrase_xml_link
797   - phrase_link_elem.attrib['type'] = 'phrase'
798   -
799   -
800   -
801   -def writefsdecl(outfile):
802   - '''
803   - Write feature structures declarations
804   - '''
805   - outfile.write(u' <encodingDesc>\n')
806   - outfile.write(u' <fsdDecl>\n')
807   -
808   -# syntacticBahaviour fs declaration
809   - outfile.write(u' <fsDecl type="syntacticBehaviour">\n')
810   - outfile.write(u' <fsDescr>Describes syntactic behaviour of entry</fsDescr>\n')
811   - outfile.write(u' <fDecl name="frames">\n')
812   - outfile.write(u' <fDescr>syntactic frames</fDescr>\n')
813   - outfile.write(u' <vRange>\n')
814   - outfile.write(u' <vColl org="list">\n')
815   - outfile.write(u' <fs type="frame"/>\n')
816   - outfile.write(u' </vColl>\n')
817   - outfile.write(u' </vRange>\n')
818   - outfile.write(u' </fDecl>\n')
819   - outfile.write(u' </fsDecl>\n')
820   -
821   -# frame fs declaration
822   - outfile.write(u' <fsDecl type="frame">\n')
823   - outfile.write(u' <fsDescr>Describes syntactic frame</fsDescr>\n')
824   - # frame opinion
825   - outfile.write(u' <fDecl name="opinion">\n')
826   - outfile.write(u' <fDescr>frame opinion</fDescr>\n')
827   - outfile.write(u' <vRange>\n')
828   - outfile.write(u' <vAlt>\n')
829   - for alt in Frame_Opinion_Value.objects.order_by('priority'):
830   - outfile.write(u' <symbol value="%s"/>\n' % alt.short)
831   - outfile.write(u' </vAlt>\n')
832   - outfile.write(u' </vRange>\n')
833   - outfile.write(u' </fDecl>\n')
834   - # reflex
835   - outfile.write(u' <fDecl name="reflex">\n')
836   - outfile.write(u' <fDescr>frame reflexivity</fDescr>\n')
837   - outfile.write(u' <vRange>\n')
838   - outfile.write(u' <vAlt>\n')
839   - outfile.write(u' <binary value="true"/>\n')
840   - outfile.write(u' <binary value="false"/>\n')
841   - outfile.write(u' </vAlt>\n')
842   - outfile.write(u' </vRange>\n')
843   - outfile.write(u' </fDecl>\n')
844   - # aspect
845   - outfile.write(u' <fDecl name="aspect">\n')
846   - outfile.write(u' <fDescr>frame aspect</fDescr>\n')
847   - outfile.write(u' <vRange>\n')
848   - outfile.write(u' <vAlt>\n')
849   - aspect_obj = Frame_Char_Model.objects.get(model_name=u'ASPEKT')
850   - for alt in aspect_obj.frame_char_values.order_by('priority'):
851   - outfile.write(u' <symbol value="%s"/>\n' %
852   - alt.value)
853   - outfile.write(u' </vAlt>\n')
854   - outfile.write(u' </vRange>\n')
855   - outfile.write(u' </fDecl>\n')
856   - # negatywnosc
857   - outfile.write(u' <fDecl name="negativity">\n')
858   - outfile.write(u' <fDescr>frame negativity</fDescr>\n')
859   - outfile.write(u' <vRange>\n')
860   - outfile.write(u' <vAlt>\n')
861   - aspect_obj = Frame_Char_Model.objects.get(model_name=u'NEGATYWNOŚĆ')
862   - for alt in aspect_obj.frame_char_values.order_by('priority'):
863   - outfile.write(u' <symbol value="%s"/>\n' %
864   - alt.value)
865   - outfile.write(u' </vAlt>\n')
866   - outfile.write(u' </vRange>\n')
867   - outfile.write(u' </fDecl>\n')
868   - # predykatywnosc
869   - outfile.write(u' <fDecl name="predicativity">\n')
870   - outfile.write(u' <fDescr>frame predicativity</fDescr>\n')
871   - outfile.write(u' <vRange>\n')
872   - outfile.write(u' <vAlt>\n')
873   - aspect_obj = Frame_Char_Model.objects.get(model_name=u'PREDYKATYWNOŚĆ')
874   - for alt in aspect_obj.frame_char_values.order_by('priority'):
875   - outfile.write(u' <symbol value="%s"/>\n' %
876   - alt.value)
877   - outfile.write(u' </vAlt>\n')
878   - outfile.write(u' </vRange>\n')
879   - outfile.write(u' </fDecl>\n')
880   - # positions
881   - outfile.write(u' <fDecl name="positions">\n')
882   - outfile.write(u' <fDescr>syntactic positions</fDescr>\n')
883   - outfile.write(u' <vRange>\n')
884   - outfile.write(u' <vColl org="list">\n')
885   - outfile.write(u' <fs type="position"/>\n')
886   - outfile.write(u' </vColl>\n')
887   - outfile.write(u' </vRange>\n')
888   - outfile.write(u' </fDecl>\n')
889   - outfile.write(u' </fsDecl>\n')
890   -
891   -# position fs declaration
892   - outfile.write(u' <fsDecl type="position">\n')
893   - outfile.write(u' <fsDescr>Describes syntactic position</fsDescr>\n')
894   - # position category
895   - outfile.write(u' <fDecl name="category">\n')
896   - outfile.write(u' <fDescr>position category</fDescr>\n')
897   - outfile.write(u' <vRange>\n')
898   - outfile.write(u' <vAlt>\n')
899   - for alt in PositionCategory.objects.filter(control=False).order_by('priority'):
900   - outfile.write(u' <symbol value="%s"/>\n' % alt.category)
901   - outfile.write(u' </vAlt>\n')
902   - outfile.write(u' </vRange>\n')
903   - outfile.write(u' </fDecl>\n')
904   - # position control
905   - outfile.write(u' <fDecl name="control">\n')
906   - outfile.write(u' <fDescr>position category</fDescr>\n')
907   - outfile.write(u' <vRange>\n')
908   - outfile.write(u' <vAlt>\n')
909   - for alt in PositionCategory.objects.filter(control=True).order_by('priority'):
910   - outfile.write(u' <symbol value="%s"/>\n' % alt.category)
911   - outfile.write(u' </vAlt>\n')
912   - outfile.write(u' </vRange>\n')
913   - outfile.write(u' </fDecl>\n')
914   - # arguments
915   - outfile.write(u' <fDecl name="arguments">\n')
916   - outfile.write(u' <fDescr>syntactic arguments</fDescr>\n')
917   - outfile.write(u' <vRange>\n')
918   - outfile.write(u' <vColl org="list">\n')
919   - outfile.write(u' <fs type="argument"/>\n')
920   - outfile.write(u' </vColl>\n')
921   - outfile.write(u' </vRange>\n')
922   - outfile.write(u' </fDecl>\n')
923   - outfile.write(u' </fsDecl>\n')
924   -
925   -# argument fs declaration
926   - outfile.write(u' <fsDecl type="argument">\n')
927   - outfile.write(u' <fsDescr>Describes syntactic argument</fsDescr>\n')
928   - # position category
929   - outfile.write(u' <fDecl name="type">\n')
930   - outfile.write(u' <fDescr>type of argument</fDescr>\n')
931   - outfile.write(u' <vRange>\n')
932   - outfile.write(u' <vAlt>\n')
933   - for alt in Argument_Model.objects.order_by('priority'):
934   - outfile.write(u' <symbol value="%s"/>\n' % alt.arg_model_name)
935   - outfile.write(u' </vAlt>\n')
936   - outfile.write(u' </vRange>\n')
937   - outfile.write(u' </fDecl>\n')
938   - # attributes
939   - outfile.write(u' <fDecl name="attributes">\n')
940   - outfile.write(u' <fDescr>argument attributes</fDescr>\n')
941   - outfile.write(u' <vRange>\n')
942   - outfile.write(u' <vColl org="list">\n')
943   - outfile.write(u' <fs type="attribut"/>\n')
944   - outfile.write(u' </vColl>\n')
945   - outfile.write(u' </vRange>\n')
946   - outfile.write(u' </fDecl>\n')
947   - outfile.write(u' </fsDecl>\n')
948   -
949   - outfile.write(u' </fsdDecl>\n')
950   - outfile.write(u' </encodingDesc>\n')
  775 + phrase_link_elem.attrib['type'] = 'phrase'
951 776  
952 777 \ No newline at end of file
... ...
dictionary/templates/arg_realizations.html
... ... @@ -7,7 +7,7 @@
7 7 {% endblock %}
8 8  
9 9 {% block content %}
10   -</br>
  10 +<br/>
11 11 <table class='ArgRealViewTable'>
12 12 <tr>
13 13 <td>
... ...