Commit 9083aa82ce5a9b4f310e3857f6a7aa7b4c30e58f
0 parents
pierwszy commit po otwarciu kodu
Showing
251 changed files
with
47876 additions
and
0 deletions
Too many changes to show.
To preserve performance only 69 of 251 files are displayed.
.hgignore
0 โ 100644
INSTALL
0 โ 100644
1 | +++ a/INSTALL | |
1 | +Instalacja kuźni na docelowym serwerze | |
2 | +====================================== | |
3 | + | |
4 | +(zakładamy, że domyślny python jest w wersji przynajmniej 2.6) | |
5 | + | |
6 | +Zmiany w ลบrรณdลach projektu | |
7 | +-------------------------- | |
8 | + | |
9 | +Jedyne konieczne zmiany w projekcie (poza konfiguracjฤ bazy opisanฤ w README) to: | |
10 | + | |
11 | +1. Jeลli strona ma dziaลaฤ w podkatalogu domeny, to naleลผy zmieniฤ linijkฤ w settings.py z | |
12 | + | |
13 | +SITE_PREFIX = '' | |
14 | + | |
15 | +na | |
16 | + | |
17 | +SITE_PREFIX = '/kuznia' | |
18 | + | |
19 | +(na przykลad). | |
20 | + | |
21 | +2. Aby dziaลaลo wysyลanie maili, pod warunkiem ลผe na serwerze dziaลa poczta (jeลli nie dziaลa, to trzeba zainstalowaฤ jakiล serwer SMTP, np. postfix), wystarczy wykomentowaฤ linijkฤ | |
22 | + | |
23 | +EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' | |
24 | + | |
25 | +w settings.py, stawiajฤ c przed niฤ # | |
26 | + | |
27 | + | |
28 | +Konfiguracja postgresa | |
29 | +---------------------- | |
30 | + | |
31 | +$ sudo su postgres | |
32 | +# psql | |
33 | +# create user kuznia with password 'kuznia'; | |
34 | +# create database kuznia; | |
35 | +# grant all privileges on database kuznia to kuznia; | |
36 | +# \c kuznia | |
37 | +# create language plpgsql; -- niepotrzebne w postgresie 9.1 | |
38 | +Ctrl-D | |
39 | +Ctrl-D | |
40 | + | |
41 | + | |
42 | +Konfiguracja apacza | |
43 | +------------------- | |
44 | + | |
45 | +Potrzebny jest moduล mod_fastcgi do apacza. Kiedy juลผ go mamy, to dodajemy do konfiguracji apacza nastฤpujฤ cฤ treลฤ (przy zaลoลผeniu, ลผe instalujemy kuลบniฤ w podkatalogu kuznia skonfigurowanej domeny): | |
46 | + | |
47 | +LoadModule fastcgi_module modules/mod_fastcgi.so | |
48 | + | |
49 | +FastCGIExternalServer /foo/bar/forge.fcgi -host 127.0.0.1:3033 | |
50 | + | |
51 | +<Location /kuznia> | |
52 | + RewriteEngine On | |
53 | + RewriteRule ^/site/root/kuznia/(media.*)$ /baz/$1 [QSA,L,PT] | |
54 | + RewriteRule ^/site/root/kuznia/(admin_media.*)$ /baz/$1 [QSA,L,PT] | |
55 | + RewriteCond %{REQUEST_FILENAME} !-f | |
56 | + RewriteRule ^/site/root/(.*)$ /baz/forge.fcgi/$1 [QSA,L] | |
57 | +</Location> | |
58 | + | |
59 | +Legenda: | |
60 | + | |
61 | +/foo/bar/ to ลcieลผka w systemie plikรณw do katalogu, ktรณry jest widoczny pod adresem http://domena/baz/ | |
62 | + | |
63 | +/site/root/ to gลรณwny katalog na dysku odpowiadajฤ cy domenie (na chopinie to jest /var/www/chopin.ipipan.waw.pl/) | |
64 | + | |
65 | +W katalogu /foo/bar/ powinien byฤ skrรณt do katalogu media w projekcie (i jeลli na serwerze dziaลa selinux, to trzeba zrobiฤ sztuczki, ลผeby skrรณt dziaลaล). Moลผna teลผ tam zrobiฤ skrรณt lub skopiowaฤ katalog contrib/admin/media do /foo/bar/admin_media, jeลli chcemy korzystaฤ z interfejsu admina (na razie chyba nie chcemy, ale i tak na chopinie dziaลa). | |
66 | + | |
67 | + | |
68 | +Uruchomienie serwera | |
69 | +-------------------- | |
70 | + | |
71 | +Najproลciej uruchamiaฤ serwer nastฤpujฤ cym skryptem: | |
72 | + | |
73 | + | |
74 | +#!/bin/bash | |
75 | + | |
76 | +HOME=/home/foobar/ | |
77 | + | |
78 | +kill `cat $HOME/pid` | |
79 | +cd $HOME/lexeme_forge/ | |
80 | +./manage.py runfcgi method=threaded host=127.0.0.1 port=3033 pidfile=$HOME/pid | |
81 | + | |
82 | + | |
83 | +Polecenie kill wywali niegroลบny bลฤ d, jeลli serwer nie byล wczeลniej uruchomiony, w przeciwnym razie go wyลฤ czy przed ponownym wลฤ czeniem. W katalogu /home/foobar powinno byฤ ลciฤ gniฤte repozytorium projektu. | |
84 | + | |
85 | + | |
86 | +Aktualizacja projektu | |
87 | +--------------------- | |
88 | + | |
89 | +$ cd /home/foobar/lexeme_forge/ | |
90 | +$ hg pull | |
91 | +$ hg up | |
92 | +$ /home/foobar/start-server | |
93 | + | |
94 | +gdzie start-server jest skryptem z poprzedniego punktu. | |
... | ... |
README
0 โ 100644
1 | +++ a/README | |
1 | +Zaleลผnoลci | |
2 | +---------- | |
3 | + | |
4 | +django-registration 0.7 | |
5 | +django-extensions [dowolna wersja] | |
6 | + | |
7 | +Takich paczek moลผe siฤ zrobiฤ duลผo i czasem mogฤ byฤ potrzebne nowsze wersje, niลผ w repozytoriach dystrybucji, dlatego polecam zrobiฤ w katalogu domowym podkatalog na te paczki (u mnie to jest ~/lib/python/) i instalowaฤ je programem easy_install w taki sposรณb: | |
8 | + | |
9 | +$ easy_install --install-dir=~/lib/python/ -Z django-registration | |
10 | + | |
11 | +Katalog z tymi paczkami powinien byฤ w ลcieลผce Pythona, moลผna o to zadbaฤ np. dopisujฤ c do ~/.bashrc linijkฤ | |
12 | + | |
13 | +export PYTHONPATH=$PYTHONPATH:~/lib/python/ | |
14 | + | |
15 | +UWAGA: niektรณre skrypty naleลผy na chopinie uruchamiaฤ pod Pythonem 2.7, ktรณry na chopinie nie jest domyลlnym Pythonem. Jeลli wiฤc wywoลanie jakiegoล skryptu zakoลczy siฤ bลฤdem, naleลผy sprรณbowaฤ poprzedziฤ jego wywoลanie przez 'python2.7', np.: | |
16 | + | |
17 | +zamiast: | |
18 | +$ skrypt.py [argumenty] | |
19 | +piszemy: | |
20 | +$ python2.7 skrypt.py [argumenty] | |
21 | + | |
22 | + | |
23 | +Odpalenie serwera deweloperskiego | |
24 | +--------------------------------- | |
25 | + | |
26 | +Przede wszystkim trzeba skonfigurowaฤ bazฤ. W tym celu naleลผy utworzyฤ plik o nazwie database_data.py w gลรณwnym katalogu projektu i wpisaฤ w nim parametry bazy w poniลผszym formacie: | |
27 | + | |
28 | +DATABASES = { | |
29 | + 'default': { | |
30 | + 'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'. | |
31 | + 'NAME': '', # Or path to database file if using sqlite3. | |
32 | + 'USER': '', # Not used with sqlite3. | |
33 | + 'PASSWORD': '', # Not used with sqlite3. | |
34 | + 'HOST': '', # Set to empty string for localhost. Not used with sqlite3. | |
35 | + 'PORT': '', # Set to empty string for default. Not used with sqlite3. | |
36 | + } | |
37 | +} | |
38 | + | |
39 | +Ze wzglฤdu na sortowanie a tergo, baza musi byฤ w postgresie. | |
40 | +Potrzebny jest postgres w wersji co najmniej 9.1. | |
41 | +W konfiguracji postgresa (postgresql.conf) trzeba ustawiฤ custom_variable_classes = 'var'. | |
42 | + | |
43 | +Baza oczywiลcie na poczฤ tku jest pusta. Tworzymy tabelฤ historii zmian: | |
44 | + | |
45 | +$ ./manage.py dbshell < history_wip/history_table.sql | |
46 | + | |
47 | +i pozostaลe tabele: | |
48 | + | |
49 | +$ ./manage.py syncdb | |
50 | + | |
51 | +TODO: poprawiฤ unique w odmieniasiach i zakoลczeniach | |
52 | + | |
53 | +(wszystkie polecenia majฤ byฤ wykonywane w gลรณwnym katalogu projektu, o ile nie wskazano inaczej) | |
54 | + | |
55 | +Pojawi siฤ pytanie o utworzenie superusera, nie tworzymy go. | |
56 | + | |
57 | +Nastฤpnie tworzymy standardowe klasyfikacje: | |
58 | + | |
59 | +$ ./manage.py create_classifications | |
60 | + | |
61 | +i role uลผytkownikรณw: | |
62 | + | |
63 | +$ ./manage.py create_groups | |
64 | + | |
65 | +Teraz moลผna uruchomiฤ polecenie import_data, ktรณre bierze dane z bazy sqlite3 podanej w parametrze: | |
66 | + | |
67 | +$ ./manage.py import_data baza.sqlite3 | |
68 | + | |
69 | +To trochฤ potrwa (od kilkunastu minut do pรณล godziny). | |
70 | +Teraz moลผemy utworzyฤ uลผytkownikรณw: | |
71 | + | |
72 | +$ ./manage.py create_users | |
73 | + | |
74 | +Moลผna teลผ wygenerowaฤ formy (do filtrowania wedลug form): | |
75 | + | |
76 | +$ ./manage.py create_forms | |
77 | +(trochฤ potrwa - rzฤdu kilkunastu minut, pod warunkiem, ลผe baza jest zvacuumowana) | |
78 | +(jeลli mamy zamiar importowaฤ inne dane, np. Morfologika, to moลผna odpaliฤ generowanie form po zaimportowaniu wszystkich) | |
79 | + | |
80 | +Jeลli dotฤ d wszystko siฤ udaลo, to moลผemy odpaliฤ runserver: | |
81 | + | |
82 | +$ ./manage.py runserver | |
83 | + | |
84 | +i poลฤ czyฤ siฤ z nim przez http://localhost:8000/. | |
85 | +Pojawi siฤ ekran logowania, gdzie moลผemy wpisaฤ login i identyczne z nim hasลo ktรณregoล z domyลlnie zdefiniowanych uลผytkownikรณw, np. sgjp. | |
86 | + | |
87 | + | |
88 | +Import danych z Morfologika | |
89 | +--------------------------- | |
90 | + | |
91 | +1. | |
92 | +Najpierw potrzebne sฤ pliki odm.txt z http://www.sjp.pl/slownik/odmiany/ oraz morfologik-tools-[nr wersji].jar z http://sourceforge.net/projects/morfologik/files/morfologik-stemming/ (paczka morfologik-stemming-[nr wersji].zip). Potrzebny jest teลผ program enconv z pakietu enca (powinien byฤ w repozytoriach wiฤkszoลci dystrybucji Linuksa). | |
93 | + | |
94 | +ลcieลผki do tych plikรณw wpisujemy do skrypty/prepare_odm.sh do zmiennych 'odm' i 'morfologik'. Nastฤpnie uruchamiamy ten skrypt: | |
95 | + | |
96 | +$ sh skrypty/prepare_odm.sh | |
97 | + | |
98 | +i dostajemy plik to_disamb.txt. Ten plik naleลผy przepuลciฤ przez polecenie | |
99 | + | |
100 | +$ ./manage.py disamb to_disamb.txt > disamb_result.txt 2> disamb_errors.txt | |
101 | + | |
102 | +(drugi plik nie przyda siฤ do automatycznego importu, ale bฤdzie zawieraล informacje o leksemach, ktรณre nie przeszลy dezambiguacji). Z pliku disamb_result.txt odsiewamy nastฤpnie poszczegรณlne czฤลci mowy: | |
103 | + | |
104 | +$ skrypty/filter_forms.py subst disamb_result.txt > rzeczowniki-odm.txt | |
105 | +$ skrypty/filter_forms.py adj disamb_result.txt > przymiotniki-odm.txt | |
106 | +$ skrypty/filter_forms.py v disamb_result.txt > czasowniki-odm.txt | |
107 | +$ skrypty/filter_forms.py ndm disamb_result.txt > nieodmienne-odm.txt | |
108 | + | |
109 | +2. | |
110 | +Czฤลฤ rozpoznanych przymiotnikรณw to w rzeczywistoลci nazwiska, ktรณre naleลผy rozdzieliฤ na mฤskie i ลผeลskie. Listฤ tych nazwisk powinniลmy mieฤ w pliku tekstowym, po jednym nazwisku na wiersz. Zaลรณลผmy, ลผe mamy taki plik nazwiska.txt w aktualnym katalogu. Uruchamiamy | |
111 | + | |
112 | +$ skrypty/wytnij_wiele.py nazwiska.txt < przymiotniki-odm.txt > nazwiska | |
113 | +$ skrypty/wytnij_wiele.py nazwiska.txt -r < przymiotniki-odm.txt > tmp && mv tmp przymiotniki-odm.txt | |
114 | +$ skrypty/podziel_nazwiska.sh nazwiska > nazwiska-odm.txt | |
115 | + | |
116 | +Generujemy dane do importu [uwaga: tutaj musi byฤ ustawione DEBUG = False w settings.py, inaczej zapycha siฤ pamiฤฤ]: | |
117 | + | |
118 | +$ ./manage.py import_morfologik subst rzeczowniki-odm.txt > rzeczowniki.json 2> rzeczowniki-failed.txt | |
119 | +$ ./manage.py import_morfologik subst nazwiska-odm.txt > nazwiska.json 2> nazwiska-failed.txt | |
120 | +$ ./manage.py import_morfologik adj przymiotniki-odm.txt > przymiotniki.json 2> przymiotniki-failed.txt | |
121 | +$ ./manage.py import_morfologik v czasowniki-odm.txt > czasowniki.json 2> czasowniki-failed.txt | |
122 | +$ ./manage.py import_morfologik ndm nieodmienne-odm.txt > nieodmienne.json 2> nieodmienne-failed.txt | |
123 | + | |
124 | +[UWAGA: generowanie danych dla rzeczownikรณw trwa ponad dobฤ na chopinie, dla czasownikรณw i przymiotnikรณw po parฤ godzin, dla nieodmiennych poniลผej godziny] | |
125 | + | |
126 | +Czฤลฤ nierozpoznanych przymiotnikรณw powinna byฤ tak naprawdฤ rozpoznana jako rzeczowniki. Takie rzeczowniki trafiajฤ do przymiotniki-failed.txt z odpowiednim prefiksem. Wycinamy je poleceniem: | |
127 | + | |
128 | +$ grep 'rzeczownik#' przymiotniki-failed.txt | cut -d'#' -f2 > rzeczowniki2-odm.txt | |
129 | + | |
130 | +i generujemy dane do importu: | |
131 | + | |
132 | +$ ./manage.py import_morfologik subst rzeczowniki2-odm.txt > rzeczowniki2.json 2> rzeczowniki2-failed.txt | |
133 | + | |
134 | +3. | |
135 | +Teraz moลผemy wszystko zaลadowaฤ do bazy: | |
136 | + | |
137 | +$ cat rzeczowniki.json rzeczowniki2.json nazwiska.json przymiotniki.json czasowniki.json nieodmienne.json > morfologik.json | |
138 | +$ ./manage.py load_morfologik morfologik.json 2> load_errors.txt | |
139 | + | |
140 | +To potrwa (na chopinie ponad 5 godzin). Nastฤpnie warto poprawiฤ wciฤ gniฤte dane. Kilka drobnych rzeczy poprawiamy przez | |
141 | + | |
142 | +$ ./manage.py fix_morfologik | |
143 | + | |
144 | +Nastฤpnie poprawiamy nazwiska z homonimami w SGJP bลฤdnie oznaczone jako przymiotniki: | |
145 | + | |
146 | +$ ./manage.py fix_surnames 2> surnames.log | |
147 | + | |
148 | +i przerabiamy na 'osc' rzeczowniki oznaczajฤ ce cechy, zaimportowane jako 'subst': | |
149 | + | |
150 | +$ ./manage.py fix_osc 2> osc.log | |
151 | + | |
152 | +Generowanie list do importu odsyลaczy: | |
153 | + | |
154 | +[$ python gen/adjadv.py < gen/to_disamb.txt > advadj] | |
155 | +$ grep advadj gen/disamb_errors.txt | cut -d" " -f2- > advadj2 | |
156 | +$ grep adv:comp gen/to_disamb.txt | cut -f1-2 > advcom | |
157 | +$ grep adv:neg gen/to_disamb.txt | cut -f1-2 > advnie | |
158 | +$ python skrypty/adjadv.py < gen/to_disamb.txt > /dev/null 2> advnie2 | |
159 | + | |
160 | +Importowanie odsyลaczy: | |
161 | + | |
162 | +$ ./manage.py extra_crs advcom advcom 2> advcom.log | |
163 | +$ ./manage.py extra_crs advadj advadj2 2> advadj.log | |
164 | +$ ./manage.py extra_crs advnie advnie 2> advnie.log | |
165 | +$ ./manage.py extra_crs advnie advnie2 2> advnie2.log | |
166 | +[$ ./manage.py extra_crs adjadvc advnie 2> adjadvc.log] | |
167 | + | |
168 | +TODO | |
169 | +* przymiotniki Dzierลผawcze | |
170 | +* podwรณjne dopasowania | |
171 | + | |
172 | + | |
173 | +TODO | |
174 | +* edycja tematu listu zapraszajฤ cego | |
175 | +* kompilowanie lokalizacji | |
176 | + | |
177 | +Problemy | |
178 | +-------- | |
179 | + | |
180 | +Klawisze PageUp/PageDown dziaลajฤ (aktualnie przewijajฤ na sztywno o 10, to moลผna zmieniฤ), ale przy wลฤ czonym zaznaczeniu i szybkim naciskaniu powodujฤ , ลผe czasem zaznaczenie ucieka. (okazuje siฤ, ลผe nawet caลkiem czฤsto) | |
... | ... |
__init__.py
0 โ 100644
1 | +++ a/__init__.py | |
... | ... |
accounts/__init__.py
0 โ 100644
1 | +++ a/accounts/__init__.py | |
... | ... |
accounts/ajax.py
0 โ 100644
1 | +++ a/accounts/ajax.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +from django.contrib.auth.models import User, Group | |
4 | +from common.decorators import ajax | |
5 | +from dictionary.models import Vocabulary | |
6 | + | |
@ajax(method='post')
def set_group(request, user_id, group_id, set):
    """Toggle a user's membership in a group.

    POST-only AJAX endpoint. A truthy `set` adds the user to the group,
    a falsy one removes them. Returns an empty dict (empty JSON reply).
    """
    target_user = User.objects.get(pk=user_id)
    target_group = Group.objects.get(pk=group_id)
    operation = target_user.groups.add if set else target_user.groups.remove
    operation(target_group)
    return {}
16 | + | |
@ajax(method='post')
def save_default_owner(request, vocab_id):
    """Persist the user's choice of default owner vocabulary for new lexemes."""
    settings = request.user.usersettings
    chosen = Vocabulary.objects.get(pk=vocab_id)
    settings.default_owner = chosen
    settings.save()
    return {}
... | ... |
accounts/forms.py
0 โ 100644
1 | +++ a/accounts/forms.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +import random | |
4 | +import string | |
5 | +from django.forms import * | |
6 | +from django.utils.translation import ugettext_lazy as _ | |
7 | +from django.contrib.auth.forms import PasswordResetForm | |
8 | +from django.contrib.auth.models import User | |
9 | +from accounts.models import UserSettings | |
10 | + | |
class AddUserForm(ModelForm):
    """Admin-driven user creation.

    The new account receives a random throwaway password and is
    immediately sent a password-reset e-mail (via PasswordResetForm),
    so the user picks a real password on first login.
    """

    def save(self, commit=True, request=None):
        """Create the user, send the reset e-mail, attach settings and groups.

        `request` is forwarded to PasswordResetForm.save() so the reset
        link carries the correct host/scheme. Returns the User instance
        (unsaved when commit=False), as ModelForm.save does.
        """
        user = super(AddUserForm, self).save(commit=False)
        # SECURITY FIX: use the OS CSPRNG instead of the deterministic
        # Mersenne Twister (plain random.choice) — the account is live
        # until the user completes the reset procedure, so the
        # placeholder password must be unguessable.
        rng = random.SystemRandom()
        password = ''.join(rng.choice(string.printable) for i in range(32))
        user.set_password(password)
        if commit:
            user.save()
            fakeform = PasswordResetForm(data={'email': user.email})
            if fakeform.is_valid():  # always valid: address comes from a just-saved user
                fakeform.save(request=request)
            # The settings row (FK) and group memberships (m2m) require a
            # saved user with a pk, so they only run under `commit`.
            UserSettings.objects.create(user=user)
            user.groups.add(*self.cleaned_data['groups'])
        return user

    class Meta:
        model = User
        fields = ['username', 'first_name', 'last_name', 'email', 'groups']
27 | + | |
class SettingsForm(ModelForm):
    """User-preferences form.

    When a queryset of manageable vocabularies is supplied, the
    default-owner choices are limited to it; otherwise the field is
    hidden entirely.
    """

    def __init__(self, vocabularies=None, **kwargs):
        super(SettingsForm, self).__init__(**kwargs)
        owner_field = self.fields['default_owner']
        if not vocabularies:
            owner_field.widget = HiddenInput()
        else:
            owner_field.queryset = vocabularies

    class Meta:
        model = UserSettings
        fields = ['incremental_search', 'filter_search', 'default_owner']
... | ... |
accounts/models.py
0 โ 100644
1 | +++ a/accounts/models.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +from django.db.models import * | |
4 | +from django.contrib.auth.models import User, Group | |
5 | +from dictionary.models import Vocabulary | |
6 | + | |
# Role names whose members take part in vocabulary management.
MANAGER_GROUPS = ('Obserwator', 'Leksykograf', 'Superleksykograf')


def manager_groups():
    """Return the Group queryset for the vocabulary-management roles."""
    wanted_names = list(MANAGER_GROUPS)
    return Group.objects.filter(name__in=wanted_names)
11 | + | |
class UserSettings(Model):
    """Per-user preferences plus permission-check conveniences.

    One row per auth User; created together with the account in
    AddUserForm.save. The `views_*`/`changes_*`/`manages_*` helpers just
    proxy `user.has_perm` for use in templates/views.
    """

    # Owning account; exactly one settings row per user.
    user = OneToOneField(User)
    # Search-as-you-type toggle (verbose_name/help_text are Polish UI copy;
    # the text below is mojibaked in this dump — kept byte-identical).
    incremental_search = BooleanField(
        default=True,
        verbose_name=u'wyszukiwanie przyrostowe',
        help_text=u'Wyszukiwanie odbywa siฤ automatycznie w miarฤ wpisywania '
        u'szukanego hasลa. Sugerujemy wyลฤ czenie w wypadku wolnego '
        u'poลฤ czenia internetowego.')
    # When on, searching narrows the headword list instead of jumping to hits.
    filter_search = BooleanField(
        default=True,
        verbose_name=u'wyszukiwanie przez filtrowanie',
        help_text=u'Wyszukiwanie powoduje zawฤลผenie listy haseล do pasujฤ cych '
        u'do zapytania.')
    # Vocabulary preselected as the owner of newly added lexemes; optional.
    default_owner = ForeignKey(
        Vocabulary, blank=True, null=True,
        verbose_name=u'domyลlny sลownik wลaลciciel dodawanych leksemรณw')

    def views_lexeme(self):
        """True if the user may view lexemes."""
        return self.user.has_perm('dictionary.view_lexeme')

    def views_all_lexemes(self):
        """True if the user may view every lexeme (not just their own)."""
        return self.user.has_perm('dictionary.view_all_lexemes')

    def changes_lexeme(self):
        """True if the user may edit lexemes."""
        return self.user.has_perm('dictionary.change_lexeme')

    def manages_vocabulary(self):
        """True if the user may manage vocabularies."""
        return self.user.has_perm('dictionary.manage_vocabulary')

    class Meta:
        # Custom permission: may grant arbitrary roles (checked in
        # accounts.views.register). Description string is Polish UI copy.
        permissions = (
            ('create_admin', u'Moลผe nadawaฤ dowolne role'),
        )
45 | + | |
def filtering_mode(user):
    """Return True when searches should narrow (filter) the headword list."""
    prefs = user.usersettings
    return prefs.filter_search
... | ... |
accounts/templates/manage_groups.html
0 โ 100644
1 | +++ a/accounts/templates/manage_groups.html | |
{# Admin screen: users-by-roles matrix, one checkbox per (user, group). #}
{# The checkbox id encodes both pks; manage-groups.js uses it to call   #}
{# the set_group AJAX endpoint (accounts/ajax.py).                      #}
{% extends "base.html" %}
{% load ingroup %}

{% block extrahead %}
    <script type="text/javascript" src="{{ MEDIA_URL }}js/manage-groups.js"></script>
{% endblock %}

{% block title %}Role uลผytkownikรณw{% endblock %}

{% block content %}
    <h3>Role uลผytkownikรณw</h3>
    <table id="user-groups">
        <tr>
            <th>nazwa</th>
            {% for group in groups %}
                <th>{{ group.name }}</th>
            {% endfor %}
        </tr>
        {% for u in users %}
            <tr>
                <td>{{ u.username }}</td>
                {% for group in groups %}
                    <td>
                        <input
                            type="checkbox"
                            id="group-{{ group.pk }}-{{ u.pk }}"
                            {% if u|ingroup:group %}checked="checked"{% endif %}/>
                    </td>
                {% endfor %}
            </tr>
        {% endfor %}
    </table>
{% endblock %}
... | ... |
accounts/templates/registration/activate.html
0 โ 100644
1 | +++ a/accounts/templates/registration/activate.html | |
{# Shown after a successful account activation. #}
{% extends "base.html" %}
{% load i18n %}
{% load url from future %}

{% block content %}

{# BUGFIX: the url tag is not allowed inside blocktrans (raises          #}
{# TemplateSyntaxError); resolve it into a variable first and            #}
{# interpolate that inside the translated block.                         #}
{% url 'main' as main_url %}
{% blocktrans %}
<h1>Activation</h1>
<p>Your account is now activated. Go <a href="{{ main_url }}">here</a> to continue.</p>

{% endblocktrans %}

{% endblock %}
... | ... |
accounts/templates/registration/activation_email.txt
0 โ 100644
accounts/templates/registration/activation_email_subject.txt
0 โ 100644
accounts/templates/registration/login.html
0 โ 100644
1 | +++ a/accounts/templates/registration/login.html | |
{# Login screen, adapted from the Django admin login template.           #}
{# `this_is_the_login_form` is the admin AuthenticationForm marker       #}
{# field; its errors are folded into the non-field errors below.         #}
{% extends "base.html" %}
{% load i18n %}
{% load url from future %}

{% block content %}
{% if form.errors and not form.non_field_errors and not form.this_is_the_login_form.errors %}
<p class="errornote">
{% blocktrans count form.errors.items|length as counter %}Please correct the error below.{% plural %}Please correct the errors below.{% endblocktrans %}
</p>
{% endif %}

{% if form.non_field_errors or form.this_is_the_login_form.errors %}
{% for error in form.non_field_errors|add:form.this_is_the_login_form.errors %}
<p class="errornote">
    {{ error }}
</p>
{% endfor %}
{% endif %}

<div id="content-main">
<form action="" method="post" id="login-form">{% csrf_token %}
    {{ form.as_p }}
    <div class="submit-row">
        <label> </label><input type="submit" value="{% trans 'Log in' %}" />
    </div>
</form>

<a href="{% url 'auth_password_reset' %}">Nie pamiฤtam hasลa</a>

<script type="text/javascript">
document.getElementById('id_username').focus()
</script>
</div>
{% endblock %}
... | ... |
accounts/templates/registration/logout.html
0 โ 100644
1 | +++ a/accounts/templates/registration/logout.html | |
{# Post-logout page with a link back to the login view. #}
{% extends "base.html" %}
{% load i18n %}
{% load url from future %}

{% block content %}

<p>{% trans "Thanks for spending some quality time with the Web site today." %}</p>

<p><a href="{% url 'auth_login' %}">{% trans 'Log in again' %}</a></p>

{% endblock %}
... | ... |
accounts/templates/registration/password_change_done.html
0 โ 100644
1 | +++ a/accounts/templates/registration/password_change_done.html | |
{# Confirmation page after a successful password change. #}
{% extends "base.html" %}
{% load i18n %}
{% load url from future %}

{# BUGFIX: the logout link used variable syntax (double braces) around   #}
{# the url tag, which raises TemplateSyntaxError at render time; use the #}
{# tag form, as password_change_form.html already does.                  #}
{% block userlinks %}{% trans 'Change password' %} / <a href="{% url 'auth_logout' %}">{% trans 'Log out' %}</a>{% endblock %}

{% block title %}{% trans 'Password change successful' %}{% endblock %}

{% block content %}

<h1>{% trans 'Password change successful' %}</h1>

<p>{% trans 'Your password was changed.' %}</p>

{% endblock %}
... | ... |
accounts/templates/registration/password_change_form.html
0 โ 100644
1 | +++ a/accounts/templates/registration/password_change_form.html | |
{# Authenticated password-change form (old password + new one twice),   #}
{# adapted from the Django admin template.                              #}
{% extends "base.html" %}
{% load i18n %}
{% load url from future %}
{% block userlinks %}{% trans 'Change password' %} / <a href="{% url 'auth_logout' %}">{% trans 'Log out' %}</a>{% endblock %}

{% block title %}{% trans 'Password change' %}{% endblock %}

{% block content %}<div id="content-main">

<form action="" method="post">{% csrf_token %}
<div>
{% if form.errors %}
    <p class="errornote">
    {% blocktrans count form.errors.items|length as counter %}Please correct the error below.{% plural %}Please correct the errors below.{% endblocktrans %}
    </p>
{% endif %}

<h1>{% trans 'Password change' %}</h1>

<p>{% trans "Please enter your old password, for security's sake, and then enter your new password twice so we can verify you typed it in correctly." %}</p>

<fieldset class="module aligned wide">

<div class="form-row">
    {{ form.old_password.errors }}
    <label for="id_old_password" class="required">{% trans 'Old password' %}:</label>{{ form.old_password }}
</div>

<div class="form-row">
    {{ form.new_password1.errors }}
    <label for="id_new_password1" class="required">{% trans 'New password' %}:</label>{{ form.new_password1 }}
</div>

<div class="form-row">
{{ form.new_password2.errors }}
    <label for="id_new_password2" class="required">{% trans 'Password (again)' %}:</label>{{ form.new_password2 }}
</div>

</fieldset>

<div class="submit-row">
    <input type="submit" value="{% trans 'Change my password' %}" class="default" />
</div>

<script type="text/javascript">document.getElementById("id_old_password").focus();</script>
</div>
</form></div>

{% endblock %}
... | ... |
accounts/templates/registration/password_reset_complete.html
0 โ 100644
1 | +++ a/accounts/templates/registration/password_reset_complete.html | |
{# Final step of the reset flow: password saved, link to login.         #}
{# login_url is provided by django.contrib.auth's reset-complete view.  #}
{% extends "base.html" %}
{% load i18n %}

{% block title %}{% trans 'Password reset complete' %}{% endblock %}

{% block content %}

<h1>{% trans 'Password reset complete' %}</h1>

<p>{% trans "Your password has been set. You may go ahead and log in now." %}</p>

<p><a href="{{ login_url }}">{% trans 'Log in' %}</a></p>

{% endblock %}
... | ... |
accounts/templates/registration/password_reset_confirm.html
0 โ 100644
1 | +++ a/accounts/templates/registration/password_reset_confirm.html | |
{# Reset-link landing page: new-password form when the token checks out, #}
{# otherwise an invalid-link notice. `validlink` comes from the auth     #}
{# password_reset_confirm view.                                          #}
{% extends "base.html" %}
{% load i18n %}

{% block title %}{% trans 'Password reset' %}{% endblock %}

{% block content %}

{% if validlink %}

<h1>{% trans 'Enter new password' %}</h1>

<p>{% trans "Please enter your new password twice so we can verify you typed it in correctly." %}</p>

<form action="" method="post">{% csrf_token %}
{{ form.new_password1.errors }}
<p class="aligned wide"><label for="id_new_password1">{% trans 'New password:' %}</label>{{ form.new_password1 }}</p>
{{ form.new_password2.errors }}
<p class="aligned wide"><label for="id_new_password2">{% trans 'Confirm password:' %}</label>{{ form.new_password2 }}</p>
<p><input type="submit" value="{% trans 'Change my password' %}" /></p>
</form>

{% else %}

<h1>{% trans 'Password reset unsuccessful' %}</h1>

<p>{% trans "The password reset link was invalid, possibly because it has already been used. Please request a new password reset." %}</p>

{% endif %}

{% endblock %}
... | ... |
accounts/templates/registration/password_reset_done.html
0 โ 100644
1 | +++ a/accounts/templates/registration/password_reset_done.html | |
{# Shown right after a reset request: tells the user to check e-mail. #}
{% extends "base.html" %}
{% load i18n %}

{% block title %}{% trans 'Password reset successful' %}{% endblock %}

{% block content %}

<h1>{% trans 'Password reset successful' %}</h1>

<p>{% trans "We've e-mailed you instructions for setting your password to the e-mail address you submitted. You should be receiving it shortly." %}</p>

{% endblock %}
... | ... |
accounts/templates/registration/password_reset_email.html
0 โ 100644
1 | +++ a/accounts/templates/registration/password_reset_email.html | |
{% load i18n %}{% load url from future %}{% autoescape off %}{# Plain-text body of the password-(re)set e-mail; doubles as the welcome message for accounts created via AddUserForm, hence the wording. Comment kept inline so the rendered mail gains no blank line. Polish text below is mojibaked in this dump — kept byte-identical. #}

Szanowny Uลผytkowniku!

Ten list jest czฤลciฤ procedury ustanawiania lub zmiany hasลa w systemie Kuลบnia.
Jeลผeli fakt, ลผe masz konto w systemie Kuลบnia jest dla Ciebie
zaskoczeniem, zechciej ten list uznaฤ za doniesienie, ลผe wลaลnie
zaลoลผyliลmy Ci konto.

Aby wprowadziฤ nowe hasลo, przejdลบ na nastฤpujฤ cฤ stronฤ:
{% block reset_link %}
{{ protocol }}://{{ domain }}{% url 'django.contrib.auth.views.password_reset_confirm' uidb36=uid token=token %}
{% endblock %}

Twoja nazwa uลผytkownika: {{ user.username }}

Z wyrazami szacunku
Zespรณล Kuลบni
{% endautoescape %}
... | ... |
accounts/templates/registration/password_reset_form.html
0 โ 100644
1 | +++ a/accounts/templates/registration/password_reset_form.html | |
{# Entry point of the reset flow: asks for the account's e-mail address. #}
{% extends "base.html" %}
{% load i18n %}

{% block title %}{% trans "Password reset" %}{% endblock %}

{% block content %}

<h1>{% trans "Password reset" %}</h1>

<p>{% trans "Forgotten your password? Enter your e-mail address below, and we'll e-mail instructions for setting a new one." %}</p>

<form action="" method="post">{% csrf_token %}
{{ form.email.errors }}
<p><label for="id_email">{% trans 'E-mail address:' %}</label> {{ form.email }} <input type="submit" value="{% trans 'Reset my password' %}" /></p>
</form>

{% endblock %}
... | ... |
accounts/templates/registration/registration_complete.html
0 โ 100644
1 | +++ a/accounts/templates/registration/registration_complete.html | |
{# Confirmation after user creation: the invitation/reset e-mail is out. #}
{# The blocktrans here contains only literal text/HTML, which is valid.  #}
{% extends "base.html" %}
{% load i18n %}

{% block content %}

{% blocktrans %}
<h1>Thank you</h1>
<p>An email has been sent to you. You need to click link in it to activate your account.</p>
{% endblocktrans %}

{% endblock %}
... | ... |
accounts/templates/registration/registration_form.html
0 โ 100644
1 | +++ a/accounts/templates/registration/registration_form.html | |
1 | +{% extends "base.html" %} | |
2 | +{% load i18n %} | |
3 | +{% load url from future %} | |
4 | + | |
5 | +{% block extrahead %} | |
6 | + <style> | |
7 | + label { | |
8 | + vertical-align: top; | |
9 | + width: 100px; | |
10 | + display: inline-block; | |
11 | + } | |
12 | + </style> | |
13 | +{% endblock %} | |
14 | + | |
15 | +{% block title %}{% trans 'Registration' %}{% endblock %} | |
16 | + | |
17 | +{% block content %}<div id="content-main"> | |
18 | + | |
19 | +<form action="" method="post">{% csrf_token %} | |
20 | +<div> | |
21 | +{% if form.errors %} | |
22 | + <p class="errornote"> | |
23 | + {% blocktrans count form.errors.items|length as counter %}Please correct the error below.{% plural %}Please correct the errors below.{% endblocktrans %} | |
24 | + </p> | |
25 | +{% endif %} | |
26 | + | |
27 | +<h1>{% trans 'Registration' %}</h1> | |
28 | + | |
29 | +{{ form.as_p }} | |
30 | + | |
31 | +<div class="submit-row"> | |
32 | + <input type="submit" value="{% trans 'Add user' %}" class="default" /> | |
33 | +</div> | |
34 | + | |
35 | +</div> | |
36 | +</form></div> | |
37 | + | |
38 | +{% endblock %} | |
... | ... |
accounts/templates/settings.html
0 โ 100644
1 | +++ a/accounts/templates/settings.html | |
{# User-preferences page rendered with accounts.forms.SettingsForm; #}
{# `next` carries the URL to return to after saving.                #}
{% extends "base.html" %}
{% load url from future %}

{% block title %}Ustawienia{% endblock %}

{% block content %}
<h1>Ustawienia uลผytkownika</h1>
<form method="post" action="">
    {{ form.as_p }}
    {% csrf_token %}
    <input type="hidden" name="next" value="{{ next }}"/>
    <button type="submit">
        Zapisz
    </button>
</form>
<p>
    <a href="{% url 'auth_password_change' %}">Zmiana hasลa</a>
</p>
{% endblock %}
... | ... |
accounts/tests.py
0 โ 100644
1 | +++ a/accounts/tests.py | |
1 | +""" | |
2 | +This file demonstrates writing tests using the unittest module. These will pass | |
3 | +when you run "manage.py test". | |
4 | + | |
5 | +Replace this with more appropriate tests for your application. | |
6 | +""" | |
7 | + | |
8 | +from django.test import TestCase | |
9 | + | |
10 | + | |
class SimpleTest(TestCase):
    """Placeholder test case from the default Django app template."""

    def test_basic_addition(self):
        """Sanity check that the test runner itself works: 1 + 1 == 2."""
        expected = 2
        self.assertEqual(1 + 1, expected)
... | ... |
accounts/util.py
0 โ 100644
1 | +++ a/accounts/util.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +from django.db import connection | |
4 | +from django.contrib.auth.models import User | |
5 | + | |
def set_history(user):
    """Attribute subsequent DB changes in this connection to *user*.

    Sets the PostgreSQL session variable 'var.user_id' (is_local=false,
    so it persists for the whole session).  Presumably the history/audit
    triggers read this setting -- confirm against the trigger definitions.
    """
    cursor = connection.cursor()
    cursor.execute("SELECT set_config('var.user_id', %s, false)", [str(user.id)])
9 | + | |
def bot_history():
    """Attribute subsequent DB changes to the bot account (see set_history)."""
    set_history(User.objects.get(username=u'Kuลบniobot'))
... | ... |
accounts/views.py
0 โ 100644
1 | +++ a/accounts/views.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +from django.core.urlresolvers import reverse | |
4 | +from django.http import HttpResponseRedirect | |
5 | +from django.contrib.auth.decorators import permission_required, login_required | |
6 | +from django.contrib.auth.models import User, Group | |
7 | +from common.decorators import render | |
8 | +from accounts.forms import AddUserForm, SettingsForm | |
9 | +from accounts.models import UserSettings, manager_groups | |
10 | + | |
@permission_required('auth.add_user')
@render('registration/registration_form.html')
def register(request):
    """Create a new user account (staff-only view).

    Users lacking 'accounts.create_admin' may only assign manager groups.
    Fixes vs. the original: the group-choice restriction is now applied to
    the bound (POST) form as well, so an invalid submission re-renders with
    the same restricted choices the user saw (and, for a plain ChoiceField,
    a forged privileged group is rejected by validation -- TODO confirm the
    field type in AddUserForm); the choices are a list, not a one-shot
    generator, so they survive repeated iteration.
    """
    if request.method == 'POST':
        form = AddUserForm(data=request.POST)
    else:
        form = AddUserForm()
    if not request.user.has_perm('accounts.create_admin'):
        # Restrict assignable groups for non-admin staff, on GET and POST alike.
        form.fields['groups'].choices = [
            (g.pk, g.name) for g in manager_groups()]
    if request.method == 'POST' and form.is_valid():
        form.save(request=request)
        return HttpResponseRedirect(reverse('registration_complete'))
    return {'form': form}
25 | + | |
@login_required
@render()
def settings(request):
    """Edit the current user's settings.

    GET renders a SettingsForm bound to the user's (possibly freshly
    created) UserSettings row; POST saves it and redirects to the 'next'
    value submitted with the form.

    Fixes vs. the original: the bound form now receives the same
    ``vocabularies`` keyword as the unbound one (previously only the GET
    branch passed it, so the POSTed form was built with a different field
    set); a missing 'next' no longer raises KeyError but falls back to '/'.
    """
    user_settings, created = UserSettings.objects.get_or_create(user=request.user)
    editable = request.user.editable_vocabularies.all()
    if request.method == 'POST':
        form = SettingsForm(
            data=request.POST,
            instance=user_settings,
            vocabularies=editable)
        if form.is_valid():
            form.save()
            # NOTE(review): 'next' is user-supplied -- an open-redirect risk
            # if this application ever faces untrusted users.
            return HttpResponseRedirect(request.POST.get('next') or '/')
    else:
        form = SettingsForm(
            instance=user_settings,
            vocabularies=editable)
    return {'form': form, 'next': request.META.get('HTTP_REFERER', '')}
40 | + | |
@permission_required('auth.add_user')
@render()
def manage_groups(request):
    """Render the user/group management page.

    Admins (holders of 'accounts.create_admin') see every group; other
    staff only see the manager groups.  Superusers are never listed.
    """
    if request.user.has_perm('accounts.create_admin'):
        available_groups = Group.objects.all()
    else:
        available_groups = manager_groups()
    return {
        'users': User.objects.filter(is_superuser=False),
        'groups': available_groups,
        'js_vars': {'ajax_set_group': reverse('set_group')},
    }
... | ... |
common/__init__.py
0 โ 100644
1 | +++ a/common/__init__.py | |
... | ... |
common/context_processors.py
0 โ 100644
1 | +++ a/common/context_processors.py | |
1 | +# -*- coding: utf-8 -*- | |
2 | + | |
def error_proc(request):
    """Context processor exposing (and clearing) a one-shot session error."""
    if not request.session.get('error', ''):
        return {}
    return {'error': request.session.pop('error')}
9 | + | |
def message_proc(request):
    """Context processor exposing (and clearing) a one-shot session message."""
    if not request.session.get('message', ''):
        return {}
    return {'alert_message': request.session.pop('message')}
... | ... |
common/decorators.py
0 โ 100644
1 | +++ a/common/decorators.py | |
1 | +# -*- coding: utf-8 -*- | |
2 | + | |
3 | +from functools import wraps | |
4 | +from inspect import getargspec | |
5 | +from django.http import HttpResponse | |
6 | +from django.utils.simplejson import loads as json_decode, dumps as json_encode | |
7 | +from django.utils.functional import curry | |
8 | +from django.conf import settings | |
9 | +from django.shortcuts import render_to_response | |
10 | +from django.template import RequestContext, generic_tag_compiler, Node | |
11 | +from django.template import TemplateSyntaxError, Variable | |
12 | +from django.db import transaction | |
13 | +from common.util import stringify_keys | |
14 | + | |
class AjaxError(Exception):
    """Raised inside an @ajax view to abort with an error result.

    The first positional argument becomes the 'result' field of the JSON
    response (see the ajax decorator below).
    """
    pass
17 | + | |
def json_decode_fallback(value):
    """Decode *value* as JSON, returning it unchanged if it is not valid JSON."""
    try:
        return json_decode(value)
    except ValueError:
        return value
23 | + | |
def ajax(login_required=True, method=None, encode_result=True):
    """Decorator turning a plain function into a JSON AJAX view.

    Parameters from request.GET/POST (per *method*) are JSON-decoded when
    possible (falling back to the raw string) and passed as keyword
    arguments, filtered to the wrapped function's signature unless it
    accepts **kwargs.  If *login_required* and the user is anonymous, the
    view is skipped and {'result': 'logout'} is returned.  An AjaxError
    rolls back the transaction and becomes {'result': <message>}.  With
    *encode_result*, the returned dict is serialized to a JSON response
    (adding 'result': 'ok' when absent); otherwise it passes through.

    NOTE(review): with encode_result=True the wrapped function must
    return a dict -- a None return would crash on the membership test.
    """
    def decorator(fun):
        @wraps(fun)
        def ajax_view(request):
            kwargs = {}
            request_params = None
            if method == 'post':
                request_params = request.POST
            elif method == 'get':
                request_params = request.GET
            # Positional parameter names and whether fun accepts **kwargs.
            fun_params, xx, fun_kwargs, xxxx = getargspec(fun)
            if request_params:
                # Drop parameters the function cannot accept (unless **kwargs).
                request_params = dict((key, json_decode_fallback(value))
                    for key, value in request_params.iteritems()
                    if fun_kwargs or key in fun_params)
                # Keyword names must be byte strings on Python 2.
                kwargs.update(stringify_keys(request_params))
            res = None
            if login_required and not request.user.is_authenticated():
                res = {'result': 'logout'}
            if not res:
                try:
                    res = fun(request, **kwargs)
                except AjaxError as e:
                    transaction.rollback()
                    res = {'result': e.args[0]}
            if encode_result:
                if 'result' not in res:
                    res['result'] = 'ok'
                return HttpResponse(json_encode(res), mimetype='application/json')
            else:
                return res
        return ajax_view
    return decorator
57 | + | |
def render(template=None, mimetype=None):
    """Decorator rendering a view's returned dict through a template.

    If the wrapped view returns a dict, it is rendered with RequestContext
    into *template* (defaulting to '<view name>.html').  If it returns a
    (context_dict, template_name) pair, that template is used instead.
    Any other return value (e.g. an HttpResponse) passes through untouched.
    """
    mimetype = mimetype or settings.DEFAULT_CONTENT_TYPE
    template1 = template
    def decorator(func):
        # Oh well... Python 2 has no 'nonlocal', hence this rebinding dance.
        template = template1
        if not template:
            template = func.__name__ + '.html'
        @wraps(func)
        def renderer(request, *args, **kw):
            output = func(request, *args, **kw)
            if isinstance(output, (list, tuple)):
                # (context_dict, template_name) pair: explicit template wins.
                return render_to_response(output[1], output[0],
                    RequestContext(request), mimetype=mimetype)
            elif isinstance(output, dict):
                return render_to_response(template, output,
                    RequestContext(request), mimetype=mimetype)
            return output
        return renderer
    return decorator
77 | + | |
def simple_tag(register, takes_context=False):
    """Register *func* as a simple template tag, optionally context-aware.

    Works like django.template.Library.simple_tag but supports a
    takes_context flag: the wrapped function's first parameter must then
    be 'context' and receives the template context at render time.

    Fix vs. the original: SimpleNode.render wrapped the call in a
    try/except that immediately re-raised, followed by an unreachable
    'return <exception text>' line (leftover debugging); the dead code is
    removed -- exceptions propagate exactly as before.
    """
    def decorator(func):
        params, xx, xxx, defaults = getargspec(func)
        if takes_context:
            if params[0] == 'context':
                params = params[1:]
            else:
                raise TemplateSyntaxError("Any tag function decorated with takes_context=True must have a first argument of 'context'")

        class SimpleNode(Node):
            def __init__(self, vars_to_resolve):
                self.vars_to_resolve = map(Variable, vars_to_resolve)

            def render(self, context):
                resolved_vars = [var.resolve(context) for var in self.vars_to_resolve]
                if takes_context:
                    args = [context] + resolved_vars
                else:
                    args = resolved_vars
                return func(*args)

        compile_func = curry(generic_tag_compiler, params, defaults, getattr(func, "_decorated_function", func).__name__, SimpleNode)
        compile_func.__doc__ = func.__doc__
        register.tag(getattr(func, "_decorated_function", func).__name__, compile_func)
        return func
    return decorator
... | ... |
common/forms.py
0 โ 100644
1 | +++ a/common/forms.py | |
1 | +# -*- coding: utf-8 -*- | |
2 | + | |
3 | +from django import forms | |
4 | +from django.utils.encoding import force_unicode | |
5 | + | |
def hidden_id(id):
    """Return a CharField pre-filled with *id*, rendered as a hidden input."""
    return forms.CharField(initial=id, widget=forms.HiddenInput())
8 | + | |
# http://www.djangosnippets.org/snippets/863/ with local modifications
class ChoiceWithOtherRenderer(forms.RadioSelect.renderer):
    """RadioFieldRenderer that renders its last choice with a placeholder."""
    def __init__(self, *args, **kwargs):
        super(ChoiceWithOtherRenderer, self).__init__(*args, **kwargs)
        # Split off the final choice -- it becomes the "other" radio button.
        self.choices, self.other = self.choices[:-1], self.choices[-1]

    def __iter__(self):
        for input in super(ChoiceWithOtherRenderer, self).__iter__():
            yield input
        # Render the "other" choice by hand, leaving a literal %s placeholder
        # that ChoiceWithOtherWidget.format_output fills with the text input.
        id = '%s_%s' % (self.attrs['id'], self.other[0]) if 'id' in self.attrs else ''
        label_for = ' for="%s"' % id if id else ''
        checked = '' if not force_unicode(self.other[0]) == self.value else 'checked="true" '
        yield '<label%s><input type="radio" id="%s" value="%s" name="%s" %s/> %s</label> %%s' % (
            label_for, id, self.other[0], self.name, checked, self.other[1])
24 | + | |
class ChoiceWithOtherWidget(forms.MultiWidget):
    """MultiWidget for use with ChoiceWithOtherField."""
    def __init__(self, choices):
        # Radio group (with the "other" placeholder) plus the free-text input.
        widgets = [
            forms.RadioSelect(choices=choices, renderer=ChoiceWithOtherRenderer),
            forms.TextInput
        ]
        super(ChoiceWithOtherWidget, self).__init__(widgets)

    def decompress(self, value):
        """Split a compressed value into [choice, other_text] for the sub-widgets."""
        if not value:
            return [None, None]
        return value

    def format_output(self, rendered_widgets):
        """Format the output by substituting the "other" choice into the first widget."""
        return rendered_widgets[0] % rendered_widgets[1]
42 | + | |
class ChoiceWithOtherField(forms.MultiValueField):
    """
    ChoiceField with an option for a user-submitted "other" value.
    """
    def __init__(self, *args, **kwargs):
        fields = [
            forms.ChoiceField(widget=forms.RadioSelect(renderer=ChoiceWithOtherRenderer), *args, **kwargs),
            forms.CharField(required=False)
        ]
        widget = ChoiceWithOtherWidget(choices=kwargs['choices'])
        kwargs.pop('choices')
        # Requiredness is enforced in compress(); the sub-fields themselves
        # must not each demand a value.
        self._was_required = kwargs.pop('required', True)
        kwargs['required'] = False
        super(ChoiceWithOtherField, self).__init__(widget=widget, fields=fields, *args, **kwargs)

    def compress(self, value):
        """Collapse [choice, other_text] into (choice, display_value).

        If the selected choice is the last one ("other"), the free-text
        value is used; otherwise the label of the selected choice
        (looked up by its 1-based numeric value -- NOTE(review): this
        assumes choice values are 1-based consecutive integers; confirm
        against the choice lists used with this field).
        """
        if self._was_required and (not value or value[0] in (None, '')):
            raise forms.ValidationError(self.error_messages['required'])
        if not value:
            return [None, u'']
        return (value[0], value[1]
            if force_unicode(value[0]) == force_unicode(self.fields[0].choices[-1][0])
            else self.fields[0].choices[int(value[0]) - 1][1])
0 | 66 | \ No newline at end of file |
... | ... |
common/middleware.py
0 โ 100644
1 | +++ a/common/middleware.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +from django.db import connection | |
4 | + | |
class MyMiddleware(object):
    """Stamp each authenticated request's DB session with the user id.

    Presumably the audit/history triggers read the 'var.user_id' setting
    to attribute changes (cf. accounts.util.set_history); here
    is_local=true scopes it to the current transaction.
    """
    def process_request(self, request):
        if request.user.is_authenticated():
            cursor = connection.cursor()
            cursor.execute("SELECT set_config('var.user_id', %s::text, true)", [request.user.pk])
... | ... |
common/templates/error.html
0 โ 100644
common/templates/message.html
0 โ 100644
common/templates/navigation.html
0 โ 100644
common/templatetags/__init__.py
0 โ 100644
1 | +++ a/common/templatetags/__init__.py | |
... | ... |
common/templatetags/format_date.py
0 โ 100644
1 | +++ a/common/templatetags/format_date.py | |
1 | +# -*- coding: utf-8 -*- | |
2 | + | |
3 | +from django.template import Library | |
4 | + | |
5 | +register = Library() | |
6 | + | |
@register.filter
def format_date(date):
    """Template filter: format a datetime as 'DD.MM.YYYY HH:MM'."""
    pattern = '%d.%m.%Y %H:%M'
    return date.strftime(pattern)
10 | + | |
@register.filter
def format_date_exact(date):
    """Template filter: format a datetime with seconds and microseconds."""
    pattern = '%d.%m.%Y %H:%M:%S.%f'
    return date.strftime(pattern)
... | ... |
common/templatetags/get.py
0 โ 100644
common/templatetags/ingroup.py
0 โ 100644
common/templatetags/json.py
0 โ 100644
1 | +++ a/common/templatetags/json.py | |
1 | +# -*- coding: utf-8 -*- | |
2 | + | |
3 | +from django.core.serializers import serialize | |
4 | +from django.db.models.query import QuerySet | |
5 | +from django.utils import simplejson | |
6 | +from django.utils.safestring import mark_safe | |
7 | +from django.template import Library | |
8 | + | |
9 | +register = Library() | |
10 | + | |
@register.filter
def jsonify(value):
    """Template filter: serialize *value* to JSON for embedding in a page.

    QuerySets go through Django's JSON serializer; anything else through
    simplejson.  Fixes vs. the original: the parameter no longer shadows
    the builtin ``object`` (filters receive their argument positionally,
    so callers are unaffected), and the QuerySet branch is also wrapped in
    mark_safe so autoescaping does not mangle the serialized JSON (the
    other branch already was).
    """
    if isinstance(value, QuerySet):
        return mark_safe(serialize('json', value))
    return mark_safe(simplejson.dumps(value))
... | ... |
common/templatetags/script.py
0 โ 100644
1 | +++ a/common/templatetags/script.py | |
1 | +# -*- coding: utf-8 -*- | |
2 | +from django import template | |
3 | + | |
4 | +register = template.Library() | |
5 | + | |
@register.simple_tag
def script():
    """Open an inline-JS block wrapped in a CDATA section (XHTML-safe)."""
    opening = '<script type="text/javascript"> /* <![CDATA[ */'
    return opening
9 | + | |
@register.simple_tag
def endscript():
    """Close the CDATA section and script tag opened by {% script %}."""
    closing = '/* ]]> */ </script>'
    return closing
... | ... |
common/util.py
0 โ 100644
1 | +++ a/common/util.py | |
1 | +# -*- coding: utf-8 -*- | |
2 | + | |
3 | +import re | |
4 | +import sys | |
5 | +from htmlentitydefs import name2codepoint | |
6 | +from django.utils.encoding import smart_unicode, force_unicode | |
7 | +from django.conf.urls import defaults | |
8 | +from django.http import HttpResponseRedirect, Http404 | |
9 | + | |
10 | + | |
def debug(entry, text):
    """Print '<entry>: <text>' to stderr, UTF-8 encoded (Python 2 print syntax)."""
    print>>sys.stderr, (u'%s: %s' % (entry, text)).encode('utf-8')
13 | + | |
def error_redirect(request, error, url='/'):
    """Stash *error* for one-shot display (see error_proc) and redirect to *url*."""
    session = request.session
    session['error'] = error
    return HttpResponseRedirect(url)
17 | + | |
def message_redirect(request, message, url='/'):
    """Stash *message* for one-shot display (see message_proc) and redirect to *url*."""
    session = request.session
    session['message'] = message
    return HttpResponseRedirect(url)
21 | + | |
def make_form(request, form_class, **kwargs):
    """Return a bound *form_class* if this POST targeted it, else an unbound one.

    The hidden 'det' field identifies which of several forms on a page was
    submitted; it is matched against the class's declared initial value.
    """
    if request.POST.get('det', '') == form_class.base_fields['det'].initial:
        return form_class(data=request.POST, files=request.FILES, **kwargs)
    else:
        return form_class(**kwargs)
27 | + | |
def invert(l):
    """Map each element of *l* to its index, e.g. ['a', 'b'] -> {'a': 0, 'b': 1}.

    Duplicate elements keep the index of their last occurrence.
    """
    result = {}
    for index, element in enumerate(l):
        result[element] = index
    return result
30 | + | |
def generator_slice(generator, count):
    """Return a list of up to *count* items taken from iterator *generator*.

    Stops early (without error) when the iterator is exhausted; a
    non-positive *count* yields an empty list.

    Fix vs. the original: uses the builtin next() (Python 2.6+) instead
    of the .next() method, so it also works with arbitrary iterators and
    on Python 3.
    """
    res = []
    try:
        for i in range(count):
            res.append(next(generator))
    except StopIteration:
        pass
    return res
39 | + | |
def url(regex, view, **kwargs):
    """Like django.conf.urls.defaults.url, defaulting the URL name to the view's dotted name."""
    if 'name' not in kwargs:
        kwargs['name'] = view
    return defaults.url(regex, view, **kwargs)
44 | + | |
def stringify_keys(dictionary):
    """Return a copy of *dictionary* with keys coerced to ASCII byte strings.

    Needed on Python 2 so the dict can be splatted as **kwargs (keyword
    names must be str, not unicode).
    """
    return dict((keyword.encode('ascii'), value)
        for keyword, value in dictionary.iteritems())
48 | + | |
# copied from the standard bisect module, extended with a cmp argument
def bisect_left(a, x, lo=0, hi=None, cmp=None):
    """Leftmost insertion point for *x* in the sorted sequence *a*.

    *cmp* is an optional three-way comparator (negative / zero / positive,
    like Python 2's cmp) defining the sort order of *a*.  Raises
    ValueError for a negative *lo*.

    Fix vs. the original: the default comparator was ``__builtins__.cmp``,
    which only works when the module runs as __main__ -- in an imported
    module ``__builtins__`` is a plain dict, so the attribute access
    raised AttributeError.  A portable three-way compare is used instead
    (also Python 3 compatible, where the cmp builtin no longer exists).
    """
    if cmp is None:
        # (p > q) - (p < q) is -1/0/1, matching the classic cmp contract.
        cmp = lambda p, q: (p > q) - (p < q)
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo + hi) // 2
        if cmp(a[mid], x) < 0:
            lo = mid + 1
        else:
            hi = mid
    return lo
62 | + | |
def no_history():
    """Disable change attribution: set the audit user id to 0 for this DB session."""
    # Local import keeps this helper usable before Django settings load elsewhere
    # -- presumably; confirm why the import is deferred here.
    from django.db import connection
    cursor = connection.cursor()
    cursor.execute("SELECT set_config('var.user_id', '0', false)")
67 | + | |
def reverse(seq):
    """Return *seq* reversed, preserving its type (works for lists, strings, tuples)."""
    backwards = seq[::-1]
    return backwards
70 | + | |
def flatten(seq):
    """Concatenate the sub-sequences of *seq* into one flat list."""
    result = []
    for subseq in seq:
        result.extend(subseq)
    return result
73 | + | |
def suffix(string, length):
    """Return the last *length* characters of *string* ('' when length <= 0)."""
    if length <= 0:
        return ''
    return string[-length:]
76 | + | |
def suffixes(s):
    """All suffixes of *s*, longest first, ending with the empty string."""
    result = []
    for start in range(len(s) + 1):
        result.append(s[start:])
    return result
79 | + | |
def cut_end(s, end):
    """Drop len(end) trailing characters from *s* (no-op for an empty *end*).

    Note: *end* is not checked to actually be a suffix of *s*; only its
    length matters.
    """
    return s[:len(s) - len(end)] if end else s
86 | + | |
def error_messages(form):
    """Render a form's errors as plain text, one '<label>: <messages>' line per field.

    Non-field errors (key '__all__') are emitted without a label prefix.
    """
    lines = []
    for field_name, messages in form.errors.iteritems():
        joined = ' '.join(messages)
        if field_name == '__all__':
            lines.append(joined)
        else:
            lines.append('%s: %s' % (form.fields[field_name].label, joined))
    return '\n'.join(lines)
... | ... |
data/model_danych.dia
0 โ 100644
No preview for this file type
dictionary/__init__.py
0 โ 100644
1 | +++ a/dictionary/__init__.py | |
... | ... |
dictionary/ajax_export.py
0 โ 100644
1 | +++ a/dictionary/ajax_export.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +from common.decorators import render, ajax, AjaxError | |
4 | +from dictionary.forms import MagicQualifierForm | |
5 | + | |
@render()
@ajax(method='get', encode_result=False)
def magic_qualifier_row(request):
    """AJAX: render one empty 'magic qualifier' form row.

    The 'magic_NUM' prefix looks like a placeholder the client-side JS
    rewrites to a real index before submitting -- TODO confirm in the JS.
    """
    return {'form': MagicQualifierForm(prefix='magic_NUM')}
... | ... |
dictionary/ajax_filters.py
0 โ 100644
1 | +++ a/dictionary/ajax_filters.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +from common.decorators import ajax, AjaxError | |
4 | +from dictionary.models import SavedFilter | |
5 | + | |
@ajax(method='post')
def save_filter(request, name, serialized_filter, super=False, force=False):
    """AJAX: save (or, with *force*, overwrite) the user's named filter.

    Returns {'exists': True} without saving when a filter of that *name*
    already exists and *force* is not set; {} on success.

    NOTE(review): the 'super' parameter shadows the builtin but is part
    of the AJAX request interface, so it keeps its name.
    NOTE(review): ValueError is assumed to carry a .data attribute here
    -- confirm where SavedFilter.save raises it.
    """
    existing_filter = SavedFilter.objects.filter(user=request.user, name=name)
    if force or not existing_filter:
        if existing_filter:
            filter = existing_filter[0]
        else:
            filter = SavedFilter()
        filter.name = name
        filter.user = request.user
        filter.serialized_filter = serialized_filter
        filter.super = super
        try:
            filter.save()
        except ValueError as e:
            raise AjaxError(e.data)
    else:
        return {'exists': True}
    return {}
25 | + | |
@ajax(method='get')
def get_filters(request):
    """AJAX: list the current user's saved filters as {id, name, json} dicts."""
    result = []
    for saved in SavedFilter.objects.filter(user=request.user):
        result.append({
            'id': 'filter%s' % saved.pk,
            'name': saved.name,
            'json': saved.serialized_filter,
        })
    return {'filters': result}
35 | + | |
@ajax(method='post')
def delete_filter(request, id):
    """AJAX: delete the saved filter identified by the DOM id 'filter<pk>'.

    Only the filter's owner may delete it.
    """
    pk = id[len('filter'):]
    saved = SavedFilter.objects.get(pk=pk)
    if saved.user != request.user:
        raise AjaxError('access denied')
    saved.delete()
    return {}
... | ... |
dictionary/ajax_history.py
0 โ 100644
1 | +++ a/dictionary/ajax_history.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +from common.decorators import render, ajax, AjaxError | |
4 | +from dictionary.models import History | |
5 | +from dictionary.history import lexeme_tables | |
6 | + | |
@render()
@ajax(method='get', encode_result=False)
def history_table(request, id):
    """AJAX: render the change-history tables for lexeme *id*, newest transactions first."""
    history_items = History.objects.filter(lexeme__pk=id).order_by('-transaction_began')
    transaction_tables = lexeme_tables(history_items)
    return {'transaction_tables': transaction_tables}
... | ... |
dictionary/ajax_jqgrid.py
0 โ 100644
1 | +++ a/dictionary/ajax_jqgrid.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +import math | |
4 | +from accounts.models import filtering_mode | |
5 | +from common.decorators import render, ajax | |
6 | + | |
class JqGridAjax(object):
    """Generic server-side backend for a jqGrid table.

    Subclasses set `model` and `search_field` and override the
    *_special_case hooks to customize per-field sorting and filtering.
    Sorting, filtering, paging, and row location are implemented here in
    terms of Django querysets.

    NOTE(review): the classmethods name their first argument 'self'
    instead of the conventional 'cls'; kept as-is.
    """
    model = None
    # Model field searched by the grid's quick-search mask.
    search_field = None
    # Maps grid column names to ORM lookup paths.
    field_translation = {}

    @classmethod
    def translate_field(self, field):
        """Translate a grid column name to its ORM field path."""
        if field in self.field_translation:
            return self.field_translation[field]
        else:
            return field

    @staticmethod
    def sort_field_special_case(rule):
        """Hook: per-rule override of the sort field name."""
        return rule['field']

    @classmethod
    def get_sort_field(self, rule):
        """Build an order_by() term from a sort rule ('-' prefix for descending)."""
        field = rule['field']
        field = self.sort_field_special_case(rule)
        field = self.translate_field(field)
        if rule['order'] == 'desc':
            field = '-' + field
        return field

    @staticmethod
    def sort_queryset_special_case(queryset, field):
        """Hook: per-rule queryset adjustment before sorting (e.g. extra selects)."""
        return queryset

    @classmethod
    def sort_queryset(self, queryset, sort_rules):
        """Apply all sort rules to the queryset."""
        order_list = []
        for rule in sort_rules:
            queryset = self.sort_queryset_special_case(queryset, rule)
            order_list.append(self.get_sort_field(rule))
        return queryset.extra(order_by=order_list)

    # jqGrid search operator -> Django lookup ('-' prefix means negated).
    lookup_translation = {
        'eq': 'exact',
        'ne': '-exact',
        'bw': 'startswith',
        'bn': '-startswith',
        'ew': 'endswith',
        'en': '-endswith',
        'cn': 'contains',
        'nc': '-contains',
        're': 'regex',
        'nr': '-regex',
        #'se': 'surely',
        #'sd': '-maybe',
        #'me': 'maybe',
        #'md': '-surely',
        'le': 'lte',
        'ge': 'gte',
    }

    @staticmethod
    def filter_special_case(filter, lookup, negated, queryset):
        """Hook: handle non-standard filter fields.

        Returns (handled, field, filter_kwargs, queryset); when handled is
        False, default field+lookup filtering is applied to *field*.
        """
        return False, filter['field'], {}, queryset

    @classmethod
    def apply_filter(self, queryset, filter):
        """Apply a single jqGrid filter rule to the queryset."""
        lookup = self.lookup_translation[filter['op']]
        negated = (lookup[0] == '-')
        lookup = lookup.lstrip('-')
        data = filter['data']
        special, field, arg, queryset = self.filter_special_case(
            filter, lookup, negated, queryset)
        if not special:
            arg = {(field + '__' + lookup): data}
        if negated:
            new_qs = queryset.exclude(**arg)
        else:
            new_qs = queryset.filter(**arg).distinct()
        return new_qs

    @classmethod
    def get_queryset(self, *args):
        """Base queryset for the grid; hook for subclass narrowing."""
        return self.model.objects.all()

    @classmethod
    def get_empty_queryset(self):
        """Empty queryset used as the seed for OR-combined filtering."""
        return self.model.objects.none()

    @classmethod
    def apply_filters(self, filters, *args):
        """Apply a jqGrid filter group ({'groupOp': 'AND'|'OR', 'rules': [...]})."""
        queryset = self.get_queryset(*args)
        if filters:
            if filters['groupOp'] == 'AND':
                for filter in filters['rules']:
                    queryset = self.apply_filter(queryset, filter)
            elif filters['groupOp'] == 'OR':
                new_queryset = self.get_empty_queryset()
                for filter in filters['rules']:
                    new_queryset |= self.apply_filter(queryset, filter)
                queryset = new_queryset
        return queryset

    @staticmethod
    def apply_mask(queryset, mask, sort_rules):
        """Abstract: narrow the queryset to entries matching the search mask."""
        # Bare name raises NameError when called: poor man's abstract method.
        abstract

    @staticmethod
    def filter_value_special_case(queryset, rule, from_value, upward):
        """Hook: custom handling for filter_value; returns (handled, queryset)."""
        return False, queryset

    # Filters the queryset on the field of *rule*, starting at *from_value*:
    # values further along the sort order if *upward*, closer ones otherwise.
    @classmethod
    def filter_value(self, queryset, rule, from_value, upward):
        greater = (rule['order'] == 'asc') == upward
        special, queryset = self.filter_value_special_case(
            queryset, rule, from_value, upward)
        if special:
            return queryset
        if greater:
            lookup = '__gte'
        else:
            lookup = '__lte'
        field = self.translate_field(rule['field'])
        return queryset.filter(**{field + lookup: from_value})

    # pk of the instance whose search_field equals *mask*, or of the one that
    # would come right after it in the given sort order.  If there is no
    # 'greater' instance, or the sort rules do not involve search_field,
    # returns the first instance in the given sort order.
    #
    # (hopeless method name...)
    @classmethod
    def get_pk(self, mask, filters, sort_rules, filtering_mode, *args):
        whole_queryset = self.apply_filters(filters, *args)
        queryset = whole_queryset
        matching = self.apply_mask(queryset, mask, sort_rules)
        if matching.count() > 0:
            matching = self.sort_queryset(matching, sort_rules)
            return matching[0].pk
        else:
            # when there is no exact match
            rule = sort_rules[0]
            if rule['field'] == self.search_field:
                queryset = self.filter_value(
                    queryset, rule, from_value=mask, upward=True)
            if queryset.count() == 0:
                queryset = whole_queryset
            queryset = self.sort_queryset(queryset, sort_rules)
            return queryset[0].pk

    @staticmethod
    def get_field_special_case(field, instance):
        """Hook: custom field value extraction; returns (handled, value)."""
        return False, None

    @classmethod
    def get_field(self, field, instance):
        """Return (translated field name, value of *field* on *instance*)."""
        special, value = self.get_field_special_case(field, instance)
        if not special:
            value = getattr(instance, field)
        return (self.translate_field(field), value)

    # Index of the row, in the given sort order, at which the instance
    # with the given pk will appear.
    @classmethod
    def row_index(self, pk, filters, sort_rules, filtering_mode, mask, *args):
        selected = self.model.objects.get(pk=pk)
        queryset = self.apply_filters(filters, *args)
        if filtering_mode:
            queryset = self.apply_mask(queryset, mask, sort_rules)
        count = queryset.count()
        if count == 0:
            return 0, 0
        # NOTE(review): the loop returns on its first iteration, so only
        # the first sort rule is ever consulted here.
        for rule in sort_rules:
            field = rule['field']
            field, data = self.get_field(field, selected)
            preceding = self.filter_value(
                queryset, rule, from_value=data, upward=False)
            return preceding.count(), count

    # also a hopeless name
    @classmethod
    def find_id(self, filtering_mode, selected_pk, filters, sort_rules, mask,
            *args):
        """Return the row index and record count for *selected_pk*."""
        index, count = self.row_index(selected_pk, filters, sort_rules,
            filtering_mode, mask, *args)
        return {
            'rowIndex': index,
            'records': count,
        }

    @classmethod
    def get_location(self, filtering_mode, sort_rules, filters, mask, *args):
        """Locate the row matching *mask*: its index, pk, and total record count."""
        queryset = self.apply_filters(filters, *args)
        count = queryset.count()
        # not sure this makes sense - there are few patterns anyway and
        # lexemes are cached
        if count > 0 and mask == '':
            return {
                'rowIndex': 0,
                'selected_id': queryset[0].pk,
                'records': count,
            }
        if filtering_mode:
            queryset = self.apply_mask(queryset, mask, sort_rules)
        if queryset.count() > 0:
            selected_pk = self.get_pk(
                mask, filters, sort_rules, filtering_mode, *args)
            index, _count = self.row_index(
                selected_pk, filters, sort_rules, filtering_mode, mask, *args)
        else:
            index = None
            selected_pk = None
        return {
            'rowIndex': index,
            'selected_id': selected_pk,
            'records': count,
        }

    @classmethod
    def get_sorted_queryset(self, filtering_mode, sort_rules, filters, mask, *args):
        """Filtered (and, in filtering mode, masked) queryset in the requested order."""
        queryset = self.apply_filters(filters, *args)
        if filtering_mode:
            queryset = self.apply_mask(queryset, mask, sort_rules)
        return self.sort_queryset(queryset, sort_rules)

    @staticmethod
    def count_pages(count, page, limit):
        """Return (total_pages, start_offset, rows_on_this_page) for paging.

        NOTE(review): limit == 0 raises ZeroDivisionError on the first
        line; the limit < 0 guard below comes too late to prevent it.
        """
        total_pages = int(math.ceil(float(count) / limit))
        if limit < 0:
            limit = 0
        page = min(page, total_pages)
        start = limit * (page - 1)
        start = max(start, 0)
        response_rowcount = min(limit, count - start)
        return total_pages, start, response_rowcount

    @staticmethod
    def response_row(instance):
        """Abstract: build the list of cell values for one grid row."""
        # Bare name raises NameError when called: poor man's abstract method.
        abstract

    @classmethod
    def make_response(self, response_qs, count, page, total_pages):
        """Assemble the jqGrid JSON payload for one page of rows."""
        rows = [{
            'id': instance.pk,
            'cell': self.response_row(instance),
        } for instance in response_qs]
        return {
            'page': page,
            'total': total_pages,
            'records': count,
            'rows': rows,
        }

    @classmethod
    def get_page(self, filtering_mode, page, limit, sort_rules, filters, mask,
            *args):
        """Return one page of grid data in jqGrid's expected JSON shape."""
        queryset = self.get_sorted_queryset(
            filtering_mode, sort_rules, filters, mask, *args)
        count = queryset.count()
        total_pages, start, response_rowcount = self.count_pages(count, page, limit)
        response_qs = queryset[start:start + response_rowcount]
        return self.make_response(response_qs, count, page, total_pages)
265 | + | |
@render('sort_dialog.html')
@ajax(method='post', encode_result=False)
def sort_rules(request, colModel, colNames):
    """AJAX: build the sort-dialog rule list from jqGrid column metadata."""
    rules = []
    for column_label, column in zip(colNames, colModel):
        if not column.get('sortable', True):
            continue
        rules.append({
            'code_name': column['index'],
            'ui_name': column_label,
        })
    return {'sort_rules': rules}
... | ... |
dictionary/ajax_lexeme_jqgrid.py
0 โ 100644
1 | +++ a/dictionary/ajax_lexeme_jqgrid.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +from django.utils.simplejson import dumps as json_encode, loads as json_decode | |
4 | +from django.db.models import Count | |
5 | +from dictionary.models import Lexeme, Pattern, visible_vocabularies | |
6 | +from dictionary.ajax_jqgrid import JqGridAjax | |
7 | +from accounts.models import filtering_mode | |
8 | +from common.decorators import ajax, AjaxError | |
9 | +from common.util import bisect_left, reverse | |
10 | +from django.core.cache import cache | |
11 | + | |
class LexemeGrid(JqGridAjax):
  """jqGrid backend for the lexeme table.

  Specializes the generic JqGridAjax hooks: sorting (including a-tergo,
  i.e. reversed-entry, order), filtering, row rendering and row location.
  """
  model = Lexeme
  search_field = 'entry'
  # UI column name -> ORM lookup used when ordering.
  field_translation = {
    'part_of_speech': 'part_of_speech__symbol',
  }

  @staticmethod
  def sort_field_special_case(rule):
    """A-tergo entry sorting orders by the extra 'rev' column (see below)."""
    if rule['field'] == 'entry' and rule['a_tergo']:
      return 'rev'
    else:
      return rule['field']

  @staticmethod
  def sort_queryset_special_case(queryset, rule):
    """Attach a reversed-entry select for a-tergo ordering.

    'haslo' is presumably the DB column behind Lexeme.entry and reverse()
    a DB-side string-reversal function -- TODO confirm against the schema.
    """
    if rule['field'] == 'entry' and rule['a_tergo']:
      return queryset.extra(select={'rev': "reverse(haslo)"})
    else:
      return queryset

  @staticmethod
  def filter_special_case(filter, lookup, negated, queryset):
    """Translate grid filters into ORM lookups or raw-SQL conditions.

    Returns a (handled, field, lookup_or_arg, queryset) tuple; when
    'handled' is True the filter is fully resolved here and the third
    element holds the keyword arguments for .filter()/.exclude().
    """
    field, data = filter['field'], filter['data']
    # Simple renames from UI field names to ORM lookups.
    if field == 'form':
      field = 'lexemeform__form'
    elif field == 'lexeme_qualifier':
      field = 'qualifiers__pk'
    elif field == 'lip_qualifier':
      field = 'lexemeinflectionpattern__qualifiers__pk'
    elif field == 'classification_value':
      field = 'classificationvalue__pk'
    if field in ('pattern_name', 'inflection_characteristic'):
      if field == 'pattern_name':
        lip_lookup = 'pattern__pk'
        try:
          p = Pattern.objects.get(name=data)
          data = p.pk
        except Pattern.DoesNotExist:
          data = 0  # no such pattern: pk 0 matches nothing
        # Raw EXISTS used for the negated case ("has some other pattern").
        where = ("EXISTS (SELECT odmieniasie.l_id FROM odmieniasie WHERE "
                 "odmieniasie.w_id <> %s AND leksemy.id = odmieniasie.l_id)")
      elif field == 'inflection_characteristic':
        lip_lookup = 'inflection_characteristic__entry'
        where = ("EXISTS (SELECT odmieniasie.l_id FROM odmieniasie JOIN charfle "
                 "ON odmieniasie.charfl = charfle.id WHERE "
                 "charfle.charfl <> %s AND leksemy.id = odmieniasie.l_id)")
      arg = {('lexemeinflectionpattern__%s' % lip_lookup): data}
      if negated:
        queryset = queryset.extra(where=[where], params=[data])
    elif field == 'containing_vocabulary':
      return False, 'vocabularies__id', 'id', queryset
    elif field == 'owner_vocabulary':
      arg = {'owner_vocabulary__id': data}
    elif field == 'pattern_count':
      queryset = queryset.annotate(
        pc=Count('lexemeinflectionpattern__pattern', distinct=True))
      return False, 'pc', lookup, queryset
    elif field == 'ic_count':
      queryset = queryset.annotate(
        icc=Count('lexemeinflectionpattern__inflection_characteristic',
                  distinct=True))
      return False, 'icc', lookup, queryset
    else:
      return False, field, {}, queryset
    return True, None, arg, queryset

  @staticmethod
  def get_queryset(vocabularies):
    """Non-deleted lexemes belonging to any of the given vocabularies."""
    return Lexeme.objects.filter(
      deleted=False, vocabularies__in=vocabularies).distinct()

  @staticmethod
  def apply_mask(lexemes, mask, sort_rules):
    """Restrict to entries matching mask as prefix (or as suffix a tergo)."""
    if mask == '':
      return lexemes
    for rule in sort_rules:
      if rule['field'] == 'entry':
        if not rule['a_tergo']:
          matching_lexemes = lexemes.filter(entry__istartswith=mask)
        else:
          matching_lexemes = lexemes.filter(entry__iendswith=mask)
        break
    else:
      # No rule sorts by entry: fall back to prefix matching.
      matching_lexemes = lexemes.filter(entry__istartswith=mask)
    return matching_lexemes

  @staticmethod
  def filter_value_special_case(queryset, rule, from_value, greater):
    """Range-restrict by reversed entry when sorting a tergo."""
    if rule['field'] == 'entry' and rule['a_tergo']:
      if greater:
        comp = '>='
      else:
        comp = '<='
      queryset = queryset.extra(where=["reverse(haslo) " + comp + " %s"],
                                params=[reverse(from_value)])
      return True, queryset
    else:
      return False, queryset

  @staticmethod
  def get_field_special_case(field, lexeme):
    """Resolve fields that are not plain attributes of Lexeme."""
    if field == 'part_of_speech':
      return True, lexeme.part_of_speech.symbol
    else:
      return False, None

  @staticmethod
  def response_row(lexeme):
    """Build the list of cell values for one grid row."""
    lip_data = lexeme.lip_data()
    cont_vocabs = '/'.join(lexeme.vocabularies.values_list('id', flat=True))
    return [
      lexeme.id,
      lexeme.entry,
      lexeme.part_of_speech.symbol,
      lip_data['patterns'],
      '',  # pattern count not filled in
      lip_data['inflection_characteristics'],
      '',  # inflection characteristic count not filled in
      '',  # form not filled in
      cont_vocabs,
      lexeme.owner_vocabulary.id,
      dict(Lexeme.STATUS_CHOICES).get(lexeme.status),
      '',  # comment not filled in
    ]

  # Index of the row, under the given sort order, at which the instance
  # with the given pk appears.
  @classmethod
  def row_index(
      cls, pk, filters, sort_rules, filtering_mode, mask, vocabularies):
    """Return (index, count) of pk in the current ordering; (0, 0) if empty."""
    pk_list = get_pk_list(
      sort_rules, filters, vocabularies, mask, filtering_mode)

    count = len(pk_list)
    if count == 0:
      return 0, 0
    return pk_list.index(pk), count

  # Id of the instance whose search_field equals mask, or of the one that
  # would directly follow it in the given ordering. When there is no
  # "greater" instance, or the sort rules do not involve search_field,
  # this falls back to the base-class behaviour.
  @classmethod
  def get_pk(cls, mask, filters, sort_rules, filtering_mode, vocabularies):
    """Return (pk, index, count) of the row located by mask.

    NOTE(review): inherited design; the original author disliked it too.
    """
    pk_list = get_pk_list(
      sort_rules, filters, vocabularies, mask, filtering_mode)
    count = len(pk_list)
    if count == 0:
      return None, None, 0
    if len(sort_rules) == 0 or sort_rules[0]['field'] != cls.search_field:
      # Fix: the original called super(self, LexemeGrid) -- the arguments
      # were reversed, which raises TypeError for any subclass.
      selected_pk = super(LexemeGrid, cls).get_pk(
        mask, filters, sort_rules, filtering_mode, vocabularies)
      index, count = cls.row_index(
        selected_pk, filters, sort_rules, filtering_mode, mask, vocabularies)
      return selected_pk, index, count

    # Binary search using Polish collation (see make_lexeme_cmp).
    index = bisect_left(pk_list, mask, cmp=make_lexeme_cmp(sort_rules[0]))
    if index == count:
      index -= 1
    return pk_list[index], index, count

  @classmethod
  def get_location(cls, filtering_mode, sort_rules, filters, mask, *args):
    """Locate the row selected by mask; see get_pk for the semantics."""
    selected_pk, index, count = cls.get_pk(
      mask, filters, sort_rules, filtering_mode, *args)
    return {
      'rowIndex': index,
      'selected_id': selected_pk,
      'records': count,
    }
189 | + | |
# Polish collation is needed by locale.strcoll in make_lexeme_cmp below.
# NOTE(review): setting the process-wide locale at import time affects the
# whole application and requires the pl_PL.UTF-8 locale on the host.
import locale
locale.setlocale(locale.LC_ALL, 'pl_PL.UTF-8')
192 | + | |
def make_lexeme_cmp(rule):
  """Return a cmp-style comparator of a lexeme pk against a mask string,
  honouring the a-tergo and descending options of the given sort rule."""
  def compare(pk, mask):
    entry = Lexeme.objects.get(pk=pk).entry
    other = mask
    if rule['a_tergo']:
      entry, other = reverse(entry), reverse(other)
    outcome = locale.strcoll(entry, other)
    # Descending order flips the comparison, except against an empty mask.
    if rule['order'] == 'desc' and other != '':
      outcome = -outcome
    return outcome
  return compare
205 | + | |
# Query: index of the row with the given id under the current sort order.
@ajax(method='get')
def find_id(request, id, sort_rules, mask, filters=None):
  user_vocabularies = visible_vocabularies(request.user)
  mode = filtering_mode(request.user)
  return LexemeGrid.find_id(
    mode, id, filters, sort_rules, mask, user_vocabularies)
212 | + | |
# Query: id and row index of the first row (under the given sort order)
# whose entry starts with mask; 'selected_id' is None when there is none.
@ajax(method='get')
def get_location(request, sort_rules, filters=None, mask=''):
  mode = filtering_mode(request.user)
  return LexemeGrid.get_location(
    mode, sort_rules, filters, mask, visible_vocabularies(request.user))
221 | + | |
# Inherited hack -- should be cleaned up someday.
def cache_key(sort_rules, filters, vocabularies, mask, filtering_mode):
  """Build the cache key for a lexeme pk list with these grid parameters."""
  parts = [json_encode(sort_rules), json_encode(filters)]
  parts.extend(vocabulary.id for vocabulary in vocabularies)
  # The mask only contributes to the key when filtering mode is on.
  if filtering_mode:
    parts.append(mask)
  return ''.join(parts)
230 | + | |
def get_cached_lexemes(sort_rules, filters, vocabularies, mask, filtering_mode):
  """Return the cached pk list for these parameters, or None on a miss."""
  return cache.get(
    cache_key(sort_rules, filters, vocabularies, mask, filtering_mode))
234 | + | |
def cache_lexemes(pk_list, sort_rules, filters, vocabularies, mask,
                  filtering_mode):
  """Store pk_list in the cache and register its key in 'key_list'.

  'key_list' records every cache key in use so that delete_lexeme can
  purge a pk from all cached orderings.
  """
  key = cache_key(sort_rules, filters, vocabularies, mask, filtering_mode)
  cache.set(key, pk_list)
  known_keys = cache.get('key_list', [])
  if key not in known_keys:
    known_keys.append(key)
    cache.set('key_list', known_keys)
243 | + | |
def get_pk_list(sort_rules, filters, vocabularies, mask, filtering_mode,
                filtering=False):
  """Return the ordered list of lexeme pks for the given grid parameters.

  Results are cached per parameter set; when ``filtering`` is true the
  cached list is ignored and recomputed (and the cache refreshed).
  """
  if not filtering:
    pk_list = get_cached_lexemes(sort_rules, filters, vocabularies, mask,
                                 filtering_mode)
  else:
    pk_list = None
  if pk_list is None:  # idiom fix: identity test instead of '== None'
    lexemes = LexemeGrid.get_sorted_queryset(
      filtering_mode, sort_rules, filters, mask, vocabularies)
    # A-tergo sorting adds an extra 'rev' select; it must be fetched for
    # the ordering to apply, but only the pks are kept.
    if 'rev' in lexemes.query.extra_select:
      pk_list = list(row[0] for row in lexemes.values_list('pk', 'rev'))
    else:
      pk_list = list(lexemes.values_list('pk', flat=True))
    cache_lexemes(pk_list, sort_rules, filters, vocabularies, mask,
                  filtering_mode)
  return pk_list
261 | + | |
@ajax(method='get')
def get_lexemes(request, page, rows, sort_rules, filters=None, mask='',
                target_page=0, totalrows=0, filtering=False):
  """Return one page of the lexeme grid; remembers sort/filter in session."""
  request.session['sort_rules'] = json_encode(sort_rules)
  request.session['filters'] = json_encode(filters)
  effective_page = target_page or page
  limit = totalrows or rows
  user_vocabs = visible_vocabularies(request.user)

  pk_list = get_pk_list(sort_rules, filters, user_vocabs, mask,
                        filtering_mode(request.user), filtering)
  count = len(pk_list)
  total_pages, start, response_rowcount = LexemeGrid.count_pages(
    count, effective_page, limit)
  page_pks = pk_list[start:start + response_rowcount]
  lexemes = [Lexeme.objects.get(pk=pk) for pk in page_pks]
  return LexemeGrid.make_response(lexemes, count, effective_page, total_pages)
... | ... |
dictionary/ajax_lexeme_view.py
0 โ 100644
1 | +++ a/dictionary/ajax_lexeme_view.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +import json | |
4 | +from django.contrib.auth.models import User | |
5 | +from django.core.cache import cache | |
6 | +from django.db.models import Max | |
7 | +from dictionary.models import Lexeme, LexemeInflectionPattern, PartOfSpeech, \ | |
8 | + Pattern, InflectionCharacteristic, Vocabulary, LexemeAssociation, Qualifier, \ | |
9 | + prepare_table, visible_vocabularies, ClassificationValue, CrossReference, \ | |
10 | + TableTemplate, get_root, InputLexeme, set_vocabulary | |
11 | +from dictionary.forms import LexemeEditForm, LIPEditForm, ClassificationForm, \ | |
12 | + CrossReferenceForm | |
13 | +from common.decorators import render, ajax, AjaxError | |
14 | +from common.util import error_messages | |
15 | + | |
16 | +from dictionary.ajax_history import * | |
17 | +from dictionary.ajax_lexeme_jqgrid import * | |
18 | +from dictionary.ajax_prompter import * | |
19 | +from dictionary.ajax_filters import * | |
20 | + | |
21 | + | |
@render('inflection_tables.html')
@ajax(method='get', encode_result=False)
def get_inflection_tables(request, variant, id):
  """Render the inflection tables of a lexeme for the given variant."""
  lexeme = Lexeme.objects.get(pk=id)
  visible = visible_vocabularies(request.user)
  if lexeme.owner_vocabulary not in visible:
    raise AjaxError('access denied')
  qualifiers = Qualifier.objects.filter(vocabulary__in=visible)
  return {'tables': lexeme.inflection_tables(variant, qualifiers=qualifiers)}
32 | + | |
@render('inflection_table.html')
@ajax(method='get', encode_result=False)
def table_preview(request, id, pattern, inflection_characteristic, lip_id,
                  entry=None, pos=None):
  """Render a live preview of one inflection table for a (possibly not yet
  saved) lexeme inflection pattern.

  'pattern' is a pattern name, 'inflection_characteristic' a pk; 'entry'
  and 'pos' default to the lexeme's own values. Returns {'table': None}
  when the pattern or its table template does not exist.
  """
  lexeme = Lexeme.objects.get(pk=id)
  if lexeme.owner_vocabulary not in visible_vocabularies(request.user):
    raise AjaxError('access denied')
  if entry is None:
    entry = lexeme.entry
  if pos is None:
    pos = lexeme.part_of_speech.symbol
  try:
    pattern = Pattern.objects.get(name=pattern)
    inflection_characteristic = InflectionCharacteristic.objects.get(
      pk=inflection_characteristic)
    # A 'lip_add*' id denotes a row freshly added in the UI with no saved
    # LexemeInflectionPattern; otherwise the pk follows the 'lip' prefix.
    if lip_id.startswith('lip_add'):
      lip = LexemeInflectionPattern(lexeme=lexeme, index=0)
    else:
      lip = LexemeInflectionPattern.objects.get(pk=int(lip_id[3:]))
    lip.pattern = pattern
    lip.inflection_characteristic = inflection_characteristic
    lip.root = get_root(entry, pos, pattern, inflection_characteristic)
    qualifiers = Qualifier.objects.filter(
      vocabulary__in=visible_vocabularies(request.user))
    table = lip.inflection_table('0', separated=True, qualifiers=qualifiers,
                                 edit_view=True)
    prepare_table(table)
  except Pattern.DoesNotExist:
    table = None
  except TableTemplate.DoesNotExist:
    table = None
  return {'table': table}
65 | + | |
@render()
@ajax(method='get', encode_result=False)
def odm_forms(request, id):
  """Return the form lists of all input lexemes sharing this lexeme's entry."""
  lexeme = Lexeme.objects.get(pk=id)
  if lexeme.owner_vocabulary not in visible_vocabularies(request.user):
    raise AjaxError('access denied')
  odm_lexemes = []
  # Fix: the original rebound 'l' inside this loop, shadowing the lexeme
  # fetched above; a distinct name keeps the two objects apart.
  for input_lexeme in InputLexeme.objects.filter(entry=lexeme.entry):
    odm_lexemes.append(
      list(input_lexeme.inputform_set.values_list('form', flat=True)))
  return {'odm_lexemes': odm_lexemes}
78 | + | |
@render()
@ajax(method='get', encode_result=False)
def lexeme_edit_form(request, id):
  """Build the context for the lexeme edit form: the main form, one form
  per inflection pattern, classification forms and cross-references, plus
  the read-only data from vocabularies the user cannot edit."""
  to_return = {}
  l = Lexeme.objects.get(pk=id)
  if l.owner_vocabulary not in visible_vocabularies(request.user):
    raise AjaxError('access denied')
  editable = l.perm(request.user, 'change')
  to_return['editable'] = editable
  owner = l.owner_vocabulary
  visible_vocabs = visible_vocabularies(request.user)
  # Vocabularies other than the owner, split by the user's edit rights.
  r_vocabularies = visible_vocabs.exclude(pk=owner.pk)
  containing = r_vocabularies.filter(lexemeassociation__lexeme=l)
  ro_vocabularies = containing.exclude(editors=request.user)
  rw_vocabularies = r_vocabularies.filter(editors=request.user)
  all_rw_vocabularies = l.rw_vocabularies(request.user)
  #all_ro_vocabularies = visible_vocabs.exclude(editors=request.user)
  ro_owner = owner not in all_rw_vocabularies
  to_return['multiple_editable'] = editable and bool(rw_vocabularies)
  # Qualifiers from a read-only owner vocabulary are shown but not editable.
  if ro_owner:
    ro_qualifiers = l.qualifiers.filter(vocabulary=owner) #__in=all_ro_vocabularies)
  else:
    ro_qualifiers = []
  to_return['owner'] = owner
  to_return['ro_vocabularies'] = ro_vocabularies
  to_return['ro_qualifiers'] = ro_qualifiers

  to_return['form'] = LexemeEditForm(
    instance=l, editable=editable, vocabularies=rw_vocabularies,
    containing=rw_vocabularies.filter(
      pk__in=containing.values_list('pk', flat=True)),
    rw_vocabularies=all_rw_vocabularies,
    all_rw_vocabularies=request.user.editable_vocabularies.all())
  to_return['id'] = l.pk
  to_return['part_of_speech'] = l.part_of_speech.symbol
  to_return['classification_forms'] = make_classification_forms(
    l, owner, editable);

  # One edit form per lexeme inflection pattern, paired with its read-only
  # qualifiers when the owner vocabulary is not editable by this user.
  lips = l.lexemeinflectionpattern_set.all()
  to_return['lip_forms'] = [
    (LIPEditForm(
      part_of_speech=l.part_of_speech, instance=lip, prefix='lip' + str(lip.pk),
      rw_vocabularies=all_rw_vocabularies, editable=editable),
     lip.qualifiers.filter(vocabulary=owner) if ro_owner else [])
    for lip in lips]
  # Cross-references to non-deleted lexemes, ordered by type.
  crs = l.refs_to.filter(to_lexeme__deleted=False).order_by('type__index')
  to_return['cross_references'] = crs
  return to_return
127 | + | |
def make_classification_forms(lexeme, vocabulary=None, editable=True):
  """Build one ClassificationForm per classification of the vocabulary
  (the lexeme's owner vocabulary when none is given)."""
  vocab = vocabulary or lexeme.owner_vocabulary
  return [
    ClassificationForm(
      classification=c, values=lexeme.classification_values(c),
      prefix='cl' + str(c.pk), editable=editable)
    for c in vocab.classifications.all()]
140 | + | |
@render()
@ajax(method='get', encode_result=False)
def classification_forms(request, lexeme_id, vocab_id):
  """Return the classification forms of a lexeme for a given vocabulary."""
  lexeme = Lexeme.objects.get(pk=lexeme_id)
  vocab = Vocabulary.objects.get(pk=vocab_id) if vocab_id else None
  return {'forms': make_classification_forms(lexeme, vocab)}
151 | + | |
@render('lexeme_edit_form_row.html')
@ajax(method='get', encode_result=False)
def new_lip_edit_row(request, id, pos_id):
  """Render an empty inflection-pattern edit row for a part of speech."""
  lexeme = Lexeme.objects.get(pk=id)
  if lexeme.owner_vocabulary not in request.user.editable_vocabularies.all():
    raise AjaxError('access denied')
  if not pos_id:
    raise AjaxError(u'Nieokreล›lona czฤ™ล›ฤ‡ mowy.')
  pos = PartOfSpeech.objects.get(pk=pos_id)
  # 'NUM' in the prefix is replaced client-side with a running number.
  form = LIPEditForm(part_of_speech=pos, prefix='lip_add_NUM',
                     rw_vocabularies=lexeme.rw_vocabularies(request.user))
  return {'lip_form': form, 'editable': True}
165 | + | |
@render('cross_reference_row.html')
@ajax(method='get', encode_result=False)
def new_cross_reference_row(request, id, pos_id):
  """Render an empty cross-reference row for the given part of speech."""
  lexeme = Lexeme.objects.get(pk=id)
  if lexeme.owner_vocabulary not in request.user.editable_vocabularies.all():
    raise AjaxError('access denied')
  pos = PartOfSpeech.objects.get(pk=pos_id)
  form = CrossReferenceForm(lexeme=lexeme, pos=pos, prefix='cr_add_NUM')
  return {'cr_form': form, 'editable': True}
175 | + | |
@ajax(method='post')
def update_lexeme(request, form_data, mask=''):
  """Persist a submitted lexeme edit form.

  'form_data' is a list of {'name': ..., 'value': ...} pairs (repeated
  names carry multi-valued fields). Saves the main form, vocabulary
  membership, qualifiers, classifications, inflection patterns and
  cross-references; raises AjaxError with the form errors on any invalid
  sub-form. Returns an empty dict on success.
  """
  form_dict = dict((x['name'], x['value']) for x in form_data)
  l = Lexeme.objects.get(pk=form_dict['id'])
  if not l.perm(request.user, 'view'):
    raise AjaxError('access denied')
  owner = l.owner_vocabulary
  # An empty entry marks a placeholder created by create_lexeme.
  created = l.entry == ''
  visible_vocabs = visible_vocabularies(request.user)
  r_vocabularies = visible_vocabs.exclude(pk=owner.pk)
  rw_vocabularies = r_vocabularies.filter(editors=request.user)
  all_rw_vocabularies = l.rw_vocabularies(request.user)
  # Users without change permission may still edit qualifiers/membership.
  if not l.perm(request.user, 'change'):
    return update_lexeme_qualifiers(
      l, all_rw_vocabularies, rw_vocabularies, form_dict, form_data)
  form = LexemeEditForm(
    data=form_dict, instance=l, vocabularies=rw_vocabularies,
    rw_vocabularies=all_rw_vocabularies,
    all_rw_vocabularies=request.user.editable_vocabularies.all())
  if form.is_valid():
    l = form.save()
    l.responsible = request.user
    # Saving un-deletes the placeholder created by create_lexeme.
    l.deleted = False
    l.save()
  else:
    raise AjaxError(error_messages(form))

  # Synchronize vocabulary membership and optional ownership transfer.
  for vocab in rw_vocabularies:
    set_vocabulary(l, vocab, vocab in form.cleaned_data['vocabularies'])
  new_owner = form.cleaned_data.get('new_owner')
  if new_owner and new_owner != owner:
    set_vocabulary(l, new_owner, True)
    l.owner_vocabulary = new_owner
    set_vocabulary(l, owner, False)
    l.save()
  for qualifier in form.fields['qualifiers'].queryset:
    qualifier.set_for(l, qualifier in form.cleaned_data['qualifiers'])

  # Synchronize classification values with the submitted selection.
  classifications = l.owner_vocabulary.classifications.all()
  for c in classifications:
    classification_form = ClassificationForm(
      data=form_dict, classification=c, prefix='cl' + str(c.pk))
    if classification_form.is_valid():
      cvs = ClassificationValue.objects.filter(
        pk__in=classification_form.cleaned_data['values'])
      l_cvs = l.classification_values(c)
      for cv in l_cvs:
        if cv.pk not in classification_form.cleaned_data['values']:
          cv.lexemes.remove(l) #add
      for cv in cvs:
        if cv not in l_cvs:
          cv.lexemes.add(l) #add
    else:
      raise AjaxError(error_messages(classification_form))
  # Drop values of classifications no longer attached to the owner.
  for cv in l.classificationvalue_set.all():
    if cv.classification not in classifications:
      cv.lexemes.remove(l) #add

  submitted_lips = []
  submitted_crs = []
  # Rows deleted in the UI arrive as 'lipNNN' prefixes / cr pks.
  for prefix in form_dict['deleted']:
    pk = int(prefix[3:])
    LexemeInflectionPattern.objects.get(pk=pk).delete()
  for cr_pk in form_dict['deleted_cr']:
    CrossReference.objects.get(pk=cr_pk).delete()
  for pair in form_data:
    # may (?) break if the inflection characteristic is wrong
    name = pair['name']
    prefix = name.split('-')[0]
    if name.startswith('lip') and prefix not in submitted_lips:
      submitted_lips.append(prefix)
      # 'lip_add*' rows are new; otherwise the pk follows the 'lip' prefix.
      if prefix.startswith('lip_add'):
        lip = LexemeInflectionPattern()
        lip.lexeme = l
      else:
        pk = int(prefix[3:])
        lip = LexemeInflectionPattern.objects.get(pk=pk)
      # Multi-valued qualifier fields must be collected from all pairs.
      form_dict[prefix + '-qualifiers'] = get_list(
        form_data, prefix + '-qualifiers')
      lip_form = LIPEditForm(
        part_of_speech=l.part_of_speech, data=form_dict, prefix=prefix,
        instance=lip, rw_vocabularies=all_rw_vocabularies,
        index=len(submitted_lips))
      if lip_form.is_valid():
        lip = lip_form.save()
        lip.root = l.get_root(lip.pattern, lip.inflection_characteristic)
        if lip.root is None:
          raise AjaxError(u'Niepasujฤ…ce zakoล„czenie formy podstawowej.')
        for qualifier in lip_form.fields['qualifiers'].queryset:
          qualifier.set_for(
            lip, qualifier in lip_form.cleaned_data['qualifiers'])
        lip.save()
      else:
        raise AjaxError(error_messages(lip_form))
    if name.startswith('cr_add') and prefix not in submitted_crs:
      submitted_crs.append(prefix)
      cr_form = CrossReferenceForm(data=form_dict, prefix=prefix)
      if cr_form.is_valid():
        cr_form.save()
      else:
        raise AjaxError(error_messages(cr_form))
  l.refresh_forms()
  # A freshly created lexeme is prepended to the cached pk list for the
  # grid view the user came from, so it shows up without a full reload.
  if created:
    sort_rules = json.loads(request.session['sort_rules'])
    filters = json.loads(request.session['filters'])
    key = cache_key(
      sort_rules, filters, visible_vocabs, mask, filtering_mode(request.user))
    pk_list = cache.get(key)
    if pk_list:
      pk_list = [l.pk] + pk_list
      cache.set(key, pk_list)
  return {}
288 | + | |
def update_lexeme_qualifiers(lexeme, vocabularies, rw_vocabularies, form_dict,
                             form_data):
  """Apply only qualifier and vocabulary-membership changes to a lexeme.

  Used by update_lexeme when the user may view but not change the lexeme
  itself. Returns an empty dict, mirroring update_lexeme.
  """
  qualifiers = Qualifier.objects.filter(vocabulary__in=vocabularies)
  for qualifier in qualifiers:
    qualifier.set_for(
      lexeme, unicode(qualifier.pk) in form_dict['qualifiers'])
  for vocab in rw_vocabularies:
    set_vocabulary(lexeme, vocab, vocab.pk in form_dict['vocabularies'])
  submitted_lips = []
  for pair in form_data:
    name = pair['name']
    # Fix: removed the unused local 'value' the original bound here.
    prefix = name.split('-')[0]
    if name.startswith('lip') and prefix not in submitted_lips:
      submitted_lips.append(prefix)
      pk = int(prefix[3:])
      lip = LexemeInflectionPattern.objects.get(pk=pk)
      lip_qualifiers = get_list(form_data, prefix + '-qualifiers')
      for qualifier in qualifiers:
        qualifier.set_for(lip, unicode(qualifier.pk) in lip_qualifiers)
  return {}
310 | + | |
def get_list(form_data, name):
  """Collect, in order, every submitted value under the given field name."""
  values = []
  for pair in form_data:
    if pair['name'] == name:
      values.append(pair['value'])
  return values
313 | + | |
@ajax(method='post')
def delete_lexeme(request, id):
  """Soft-delete a lexeme and drop its pk from every cached pk list."""
  pk = int(id)
  lexeme = Lexeme.objects.get(pk=pk)
  if not lexeme.perm(request.user, 'change'):
    raise AjaxError('access denied')
  lexeme.deleted = True
  lexeme.save()
  # 'key_list' enumerates all cached grid orderings (see cache_lexemes).
  for key in cache.get('key_list', []):
    pk_list = cache.get(key)
    if pk_list and pk in pk_list:
      pk_list.remove(pk)
      cache.set(key, pk_list)
  return {}
329 | + | |
@ajax(method='get')
def check_pos(request, pos_id, ic_id):
  """Answer whether an inflection characteristic with the same entry
  exists for the given part of speech ({'answer': 'yes'/'no'})."""
  ic = InflectionCharacteristic.objects.get(pk=ic_id)
  # .exists() avoids fetching rows just to test truthiness, and matches
  # the style of check_pattern.
  if InflectionCharacteristic.objects.filter(
      entry=ic.entry, part_of_speech__pk=pos_id).exists():
    return {'answer': 'yes'}
  else:
    return {'answer': 'no'}
338 | + | |
@ajax(method='get')
def check_pattern(request, pattern_name, ic_id):
  """Answer whether any lexeme (other than deleted candidates) uses this
  pattern with the given inflection characteristic."""
  matches = LexemeInflectionPattern.objects.filter(
    inflection_characteristic__pk=ic_id, pattern__name=pattern_name)
  matches = matches.exclude(lexeme__status='cand', lexeme__deleted=True)
  answer = 'yes' if matches.exists() else 'no'
  return {'answer': answer}
348 | + | |
@ajax(method='get')
def get_ics(request, pos_id):
  """List (pk, entry) pairs of inflection characteristics of a part of speech."""
  pairs = InflectionCharacteristic.objects.filter(
    part_of_speech__pk=pos_id).values_list('pk', 'entry')
  return {'ics': list(pairs)}
353 | + | |
354 | +START_ID = 1000000 | |
355 | + | |
@ajax(method='post')
def create_lexeme(request, vocab_id):
  """Create an empty placeholder lexeme in the given vocabulary and
  return its id; the row stays 'deleted' until the edit form is saved
  (update_lexeme clears the flag)."""
  owner = Vocabulary.objects.get(pk=vocab_id)
  if request.user not in owner.editors.all():
    raise AjaxError('access denied')
  # NOTE(review): max+1 id allocation is racy under concurrent requests --
  # two requests may compute the same id; a DB sequence would be safer.
  next_id = Lexeme.objects.filter(
    pk__gte=START_ID).aggregate(Max('id'))['id__max']
  next_id = next_id + 1 if next_id else START_ID
  l = Lexeme()
  l.id = next_id
  l.homonym_number = 1  # always 1, alas
  l.part_of_speech = PartOfSpeech.objects.get(symbol='subst')
  l.owner_vocabulary = owner
  l.status = 'cand'
  l.responsible = request.user
  l.deleted = True  # "simple and brilliant": hidden until actually saved
  l.save()
  LexemeAssociation.objects.create(lexeme=l, vocabulary=owner)
  return {'id': l.id}
375 | + | |
@ajax(method='get', login_required=True)
def homonym_count(request, entry, lexeme_id):
  """Count other non-deleted lexemes sharing the given entry.

  Fix: removed the unused 'vocabs = visible_vocabularies(...)' local --
  the count was never restricted to them.
  """
  lexemes = Lexeme.objects.filter(
    deleted=False, entry=entry).exclude(pk=lexeme_id)
  return {'count': lexemes.count()}
382 | + | |
@ajax(method='post')
def save_columns(request, col_model, col_names, remap):
  """Persist the user's grid column layout in the session."""
  session = request.session
  session['colModel'] = col_model
  session['colNames'] = col_names
  session['remap'] = remap
  return {}
389 | + | |
390 | + | |
@ajax(method='post')
def add_vocabulary(request, name):
  """Create a vocabulary and make the requesting user its manager."""
  if not request.user.has_perm('dictionary.manage_vocabulary'):
    raise AjaxError('access denied')
  new_vocab = Vocabulary.objects.create(id=name)
  new_vocab.managers.add(request.user) #add
  return {}
398 | + | |
@ajax(method='get')
def vocabulary_privileges(request, name):
  """Return pk lists of the managers, viewers and editors of a vocabulary."""
  if not request.user.has_perm('dictionary.manage_vocabulary'):
    raise AjaxError('access denied')
  vocab = Vocabulary.objects.get(id=name)
  return {
    'managers': list(vocab.managers.values_list('pk', flat=True)),
    'viewers': list(vocab.all_viewers().values_list('pk', flat=True)),
    'editors': list(vocab.editors.values_list('pk', flat=True)),
  }
409 | + | |
@ajax(method='post')
def set_vocabulary_privilege(request, name, id, type, set):
  """Grant or revoke a privilege ('view'/'change'/'manage') on a vocabulary
  for the user with the given pk, depending on the boolean 'set'."""
  if not request.user.has_perm('dictionary.manage_vocabulary'):
    raise AjaxError('access denied')
  vocab = Vocabulary.objects.get(id=name)
  user = User.objects.get(pk=id)
  if type == 'view':
    related_manager = vocab.viewers
  elif type == 'change':
    related_manager = vocab.editors
  elif type == 'manage':
    related_manager = vocab.managers
  else:
    # Fix: the original fell through with 'related_manager' unbound
    # (NameError); reject unknown privilege types explicitly.
    raise AjaxError('unknown privilege type')
  if set:
    related_manager.add(user) #add
  else:
    related_manager.remove(user) #add
  return {}
... | ... |
dictionary/ajax_pattern_view.py
0 โ 100644
1 | +++ a/dictionary/ajax_pattern_view.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +from django.utils.simplejson import dumps as json_encode | |
4 | +from dictionary.models import Pattern, Ending, BaseFormLabel, PatternType, \ | |
5 | + visible_vocabularies, Qualifier | |
6 | +from dictionary.forms import PatternEditForm, PatternTypeForm, QualifierForm | |
7 | +from dictionary.ajax_jqgrid import JqGridAjax | |
8 | +from accounts.models import filtering_mode | |
9 | +from common.decorators import render, ajax, AjaxError | |
10 | + | |
11 | + | |
class PatternGrid(JqGridAjax):
  """jqGrid backend for the pattern table."""
  model = Pattern
  search_field = 'name'

  @staticmethod
  def filter_special_case(filter, lookup, negated, queryset):
    # Only UI-name -> ORM-lookup renames; nothing is handled here fully.
    translated = {
      'part_of_speech': 'type__lexical_class__symbol',
      'type': 'type__entry',
    }.get(filter['field'], filter['field'])
    return False, translated, {}, queryset

  @staticmethod
  def apply_mask(patterns, mask, sort_rules):
    # Pattern names are matched by prefix regardless of sort order.
    return patterns.filter(name__istartswith=mask)

  @staticmethod
  def response_row(pattern):
    # Row cells: name, pattern type entry, lexical class symbol.
    return [
      pattern.name,
      pattern.type.entry,
      pattern.type.lexical_class.symbol,
    ]
36 | + | |
37 | + | |
# Query: index of the row with the given id under the current sort order.
@ajax(method='get')
def find_id(request, id, sort_rules, mask, filters=None):
  mode = filtering_mode(request.user)
  return PatternGrid.find_id(mode, id, filters, sort_rules, mask)
43 | + | |
44 | + | |
# Query: id and row index of the first row (under the given sort order)
# whose name starts with mask; 'selected_id' < 0 when there is none.
@ajax(method='get')
def get_location(request, sort_rules, filters=None, mask=''):
  mode = filtering_mode(request.user)
  return PatternGrid.get_location(mode, sort_rules, filters, mask)
52 | + | |
53 | + | |
@ajax(method='get')
def get_patterns(request, page, rows, sort_rules, filters=None, mask='',
                 target_page=0, totalrows=0):
    """Serve one page of the pattern grid, remembering the view settings."""
    # Persist the current sort/filter state so it survives page reloads.
    request.session['pattern-sort_rules'] = json_encode(sort_rules)
    request.session['pattern-filters'] = json_encode(filters)
    # An explicit target page / row count (e.g. "jump to row") wins.
    if target_page:
        page = target_page
    if totalrows:
        rows = totalrows
    return PatternGrid.get_page(
        filtering_mode(request.user), page, rows, sort_rules, filters, mask)
63 | + | |
64 | + | |
@render()
@ajax(method='get', encode_result=False)
def pattern_edit_form(request, id):
    """Build the template context for the pattern edit dialog."""
    if not request.user.has_perm('dictionary.view_pattern'):
        raise AjaxError('access denied')
    pattern = Pattern.objects.get(pk=id)
    editable = request.user.has_perm('dictionary.change_pattern')
    # Qualifiers can only be edited in vocabularies the user may write to.
    rw_vocabs = visible_vocabularies(request.user).filter(editors=request.user)
    # Group endings (paired with their qualifier forms) by base form label.
    ending_groups = dict(
        (bfl, []) for bfl in pattern.type.base_form_labels())
    for ending in pattern.endings.order_by('base_form_label', 'index'):
        qualifier_form = QualifierForm(
            qualified=ending, rw_vocabularies=rw_vocabs, editable=editable,
            prefix='end%s' % ending.pk)
        ending_groups[ending.base_form_label].append((ending, qualifier_form))
    return {
        'form': PatternEditForm(instance=pattern, editable=editable),
        'type_form': PatternTypeForm(instance=pattern.type, editable=editable),
        'id': pattern.pk,
        'editable': editable,
        'ending_groups': ending_groups,
    }
89 | + | |
90 | + | |
# could be passed through js_vars instead...
@render('ending_row.html')
@ajax(method='get', encode_result=False)
def new_ending_row(request):
    """Render a blank ending row; 'NUM' is substituted client-side."""
    rw_vocabs = visible_vocabularies(request.user).filter(editors=request.user)
    return {
        'ending': {'string': '', 'id': 'add-NUM'},
        'editable': True,
        'form': QualifierForm(rw_vocabularies=rw_vocabs, prefix='add-NUM'),
    }
100 | + | |
101 | + | |
@ajax(method='post')
def update_pattern(request, form_data):
    """Save edits to a pattern: its fields, its type and its ending list.

    `form_data` is a serializeArray-style list of {name, value} dicts.
    Raises AjaxError on missing permission or invalid submitted data.
    """
    if not request.user.has_perm('dictionary.change_pattern'):
        raise AjaxError('access denied')
    form_dict = dict((x['name'], x['value']) for x in form_data)
    p = Pattern.objects.get(pk=form_dict['id'])
    form = PatternEditForm(data=form_dict, instance=p)
    type_form = PatternTypeForm(data=form_dict)
    if not type_form.is_valid():
        raise AjaxError('invalid data')
    type_qs = PatternType.objects.filter(
        entry=type_form.cleaned_data['entry'],
        lexical_class=type_form.cleaned_data['lexical_class'])
    # The (entry, lexical class) pair must identify exactly one type.
    if not form.is_valid() or len(type_qs) != 1:
        raise AjaxError('invalid data')
    form.save()
    p.type = type_qs[0]
    p.save()
    # Remove endings the user deleted in the dialog.
    for ending_pk in form_dict['deleted']:
        Ending.objects.get(pk=int(ending_pk)).delete()
    rw_vocabs = visible_vocabularies(request.user).filter(editors=request.user)
    # Only qualifiers from writable vocabularies may be toggled.
    qualifiers = Qualifier.objects.filter(vocabulary__in=rw_vocabs)
    for bfl_endings in form_dict['ending_list']:
        bfl = BaseFormLabel.objects.get(entry=bfl_endings['base_form_label'])
        # enumerate(..., 1): endings are 1-indexed by list position.
        for index, ending_data in enumerate(bfl_endings['endings'], 1):
            quals = set(int(q) for q in ending_data['qualifiers'])
            if ending_data['id'] == 'add':
                # Newly added row: create the ending at its list position.
                ending = Ending.objects.create(
                    pattern=p, base_form_label=bfl,
                    string=ending_data['string'], index=index)
            else:
                ending = Ending.objects.get(pk=int(ending_data['id']))
                ending.index = index
                ending.string = ending_data['string']
                ending.save()
            for qualifier in qualifiers:
                qualifier.set_for(ending, qualifier.pk in quals)
    return {}
146 | + | |
147 | + | |
@ajax(method='post')
def save_columns(request, col_model, col_names, remap):
    """Persist the user's grid column layout in the session."""
    session = request.session
    session['pattern-colModel'] = col_model
    session['pattern-colNames'] = col_names
    session['pattern-remap'] = remap
    return {}
... | ... |
dictionary/ajax_prompter.py
0 → 100644
1 | +++ a/dictionary/ajax_prompter.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +from common.decorators import render, ajax, AjaxError | |
4 | +from common.util import suffix, cut_end | |
5 | +from dictionary.models import Lexeme, LexemeInflectionPattern, Pattern, \ | |
6 | + InflectionCharacteristic, prepare_table, visible_vocabularies, get_root, \ | |
7 | + ClassificationValue, Classification | |
8 | +from dictionary.pattern_blacklist import blacklist | |
9 | + | |
10 | +commonness = Classification.objects.get(name=u'pospolitoลฤ') | |
11 | + | |
12 | +LIP_ROWS = 10 | |
13 | + | |
def make_list(vocabularies, entry, pos, ic, cvs, bl_check):
    """Suggest up to LIP_ROWS example inflections for a new lexeme.

    Scans existing lexemes whose entries share the longest possible
    suffix with `entry` and picks one example per distinct
    (pattern, inflection characteristic) pair that can actually derive
    `entry` (i.e. yields a valid root).

    vocabularies -- vocabularies visible to the user
    entry        -- entry of the lexeme being created
    pos          -- part of speech symbol
    ic           -- inflection characteristic to restrict to, or None
    cvs          -- classification values to restrict to, or None
    bl_check     -- if true, skip blacklisted patterns
    """
    lips = LexemeInflectionPattern.objects.filter(
        lexeme__deleted=False, lexeme__vocabularies__in=vocabularies).distinct()
    lips = lips.filter(
        lexeme__part_of_speech__symbol=pos).exclude(lexeme__status='cand')
    if ic:
        lips = lips.filter(inflection_characteristic=ic)
    if cvs:
        lips = lips.filter(lexeme__classificationvalue__in=cvs)
    else:
        lips = lips.exclude(lexeme__classificationvalue=None)
    lips = lips.order_by('lexeme__entry')
    feature_sets = set()     # (pattern, ic) pairs already suggested
    bad_inflections = set()  # (pattern, ic) pairs known not to fit `entry`
    chosen_lips = []
    # Try progressively shorter suffixes of the new entry.
    for suf_len in xrange(len(entry), 0, -1):
        suf = suffix(entry, suf_len)
        suf_lips = lips.filter(lexeme__entry__endswith=suf)
        if suf_len < len(entry):
            # Entries matching a longer suffix were handled in earlier turns.
            suf1 = suffix(entry, suf_len + 1)
            suf_lips = suf_lips.exclude(lexeme__entry__endswith=suf1)
        for p0, ic0 in bad_inflections:
            # BUG FIX: the .exclude() result was previously discarded
            # (querysets are immutable), so known-bad pairs were rescanned.
            suf_lips = suf_lips.exclude(
                pattern=p0, inflection_characteristic=ic0)
        for p0, ic0 in feature_sets:
            suf_lips = suf_lips.exclude(
                pattern=p0, inflection_characteristic=ic0)
        for lip in suf_lips:
            p = lip.pattern
            if p.name in blacklist and bl_check:
                continue
            if ic:
                l_ic = ic
            else:
                l_ic = lip.inflection_characteristic
            if (p, l_ic) in bad_inflections or (p, l_ic) in feature_sets:
                continue
            if cvs:
                l_cvs = lip.lexeme.classification_values(commonness) & cvs
            else:
                l_cvs = lip.lexeme.classification_values(commonness)
            if get_root(entry, pos, p, l_ic) is not None:
                # Present the example with its root/ending split marked.
                l_root = lip.lexeme.get_root(p, l_ic)
                l_end = lip.lexeme.entry[len(l_root):]
                l_entry = u'%sยท%s' % (l_root, l_end)
                if len(l_end) < len(suf):
                    suf = suffix(l_entry, suf_len + 1)
                chosen_lips.append((lip, l_cvs, cut_end(l_entry, suf), suf))
                feature_sets.add((p, l_ic))
                if len(chosen_lips) == LIP_ROWS:
                    break
            else:
                # This (pattern, ic) cannot derive `entry`; never retry it.
                bad_inflections.add((p, l_ic))
        if len(chosen_lips) == LIP_ROWS:
            break
    return chosen_lips
70 | + | |
@render()
@ajax(method='get', encode_result=False)
def prompter_list(request, entry, pos_id, ic_id, commonness_ids,
                  ic_check, cv_check, bl_check):
    """Return prompter suggestions for the lexeme-entry dialog."""
    vocabularies = visible_vocabularies(request.user)
    ic = InflectionCharacteristic.objects.get(pk=ic_id) if ic_check else None
    if cv_check:
        cvs = ClassificationValue.objects.filter(pk__in=commonness_ids)
    else:
        cvs = None
    # We assume symbol == pk here.
    lips = make_list(vocabularies, entry, pos_id, ic, cvs, bl_check)
    return {'lips': lips}
... | ... |
dictionary/forms.py
0 → 100644
1 | +++ a/dictionary/forms.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +from django.forms import * | |
4 | +from django.contrib.auth.models import User | |
5 | +from common.forms import hidden_id | |
6 | +from dictionary.models import Lexeme, LexemeInflectionPattern, Ending, \ | |
7 | + InflectionCharacteristic, Pattern, PatternType, Classification, \ | |
8 | + ClassificationValue, Vocabulary, CrossReference, \ | |
9 | + Qualifier, QualifierExclusionClass, Variant | |
10 | + | |
def disable_field(field):
    """Render `field` read-only: text widgets stay selectable, others grey out."""
    attrs = field.widget.attrs
    if type(field.widget) in (Textarea, TextInput):
        attrs['readonly'] = 'readonly'
    else:
        attrs['disabled'] = 'disabled'
16 | + | |
# after StackOverflow, http://stackoverflow.com/questions/3737116/
class QualifiersField(ModelMultipleChoiceField):
    """Multiple-choice qualifier field with choices grouped by vocabulary."""

    def __init__(self, **kwargs):
        # The real queryset is supplied later via set_qualifiers().
        super(QualifiersField, self).__init__(Qualifier.objects.none(), **kwargs)
        self.to_field_name = None #?

    def set_qualifiers(self, vocabularies, qualified=None):
        """Restrict choices to live qualifiers of `vocabularies`.

        If `qualified` (an object with a `qualifiers` m2m) is given, its
        current qualifiers become the initial selection.
        """
        qualifiers = Qualifier.objects.filter(
            vocabulary__in=vocabularies, deleted=False)
        if not qualifiers:
            # Nothing selectable at all -- grey the field out.
            disable_field(self)
            return
        self.queryset = qualifiers.order_by('vocabulary', 'label')
        if qualified and qualified.pk:
            self.initial = qualified.qualifiers.filter(
                vocabulary__in=vocabularies)

        # Build grouped choices: [(vocabulary_id, [(pk, label), ...]), ...]
        self.choices = []
        current_vocabulary = None
        group = []
        for qualifier in self.queryset.select_related():
            if current_vocabulary is None:
                current_vocabulary = qualifier.vocabulary
            if current_vocabulary != qualifier.vocabulary:
                self.choices.append((current_vocabulary.id, group))
                current_vocabulary = qualifier.vocabulary
                group = []
            group.append((qualifier.pk, qualifier.label))
        self.choices.append((current_vocabulary.id, group))
48 | + | |
class QualifierField(ModelChoiceField):
    """Single-choice qualifier field with choices grouped by vocabulary."""

    def __init__(self, **kwargs):
        qualifiers = Qualifier.objects.filter(deleted=False).order_by(
            'vocabulary', 'label')
        super(QualifierField, self).__init__(qualifiers, **kwargs)
        if not qualifiers:
            # No qualifiers exist anywhere in the system.
            disable_field(self)
            return

        # Build grouped choices: [(vocabulary_id, [(pk, label), ...]), ...]
        self.choices = []
        current_vocabulary = None
        group = []
        for qualifier in qualifiers.select_related():
            if current_vocabulary is None:
                current_vocabulary = qualifier.vocabulary
            if current_vocabulary != qualifier.vocabulary:
                self.choices.append((current_vocabulary.id, group))
                current_vocabulary = qualifier.vocabulary
                group = []
            group.append((qualifier.pk, qualifier.label))
        self.choices.append((current_vocabulary.id, group))
71 | + | |
class LexemeEditForm(ModelForm):
    """Main lexeme edit form.

    Model fields plus vocabulary/qualifier/owner selectors whose
    querysets depend on the requesting user's permissions.
    """
    vocabularies = ModelMultipleChoiceField(
        queryset=Vocabulary.objects.none(), required=False)
    qualifiers = QualifiersField(required=False)
    new_owner = ModelChoiceField(
        queryset=Vocabulary.objects.none(), required=False)

    def __init__(self, vocabularies, rw_vocabularies, all_rw_vocabularies,
                 editable=True, containing=None, **kwargs):
        # vocabularies        -- vocabularies the user may attach the lexeme to
        # rw_vocabularies     -- vocabularies whose qualifiers the user may edit
        # all_rw_vocabularies -- candidate owner vocabularies
        # containing          -- vocabularies currently containing the lexeme
        super(LexemeEditForm, self).__init__(**kwargs)
        instance = getattr(self, 'instance', None)
        self.fields['entry'].required = True
        self.fields['new_owner'].queryset = all_rw_vocabularies
        if vocabularies is not None:
            self.fields['vocabularies'].queryset = vocabularies
            self.fields['vocabularies'].initial = containing
            owner_choices = all_rw_vocabularies.values_list('pk', 'id')
            self.fields['new_owner'].choices = owner_choices
            if instance and instance.id:
                self.fields['new_owner'].initial = instance.owner_vocabulary
            if vocabularies.count() == 0:
                disable_field(self.fields['vocabularies'])
        self.fields['qualifiers'].set_qualifiers(rw_vocabularies, self.instance)
        if not editable:
            # Read-only mode: lock every field except the qualifier and
            # vocabulary selectors, which stay editable.
            if instance and instance.id:
                for name, field in self.fields.iteritems():
                    if name not in ('qualifiers', 'vocabularies'):
                        disable_field(field)

    def clean_comment(self):
        # Normalize Windows line endings pasted into the comment box.
        data = self.cleaned_data['comment']
        return data.replace('\r', '')

    class Meta:
        model = Lexeme
        fields = (
            'part_of_speech',
            'entry',
            'entry_suffix',
            'status',
            'gloss',
            'comment',
        )
        widgets = {
            'comment': Textarea(attrs={'cols': 40, 'rows': 5}),
            'entry_suffix': TextInput(attrs={'size': 5}),
            'gloss': TextInput(attrs={'size': 40})
        }
120 | + | |
class LIPEditForm(ModelForm):
    """Edit form for a single lexeme inflection pattern (LIP) row."""
    pattern_name = CharField(widget=TextInput(
        attrs={'class': 'pattern', 'size': '10'}), label=u'Wzรณr')
    qualifiers = QualifiersField(
        required=False, label=u'Kwal.',
        widget = SelectMultiple(attrs={'class':'lip-qualifiers'}))

    def __init__(self, part_of_speech, rw_vocabularies, editable=True, index=None,
                 **kwargs):
        super(LIPEditForm, self).__init__(**kwargs)
        # Only inflection characteristics of this part of speech apply.
        ic_choices = InflectionCharacteristic.objects.filter(
            part_of_speech=part_of_speech).values_list('pk', 'entry')
        self.fields['inflection_characteristic'].choices = ic_choices
        instance = getattr(self, 'instance', None)
        self.fields['qualifiers'].set_qualifiers(rw_vocabularies, instance)
        if instance and instance.id:
            self.fields['pattern_name'].initial = instance.pattern.name
        if not editable:
            # Read-only mode: lock everything except the qualifier selector.
            for name, field in self.fields.iteritems():
                if name != 'qualifiers':
                    disable_field(field)
        # Position of this LIP within the lexeme's list (applied on save).
        self.index = index

    def clean_pattern_name(self):
        # Resolve the typed pattern name to a Pattern and cross-check its
        # lexical class against the chosen inflection characteristic.
        cleaned_data = self.cleaned_data
        try:
            pattern = Pattern.objects.get(name=cleaned_data['pattern_name'])
        except Pattern.DoesNotExist:
            raise ValidationError(u'Niepoprawna nazwa wzoru.')
        ic = cleaned_data['inflection_characteristic']
        if pattern.type.lexical_class != ic.part_of_speech.lexical_class:
            raise ValidationError(u'Wzรณr nie pasuje do czฤลci mowy.')
        # Stash the resolved Pattern for save().
        cleaned_data['pattern'] = pattern
        return cleaned_data['pattern_name']

    def save(self, *args, **kwargs):
        # Attach the resolved pattern (and index, if given) before saving;
        # the extra lip.save() persists attributes set outside Meta.fields.
        lip = self.instance
        lip.pattern = self.cleaned_data['pattern']
        if self.index:
            lip.index = self.index
        super(LIPEditForm, self).save(*args, **kwargs)
        lip.save()
        return lip

    class Meta:
        model = LexemeInflectionPattern
        fields = ['inflection_characteristic']
        widgets = {
            'inflection_characteristic': Select(
                attrs={'class': 'inflection-characteristic'}),
        }
172 | + | |
class ClassificationForm(Form):
    """Select values of a single classification for a lexeme."""
    values = TypedMultipleChoiceField(
        choices=[], empty_value=None, coerce=int,
        widget = SelectMultiple(attrs={'class': 'classification-values'}))

    def __init__(self, classification, editable=True, values=None,
                 **kwargs):
        super(ClassificationForm, self).__init__(**kwargs)
        values_field = self.fields['values']
        values_field.label = classification.name
        values_field.choices = classification.make_choices()
        if values:
            values_field.initial = [v.pk for v in values]
        if not editable:
            for _name, field in self.fields.iteritems():
                disable_field(field)
188 | + | |
class AddClassificationForm(ModelForm):
    """Form for creating a new classification.

    `det` is a hidden marker identifying which form was submitted.
    """
    det = hidden_id('classification_form')

    class Meta:
        model = Classification
194 | + | |
class AddClassificationValueForm(ModelForm):
    """Form for adding a value node to a classification tree."""
    det = hidden_id('value_form')

    def __init__(self, classification=None, **kwargs):
        super(AddClassificationValueForm, self).__init__(**kwargs)
        if classification is not None:
            # Attach under any existing node, or at the top level.
            choices = [('', u'<brak>')]
            choices += classification.make_choices()
            self.fields['parent_node'].choices = choices
            self.fields['classification'].initial = classification.pk

    class Meta:
        model = ClassificationValue
        fields = ['label', 'parent_node', 'classification']
        widgets = {
            'classification': HiddenInput(),
        }
211 | + | |
class AddExclusionClassForm(ModelForm):
    """Form for creating a qualifier exclusion class in a vocabulary.

    `det` is a hidden marker identifying which form was submitted.
    """
    det = hidden_id('add_exclusion_class')

    def __init__(self, vocabulary=None, **kwargs):
        # The owning vocabulary is fixed and passed via a hidden widget.
        super(AddExclusionClassForm, self).__init__(**kwargs)
        self.fields['vocabulary'].initial = vocabulary

    class Meta:
        model = QualifierExclusionClass
        widgets = {
            'vocabulary': HiddenInput(),
        }
224 | + | |
class ExclusionClassForm(Form):
    """Pick an exclusion class (used for the removal action)."""
    det = hidden_id('remove_exclusion_class')
    ec = ModelChoiceField(
        queryset=QualifierExclusionClass.objects.all(), label=u'')  # gap: unrestricted by default

    def __init__(self, queryset=None, **kwargs):
        # Narrow the selectable classes when a queryset is provided.
        super(ExclusionClassForm, self).__init__(**kwargs)
        if queryset:
            self.fields['ec'].queryset = queryset
234 | + | |
class AddQualifierForm(ModelForm):
    """Form for creating a qualifier within a vocabulary.

    Exclusion class choices are limited to classes of the same
    vocabulary, and clean() re-validates that constraint server-side.
    """
    det = hidden_id('add_qualifier')

    def __init__(self, vocabulary=None, **kwargs):
        super(AddQualifierForm, self).__init__(**kwargs)
        self.fields['vocabulary'].initial = vocabulary
        if vocabulary:
            self.fields['exclusion_class'].queryset = (
                QualifierExclusionClass.objects
                .filter(vocabulary=vocabulary).distinct())

    def clean(self):
        cleaned_data = self.cleaned_data
        # Use .get(): a field that failed its own validation is absent
        # from cleaned_data, and indexing it would raise KeyError (500).
        ec = cleaned_data.get('exclusion_class')
        if ec:
            ec_vocab = ec.vocabulary
            if ec_vocab and ec_vocab != cleaned_data.get('vocabulary'):
                raise ValidationError(u'Klasa wykluczania niezgodna ze sลownikiem.')
        return cleaned_data

    class Meta:
        model = Qualifier
        fields = ['label', 'exclusion_class', 'vocabulary']
        widgets = {
            'vocabulary': HiddenInput(),
        }
260 | + | |
class ChangeClassForm(ModelForm):
    """Move a qualifier to another exclusion class of its vocabulary.

    clean() rejects the change when any lexeme, LIP or ending already
    carries both this qualifier and another member of the target class,
    which would violate mutual exclusion.
    """

    def __init__(self, **kwargs):
        super(ChangeClassForm, self).__init__(**kwargs)
        exclusion_classes = (
            self.instance.vocabulary.qualifierexclusionclass_set.all())
        self.fields['exclusion_class'].queryset = exclusion_classes

    def clean(self):
        # Use .get(): a field that failed its own validation is absent
        # from cleaned_data, and indexing it would raise KeyError (500).
        ec = self.cleaned_data.get('exclusion_class')
        q = self.instance
        if ec:
            ec_qualifiers = ec.qualifier_set.all()
            # Check every kind of object that can carry qualifiers.
            colliding = (
                Lexeme.objects.filter(deleted=False, qualifiers=q),
                LexemeInflectionPattern.objects.filter(
                    deleted=False, qualifiers=q),
                Ending.objects.filter(qualifiers=q),
            )
            for qs in colliding:
                if qs.filter(qualifiers__in=ec_qualifiers):
                    raise ValidationError(u'Kolizja w klasie wykluczania')
        return self.cleaned_data

    class Meta:
        model = Qualifier
        fields = ['exclusion_class']
287 | + | |
class CrossReferenceForm(ModelForm):
    """Form for adding a cross-reference from `lexeme` to another lexeme."""

    def __init__(self, lexeme=None, pos=None, **kwargs):
        super(CrossReferenceForm, self).__init__(**kwargs)
        if lexeme is not None:
            # Only reference types applicable to this part of speech.
            crtypes = lexeme.part_of_speech.crtype_to.all()
            type_choices = crtypes.values_list('pk', 'desc')
            self.fields['type'].widget.choices = type_choices
            self.fields['from_lexeme'].initial = lexeme.pk

    def clean(self):
        # Cross-check both lexemes' parts of speech against the type.
        cleaned_data = self.cleaned_data
        from_lexeme = cleaned_data.get('from_lexeme')
        to_lexeme = cleaned_data.get('to_lexeme')
        cr_type = cleaned_data.get('type')
        if from_lexeme and to_lexeme and cr_type:
            from_pos = from_lexeme.part_of_speech
            to_pos = to_lexeme.part_of_speech
            if cr_type.from_pos != from_pos or cr_type.to_pos != to_pos:
                raise ValidationError(u'Nieprawidลowa czฤลฤ mowy w odsyลaczu.')
        return cleaned_data

    class Meta:
        model = CrossReference
        fields = ['type', 'from_lexeme', 'to_lexeme']
        widgets = {
            'type': Select(),
            'from_lexeme': HiddenInput(),
            'to_lexeme': TextInput(attrs={'size': 8}),
        }
317 | + | |
class VocabularyForm(Form):
    """Attach a lexeme to an additional vocabulary."""
    det = hidden_id('vocabulary_form')
    vocabulary = ModelChoiceField(
        label=u"dodaj sลownik", queryset=Vocabulary.objects.none())
    classification = ModelChoiceField(
        widget=HiddenInput(), queryset=Classification.objects.all())

    def __init__(self, queryset, classification=None, **kwargs):
        # queryset -- vocabularies the current user may choose from
        super(VocabularyForm, self).__init__(**kwargs)
        self.fields['vocabulary'].queryset = queryset
        if classification:
            self.fields['classification'].initial = classification
330 | + | |
class PatternEditForm(ModelForm):
    """Edit basic pattern fields; fully read-only when not `editable`."""

    def __init__(self, editable=True, **kwargs):
        super(PatternEditForm, self).__init__(**kwargs)
        if editable:
            return
        # Only lock fields for an existing (saved) pattern.
        instance = getattr(self, 'instance', None)
        if not (instance and instance.id):
            return
        for _name, field in self.fields.iteritems():
            disable_field(field)

    class Meta:
        model = Pattern
        fields = ('name', 'example', 'basic_form_ending', 'status', 'comment')
343 | + | |
class PatternTypeForm(ModelForm):
    """Edit a pattern's type; fully read-only when not `editable`."""

    def __init__(self, editable=True, **kwargs):
        super(PatternTypeForm, self).__init__(**kwargs)
        if editable:
            return
        # Only lock fields for an existing (saved) type.
        instance = getattr(self, 'instance', None)
        if not (instance and instance.id):
            return
        for _name, field in self.fields.iteritems():
            disable_field(field)

    class Meta:
        model = PatternType
        fields = ('lexical_class', 'entry')
356 | + | |
class QualifierForm(Form):
    """Standalone qualifier selector for an optionally-qualified object."""
    qualifiers = QualifiersField(
        required=False, label=u'Kwalifikatory',
        widget = SelectMultiple(attrs={'class':'qualifiers'}))

    def __init__(self, rw_vocabularies, qualified=None, editable=True,
                 **kwargs):
        super(QualifierForm, self).__init__(**kwargs)
        field = self.fields['qualifiers']
        field.set_qualifiers(rw_vocabularies, qualified)
        if not editable:
            disable_field(field)
368 | + | |
class UserFilterForm(Form):
    """Filter history/statistics views by user."""
    user = ModelChoiceField(
        queryset=User.objects.order_by('username'), label=u'Uลผytkownik')
372 | + | |
class TimeFromForm(Form):
    """Lower bound of a date range (dd.mm.yyyy, rendered with a datepicker)."""
    time_from = DateField(input_formats=['%d.%m.%Y'], label=u'Od',
        widget=DateInput(attrs={'class': 'datepicker'}))
376 | + | |
class TimeToForm(Form):
    """Upper bound of a date range (dd.mm.yyyy, rendered with a datepicker)."""
    time_to = DateField(input_formats=['%d.%m.%Y'], label=u'Do',
        widget=DateInput(attrs={'class': 'datepicker'}))
380 | + | |
class ExportForm(Form):
    """Options for exporting lexemes: vocabularies, variant and tag flags."""
    vocabs = ModelMultipleChoiceField(
        queryset=Vocabulary.objects.all(), label=u'sลowniki')
    antivocabs = ModelMultipleChoiceField(
        queryset=Vocabulary.objects.all(), label=u'antysลowniki', required=False)
    variant = ModelChoiceField(queryset=Variant.objects.all(), label=u'wariant')
    refl = BooleanField(label=u'tagi z "refl"', required=False)
    #excluding_qualifiers = QualifiersField(
    #    required=False, label=u'kwal. wykluczajฤ ce')

    def __init__(self, **kwargs):
        super(ExportForm, self).__init__(**kwargs)
        #self.fields['excluding_qualifiers'].set_qualifiers(Vocabulary.objects.all())
394 | + | |
395 | +class MagicQualifierForm(Form): | |
396 | + qualifier = QualifierField(label=u'kwal. leksemu') | |
397 | + regex = CharField(label=u'wzorzec tagu', required=False) | |
... | ... |
dictionary/history.py
0 → 100644
1 | +++ a/dictionary/history.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +from django.core.exceptions import ObjectDoesNotExist | |
4 | +from dictionary.models import Lexeme, LexemeInflectionPattern, \ | |
5 | + Pattern, InflectionCharacteristic, ClassificationValue, Qualifier, History, \ | |
6 | + CrossReferenceType | |
7 | + | |
# Maps (legacy table name, column name) pairs to the human-readable
# attribute labels shown in the lexeme history view.
attribute_translation = {
    # Lexeme
    ('leksemy', 'haslo'): u'hasลo',
    ('leksemy', 'haslosuf'): u'sufiks hasลa',
    ('leksemy', 'glosa'): u'glosa',
    ('leksemy', 'pos'): u'czฤลฤ mowy',
    ('leksemy', 'slownik'): u'sลownik wลaลciciel',
    ('leksemy', 'status'): u'status',
    ('leksemy', 'komentarz'): u'komentarz',
    # Inflection (LIP)
    ('odmieniasie', 'oind'): u'kolejnoลฤ',
    ('odmieniasie', 'charfl'): u'charakterystyka fleksyjna',
    ('odmieniasie', 'w_id'): u'wzรณr',
}
22 | + | |
# ugly -- used for displaying deleted lexemes
# (same content as attribute_translation, but as an ordered list of triples)
attribute_translation_list = [
    # Lexeme
    ('leksemy', 'haslo', u'hasลo'),
    ('leksemy', 'haslosuf', u'sufiks hasลa'),
    ('leksemy', 'glosa', u'glosa'),
    ('leksemy', 'pos', u'czฤลฤ mowy'),
    ('leksemy', 'slownik', u'sลownik wลaลciciel'),
    ('leksemy', 'status', u'status'),
    ('leksemy', 'komentarz', u'komentarz'),
    # Inflection (LIP)
    ('odmieniasie', 'oind', u'kolejnoลฤ'),
    ('odmieniasie', 'charfl', u'charakterystyka fleksyjna'),
    ('odmieniasie', 'w_id', u'wzรณr'),
]
38 | + | |
# Display order of lexeme attributes in the history table.
lexeme_attribute_order = [
    u'hasลo',
    u'sufiks hasลa',
    u'glosa',
    u'czฤลฤ mowy',
    u'sลownik wลaลciciel',
    u'status',
    u'komentarz',
    #u'kwalifikator',
    #u'klasyfikacja',
    #u'slownik',
    #u'odsyลacz',
]
52 | + | |
def get_lexeme_attr(attr, lexeme):
    """Return the lexeme's value for a legacy history column name.

    Returns None for column names that are not tracked here.
    """
    # Legacy column name -> attribute path on the Lexeme object.
    paths = {
        'haslo': ('entry',),
        'haslosuf': ('entry_suffix',),
        'glosa': ('gloss',),
        'pos': ('part_of_speech', 'symbol'),
        'slownik': ('owner_vocabulary', 'id'),
        'status': ('status',),
        'komentarz': ('comment',),
    }
    if attr not in paths:
        return None
    value = lexeme
    for step in paths[attr]:
        value = getattr(value, step)
    return value
68 | + | |
def get_lip_attr(attr, lip):
    """Return the LIP's value for a legacy history column name.

    Returns None for column names that are not tracked here.
    """
    if attr == 'oind':
        return lip.index
    if attr == 'charfl':
        return lip.inflection_characteristic.entry
    if attr == 'w_id':
        return lip.pattern.name
    return None
76 | + | |
# Display order of LIP attributes in the history table.
lip_attribute_order = [
    u'kolejnoลฤ',
    u'charakterystyka fleksyjna',
    u'wzรณr',
    #u'kwalifikator',
]
83 | + | |
def prepare_value(table, column, value):
    """Translate a raw history cell value into its display form.

    Foreign-key ids are resolved to labels/names; unknown ids and
    unparsable values come back as None.  Other columns pass through.
    """
    try:
        if column == 'qualifier_id':
            return Qualifier.objects.get(pk=int(value)).label
        if column == 'status':
            return dict(Lexeme.STATUS_CHOICES).get(value)
        if column == 'charfl':
            return InflectionCharacteristic.objects.get(pk=int(value)).entry
        if column == 'w_id':
            return Pattern.objects.get(pk=int(value)).name
        if column == 'classificationvalue_id':
            cv = ClassificationValue.objects.get(pk=int(value))
            return (cv.label, cv.classification.name)
        return value
    except (ObjectDoesNotExist, ValueError):
        # Stale id or non-numeric value -- nothing sensible to show.
        return None
104 | + | |
def transaction_table(transaction_data):
    """Build a change table for one transaction on one lexeme.

    ``transaction_data`` is a sequence of History records all belonging to a
    single transaction.  Returns a pair ``(rows, lip_tables)``:

    * ``rows`` — list of ``(attribute-label, (before, after))`` pairs for
      lexeme-level changes (attributes, qualifiers, classifications,
      vocabularies, cross-references);
    * ``lip_tables`` — list of ``(header, [(label, (before, after)), ...])``
      per inflection pattern ("lip" = LexemeInflectionPattern).

    If the transaction deleted the lexeme, delegates to
    deleted_lexeme_table() instead.
    """
    transaction_dict = {}
    lips = {}
    classifications = []
    qualifiers = []
    vocabs = []
    crs = {}
    lip_qualifiers = {}
    deleted=False
    for item1 in transaction_data:
        table = item1.table_name
        column = item1.column_name
        # 'leksemy.usuniety' flipping false -> true marks lexeme deletion
        if (table == 'leksemy' and column == 'usuniety' and
            item1.new_value == 'true' and item1.old_value == 'false'):
            deleted = True
        attr = None
        if (table, column) in attribute_translation:
            attr = attribute_translation[(table, column)]
        before, after = tuple(
            prepare_value(table, column, v)
            for v in (item1.old_value, item1.new_value))
        # an INSERT has no meaningful "before", a DELETE no meaningful "after"
        before_after = (
            before if item1.operation != 'INSERT' else None,
            after if item1.operation != 'DELETE' else None)
        if attr:
            if table not in ('odmieniasie', 'kwalifikatory_odmieniasiow'):
                transaction_dict[attr] = before_after
            elif table == 'odmieniasie':
                # group inflection-pattern changes per row (per lip)
                if item1.row_id not in lips:
                    lips[item1.row_id] = {}
                lips[item1.row_id][attr] = before_after
        if column == 'classificationvalue_id':
            classifications.append(before_after)
        if table == 'kwalifikatory_leksemow' and column == 'qualifier_id':
            qualifiers.append(before_after)
        if table == 'leksemy_w_slownikach' and column == 'slownik':
            vocabs.append(before_after)
        if table == 'odsylacze':
            # cross-references: collect all changed columns per row
            if item1.row_id not in crs:
                crs[item1.row_id] = {}
            crs[item1.row_id][column] = before_after
        if table == 'kwalifikatory_odmieniasiow':
            if item1.row_id not in lip_qualifiers:
                lip_qualifiers[item1.row_id] = {}
            if column:  # old DELETE records will not be visible
                lip_qualifiers[item1.row_id][column] = before_after
    if deleted:
        # the whole lexeme was removed; render everything as (old, None)
        return deleted_lexeme_table(transaction_data[0].lexeme)
    rows = []
    # emit attributes in the canonical display order
    for attr in lexeme_attribute_order:
        if attr in transaction_dict:
            rows.append((attr, transaction_dict[attr]))
    for before_after in qualifiers:
        rows.append((u'kwalifikator', before_after))
    for before, after in classifications:
        # prepared classification values are (label, classification-name)
        # pairs; unpack whichever side is present
        if before:
            classification_name = before[1]
            before = before[0]
        if after:
            classification_name = after[1]
            after = after[0]
        attr = u'klasyfikacja: %s' % classification_name
        rows.append((attr, (before, after)))
    for before_after in vocabs:
        rows.append((u'słownik', before_after))
    for cr_data in crs.itervalues():
        attr = u'odsyłacz'
        before_after = []
        # render the "before" (i=0) and "after" (i=1) target of the
        # cross-reference as "<type> <lexeme> <inflection characteristics>"
        for i in (0, 1):
            try:
                if cr_data['l_id_do'][i] is not None:
                    l = Lexeme.objects.get(pk=int(cr_data['l_id_do'][i]))
                    ic = l.lip_data()['inflection_characteristics']
                    cr_type = CrossReferenceType.objects.get(pk=cr_data['typods_id'][i])
                    prepared = ' '.join((cr_type.symbol, unicode(l), ic))
                else:
                    prepared = None
            except Lexeme.DoesNotExist:
                prepared = None
            before_after.append(prepared)
        rows.append((attr, tuple(before_after)))
    lip_dict = {}
    # order each lip's attribute changes canonically
    for lip_id, lip_data in lips.iteritems():
        lip_dict[lip_id] = []
        for attr in lip_attribute_order:
            if attr in lip_data:
                lip_dict[lip_id].append((attr, lip_data[attr]))
    for q_data in lip_qualifiers.itervalues():
        if q_data:  # old DELETEs...
            attr = u'kwalifikator'
            lip_data = q_data['lexemeinflectionpattern_id']
            # the lip id comes from whichever side of the change is set
            lip_id = int(lip_data[0] or lip_data[1])
            # NOTE(review): 'lip_id not in lips' is used as a proxy for
            # 'lip_id not in lip_dict' (they coincide after the loop above)
            if lip_id not in lips:
                lip_dict[lip_id] = []
            lip_dict[lip_id].append((attr, q_data['qualifier_id']))
    lip_tables = []
    for lip_id, lip_data in lip_dict.iteritems():
        lip = LexemeInflectionPattern.objects.filter(pk=lip_id)
        if lip:
            lip = lip[0]
            header = '%s/%s' % (lip.inflection_characteristic.entry, lip.pattern.name)
        else:
            # the lip no longer exists: reconstruct its header from the
            # DELETE records in the history
            records = History.objects.filter(
                operation='DELETE', table_name='odmieniasie', row_id=lip_id)
            try:
                ic_id = records.get(column_name='charfl').old_value
                pattern_id = records.get(column_name='w_id').old_value
                ic = InflectionCharacteristic.objects.get(pk=ic_id)
                pattern = Pattern.objects.get(pk=pattern_id)
                # trailing space intended: '(usunięta)' is appended below
                header = u'%s/%s ' % (ic.entry, pattern.name)
            except ObjectDoesNotExist:  # old DELETEs...
                header = ''
            header += u'(usunięta)'
        lip_tables.append((header, lip_data))
    return rows, lip_tables
220 | + | |
def deleted_lexeme_table(lexeme):
    """Build the change table for a lexeme deleted in this transaction.

    Every attribute, qualifier, classification value, vocabulary and
    cross-reference of *lexeme* is rendered as a ``(label, (old, None))``
    row, since deletion wipes all values.  Returns ``(rows, lip_tables)``
    in the same format as transaction_table().
    """
    rows = []
    for table, column, attr in attribute_translation_list:
        if table == 'leksemy':
            rows.append(
                (attr,
                 (prepare_value(table, column, get_lexeme_attr(column, lexeme)),
                  None)))
    for q in lexeme.qualifiers.all():
        rows.append((u'kwalifikator', (q.label, None)))
    for classification in lexeme.owner_vocabulary.classifications.all():
        attr = u'klasyfikacja: %s' % classification.name
        cvs = lexeme.classification_values(classification)
        for cv in cvs:
            rows.append((attr, (cv.label, None)))
    for vocab in lexeme.vocabularies.all():
        rows.append((u'słownik', (vocab.id, None)))
    for cr in lexeme.refs_to.all():
        attr = u'odsyłacz'
        # Bug fix: cr.to_lexeme is a Lexeme instance, and ' '.join() on a
        # non-string raised TypeError; render it as text, the same way
        # transaction_table() does with unicode(l).
        rows.append((attr, (
            ' '.join([cr.type.symbol, unicode(cr.to_lexeme),
                      cr.to_lexeme.lip_data()['inflection_characteristics']]), None)))
    lip_tables = []
    for lip in lexeme.lexemeinflectionpattern_set.all():
        lip_data = []
        for table, column, attr in attribute_translation_list:
            if table == 'odmieniasie':
                lip_data.append((attr, (get_lip_attr(column, lip), None)))
        for q in lip.qualifiers.all():
            attr = u'kwalifikator'
            lip_data.append((attr, (q.label, None)))
        header = '%s/%s' % (lip.inflection_characteristic.entry, lip.pattern.name)
        lip_tables.append((header, lip_data))
    return rows, lip_tables
255 | + | |
def lexeme_table(transaction_data, last_tb):
    """Render one transaction's History records as a template context dict."""
    rows, lip_tables = transaction_table(transaction_data)
    first_record = transaction_data[0]
    try:
        lexeme = first_record.lexeme
    except Lexeme.DoesNotExist:
        lexeme = u'(usunięty)'  # currently impossible
    return {
        'rows': rows,
        'lip_tables': lip_tables,
        'user': first_record.user,
        'date': last_tb,
        'lexeme': lexeme,
    }
269 | + | |
def lexeme_table_from_row(row):
    """Look up the History records identified by *row* and render them.

    *row* must carry 'lexeme' and 'user' primary keys and the
    'transaction_began' timestamp that together identify one transaction.
    """
    began = row['transaction_began']
    records = History.objects.filter(
        lexeme__pk=row['lexeme'],
        user__pk=row['user'],
        transaction_began=began)
    return lexeme_table(records, began)
275 | + | |
def lexeme_tables(history_items):
    """Yield one rendered table per transaction found in *history_items*.

    Records are assumed to arrive ordered so that one transaction's
    records are adjacent; a change of ``transaction_began`` starts a new
    group.
    """
    group = []
    current_tb = None
    for record in history_items:
        if current_tb and record.transaction_began != current_tb:
            yield lexeme_table(group, current_tb)
            group = []
        current_tb = record.transaction_began
        group.append(record)
    if group:
        yield lexeme_table(group, current_tb)
... | ... |
dictionary/management/__init__.py
0 โ 100644
1 | +++ a/dictionary/management/__init__.py | |
... | ... |
dictionary/management/commands/__init__.py
0 โ 100644
1 | +++ a/dictionary/management/commands/__init__.py | |
... | ... |
dictionary/management/commands/check_morfologik.py
0 โ 100644
1 | +++ a/dictionary/management/commands/check_morfologik.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +import sys | |
4 | +from django.core.management.base import BaseCommand, CommandError | |
5 | +from common.util import debug | |
6 | +from dictionary.models import Lexeme | |
7 | + | |
class Command(BaseCommand):
    # Management command: compare the forms generated by the dictionary
    # against a Morfologik dump; see check_morfologik() below.
    args = '<symbol części mowy> <nazwa pliku wejściowego>'
    help = 'Check Morfologik import'

    def handle(self, lc_sym, input_file, **options):
        """Run the check: *lc_sym* is a part-of-speech symbol, *input_file* a path."""
        check_morfologik(lc_sym, input_file)
14 | + | |
# there are no q* forms at the moment anyway...
# Maps (base-form label, characteristic tag) to a '|'-separated list of
# suffixes; a verb's surface forms are generated by appending each suffix
# to the base form carrying that label (see get_forms()).
v_forms = {
    ('1', 'allq'): u'',
    ('1', 'all'): u'cie|my|sz',
    ('2', 'all'): u'',
    ('3', 'all'): u'',
    ('3', 'ndk'): u'c',
    ('3', 'pact'): u'ca|cą|ce|cego|cej|cemu|cy|cych|cym|cymi',
    ('4', 'all'): u'|że|my|myż|cie|cież',
    ('5', 'allq'): u'',
    ('6', 'all'): u'|by|byś|bym',
    ("6'", 'dk'): u'szy',
    ('7', 'all'): u'em|eś',
    ('8', 'allq'): u'o|oby',
    ('8', 'all'): u'a|aby|abyś|abym|am|aś|obym|obyś|om|oś|'
                  u'y|yby|ybyście|ybyśmy|yście|yśmy',
    ('9', 'all'): u'i|iby|ibyście|ibyśmy|iście|iśmy',
    ('10', 'all'): u'o',
    ('10', 'ppas'): u'a|ą|e|ego|ej|emu|y|ych|ym|ymi',
    ('11', 'ger'): u'ie|ia|iach|iami|iem|iom|iu',
    ('11pg', 'ger'): u'',
    ('12', 'ppas'): u'',
}
38 | + | |
def get_forms(l, lc_sym):
    """Collect the full surface-form set of lexeme *l*.

    For non-verbs this is the precomputed lexemeform set (for adjectives
    additionally merged with the negated adjective's genuinely new forms).
    For verbs (*lc_sym* == 'v') the forms are generated from pattern
    endings and the v_forms suffix table, guided by cross-references
    (participles, gerund) and the inflection characteristic.
    Returns a (possibly empty) set of unicode strings.
    """
    if lc_sym != 'v':
        l_forms = set(l.lexemeform_set.values_list('form', flat=True))
        if lc_sym == 'adj':
            neg = l.refs_to.filter(type__symbol='adjnie', to_lexeme__deleted=False)
            if neg:
                # merge the negated adjective's forms, skipping those that
                # merely duplicate forms the base lexeme already supplies
                l_neg = neg[0].to_lexeme
                neg_forms = l_neg.lexemeform_set.values_list('form', flat=True)
                # NOTE(review): the regex '^0|3\+$' parses as (^0)|(3\+$);
                # confirm it is not meant to be '^(0|3\+)$'
                added_forms = l_neg.all_forms(label_filter='^0|3\+$')
                l_forms |= set(form for form in neg_forms if form not in added_forms)
    else:
        # decide which v_forms rows apply to this verb
        tags = ['allq']
        if l.refs_to.filter(type__symbol='verpact', to_lexeme__deleted=False):
            tags.append('pact')
        if l.refs_to.filter(type__symbol='verppas', to_lexeme__deleted=False):
            tags.append('ppas')
        if l.refs_to.filter(type__symbol='verger', to_lexeme__deleted=False):
            tags.append('ger')
        lips = l.lexemeinflectionpattern_set.all()
        if not lips:
            return set()
        ic = lips[0].inflection_characteristic.entry
        # characteristics starting with 'q' are restricted ("quasi") verbs
        q = ic.startswith('q')
        if not q:
            tags.append('all')
        if 'ndk' in ic:
            tags.append('ndk')
        # 'dk' that is not just part of 'ndk'
        if 'dk' in ic.replace('ndk', ''):
            tags.append('dk')
        # group root+ending strings by their base-form label
        base_forms = {}
        for lip in l.lexemeinflectionpattern_set.all():
            for ending in lip.pattern.endings.all():
                bfl = ending.base_form_label.entry
                if bfl not in base_forms:
                    base_forms[bfl] = set()
                base_forms[bfl].add(lip.root + ending.string)
        l_forms = set()
        for (label, tag), suffixes in v_forms.iteritems():
            if tag in tags and label in base_forms:
                new_forms = set()
                for base_form in base_forms[label]:
                    new_forms |= set(base_form + suffix for suffix in suffixes.split('|'))
                l_forms |= new_forms
                # participles and gerunds also have 'nie'-negated variants
                if tag in ('pact', 'ppas', 'ger'):
                    l_forms |= set('nie' + form for form in new_forms)
    return l_forms
85 | + | |
def check_forms(lc_sym, forms):
    """Compare one dump lexeme's form list against Morfologik lexemes.

    *forms* is the list of surface forms of one lexeme from the dump,
    entry (base form) first.  Looks up candidate lexemes in the
    Morfologik vocabulary; if no candidate matches exactly (modulo the
    accepted exceptions: missing depr forms, extended singularia tantum,
    adjective label-0 forms), prints the entry followed by each
    candidate's missing|extra forms.
    """
    entry = forms[0]
    forms = set(forms)
    morf_lexemes = Lexeme.objects.filter(
        deleted=False, lexemeassociation__vocabulary__id='Morfologik', entry=entry,
        part_of_speech__lexical_class__symbol=lc_sym)
    # for/else: 'break' means a candidate matched; the else-branch reports
    # the mismatch when no candidate did
    for l in morf_lexemes:
        if l.part_of_speech.lexical_class.symbol != lc_sym:
            continue
        l_forms = get_forms(l, lc_sym)
        if l_forms == set():
            break  # a non-match is of no interest here
        if forms == l_forms:
            break
        if lc_sym == 'subst':
            # masculine-personal nouns flagged in the comment may lack
            # depreciative forms
            if l.lexemeinflectionpattern_set.filter(
                inflection_characteristic__entry='m1') and u'formę depr' in l.comment:
                if forms | l.all_forms(label_filter='^pl:nom$') == l_forms:
                    break
            # singulare tantum extended to plural, or foreign-owned lexeme:
            # accept a match on singular forms only
            if (u'rozszerzone singulare' in l.comment
                or u'rozszerzyć sgtant' in l.comment
                or l.owner_vocabulary.id != 'Morfologik'):
                if forms == l.all_forms(label_filter='^sg:'):
                    break
        elif lc_sym == 'adj':
            #if u' -o' in l.comment:
            if forms | l.all_forms(label_filter='^0$') == l_forms:
                break
    else:  # none matched
        print entry.encode('utf-8')
        for l in morf_lexemes:
            l_forms = get_forms(l, lc_sym)
            missing = ', '.join(forms - l_forms)
            extra = ', '.join(l_forms - forms)
            print ('%s|%s' % (missing, extra)).encode('utf-8')
121 | + | |
def check_morfologik(lc_sym, input_file):
    """Read a Morfologik dump and check each blank-line-separated lexeme.

    Each line of the UTF-8 file is 'form<TAB>tag'; a blank line ends one
    lexeme and triggers check_forms() on the collected forms.
    """
    # renamed the handle from 'file' (shadowed the builtin) to 'f'
    with open(input_file) as f:
        forms = []
        for line in f:
            line = line.decode('utf-8').rstrip('\n')
            if line == '':
                check_forms(lc_sym, forms)
                forms = []
            else:
                form, tag = line.split('\t')
                forms.append(form)
        # Fix: also check the trailing group when the file does not end
        # with a blank line (it used to be silently dropped).
        if forms:
            check_forms(lc_sym, forms)
... | ... |
dictionary/management/commands/create_classifications.py
0 โ 100644
1 | +++ a/dictionary/management/commands/create_classifications.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +from django.core.management.base import BaseCommand, CommandError | |
4 | +from dictionary.models import Classification, ClassificationValue | |
5 | + | |
class Command(BaseCommand):
    # Management command: wipe and recreate the default classification
    # tree; see create_classifications() below.
    args = 'none'
    help = 'Creates default classifications'

    def handle(self, **options):
        """Entry point; takes no positional arguments."""
        create_classifications()
12 | + | |
13 | + | |
def get_permission(codename):
    """Return the auth Permission with the given codename."""
    # Fix: Permission is not imported at module level in this file (the
    # module imports only Classification and ClassificationValue), so
    # calling this helper raised NameError.  It appears copied from
    # create_groups.py; import locally to keep it working standalone.
    from django.contrib.auth.models import Permission
    return Permission.objects.get(codename=codename)
16 | + | |
def create_classifications():
    """Recreate the default 'pospolitość' (commonness) classification tree.

    WARNING: deletes ALL existing classifications and their values first.
    The tree distinguishes common names from proper names, with proper
    names further split (person -> first name / surname / ..., place,
    organization).
    """
    Classification.objects.all().delete()
    ClassificationValue.objects.all().delete()
    commonness = Classification.objects.create(name=u'pospolitość')

    # NOTE(review): 'common_name' is never referenced afterwards in the
    # visible code; kept for symmetry with 'proper_name'.
    common_name = ClassificationValue.objects.create(
        label=u'pospolita', parent_node=None, classification=commonness)
    proper_name = ClassificationValue.objects.create(
        label=u'własna', parent_node=None, classification=commonness)
    person = ClassificationValue.objects.create(
        label=u'osoba', parent_node=proper_name, classification=commonness)
    ClassificationValue.objects.create(
        label=u'imię', parent_node=person, classification=commonness)
    ClassificationValue.objects.create(
        label=u'nazwisko', parent_node=person, classification=commonness)
    ClassificationValue.objects.create(
        label=u'patronimicum', parent_node=person, classification=commonness)
    ClassificationValue.objects.create(
        label=u'przydomek', parent_node=person, classification=commonness)
    ClassificationValue.objects.create(
        label=u'pseudonim', parent_node=person, classification=commonness)
    ClassificationValue.objects.create(
        label=u'geograficzna', parent_node=proper_name, classification=commonness)
    ClassificationValue.objects.create(
        label=u'organizacja', parent_node=proper_name, classification=commonness)
... | ... |
dictionary/management/commands/create_forms.py
0 โ 100644
1 | +++ a/dictionary/management/commands/create_forms.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +import sys | |
4 | +from django.db import connection, transaction | |
5 | +from django.core.management.base import BaseCommand, CommandError | |
6 | +from common.util import debug | |
7 | +from dictionary.models import Lexeme | |
8 | + | |
class Command(BaseCommand):
    # Management command: rebuild the helper table of surface forms used
    # for filtering; see create_forms() below.
    args = 'none'
    help = 'Creates forms for filtering'

    def handle(self, **options):
        """Entry point; takes no positional arguments."""
        create_forms()
15 | + | |
16 | + | |
def create_forms():
    """Rebuild the dictionary_lexemeform table used for filtering.

    Deletes every row, then re-inserts prefiks||rdzen||zak||sufiks
    surface strings for variant '1' of all lexeme / inflection-pattern
    combinations, entirely in raw SQL.  Uses the old (pre-Django-1.6)
    manual transaction API.
    """
    cursor = connection.cursor()

    transaction.commit_unless_managed()
    transaction.enter_transaction_management()
    transaction.managed(True)

    cursor.execute('''delete from dictionary_lexemeform''')
    # optionally those with generated forms could be filtered out?
    select_query = '''select l.id as lexeme_id, prefiks||rdzen||zak||sufiks as form
    from leksemy l
    join odmieniasie o on l.id = o.l_id
    join charfle ch on ch.id = o.charfl
    join wzory w on (o.w_id = w.id)
    join szablony_tabel s on (
      w.typ=s.wtyp
      and o.charfl=s.charfl)
    join klatki k on k.st_id = s.id
    join zakonczenia z on (o.w_id=z.w_id and k.efobaz=z.efobaz)
    where wariant='1' '''  # which variant?
    cursor.execute('''insert into dictionary_lexemeform (lexeme_id, form)
    (%s)''' % select_query)

    transaction.commit()
    transaction.leave_transaction_management()
... | ... |
dictionary/management/commands/create_groups.py
0 โ 100644
1 | +++ a/dictionary/management/commands/create_groups.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +from django.contrib.auth.models import Permission, Group | |
4 | +from django.core.management.base import BaseCommand, CommandError | |
5 | + | |
class Command(BaseCommand):
    # Management command: wipe and recreate the default permission
    # groups; see create_groups() below.
    args = 'none'
    help = 'Creates default groups'

    def handle(self, **options):
        """Entry point; takes no positional arguments."""
        create_groups()
12 | + | |
13 | + | |
def get_permission(codename):
    """Return the auth Permission with the given codename."""
    return Permission.objects.get(codename=codename)
16 | + | |
def create_groups():
    """Recreate the default permission groups and their permission sets.

    WARNING: deletes ALL existing groups first.  Roles, roughly:
    Obserwator (read-only), Leksykograf (edits lexemes),
    Superleksykograf (also sets priorities), Wzorzysta (edits patterns),
    Wydawca (vocabulary/user management), Administrator (user admin).
    """
    Group.objects.all().delete()
    observers = Group.objects.create(name=u'Obserwator')
    lexicographers = Group.objects.create(name=u'Leksykograf')
    superlexicographers = Group.objects.create(name=u'Superleksykograf')
    patternmakers = Group.objects.create(name=u'Wzorzysta')
    managers = Group.objects.create(name=u'Wydawca')
    admins = Group.objects.create(name=u'Administrator')

    observers.permissions.add(get_permission('view_lexeme'))
    #observers.permissions.add(get_permission('add_comment'))
    observers.permissions.add(get_permission('view_pattern'))
    lexicographers.permissions.add(get_permission('view_lexeme'))
    lexicographers.permissions.add(get_permission('change_lexeme'))
    lexicographers.permissions.add(get_permission('view_pattern'))
    superlexicographers.permissions.add(get_permission('view_lexeme'))
    superlexicographers.permissions.add(get_permission('change_lexeme'))
    superlexicographers.permissions.add(get_permission('lexeme_priority'))
    superlexicographers.permissions.add(get_permission('view_pattern'))
    # may make changes in the Error bank
    # may make changes in the Labels tab
    patternmakers.permissions.add(get_permission('view_pattern'))
    patternmakers.permissions.add(get_permission('change_pattern'))
    managers.permissions.add(get_permission('manage_vocabulary'))
    managers.permissions.add(get_permission('view_lexeme'))
    managers.permissions.add(get_permission('view_all_lexemes'))
    managers.permissions.add(get_permission('view_pattern'))
    managers.permissions.add(get_permission('add_user'))
    # managing user roles:
    managers.permissions.add(get_permission('change_group'))
    admins.permissions.add(get_permission('add_user'))
    admins.permissions.add(get_permission('change_group'))
    admins.permissions.add(get_permission('create_admin'))
... | ... |
dictionary/management/commands/create_users.py
0 โ 100644
1 | +++ a/dictionary/management/commands/create_users.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +from django.contrib.auth.models import Permission, Group, User | |
4 | +from django.core.management.base import BaseCommand, CommandError | |
5 | +from dictionary.models import Vocabulary | |
6 | +from accounts.models import UserSettings | |
7 | + | |
class Command(BaseCommand):
    # Management command: wipe and recreate the default demo users;
    # see create_users() below.
    args = 'none'
    help = 'Creates default users'

    def handle(self, **options):
        """Entry point; takes no positional arguments."""
        create_users()
14 | + | |
15 | + | |
def create_users():
    """Create the default demo users and wire up their vocabularies.

    WARNING: deletes ALL existing users first.  Each user gets a
    UserSettings row, group membership, and visible / editable / managed
    vocabularies for either the SGJP side or the NZM (Morfologik) side.
    Requires the groups (create_groups) and the named vocabularies to
    exist already.
    """
    # NOTE(review): 'observers' is fetched but never assigned to any user
    # in the visible code.
    observers = Group.objects.get(name=u'Obserwator')
    lexicographers = Group.objects.get(name=u'Leksykograf')
    superlexicographers = Group.objects.get(name=u'Superleksykograf')
    patternmakers = Group.objects.get(name=u'Wzorzysta')
    managers = Group.objects.get(name=u'Wydawca')
    #admins = Group.objects.get(name=u'Administrator')

    SGJP = Vocabulary.objects.get(id='SGJP')
    SJPDor = Vocabulary.objects.get(id='SJPDor')
    zmiotki = Vocabulary.objects.get(id='zmiotki')
    WSJP = Vocabulary.objects.get(id='WSJP')
    Morfologik, created = Vocabulary.objects.get_or_create(id='Morfologik')
    PoliMorf, created = Vocabulary.objects.get_or_create(id='PoliMorf')

    User.objects.all().delete()
    sgjp = User.objects.create_user('sgjp', 'sgjp@example.com', 'sgjp')
    nzm = User.objects.create_user('nzm', 'nzm@example.com', 'nzm')
    redsgjp = User.objects.create_user('redsgjp', 'redsgjp@example.com', 'redsgjp')
    rednzm = User.objects.create_user('rednzm', 'rednzm@example.com', 'rednzm')
    supersgjp = User.objects.create_user('supersgjp', 'supersgjp@example.com', 'supersgjp')
    supernzm = User.objects.create_user('supernzm', 'supernzm@example.com', 'supernzm')
    wzornik = User.objects.create_user('wzornik', 'wzornik@example.com', 'wzornik')

    users = (sgjp, nzm, redsgjp, rednzm, supersgjp, supernzm, wzornik)
    for user in users:
        UserSettings.objects.create(user=user)

    sgjp_vocabs = (SGJP, SJPDor, zmiotki, WSJP)
    nzm_vocabs = (zmiotki, Morfologik, PoliMorf)

    # managers ('Wydawca') administer their side's vocabularies
    sgjp.groups.add(managers)
    for vocab in sgjp_vocabs:
        sgjp.managed_vocabularies.add(vocab)
    nzm.groups.add(managers)
    for vocab in nzm_vocabs:
        nzm.managed_vocabularies.add(vocab)

    # lexicographers edit their own side and can view selected others
    redsgjp.groups.add(lexicographers)
    for vocab in sgjp_vocabs:
        redsgjp.visible_vocabularies.add(vocab)
        redsgjp.editable_vocabularies.add(vocab)
    redsgjp.visible_vocabularies.add(PoliMorf)

    rednzm.groups.add(lexicographers)
    for vocab in nzm_vocabs:
        rednzm.visible_vocabularies.add(vocab)
        rednzm.editable_vocabularies.add(vocab)
    rednzm.visible_vocabularies.add(SGJP)
    rednzm.visible_vocabularies.add(WSJP)
    rednzm.visible_vocabularies.add(SJPDor)

    supersgjp.groups.add(superlexicographers)
    for vocab in sgjp_vocabs:
        supersgjp.visible_vocabularies.add(vocab)
        supersgjp.editable_vocabularies.add(vocab)
    supersgjp.visible_vocabularies.add(PoliMorf)

    supernzm.groups.add(superlexicographers)
    for vocab in nzm_vocabs:
        supernzm.visible_vocabularies.add(vocab)
        supernzm.editable_vocabularies.add(vocab)
    supernzm.visible_vocabularies.add(SGJP)
    supernzm.visible_vocabularies.add(WSJP)
    supernzm.visible_vocabularies.add(SJPDor)

    # the pattern editor is also a superlexicographer on the SGJP side
    wzornik.groups.add(superlexicographers)
    for vocab in sgjp_vocabs:
        wzornik.visible_vocabularies.add(vocab)
        wzornik.editable_vocabularies.add(vocab)
    wzornik.visible_vocabularies.add(PoliMorf)
    wzornik.groups.add(patternmakers)
88 | + | |
if __name__ == '__main__':
    # Allow running outside manage.py (requires configured Django settings).
    create_users()
... | ... |
dictionary/management/commands/disamb.py
0 โ 100755
1 | +++ a/dictionary/management/commands/disamb.py | |
1 | +#!/usr/bin/env python | |
2 | +#-*- coding:utf-8 -*- | |
3 | + | |
4 | +import sys | |
5 | +from django.db import connection, transaction | |
6 | +from django.core.management.base import BaseCommand, CommandError | |
7 | +from common.util import debug | |
8 | +from dictionary.util import expand_tag | |
9 | + | |
class Command(BaseCommand):
    # Management command: disambiguate Morfologik tags for lexemes read
    # from a dump file; see parse_file() below.
    args = '<nazwa pliku wejściowego>'
    help = 'Dezambiguacja tagów z Morfologika'

    def handle(self, input_file, **options):
        """Entry point: *input_file* is the dump path."""
        parse_file(input_file)
16 | + | |
17 | + | |
# NOTE(review): this dict is shadowed by the function base_tag() defined
# below; only the commented-out statistics code later in this file treats
# base_tag as a dict.
base_tag = {}

# Maps a base part of speech to the tag prefixes that forms of such a
# lexeme may carry (e.g. a verb lexeme also owns gerund and participle
# forms).
form_categories = {
    'adj': ['adj'],
    'verb': ['subst:ger', 'verb', 'pact', 'ppas', 'pant', 'pcon'],
    'subst': ['subst'],
    'adv': ['adv'],
}
26 | + | |
def all_tags(form, filter_base=False):
    """Expand every tag of *form* via expand_tag().

    When *filter_base* is true, only tags whose parallel entry in
    form['base'] is truthy are expanded.  Returns one flat list.
    """
    expanded = []
    for tag, is_base in zip(form['tags'], form['base']):
        if is_base or not filter_base:
            expanded.extend(expand_tag(tag))
    return expanded
33 | + | |
def base_tag(tag):
    """Decide whether *tag* may describe a lexeme's base (dictionary) form.

    Gerunds, reflexive markers and participles never qualify; nominals
    (num/subst/adj) need 'nom' in the tag; verbs need 'inf' (or the
    special 'winien').  Any other part of speech is accepted.
    """
    if tag.startswith('subst:ger'):
        return False
    pos = tag.partition(':')[0]
    if pos in ('refl', 'pact', 'ppas'):
        return False
    if pos in ('num', 'subst', 'adj'):
        return 'nom' in tag
    if pos == 'verb':
        return 'inf' in tag or 'winien' in tag
    return True
45 | + | |
# forms: list of {form: the form, tags: list of possible tags}?
def disamb_lexeme(forms):
    """Disambiguate Morfologik tags for a single lexeme.

    *forms* is the list of form dicts of one lexeme, base form first;
    each dict has 'form', 'tags', and a parallel 'base' flag list.
    Determines the part of speech from the base form, filters every
    form's tags down to the compatible categories (retagging verb
    substantives/adjectives as gerunds/participles), and for nouns
    additionally disambiguates gender.

    Returns (new_forms, other_lexemes), where other_lexemes holds forms
    split off as separate lexemes (adverbs hiding among adjective
    forms); returns (None, None) when the lexeme cannot be
    disambiguated (diagnostics go to stderr).
    """
    base_form_tags = (tag for tag, base
                      in zip(forms[0]['tags'], forms[0]['base'])
                      if base and base_tag(tag))
    possible_pos = set(tag.split(':')[0] for tag in base_form_tags)
    entry = forms[0]['form']
    if forms[0]['tags'][0] == '-':
        # the analyzer did not recognize the base form at all
        print >>sys.stderr, (u'%s: nierozpoznana forma podstawowa' %
                             entry).encode('utf-8')
        return None, None
    if len(possible_pos) == 1:
        pos = list(possible_pos)[0]
    else:
        # ambiguous part of speech
        print >>sys.stderr, (u'%s: niejednoznaczna część mowy' %
                             entry).encode('utf-8')
        return None, None
    cats = form_categories.get(pos, [pos])
    new_forms = []
    other_lexemes = []
    for form in forms:
        new_tags = []
        for tag, base in zip(form['tags'], form['base']):
            for cat in cats:
                if tag.startswith(cat) and base:
                    new_tags.append(tag)
                    break
            else:  # no category matched
                if pos == 'verb':
                    # retag substantives as gerunds, and adjectival tags as
                    # participles (active vs passive decided by the ending)
                    tags = tag.split('+')
                    fixed = None
                    if tags[0].startswith('subst'):
                        fixed = ['subst:ger' + tag[len('subst'):] for tag in tags]
                    elif tags[0].startswith('adj'):
                        start = None
                        if any(form['form'].endswith(end) for end in [u'cy', u'ca', u'ce',
                            u'cą', u'cego', u'cej', u'cemu', u'cym', u'cych', u'cymi']):
                            start = 'pact'
                        else:
                            start = 'ppas'
                        if start:
                            fixed = [start + tag[len('adj'):] for tag in tags]
                    if fixed:
                        new_tags.append('+'.join(fixed))
        if new_tags:
            form['tags'] = new_tags
            new_forms.append(form)
        else:
            if pos == 'adj' and 'adv' in (tag.split(':')[0] for tag in form['tags']):
                # an adverb among adjective forms becomes its own lexeme
                form['tags'] = [tag for tag in form['tags'] if tag.startswith('adv')]
                other_lexemes.append([form])
                # find the shortest 'nie'*k that is no longer a prefix of
                # the base entry; presumably the derived adverb must carry
                # that prefix -- TODO confirm the intent
                nie_prefix = ''
                while forms[0]['form'].startswith(nie_prefix):
                    nie_prefix += 'nie'
                if not form['form'].startswith(nie_prefix):
                    print >>sys.stderr, (u'advadj: %s %s' % (form['form'], forms[0]['form'])).encode('utf-8')
            else:
                # unclassifiable form: keep it, tagged as irregular
                form['tags'] = [pos + ':irreg']
                new_forms.append(form)
            #print >>sys.stderr, (u'odrzucona forma: %s %s [%s]' %
            #  (form['form'], ', '.join(form['tags']), pos)).encode('utf-8')

#  if len(new_forms[0]['tags']) == 1:
#    if pos not in base_tag:
#      base_tag[pos] = set()
#    base_tag[pos].add(new_forms[0]['tags'][0])

    if pos == 'subst':
        # disambiguate gender... (examples: niezguła, sezamek)
        genders = set(tag[-1][0] for tag in all_tags(new_forms[0]))
        if len(genders) == 1:
            gender = list(genders)[0]
        else:
            # try base-form tags only
            genders = set(tag[-1][0] for tag in all_tags(new_forms[0], filter_base=True))
            if len(genders) == 1:
                gender = list(genders)[0]
            else:
                # keep only genders consistent with EVERY form
                good_genders = []
                for gender in genders:
                    for form in new_forms:
                        for tag in all_tags(form):
                            if tag[-1][0] in (gender, 'i'):
                                break  # found
                        else:  # missing
                            break
                    else:  # ok
                        good_genders.append(gender)
                if len(good_genders) != 1:
                    # gender cannot be disambiguated
                    print >> sys.stderr, (u'%s: nie da się ujednoznacznić rodzaju' %
                                          entry).encode('utf-8')
                    return None, None
                gender = good_genders[0]
        # gender known: sieve each form's tags by it
        for form in new_forms:
            good_tags = []
            for tag in all_tags(form):
                if tag[-1][0] in (gender, 'i') or (tag[-1] == 'depr' and gender == 'm'):
                    good_tags.append(':'.join(tag))
            if good_tags:
                form['tags'] = good_tags
            else:
                form['tags'] = [pos + ':irreg']
    return new_forms, other_lexemes
149 | + | |
def print_forms(forms):
    """Print every form with each of its tags, tab-separated, UTF-8."""
    for form in forms:
        for tag in form['tags']:
            print ('%s\t%s' % (form['form'], tag)).encode('utf-8')
154 | ||
155 | + | |
def parse_file(path):
    """Parse a Morfologik analysis dump and print disambiguated tags.

    Lexemes in the UTF-8 input are separated by blank lines; each data
    line is 'form<TAB>base<TAB>tag'.  A line starting with 'Processed '
    terminates the input.  Consecutive identical forms are merged,
    collecting all their tags plus a parallel 'base' flag saying whether
    the analysis points back to the lexeme's own entry.
    """
    with open(path) as file:
        forms = []
        for line in file:
            line = line.decode('utf-8').rstrip('\n')
            if line.startswith('Processed '):
                break
            if line == '':
                # blank line: one lexeme complete, disambiguate and print
                disambiguated, other_lexemes = disamb_lexeme(forms)
                if disambiguated:
                    print_forms(disambiguated)
                    for l in other_lexemes:
                        print_forms(l)
                forms = []
            else:
                form, base, tag = line.split('\t')
                if not forms:
                    # first line of a lexeme carries its entry
                    entry = form
                if not forms or form != forms[-1]['form']:
                    forms.append({'form': form, 'base': [], 'tags': []})
                forms[-1]['tags'].append(tag)
                # flag analyses pointing at the entry itself (plus two
                # special cases) as "base"
                forms[-1]['base'].append(
                    base == entry or tag == 'adv:comp'
                    or (tag.startswith('subst:pl') and 'nom' in tag))  # ugly...
# for pos, tags in base_tag.iteritems():
#   print ('base %s: %s' % (pos, ', '.join(tags))).encode('utf-8')
182 | + | |
if __name__ == '__main__':
    # Allow running as a standalone script on a dump file.
    # (The redundant local 'import sys' was removed: sys is already
    # imported at module level.)
    parse_file(sys.argv[1])
... | ... |
dictionary/management/commands/edit_words.py
0 โ 100644
1 | +++ a/dictionary/management/commands/edit_words.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +import sys | |
4 | +from django.core.management.base import BaseCommand, CommandError | |
5 | +from common.util import debug | |
6 | +from accounts.util import bot_history | |
7 | +from dictionary.models import Lexeme, Vocabulary, set_vocabulary | |
8 | + | |
class Command(BaseCommand):
    # Management command: apply edit_words() to a word list read from a
    # file given on the command line.
    args = '<input file name>'
    help = 'Does something with a list of words'

    def handle(self, filename, **options):
        """Open *filename* and hand the stream to edit_words()."""
        # Fix: the file handle was opened and never closed; a context
        # manager releases it even if edit_words() raises.
        with open(filename) as input_file:
            edit_words(input_file)
15 | + | |
def edit_words(input_file):
    """Attach the 'antyMorfeusz' vocabulary to lexemes listed in *input_file*.

    Each line of the UTF-8 stream is one entry; every lexeme with that
    entry gets the vocabulary set.  Changes are attributed to the bot
    via bot_history().
    """
    bot_history()
    anty = Vocabulary.objects.get(id='antyMorfeusz')
    for raw_line in input_file:
        entry = raw_line.decode('utf-8').strip()
        for lexeme in Lexeme.objects.filter(entry=entry):
            set_vocabulary(lexeme, anty, True)
            # lexeme.save()
... | ... |
dictionary/management/commands/export_lexemes.py
0 โ 100644
1 | +++ a/dictionary/management/commands/export_lexemes.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +import sys | |
4 | +from django.core.management.base import BaseCommand, CommandError | |
5 | +from django.db import connection | |
6 | +from common.util import debug, flatten | |
7 | +from dictionary.models import CrossReferenceType | |
8 | + | |
class Command(BaseCommand):
    # Management command: dump lexemes to stdout (by default the PoliMorf
    # vocabulary in the Morfeusz variant); see export_lexemes() below.
    args = 'none'
    help = 'Temporary export script'

    def handle(self, **options):
        """Entry point; takes no positional arguments."""
        export_lexemes()
15 | + | |
16 | + | |
# Adjectives that also have a short predicative form (e.g. 'ciekaw'
# next to 'ciekawy').
adjpredykatywne = [
    u'ciekaw',
    u'godzien',
    u'gotów',
    u'łaskaw',
    u'świadom',
    u'winien',
    u'zdrów',
# questionable:
    u'dłużen',
    u'miłościw',
    u'praw',
    u'wesół',
    u'żyw',
]
32 | + | |
33 | +suffix_translation = { | |
34 | + u'': 'nonrefl', | |
35 | + u' siฤ': 'refl', | |
36 | + u' sobie': 'refl', | |
37 | + u' siฤ/sobie': 'refl', | |
38 | + u' siฤ?': 'refl', | |
39 | + u' (siฤ)': 'refl.nonrefl', | |
40 | + u' (siฤ)': 'refl.nonrefl', | |
41 | + u' (sobie)': 'refl.nonrefl', | |
42 | + u' (siฤ)?': 'refl.nonrefl', | |
43 | +} | |
44 | + | |
def qualifier_clause(q_id):
  """Return a SQL fragment that is true only when qualifier q_id is attached
  neither to the lexeme (l), its inflection row (o), nor its ending (z).

  The fragment ends with a trailing 'and ' so clauses can be concatenated.
  """
  template = '''not exists (
    select * from kwalifikatory_leksemow where lexeme_id = l.id and
    qualifier_id = %(q)d) and not exists (
    select * from kwalifikatory_odmieniasiow where qualifier_id = %(q)d and
    lexemeinflectionpattern_id = o.id) and not exists (
    select * from kwalifikatory_zakonczen where qualifier_id = %(q)d and
    ending_id = z.id) and '''
  return template % {'q': q_id}
53 | + | |
def magic_qualifier_clause():
  """Return a SQL fragment excluding lexemes whose tag matches a pattern AND
  that carry a given qualifier.

  The two bare %s placeholders (tag pattern, qualifier id) are left for the
  DB driver: the fragment is injected into the query template as a *value*,
  so the template's own %-formatting pass never touches them.
  """
  clause = '''and not (tag like %s and exists (
    select kw.id
    from kwalifikatory kw
    join kwalifikatory_leksemow kwl on kw.id = kwl.qualifier_id
    where kwl.lexeme_id = l.id and kw.id = %s)) '''
  return clause
60 | + | |
61 | + | |
def export_lexemes(data=None, output_file=None):
  """Dump inflected forms as UTF-8 "form<TAB>entry<TAB>tag" lines.

  data: export configuration dict with keys 'vocabs', 'antivocabs',
      'variant', 'excluding_qualifiers', 'magic_qualifiers', 'refl';
      defaults to a plain PoliMorf/Morfeusz export.
  output_file: writable byte stream; defaults to sys.stdout.
  """
  if not data:
    data = {
      'vocabs': ['PoliMorf'],
      'antivocabs': [],
      'variant': 'Morfeusz',
      'excluding_qualifiers': [],
      'magic_qualifiers': [],
      'refl': False,
    }
  if output_file is None:
    output_file = sys.stdout
  # One DB-driver placeholder per (anti)vocabulary name.
  vocabs_placeholders = ', '.join('%s' for v in data['vocabs'])
  if data['antivocabs']:
    antivocabs_placeholders = ', '.join('%s' for v in data['antivocabs'])
    antivocabs_clause = '''not exists (
      select * from leksemy_w_slownikach ls2 where ls2.l_id = l.id
      and ls2.slownik in (%s)) and''' % antivocabs_placeholders
  else:
    antivocabs_clause = ''
  qualifier_clauses = ''.join(
    qualifier_clause(q_id) for q_id in data['excluding_qualifiers'])
  # Each magic clause keeps two bare %s placeholders for the driver; the
  # corresponding values are appended to `params` below.
  magic_qualifier_clauses = ''.join(
    magic_qualifier_clause() for pattern, q_id in data['magic_qualifiers'])
  crtypes = ['comadv','comadj','gerver','pactver','ppasver']
  crtype_ids = CrossReferenceType.objects.filter(
    symbol__in=crtypes).values_list('pk', flat=True)
  cursor = connection.cursor()
  # NB: %% survives the mapping-format below as a literal %, producing the
  # driver placeholders (wariant=%s) filled from `params`.
  query = """
    select haslo, prefiks||rdzen||zak||sufiks, l.pos, ch.charfl, tag,
      l.id as leksem_id, haslosuf
    from leksemy l
    join leksemy_w_slownikach ls on (ls.l_id = l.id)
    join odmieniasie o on (o.l_id = l.id)
    join charfle ch on ch.id = o.charfl
    join wzory w on (o.w_id = w.id)
    join szablony_tabel s on (
      w.typ=s.wtyp
      and o.charfl=s.charfl)
    join klatki k on k.st_id = s.id
    join zakonczenia z on (o.w_id=z.w_id and k.efobaz=z.efobaz)
    join efobazy e on (e.id = k.efobaz)
    where ls.slownik in (%(vocabs)s) and %(antivocabs)s %(x_qual)s
    l.status<>'cand' and wariant=%%s and
    l.pos in ('v','subst','osc','adj','adv', 'num','advndm','burk','comp',
    'conj','interj','prep','qub','ppron','pred') and
    l.usuniety = false %(magic)s
    --and haslo < 'b'
    union all
    -- wymagajฤ ce gniazdowania przy hasลowaniu: adjcom, advcom, derywaty:
    select g.haslo as haslo, prefiks||rdzen||zak||sufiks, l.pos, ch.charfl, tag,
      l.id as leksem_id, g.haslosuf -- l.haslosuf?
    from leksemy l
    join leksemy_w_slownikach ls on (ls.l_id = l.id)
    join odsylacze on l.id=l_id_od
    join leksemy g on (l_id_do=g.id and g.usuniety = false)
    join odmieniasie o on l.id=o.l_id
    join charfle ch on ch.id = o.charfl
    join wzory w on (o.w_id = w.id)
    join szablony_tabel s on (
      w.typ=s.wtyp
      and o.charfl=s.charfl)
    join klatki k on k.st_id = s.id
    join zakonczenia z on (o.w_id=z.w_id and k.efobaz=z.efobaz)
    where ls.slownik in (%(vocabs)s) and %(antivocabs)s %(x_qual)s
    typods_id in (%(crtype_ids)s) and
    l.pos in ('adjcom','advcom','ger','pact','ppas') and
    wariant=%%s and l.status<>'cand' and l.usuniety = false
    --and g.haslo < 'b'
    order by haslo, leksem_id
    """ % {
    'vocabs': vocabs_placeholders,
    'antivocabs': antivocabs_clause,
    'x_qual': qualifier_clauses,
    'magic': magic_qualifier_clauses,
    'crtype_ids': ', '.join(str(pk) for pk in crtype_ids),  # ugly, but so be it
  }
  # Both halves of the UNION consume the same vocab/antivocab/variant
  # parameters; the magic-qualifier values sit between them (first half only).
  params_part = (list(data['vocabs']) + list(data['antivocabs']) +
    [data['variant']])
  params = params_part + flatten(data['magic_qualifiers']) + params_part
  cursor.execute(query, params)
  refl = data['refl']
  for row in cursor:
    entry, form, pos, _ic, tag, _id, suffix = row
    form = form.lstrip('+')  # inflecting postfixes
    if tag == 'adja':
      form = form.rstrip('+')
    if tag == 'adjc':
      # Only the closed adjpredykatywne list keeps the bare short form.
      if form not in adjpredykatywne:
        tag = "adj:sg:nom:m1.m2.m3:pos|adj:sg:acc:m3:pos"
    if refl and pos in ('v', 'pact', 'ger'):
      if suffix in suffix_translation:
        tag += ':' + suffix_translation[suffix]
      else:
        debug(entry, u'Nieznana siฤnoลฤ: %s' % suffix)

    output_file.write((u'%s\t%s\t%s\n' % (form, entry, tag)).encode('utf-8'))
... | ... |
dictionary/management/commands/extra_crs.py
0 โ 100644
1 | +++ a/dictionary/management/commands/extra_crs.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +import sys | |
4 | +from django.core.management.base import BaseCommand, CommandError | |
5 | +from common.util import no_history | |
6 | +from dictionary.models import Lexeme, Vocabulary, CrossReference | |
7 | + | |
# NOTE: broken! (after the cross-reference overhaul)
# Not sure whether it is worth fixing...

class Command(BaseCommand):
  args = '<type> <input file>'
  help = 'Adds extra cross-references'

  def handle(self, type, input_file, **options):
    # Deliberately disabled (see the note on this class): bail out
    # immediately; the call below is intentionally unreachable.
    return
    add_crs(type, input_file)
18 | + | |
def debug(entry, text):
  """Write "entry: text" (UTF-8 encoded) plus a newline to stderr.

  Uses sys.stderr.write instead of the Python-2-only "print >>" chevron
  syntax; the output bytes are identical.
  """
  sys.stderr.write((u'%s: %s' % (entry, text)).encode('utf-8') + b'\n')
21 | + | |
def connect_real(from_l, to_l, cr_type):
  """Create forward/backward cross-references between two lexemes.

  Only sides whose lexeme came from Morfologik are created; reports via
  debug() when anything new was actually linked.
  """
  made_forward = made_backward = False
  if from_l.source == 'Morfologik':
    _cr, made_forward = CrossReference.objects.get_or_create(
      from_lexeme=from_l, to_lexeme=to_l, type=cr_type[0])
  if to_l.source == 'Morfologik':
    _cr, made_backward = CrossReference.objects.get_or_create(
      from_lexeme=to_l, to_lexeme=from_l, type=cr_type[1])
  if made_forward or made_backward:
    debug(from_l.entry, u'Powiฤ zano z %s' % to_l.entry)
32 | + | |
def connect_dummy(from_l, to_l, cr_type):
  """Dry-run variant of connect_real: only report what would be linked."""
  would_make_forward = not CrossReference.objects.filter(
    from_lexeme=from_l, to_lexeme=to_l, type=cr_type[0])
  would_make_backward = not CrossReference.objects.filter(
    from_lexeme=to_l, to_lexeme=from_l, type=cr_type[1])
  if would_make_forward or would_make_backward:
    debug(from_l.entry, u'Powiฤ zanoby z %s' % to_l.entry)
    if 'Morfologik' not in (from_l.source, to_l.source):
      debug(from_l.entry, u'Powiฤ zanoby leksemy spoza Morfologika!')
42 | + | |
43 | +connect = connect_dummy | |
44 | + | |
def make_detail(qs, desc, morf, details):
  """Append an ambiguity summary for qs to details when it matched > 1 lexeme.

  The summary also counts how many of the hits come from Morfologik (morf is
  the pk collection of Morfologik-owned lexemes).
  """
  total = qs.count()
  if total <= 1:
    return
  from_morfologik = qs.filter(pk__in=morf).count()
  details.append(
    u'%s: %s, w tym z Morfologika: %s' % (desc, total, from_morfologik))
50 | + | |
def which_lacks(qs1, desc1, qs2, desc2):
  """Name the missing counterpart: desc2 if qs2 has hits, else desc1 if qs1
  has hits, else u'obu' ("both") when both querysets are empty."""
  if qs2.count():
    return desc2
  if qs1.count():
    return desc1
  return u'obu'
58 | + | |
59 | + | |
# lots of copy-paste here, but probably not worth refactoring
def add_crs(type, path):
  """Read word pairs from `path` and link them with cross-references.

  type selects the pairing mode:
    'advnie'  -- negated adverb -> base adverb (or base adjective),
    'advadj'  -- adverb -> adjective,
    'advcom'  -- adverb comparative -> base adverb,
    'adjadvc' -- adjective -> adverb comparative (run last; ignores `path`
                 contents beyond opening the file).
  Ambiguous or missing lexemes are only reported via debug(); actual linking
  goes through the module-level `connect` (dummy by default).
  """
  no_history()
  morfologik = Vocabulary.objects.get(id='Morfologik')
  morf = morfologik.owned_lexemes_pk()
  lexemes = Lexeme.objects.filter(
    deleted=False, lexemeassociation__vocabulary__id='Morfologik')
  adv = lexemes.filter(part_of_speech__symbol__in=('adv', 'advndm'))
  adj = lexemes.filter(part_of_speech__symbol='adj')
  advcom = lexemes.filter(part_of_speech__symbol='advcom')
  with open(path) as file:
    if type == 'advnie':
      cr_type = ('nieadj', 'adjnie')
      for line in file:
        advneg_e, base_e = line.strip().decode('utf-8').split()
        advnegs = adv.filter(entry=advneg_e)
        if adv.filter(entry=base_e):
          advs = adv.filter(entry=base_e)
          if advnegs.count() > 1 or advs.count() > 1:
            details = []
            make_detail(advnegs, u'zanegowane', morf, details)
            make_detail(advs, u'niezanegowane', morf, details)
            debug(advneg_e, u'Niejednoznacznoลฤ: %s' % '; '.join(details))
          elif advnegs.count() == 0 or advs.count() == 0:
            lack = which_lacks(
              advnegs, u'zanegowanego', advs, u'niezanegowanego')
            debug(advneg_e, u'Brak %s' % lack)
          else:
            connect(advnegs[0], advs[0], cr_type)
        elif adj.filter(entry=base_e):
          # the 'advadj' pass has to be run first!
          adjs = adj.filter(
            entry=base_e, refs_to__type='adjadv',
            refs_to__to_lexeme__deleted=False)
          adjs = adjs | adj.filter(
            entry=base_e, refs_from__type='advadj',
            refs_from__from_lexeme__deleted=False)
          adjs = adjs.distinct()
          if advnegs.count() > 1 or adjs.count() > 1:
            details = []
            make_detail(advnegs, u'zanegowane', morf, details)
            make_detail(adjs, u'przymiotniki', morf, details)
            debug(advneg_e, u'Niejednoznacznoลฤ: %s' % '; '.join(details))
          elif advnegs.count() == 0 or adjs.count() == 0:
            lack = which_lacks(
              advnegs, u'zanegowanego', adjs, u'przymiotnika')
            debug(advneg_e, u'Brak %s' % lack)
          else:
            # Reach the base adverb through the adjective's adjadv/advadj links.
            advs = [cr.to_lexeme.pk for cr
                    in adjs[0].refs_to.filter(
                      type='adjadv', to_lexeme__deleted=False)]
            advs += [cr.from_lexeme.pk for cr
                     in adjs[0].refs_from.filter(
                       type='advadj', from_lexeme__deleted=False)]
            advs = adv.filter(pk__in=advs).distinct()
            if len(advs) > 1:
              details = []
              make_detail(advs, u'niezanegowane', morf, details)
              debug(advneg_e, u'Niejednoznacznoลฤ: %s' % '; '.join(details))
            elif len(advs) == 0:
              debug(advneg_e, u'Brak niezanegowanego')
            else:
              connect(advnegs[0], advs[0], cr_type)
        else:
          debug(advneg_e, u'Brak drugiego leksemu [przymiotnik lub przysลรณwek]')
    elif type == 'advadj':
      cr_type = ('advadj', 'adjadv')
      for line in file:
        adv_e, adj_e = line.strip().decode('utf-8').split()
        advs = adv.filter(entry=adv_e, part_of_speech__symbol='adv')
        adjs = adj.filter(entry=adj_e, patterns__name__contains='').distinct()
        if advs.count() > 1 or adjs.count() > 1:
          details = []
          make_detail(advs, u'przysลรณwki', morf, details)
          make_detail(adjs, u'przymiotniki', morf, details)
          debug(adv_e, u'Niejednoznacznoลฤ: %s' % '; '.join(details))
        elif advs.count() == 0 or adjs.count() == 0:
          lack = which_lacks(advs, u'przysลรณwka', adjs, u'przymiotnika')
          debug(adv_e, u'Brak %s' % lack)
        else:
          connect(advs[0], adjs[0], cr_type)
    elif type == 'advcom':
      cr_type = ('advcom', 'comadv')
      for line in file:
        com_e, adv_e = line.strip().decode('utf-8').split()
        advs = adv.filter(entry=adv_e)
        coms = advcom.filter(entry=com_e)
        if advs.count() > 1 or coms.count() > 1:
          details = []
          make_detail(advs, u'rรณwne', morf, details)
          make_detail(coms, u'wyลผsze', morf, details)
          debug(adv_e, u'Niejednoznacznoลฤ: %s' % '; '.join(details))
        elif advs.count() == 0 or coms.count() == 0:
          lack = which_lacks(advs, u'rรณwnego', coms, u'wyลผszego')
          debug(adv_e, u'Brak %s' % lack)
        else:
          connect(advs[0], coms[0], cr_type)
    elif type == 'adjadvc':  # run this one last
      cr_type = ('adjadvc', 'advcadj')
      # ugh!
      advs = Lexeme.objects.filter(
        refs_to__type='advadj', refs_to__to_lexeme__deleted=False)
      advs = advs | Lexeme.objects.filter(
        refs_from__type='adjadv', refs_from__from_lexeme__deleted=False)
      advs_with_com = advs.filter(
        refs_to__type='advcom', refs_to__to_lexeme__deleted=False)
      advs_with_com = advs_with_com | advs.filter(
        refs_from__type='comadv', refs_from__from_lexeme__deleted=False)
      advs = advs_with_com.distinct()
      for adv in advs:
        adjs = Lexeme.objects.filter(
          refs_to__type='adjadv', refs_to__to_lexeme=adv,
          refs_to__to_lexeme__deleted=False)
        adjs = adjs | Lexeme.objects.filter(
          refs_from__type='advadj', refs_from__from_lexeme=adv)
        adjs = adjs.distinct()
        advcoms = Lexeme.objects.filter(
          refs_to__type='comadv', refs_to__to_lexeme=adv,
          refs_to__to_lexeme__deleted=False)
        advcoms = advcoms | Lexeme.objects.filter(
          refs_from__type='advcom', refs_from__from_lexeme=adv)
        for adj in adjs:
          for advcom in advcoms:
            if not adj.refs_to.filter(to_lexeme=advcom, type='adjadvc'):
              connect(adj, advcom, cr_type)
... | ... |
dictionary/management/commands/fix_morfologik.py
0 โ 100644
1 | +++ a/dictionary/management/commands/fix_morfologik.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +import sys | |
4 | +from django.core.management.base import BaseCommand, CommandError | |
5 | +from common.util import no_history, debug | |
6 | +from dictionary.models import Lexeme, Pattern, Vocabulary, LexemeAssociation, \ | |
7 | + Qualifier, InflectionCharacteristic | |
8 | + | |
class Command(BaseCommand):
  args = 'none'
  help = 'Fixes various issues with Morfologik import'

  def handle(self, **options):
    """Run every Morfologik cleanup step."""
    fix_morfologik()
15 | + | |
# Module-level setup: these ORM queries execute at import time (the
# 'Morfologik' vocabulary must already exist in the database).
morfologik = Vocabulary.objects.get(id='Morfologik')
# Primary keys of all lexemes owned by the Morfologik vocabulary.
morf = morfologik.owned_lexemes_pk()
# All lexemes that are not soft-deleted.
existing = Lexeme.objects.filter(deleted=False)
19 | + | |
def sgtant_qualifiers():
  """Attach a 'blm' (no-plural) qualifier to every lexeme whose comment marks
  it as singulare tantum."""
  qualifier, _created = Qualifier.objects.get_or_create(
    label='blm', vocabulary=morfologik)
  for lexeme in existing.filter(comment__contains='singulare tantum'):
    lexeme.qualifiers.add(qualifier)
26 | + | |
def cand_ndm():
  """Demote 'desc' lexemes with degenerate inflection patterns back to
  candidate status ('ndm' adverbs are legitimately indeclinable and kept)."""
  suspect_sets = [
    existing.filter(
      status='desc', lexemeinflectionpattern__pattern__name='0000'),
    existing.filter(
      status='desc', lexemeinflectionpattern__pattern__name='P00'),
    existing.filter(
      status='desc', lexemeinflectionpattern__pattern__name='ndm')
      .exclude(part_of_speech__symbol='adv'),
  ]
  for queryset in suspect_sets:
    for lexeme in queryset:
      lexeme.status = 'cand'
      lexeme.save()
41 | + | |
def fix_ow():
  """Force the m3 inflection characteristic on capitalised Morfologik
  substantives ending in -รณw (they come in misclassified)."""
  m3 = InflectionCharacteristic.objects.get(
    entry='m3', part_of_speech__symbol='subst')
  owy = existing.filter(
    source='Morfologik', part_of_speech__symbol='subst',
    entry__regex='^[A-Zฤฤฤลลรลลปลน].*รณw$')
  for lexeme in owy:
    for lip in lexeme.lexemeinflectionpattern_set.all():
      lip.inflection_characteristic = m3
      lip.save()
  debug(u'รณw', u'%s poprawionych' % owy.count())
53 | + | |
def fix_stwo():
  """Move Morfologik -stwo nouns off pattern 0173o onto p1 / pattern 0205."""
  p1 = InflectionCharacteristic.objects.get(
    entry='p1', part_of_speech__symbol='subst')
  pattern_0205 = Pattern.objects.get(name='0205')
  stwo = existing.filter(
    entry__endswith='stwo', source='Morfologik',
    lexemeinflectionpattern__pattern__name='0173o')
  for lexeme in stwo:
    for lip in lexeme.lexemeinflectionpattern_set.all():
      lip.inflection_characteristic = p1
      lip.pattern = pattern_0205
      lip.save()
  debug(u'stwo', u'%s poprawionych' % stwo.count())
67 | + | |
def fix_morfologik():
  """Entry point: run every Morfologik cleanup step with history recording off."""
  no_history()
  for step in (sgtant_qualifiers, cand_ndm, fix_ow, fix_stwo):
    step()
... | ... |
dictionary/management/commands/fix_osc.py
0 โ 100644
1 | +++ a/dictionary/management/commands/fix_osc.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +import sys | |
4 | +from django.core.management.base import BaseCommand, CommandError | |
5 | +from common.util import no_history, debug | |
6 | +from dictionary.models import Lexeme, Vocabulary, LexemeAssociation, \ | |
7 | + PartOfSpeech, CrossReference | |
8 | + | |
class Command(BaseCommand):
  args = 'none'
  help = 'Fixes osc'

  def handle(self, **options):
    """Reclassify -ość substantives; see fix_osc()."""
    fix_osc()
15 | + | |
# currently we only link when the -ość noun arrived together with a matching
# adjective, or with its non-negated version (for the negated ones)

def fix_osc():
  """Reclassify Morfologik '-ość' substantives as 'osc' and cross-link them
  to their source adjective (or, for negated ones, to the non-negated noun).

  Bug fix: the original passed filter-only lookups to the model constructor
  (CrossReference(..., type__symbol='nieadj')), which raises TypeError in
  Django -- constructors need real related objects, so the types are resolved
  up front.
  """
  # CrossReferenceType is not in this module's imports; pull it in locally.
  from dictionary.models import CrossReferenceType
  cr_types = dict(
    (symbol, CrossReferenceType.objects.get(symbol=symbol))
    for symbol in ('nieadj', 'adjnie', 'oscadj', 'adjosc'))
  no_history()
  morfologik = Vocabulary.objects.get(id='Morfologik')
  morf = morfologik.owned_lexemes_pk()
  existing = Lexeme.objects.filter(deleted=False)
  morf_osc = existing.filter(
    pk__in=morf, part_of_speech__symbol='subst', entry__endswith=u'oลฤ')
  for lexeme in morf_osc:
    # Strip the -ość suffix (-jość keeps one letter less) to get the stem.
    if lexeme.entry.endswith(u'joลฤ'):
      base = lexeme.entry[:-4]
    else:
      base = lexeme.entry[:-3]
    options = (base + 'i', base + 'y', base)
    adjs = existing.filter(
      pk__in=morf, part_of_speech__lexical_class__symbol='adj',
      entry__in=options)
    if adjs.count() > 1:
      debug(lexeme.entry, u'Niejednoznaczny przymiotnik ลบrรณdลowy')
    if adjs:
      lexeme.part_of_speech = PartOfSpeech.objects.get(symbol='osc')
      lexeme.save()
      negs = CrossReference.objects.filter(
        from_lexeme__in=adjs, to_lexeme__deleted=False, type__symbol='nieadj')
      if negs:
        # all Morfologik adjectives negate as nie+..., never nie-...;
        # there seem to be no nie-...-ość entries in M...
        assert lexeme.entry.startswith('nie')
        nonnegs = existing.filter(pk__in=morf, entry=lexeme.entry[4:])
        if nonnegs.count() > 1:
          debug(lexeme.entry, u'Niejednoznaczna wersja niezanegowana')
        if not nonnegs:
          debug(lexeme.entry, u'Nie znaleziono wersji niezanegowanej')
        else:
          for l in nonnegs:
            cr = CrossReference(
              from_lexeme=lexeme, to_lexeme=l, type=cr_types['nieadj'])
            cr.save()
            cr = CrossReference(
              from_lexeme=l, to_lexeme=lexeme, type=cr_types['adjnie'])
            cr.save()
          debug(lexeme.entry, u'Dopisano jako negacjฤ osc')
      else:
        for adj in adjs:
          cr = CrossReference(
            from_lexeme=lexeme, to_lexeme=adj, type=cr_types['oscadj'])
          cr.save()
          cr = CrossReference(
            from_lexeme=adj, to_lexeme=lexeme, type=cr_types['adjosc'])
          cr.save()
        debug(lexeme.entry, u'Dopisano jako osc')
... | ... |
dictionary/management/commands/fix_surnames.py
0 โ 100644
1 | +++ a/dictionary/management/commands/fix_surnames.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +import sys | |
4 | +from django.core.management.base import BaseCommand, CommandError | |
5 | +from common.util import no_history, debug | |
6 | +from dictionary.models import Lexeme, Vocabulary, LexemeAssociation | |
7 | + | |
class Command(BaseCommand):
  args = 'none'
  help = 'Fixes SGJP surnames which come from Morfologik as adjectives'

  def handle(self, **options):
    """Delegate to the module-level cleanup routine."""
    fix_surnames()
14 | + | |
def fix_surnames():
  """Clean up capitalised Morfologik 'adjectives' that are really surnames.

  For each capitalised adjective from Morfologik whose entry also occurs as a
  capitalised substantive, look up the SGJP masculine (m1) surname of the
  same spelling and the female one in -a (f); attach both to the Morfologik
  vocabulary and delete the spurious adjective lexeme.
  """
  no_history()
  morfologik = Vocabulary.objects.get(id='Morfologik')
  SGJP = Vocabulary.objects.get(id='SGJP')
  morf = morfologik.owned_lexemes_pk()
  sgjp = SGJP.owned_lexemes_pk()
  existing = Lexeme.objects.filter(deleted=False)
  sgjp_subst = existing.filter(  # filtering by SGJP here leaves nothing...
    part_of_speech__symbol='subst',
    entry__regex=u'^[A-Zฤฤฤลลรลลปลน]')
  morf_surnames = existing.filter(
    pk__in=morf, part_of_speech__symbol='adj', entry__regex=u'^[A-Zฤฤฤลลรลลปลน]')
  # Candidates: entries that exist both as a substantive and as a
  # Morfologik adjective.
  subst_entries = set(sgjp_subst.values_list('entry', flat=True))
  surnames_entries = set(morf_surnames.values_list('entry', flat=True))
  entries = subst_entries & surnames_entries
  lexemes = morf_surnames.filter(entry__in=entries)
  for lexeme in lexemes:
    # Only -i/-y endings have the adjectival masculine declension expected here.
    if lexeme.entry[-1] not in 'iy':
      debug(lexeme.entry, u'Nie jest nazwiskiem rodzaju mฤskiego')
      continue
    m = existing.filter(
      pk__in=sgjp, entry=lexeme.entry, part_of_speech__symbol='subst',
      lexemeinflectionpattern__inflection_characteristic__entry='m1')
    female = lexeme.entry[:-1] + 'a'
    f = existing.filter(
      pk__in=sgjp, entry=female, part_of_speech__symbol='subst',
      lexemeinflectionpattern__inflection_characteristic__entry='f')
    if m.count() == 0 or f.count() == 0:
      debug(lexeme.entry, u'Brak homonimu w SGJP')
    elif m.count() > 1 or f.count() > 1:
      debug(lexeme.entry, u'Niejednoznaczne homonimy w SGJP')
    else:
      m = m[0]
      f = f[0]
      if morfologik not in m.vocabularies.all():
        la = LexemeAssociation(lexeme=m, vocabulary=morfologik)
        la.save()
      else:
        debug(lexeme.entry, u'Juลผ jest dopisany do Morfologika [m]')
      if morfologik not in f.vocabularies.all():
        la = LexemeAssociation(lexeme=f, vocabulary=morfologik)
        la.save()
      else:
        debug(lexeme.entry, u'Juลผ jest dopisany do Morfologika [f]')
      lexeme.delete()
      debug(lexeme.entry, u'Wykonano')
... | ... |
dictionary/management/commands/import_data.py
0 โ 100644
1 | +++ a/dictionary/management/commands/import_data.py | |
1 | +#-*- coding:utf-8 -*- | |
2 | + | |
3 | +import sqlite3 | |
4 | +import datetime | |
5 | +from django.db import connection, transaction | |
6 | +from django.core.management.base import BaseCommand, CommandError | |
7 | +from django.contrib.auth.models import User | |
8 | + | |
9 | +from common.util import no_history | |
10 | +from dictionary.models import * | |
11 | + | |
DEFAULT_DATABASE = 'data/sgjp.db'  # default source sqlite database
MINI_LEXEME_COUNT = 500  # sample size used when `mini` is True
mini = False  # import only a small sample of lexemes when True
sql = True  # True: raw SQL INSERTs; False: go through the ORM
cursor = connection.cursor()  # Django DB cursor -- NOTE: opened at import time
db = None  # path to the source sqlite db; set by Command.handle()
# sqlite cursor over `db` -- presumably assigned before the import_* helpers
# run (delete_and_import is defined further down); confirm.
cur = None
no_history()  # disable history recording for the whole import
20 | + | |
class Command(BaseCommand):
  args = '<input db filename>'
  help = 'Imports initial data'

  #TODO: a -mini option (?)
  def handle(self, db_name=DEFAULT_DATABASE, **options):
    """Point the module at db_name and run the full delete-and-import cycle."""
    global db
    db = db_name
    delete_and_import()
30 | + | |
def get_cursor(db):
  """Open the sqlite database at path `db` and return a cursor whose rows
  support access by column name (sqlite3.Row)."""
  sqlite_conn = sqlite3.connect(db)
  sqlite_conn.row_factory = sqlite3.Row
  return sqlite_conn.cursor()
35 | + | |
36 | + | |
def import_lexical_classes():
  """Copy the distinct pattern parts-of-speech into LexicalClass, plus a
  catch-all 'inne' class (always created through the ORM)."""
  LexicalClass(symbol='inne').save()
  for row in cur.execute('select distinct pos from wzory'):
    if sql:
      cursor.execute("INSERT INTO czescimowy (czm) VALUES (%s)",
                     [row['pos']])
    else:
      lexical_class = LexicalClass(symbol=row['pos'])
      lexical_class.save()
47 | + | |
48 | + | |
def import_parts_of_speech():
  """Create PartOfSpeech rows, mapping each grammatical class to the lexical
  class its patterns use ('inne' when no pattern mentions it)."""
  # NOTE(review): cur2 is never used -- it opens a second sqlite connection
  # for nothing; kept to preserve behavior, consider removing.
  cur2 = get_cursor(db)
  other = 'inne'
  lcs = {}
  # Map leksemy.pos -> wzory.pos by joining through odmieniasie.
  for row in cur.execute('select distinct wzory.pos, leksemy.pos from wzory '
                         'natural join odmieniasie join leksemy on '
                         'leksemy.nr = odmieniasie.nr'):
    lcs[row[1]] = row[0]
    # Debug trace; parenthesized so the module also parses under Python 3
    # (the original used the Python-2-only statement form).
    print(row)
  for row in cur.execute('SELECT pos FROM klasygramatyczne'):
    lc = lcs.get(row['pos'], other)
    if not sql:
      pos = PartOfSpeech(symbol=row['pos'])
      pos.lexical_class = LexicalClass.objects.get(symbol=lc)
      pos.save()
    else:
      cursor.execute("INSERT INTO klasygramatyczne (pos, czm) VALUES (%s, %s)",
                     [row['pos'], lc])
67 | + | |
68 | + | |
def import_base_form_labels():
  """Collect every distinct base-form label used by paradigms or endings."""
  query_result = cur.execute("""
    SELECT efobaz FROM paradygmaty
    UNION
    SELECT efobaz FROM zakonczenia
    """)
  for row in query_result:
    if sql:
      cursor.execute("INSERT INTO efobazy (efobaz) VALUES (%s)", [row[0]])
    else:
      BaseFormLabel(entry=row[0]).save()
81 | + | |
# Temporary table -- this should really come with the database
# (or special-case p123... there are exceptions anyway).
basic_form_labels = {
  '0-': '1',
  '3+': '1',
  'f': 'sg:nom',
  'm1': 'sg:nom',
  'm2': 'sg:nom',
  'm3': 'sg:nom',
  'n1': 'sg:nom',
  'n2': 'sg:nom',
  'p1': 'pl:nom:mo',
  'p2': 'pl:nom',
  'p3': 'pl:nom',
  'pri': 'sg:nom', # or pl. -- at this point there is really no way to tell.
  'sec': 'sg:nom',
}

# probably not the best solution...
# Per-part-of-speech override for the base-form label (wins over the table above).
basic_form_labels_pos = {
  'v': '5',
  'ger': '11',
  'pact': '3',
  'ppas': '10',
  'appas': '10',
  'pred': '5',
}

# Cache: (entry, part-of-speech symbol) -> InflectionCharacteristic,
# filled in by import_inflection_characteristics().
ics = {}
111 | + | |
def import_inflection_characteristics():
  """Create InflectionCharacteristic rows for every (charfl, pos) pair in
  paradygmaty and populate the module-level `ics` cache."""
  for row in cur.execute('SELECT DISTINCT charfl, pos FROM paradygmaty'):
    # Choose the base-form label: empty characteristic defaults to '1' for
    # adjectives; otherwise look it up in basic_form_labels; a per-POS
    # override from basic_form_labels_pos wins over both.
    if row['charfl'] == '':
      bfl_entry = '1' if row['pos'] in ('adj', 'adjcom') else ''
    else:
      bfl_entry = basic_form_labels.get(row['charfl'], '')
    if row['pos'] in basic_form_labels_pos:
      bfl_entry = basic_form_labels_pos[row['pos']]
    bfl = BaseFormLabel.objects.get(entry=bfl_entry)
    if not sql:
      ic = InflectionCharacteristic(
        entry=row['charfl'], basic_form_label=bfl,
        part_of_speech=PartOfSpeech.objects.get(pk=row['pos']))
      ic.save()
    else:
      cursor.execute("INSERT INTO charfle (charfl, pos, efobaz) "
        "VALUES (%s, %s, %s)", [row['charfl'], row['pos'], bfl.pk])
  # Cache every characteristic for fast lookups by later import steps.
  for ic in InflectionCharacteristic.objects.all():
    ics[(ic.entry, ic.part_of_speech.symbol)] = ic
131 | + | |
132 | + | |
133 | +sgjp_domain = ('SGJP', 'WSJP', 'SJPDor', 'zmiotki') | |
134 | + | |
135 | + | |
def import_vocabularies():
  """Create Vocabulary rows for every dictionary name in the source db and
  grant the 'sgjp' user management over the SGJP-family dictionaries."""
  try:
    sgjp = User.objects.get(username='sgjp')
  except User.DoesNotExist:
    # No such account (e.g. a fresh install): vocabularies get no manager.
    # (The original bare `except:` also hid real errors such as a broken
    # DB connection.)
    sgjp = None
  result = cur.execute("""
    SELECT slownik FROM leksemy
    UNION
    SELECT slownik_uz FROM slowniki_uzywajace
    """)
  for row in result:
    v = Vocabulary()
    v.id = row[0]
    v.save()
    if sgjp and v.id in sgjp_domain:
      v.managers.add(sgjp)
152 | + | |
153 | + | |
def import_qualifiers():
  """Collect every distinct qualifier label ('|'-separated in the source db)
  into the SGJP vocabulary's qualifier set."""
  sgjp = Vocabulary.objects.get(id='SGJP')
  query_result = cur.execute("""
    SELECT okwal FROM odmieniasie
    UNION
    SELECT zkwal FROM zakonczenia
    UNION
    SELECT lkwal FROM leksemy
    """)
  seen = set()
  for row in query_result:
    if not row[0]:
      continue
    for qualifier in row[0].split('|'):
      if qualifier in seen:
        continue
      seen.add(qualifier)
      if sql:
        cursor.execute("INSERT INTO kwalifikatory (kwal, slownik) "
                       "VALUES (%s, %s)", [qualifier, sgjp.pk])
      else:
        Qualifier(label=qualifier, vocabulary=sgjp).save()
175 | + | |
176 | + | |
177 | +mini_lexeme_query = 'SELECT %s FROM leksemy LIMIT ?' | |
178 | + | |
def import_lexemes():
  """Copy lexemes (plus their dictionary / classification / qualifier links)
  from the source sqlite db, via the ORM or raw SQL depending on `sql`."""
  if mini:
    result = cur.execute(mini_lexeme_query % '*', (MINI_LEXEME_COUNT,))
  else:
    result = cur.execute('SELECT * FROM leksemy')
  date = datetime.datetime.now()
  # label -> pk shortcut so the SQL path avoids one query per lexeme.
  cv_table = dict(ClassificationValue.objects.values_list('label', 'pk'))
  for row in result:
    slownik = row['slownik']
    # Everything from the 'zmiotki' dictionary is only a candidate.
    status = 'conf' if slownik != 'zmiotki' else 'cand'
    if not sql:
      l = Lexeme()
      l.id = row['nr']
      l.entry = row['haslo']
      l.part_of_speech = PartOfSpeech.objects.get(pk=row['pos'])
      l.comment = row['komentarz']
      l.source = 'SGJP' # is this needed at all?
      l.status = status
      l.gloss = row['glosa'] or ''
      l.entry_suffix = row['haslosuf'] or ''
      cv = ClassificationValue.objects.get(label=row['pospolitosc'])
      cv.lexemes.add(l)

      # FICTION (placeholder value):
      l.homonym_number = 1

      l.owner_vocabulary = Vocabulary.objects.get(pk=slownik)
      l.save()
      LexemeAssociation.objects.create(
        vocabulary=Vocabulary.objects.get(pk=slownik),
        lexeme=l)
      # no qualifiers here -- note outdated
    else:
      cv_pk = cv_table[row['pospolitosc']]
      cursor.execute(
        "INSERT INTO leksemy (id, haslo, haslosuf, glosa, hom, pos, zrodlo, "
        "status, komentarz, data_modyfikacji, slownik) VALUES (%s, %s, %s, %s, "
        "%s, %s, %s, %s, %s, %s, %s)", [row['nr'], row['haslo'],
        row['haslosuf'] or '', row['glosa'] or '', 1, row['pos'], 'SGJP',
        status, row['komentarz'], date, row['slownik']])
      cursor.execute(
        "INSERT INTO leksemy_w_slownikach (l_id, slownik) "
        "VALUES (%s, %s)", [row['nr'], slownik])
      cursor.execute(
        "INSERT INTO wartosci_klasyfikacji_lexemes (classificationvalue_id, "
        "lexeme_id) VALUES (%s, %s)", [cv_pk, row['nr']])
      if row['lkwal']:
        for qual in row['lkwal'].split('|'):
          q_id = Qualifier.objects.get(label=qual).pk
          cursor.execute(
            "INSERT INTO kwalifikatory_leksemow (lexeme_id, "
            "qualifier_id) VALUES (%s, %s)", [row['nr'], q_id])
231 | + | |
232 | + | |
def import_lexeme_associations():
    """Copy lexeme/vocabulary associations (table slowniki_uzywajace)."""
    # In mini mode only associations of the sampled lexemes are imported.
    if mini:
        query = ('SELECT * FROM slowniki_uzywajace WHERE nr in (%s)' %
                 (mini_lexeme_query % 'nr'))
        rows = cur.execute(query, [MINI_LEXEME_COUNT])
    else:
        rows = cur.execute('SELECT * FROM slowniki_uzywajace')
    for assoc in rows:
        if sql:
            # Raw-SQL fast path: write straight into the target table.
            cursor.execute(
                "INSERT INTO leksemy_w_slownikach (l_id, slownik) "
                "VALUES (%s, %s)", [assoc['nr'], assoc['slownik_uz']])
        else:
            # ORM path: resolve both ends and create the association row.
            vocabulary = Vocabulary.objects.get(pk=assoc['slownik_uz'])
            lexeme = Lexeme.objects.get(pk=assoc['nr'])
            LexemeAssociation.objects.create(
                vocabulary=vocabulary, lexeme=lexeme)
248 | + | |
249 | + | |
def import_cross_reference_types():
    """Import cross-reference types (typyodsylaczy), expanded per POS pair.

    Each type is multiplied over every (source POS, target POS)
    combination that actually occurs in the odsylacze table.
    """
    rows = cur.execute('select distinct l1.pos pos1, l2.pos pos2, t.* '
                       'from odsylacze o join leksemy l1 on nrod=l1.nr '
                       'join leksemy l2 on nrdo=l2.nr '
                       'join typyodsylaczy t on t.typods=o.typods')
    for row in rows:
        CrossReferenceType(
            symbol=row['typods'],
            desc=row['naglowek'],
            index=row['kolejnosc'],
            from_pos=PartOfSpeech.objects.get(symbol=row['pos1']),
            to_pos=PartOfSpeech.objects.get(symbol=row['pos2']),
        ).save()
263 | + | |
264 | + | |
def import_cross_references():
    """Import cross-references (odsylacze) between lexemes.

    Fixes two defects in the original:
    * the ORM branch referenced an undefined name ``to_lexeme``
      (NameError on first matching row) — now uses ``l_to``;
    * ``CrossReferenceType(...)`` created a new, unsaved instance, so
      ``cr_type.pk`` was ``None`` in the raw-SQL branch — the existing
      type (created by ``import_cross_reference_types``) is now fetched
      with ``objects.get``.
    """
    if mini:
        result = cur.execute('SELECT * FROM odsylacze WHERE '
                             'nrod in (%(subq)s) and nrdo in (%(subq)s)' %
                             {'subq': mini_lexeme_query % 'nr'},
                             [MINI_LEXEME_COUNT, MINI_LEXEME_COUNT])
    else:
        result = cur.execute('SELECT * FROM odsylacze')
    for row in result:
        # Skip rows with a missing endpoint (original note: unclear how
        # such rows should be interpreted).
        if row['nrod'] and row['nrdo']:
            l_from = Lexeme.objects.get(pk=row['nrod'])
            l_to = Lexeme.objects.get(pk=row['nrdo'])
            # Look up the already-imported type so its pk is valid.
            cr_type = CrossReferenceType.objects.get(
                symbol=row['typods'], from_pos=l_from.part_of_speech,
                to_pos=l_to.part_of_speech)
            if not sql:
                CrossReference.objects.create(
                    from_lexeme=l_from, to_lexeme=l_to, type=cr_type)
            else:
                cursor.execute(
                    "INSERT INTO odsylacze (l_id_od, l_id_do, typods_id) "
                    "VALUES (%s, %s, %s)", [row['nrod'], row['nrdo'], cr_type.pk])
287 | + | |
288 | + | |
def import_pattern_types():
    """Import pattern types from paradygmaty, plus wzory as a stop-gap."""
    for record in cur.execute('SELECT DISTINCT typr, pos FROM paradygmaty'):
        lexical_class = PartOfSpeech.objects.get(
            symbol=record['pos']).lexical_class
        PatternType.objects.get_or_create(
            lexical_class=lexical_class, entry=record['typr'])
    # Stop-gap because of the empty 'skr' class: in wzory the 'pos'
    # column holds a lexical-class symbol directly.
    for record in cur.execute('SELECT DISTINCT typr, pos FROM wzory'):
        lexical_class = LexicalClass.objects.get(symbol=record['pos'])
        PatternType.objects.get_or_create(
            lexical_class=lexical_class, entry=record['typr'])
297 | + | |
298 | + | |
def import_patterns():
    """Import inflection patterns (wzory); every pattern starts as 'temp'."""
    STATUS = 'temp'
    EXAMPLE = '-'  # placeholder example text
    for row in cur.execute('SELECT * FROM wzory'):
        pattern_type = PatternType.objects.get(
            lexical_class__symbol=row['pos'], entry=row['typr'])
        if sql:
            cursor.execute(
                "INSERT INTO wzory (w_id, typ, przyklad, zakp, status, komentarz) "
                "VALUES (%s, %s, %s, %s, %s, %s)",
                [row['wzor'], pattern_type.pk, EXAMPLE, row['zakp'], STATUS, ''])
        else:
            pattern = Pattern(
                name=row['wzor'],
                type=pattern_type,
                basic_form_ending=row['zakp'],
                example=EXAMPLE,
                status=STATUS)
            pattern.save()
319 | + | |
320 | + | |
def import_lexeme_inflection_patterns():
    """Import lexeme inflection patterns (odmieniasie) with qualifiers."""
    if mini:
        result = cur.execute(
            'SELECT * FROM odmieniasie WHERE nr IN (%s)' % (mini_lexeme_query % 'nr'),
            (MINI_LEXEME_COUNT,))
    else:
        result = cur.execute('SELECT * FROM odmieniasie')
    # Prebuilt lookup tables so the raw-SQL branch avoids per-row queries.
    pos_table = dict(Lexeme.objects.values_list('pk', 'part_of_speech'))
    pattern_pk_table = dict(Pattern.objects.values_list('name', 'pk'))
    for row in result:
        if sql:
            pos = pos_table[row['nr']]
            pattern_pk = pattern_pk_table[row['wzor']]
            charfl_id = ics[(row['charfl'], pos)].pk
            cursor.execute(
                "INSERT INTO odmieniasie (l_id, oind, w_id, charfl, rdzen) "
                "VALUES (%s, %s, %s, %s, %s) ", [row['nr'], row['oskl'], pattern_pk,
                                                 charfl_id, row['rdzen']])
            if row['okwal']:
                # Fetch the id the sequence just assigned to the insert above.
                cursor.execute("select currval('odmieniasie_id_seq')")
                last_id = cursor.fetchone()[0]
                for qual in row['okwal'].split('|'):
                    q_id = Qualifier.objects.get(label=qual).pk
                    cursor.execute(
                        "INSERT INTO kwalifikatory_odmieniasiow (lexemeinflectionpattern_id, "
                        "qualifier_id) VALUES (%s, %s)", [last_id, q_id])
        else:
            lip = LexemeInflectionPattern()
            lip.lexeme = Lexeme.objects.get(id=row['nr'])
            lip.index = row['oskl']
            lip.pattern = Pattern.objects.get(name=row['wzor'])
            lip.inflection_characteristic = ics[(row['charfl'],
                                                 lip.lexeme.part_of_speech)]
            lip.root = row['rdzen']
            lip.save()
            # Outdated path (original note: "nieaktualne").
            if row['okwal']:
                lip.qualifiers.add(Qualifier.objects.get(label=row['okwal']))
359 | + | |
def import_endings():
    """Import pattern endings (zakonczenia) together with their qualifiers.

    Rows whose 'zak' column is NULL are skipped.  Fixes: the ORM branch
    crashed with AttributeError when 'zkwal' was NULL — it now guards the
    ``split('|')`` the same way the raw-SQL branch always did.
    """
    if sql:
        # Precompute lookup tables so the loop does no ORM queries.
        pattern_pk_table = dict(Pattern.objects.values_list('name', 'pk'))
        bfl_table = dict(BaseFormLabel.objects.values_list('entry', 'pk'))
    for row in cur.execute('SELECT * FROM zakonczenia'):
        if row['zak'] is not None:
            if not sql:
                e = Ending()
                e.pattern = Pattern.objects.get(name=row['wzor'])
                e.base_form_label = BaseFormLabel.objects.get(entry=row['efobaz'])
                e.string = row['zak']
                e.index = row['nrskl']
                e.save()
                if row['zkwal']:  # 'zkwal' may be NULL — guard before split
                    for qual in row['zkwal'].split('|'):
                        e.qualifiers.add(Qualifier.objects.get(label=qual))
            else:
                pattern_pk = pattern_pk_table[row['wzor']]
                if pattern_pk:
                    efobaz_id = bfl_table[row['efobaz']]
                    cursor.execute(
                        "INSERT INTO zakonczenia (w_id, efobaz, zind, zak) VALUES "
                        "(%s, %s, %s, %s)",
                        [pattern_pk, efobaz_id, row['nrskl'], row['zak']])
                    if row['zkwal']:
                        # Fetch the id the sequence assigned to the insert above.
                        cursor.execute("select currval('zakonczenia_id_seq')")
                        last_id = cursor.fetchone()[0]
                        for qual in row['zkwal'].split('|'):
                            q_id = Qualifier.objects.get(label=qual).pk
                            cursor.execute(
                                "INSERT INTO kwalifikatory_zakonczen (ending_id, qualifier_id) "
                                "VALUES (%s, %s)", [last_id, q_id])
391 | + | |
392 | + | |
def import_tables():
    """Import table templates and their cells from paradygmaty."""
    bfl_table = dict(BaseFormLabel.objects.values_list('entry', 'pk'))
    for row in cur.execute('SELECT * FROM paradygmaty'):
        lexical_class = PartOfSpeech.objects.get(
            symbol=row['pos']).lexical_class
        variant, _created = Variant.objects.get_or_create(id=row['wariant'])
        # One template per (variant, pattern type, inflection characteristic).
        tt, _created = TableTemplate.objects.get_or_create(
            variant=variant,
            pattern_type=PatternType.objects.get(
                entry=row['typr'], lexical_class=lexical_class),
            inflection_characteristic=InflectionCharacteristic.objects.get(
                entry=row['charfl'], part_of_speech__symbol=row['pos']))
        if sql:
            efobaz_id = bfl_table[row['efobaz']]
            cursor.execute(
                "INSERT INTO klatki (st_id, efobaz, tag, prefiks, sufiks, kind) "
                "VALUES (%s, %s, %s, %s, %s, %s)", [tt.pk, efobaz_id, row['morf'],
                                                    row['pref'], row['suf'],
                                                    row['kskl']])
            if row['row']:
                # Fetch the id the sequence just assigned to the cell insert.
                cursor.execute("select currval('klatki_id_seq')")
                last_id = cursor.fetchone()[0]
                cursor.execute(
                    "INSERT INTO komorki_tabel (k_id, row, col, rowspan, colspan) "
                    "VALUES (%s, %s, %s, %s, %s)", [last_id, row['row'],
                                                    row['col'], row['rowspan'],
                                                    row['colspan']])
        else:
            cell = Cell()
            cell.table_template = tt
            cell.base_form_label = BaseFormLabel.objects.get(entry=row['efobaz'])
            cell.tag = row['morf']
            cell.prefix = row['pref']
            cell.suffix = row['suf']
            cell.index = row['kskl']
            cell.save()
            # Positioned cells additionally get layout coordinates.
            if row['row']:
                table_cell = TableCell(
                    cell=cell, row=row['row'], col=row['col'],
                    rowspan=row['rowspan'], colspan=row['colspan'])
                table_cell.save()
436 | + | |
437 | + | |
def import_table_headers():
    """Import table headers (naglowkiwierszy) for existing templates."""
    for row in cur.execute('SELECT * FROM naglowkiwierszy'):
        # Skip style-'b' rows and rows without header text.
        if row['styl'] == 'b' or not row['nagl']:
            continue
        templates = TableTemplate.objects.filter(
            variant__id=row['wariant'], pattern_type__entry=row['typr'],
            inflection_characteristic__entry=row['charfl'],
            inflection_characteristic__part_of_speech__symbol=row['pos'])
        # Headers whose template was never imported are dropped silently.
        if not templates:
            continue
        tt = templates[0]
        horizontal = row['styl'] == 'h'
        if sql:
            cursor.execute(
                "INSERT INTO naglowki_tabel (st_id, row, col, rowspan, colspan, "
                "nagl, wierszowy) VALUES (%s, %s, %s, %s, %s, %s, %s)",
                [tt.pk, row['row'], row['col'], row['rowspan'], row['colspan'],
                 row['nagl'], horizontal])
        else:
            header = TableHeader(
                table_template=tt, row=row['row'], col=row['col'],
                rowspan=row['rowspan'], colspan=row['colspan'],
                label=row['nagl'], horizontal=horizontal)
            header.save()
463 | + | |
464 | + | |
def single_import(fun, db):
  """Run one import function inside a single managed transaction.

  fun -- zero-argument import function to execute (uses globals cur/cursor)
  db  -- source database passed to get_cursor()

  Opens a cursor on the source (SGJP) database and one on the target
  Django database, publishes both as module globals for ``fun``, and
  commits only if ``fun`` completes without raising.
  """
  global cur, cursor
  # Old-style (pre-Django-1.6) manual transaction management.
  transaction.commit_unless_managed()
  transaction.enter_transaction_management()
  transaction.managed(True)
  cur = get_cursor(db)  # source-database cursor
  cursor = connection.cursor()  # target (Django) database cursor
  fun()
  cur.close()
  cursor.close()
  transaction.commit()
  transaction.leave_transaction_management()
477 | + | |
478 | + | |
def delete_and_import():
  """Wipe all imported data and re-run the full import pipeline.

  Deletes every row of the listed models, then runs the individual
  import_* steps in dependency order, all inside one managed
  transaction.  Reads module globals ``db`` and (in the import
  functions) ``sql``/``mini``/``cursor``.
  """
  transaction.commit_unless_managed()
  transaction.enter_transaction_management()
  transaction.managed(True)
  # Deletion order; Django cascades handle dependent rows.
  models = (
    Qualifier,
    LexicalClass,
    PartOfSpeech,
    BaseFormLabel,
    InflectionCharacteristic,
    Lexeme,
    PatternType,
    Pattern,
    LexemeInflectionPattern,
    Ending,
    TableTemplate,
    Cell,
    TableCell,
    Vocabulary,
    LexemeAssociation,
    )
  print 'deleting old data...'
  for model in models:
    model.objects.all().delete()

  # Source-database cursor shared by all import_* functions below.
  global cur
  cur = get_cursor(db)
  print 'importing lexical classes...'
  import_lexical_classes()
  print 'importing parts of speech'
  import_parts_of_speech()
  print 'importing base form labels'
  import_base_form_labels()
  print 'importing inflection characteristics'
  import_inflection_characteristics()
  print 'importing vocabularies...'
  import_vocabularies()
  print 'importing qualifiers...'
  import_qualifiers()
  print 'importing lexemes...'
  import_lexemes()
  print 'importing lexeme associations...'
  import_lexeme_associations()
  print 'importing cross-reference types...'
  import_cross_reference_types()
  print 'importing cross-references...'
  import_cross_references()
  print 'importing pattern types...'
  import_pattern_types()
  print 'importing patterns...'
  import_patterns()
  print 'importing lexeme inflection patterns...'
  import_lexeme_inflection_patterns()
  print 'importing endings...'
  import_endings()
  print 'importing table templates...'
  import_tables()
  print 'importing table headers...'
  import_table_headers()
  cur.close()
  # NOTE(review): 'cursor' is never assigned in this function — this
  # close relies on a module-level 'cursor' global (presumably set near
  # the top of the file or by single_import); verify it exists before
  # running this entry point standalone.
  cursor.close()
  transaction.commit()
  transaction.leave_transaction_management()
542 | + | |
import sys

if __name__ == '__main__':
    # A trailing '-mini' flag switches on the small sample import.
    if sys.argv[-1] == '-mini':
        mini = True
        del sys.argv[-1]
    # Optional first positional argument names the source database.
    db = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_DATABASE
    delete_and_import()
... | ... |