# ajax_lexeme_slickgrid.py
#-*- coding:utf-8 -*-
from hashlib import md5

from django.db.models import Count
from django.core.cache import cache

from dictionary.models import Lexeme, filter_visible, visible_vocabularies
from dictionary.ajax_slickgrid import SlickGridQuery
from common.decorators import ajax
from common.util import bisect_left, reverse, json_encode

import locale
locale.setlocale(locale.LC_ALL, 'pl_PL.UTF-8')

class LexemeQuery(SlickGridQuery):
    """Grid query backend for the Lexeme table.

    Extends SlickGridQuery with:
    - "a tergo" sorting of the entry column (ordering by reversed text),
    - lexeme-specific filter lookups, including a raw-SQL qualifier
      filter spanning three levels of qualifier attachment,
    - per-user visibility filtering,
    - caching of the sorted list of lexeme ids.
    """
    model = Lexeme
    search_field = 'entry'
    # Grid column name -> ORM ordering path.
    sort_field_translation = {
        'part_of_speech': 'part_of_speech__symbol',
    }

    # Grid filter name -> ORM lookup path.  'pc' and 'icc' refer to
    # annotations that apply_filter_rule adds on demand.
    filter_field_translation = {
        'form': 'lexemeform__form',
        'lexeme_qualifier': 'qualifiers__pk',
        'lip_qualifier': 'lexemeinflectionpattern__qualifiers__pk',
        'classification_value': 'classificationvalue__pk',
        'pattern_name': 'lexemeinflectionpattern__pattern__name',
        'inflection_characteristic':
            'lexemeinflectionpattern__inflection_characteristic__symbol',
        'containing_vocabulary': 'vocabularies__pk',
        'owner_vocabulary': 'owner_vocabulary__pk',
        'pattern_count': 'pc',
        'ic_count': 'icc',
        'cr_type': 'refs_to__type__pk',
    }

    def get_sort_field(self, rule):
        # An a-tergo sort on the entry column sorts on the 'rev' extra
        # column (set up in sort_queryset) instead of the field itself.
        new_rule = dict(rule)
        if rule['field'] == 'entry' and rule['a_tergo']:
            new_rule['field'] = 'rev'
        return super(LexemeQuery, self).get_sort_field(new_rule)

    def sort_queryset(self, queryset):
        # Expose the reversed entry text as an extra select column for
        # a-tergo sorting ('haslo' is the DB column backing the entry
        # field; reverse() is a DB-side function).
        for rule in self.sort_rules:
            if rule['field'] == 'entry' and rule['a_tergo']:
                queryset = queryset.extra(select={'rev': "reverse(haslo)"})
        return super(LexemeQuery, self).sort_queryset(queryset)

    def apply_filter_rule(self, queryset, rule):
        """Apply one filter rule.

        Handles the annotated counts ('pattern_count', 'ic_count') and
        the raw-SQL 'qualifier' filter here; all other fields are
        delegated to the base class.
        """
        lookup = self.lookup_translation[rule['op']]
        negated = (lookup[0] == '-')
        field, data = rule['field'], rule['data']
        if field == 'pattern_count':
            # Number of distinct inflection patterns of the lexeme.
            queryset = queryset.annotate(
                pc=Count('lexemeinflectionpattern__pattern', distinct=True))
        elif field == 'ic_count':
            # Number of distinct inflection characteristics.
            queryset = queryset.annotate(
                icc=Count('lexemeinflectionpattern__inflection_characteristic',
                    distinct=True))
        elif field == 'qualifier':
            # Match a qualifier attached at any of three levels: to the
            # lexeme itself, to one of its inflection-pattern
            # assignments, or to an ending of a pattern it uses (the
            # table names are the legacy Polish-named schema).
            where = '''(
                exists (
                  select * from kwalifikatory_leksemow where lexeme_id = leksemy.id and
                    qualifier_id = %s) or
                exists (
                  select * from kwalifikatory_odmieniasiow join odmieniasie o on
                    lexemeinflectionpattern_id = o.id
                  where
                    qualifier_id = %s and o.l_id = leksemy.id) or
                exists (
                  select * from
                    odmieniasie o
                    join wzory w on (o.w_id = w.id)
                    join szablony_tabel s on (w.typ = s.wtyp and o.charfl = s.charfl)
                    join klatki k on k.st_id = s.id
                    join zakonczenia z on (o.w_id = z.w_id and k.efobaz = z.efobaz)
                    join kwalifikatory_zakonczen kz on (z.id = kz.ending_id)
                  where o.l_id = leksemy.id and s.wariant = '1' and
                    kz.qualifier_id = %s)
                )'''
            if negated:
                where = 'not ' + where
            # The same qualifier pk fills all three %s placeholders.
            queryset = queryset.extra(where=[where], params=[data] * 3)
            return queryset
        return super(LexemeQuery, self).apply_filter_rule(queryset, rule)

    def get_queryset(self):
        # Restrict the base queryset to lexemes visible to this user.
        lexemes = super(LexemeQuery, self).get_queryset()
        return filter_visible(lexemes, self.user)

    def apply_mask(self, lexemes):
        """Narrow the queryset to entries matching the search mask.

        Uses a case-insensitive suffix match when the entry column is
        sorted a tergo, otherwise a prefix match.
        """
        if self.mask == '':
            return lexemes
        for rule in self.sort_rules:
            if rule['field'] == 'entry':
                if not rule['a_tergo']:
                    matching_lexemes = lexemes.filter(
                        entry__istartswith=self.mask)
                else:
                    matching_lexemes = lexemes.filter(
                        entry__iendswith=self.mask)
                break
        else:
            # No sort rule on the entry column: default to prefix match.
            matching_lexemes = lexemes.filter(entry__istartswith=self.mask)
        return matching_lexemes

    # unused
    def filter_value(self, queryset, rule, from_value, upward):
        greater = (rule['order'] == 'asc') == upward
        if rule['field'] == 'entry' and rule['a_tergo']:
            if greater:
                comp = '>='
            else:
                comp = '<='
            return queryset.extra(where=["reverse(haslo) " + comp + " %s"],
                params=[reverse(from_value)])
        else:
            return super(LexemeQuery, self).filter_value(
                queryset, rule, from_value, upward)

    def response_row(self, lexeme):
        """Serialize one lexeme into a dict for a grid row."""
        lip_data = lexeme.lip_data()
        # NOTE(review): assumes vocabulary ids are strings, otherwise
        # '/'.join would raise — confirm against the Vocabulary model.
        cont_vocabs = '/'.join(v.id for v in lexeme.vocabularies.all())
        return {
            'id': lexeme.id,
            'entry': lexeme.entry,
            'pos': lexeme.part_of_speech.symbol,
            'patterns': lip_data['patterns'],
            'ics': lip_data['inflection_characteristics'],
            'vocabs': cont_vocabs,
            'owner': lexeme.owner_vocabulary.id,
            'status': dict(Lexeme.STATUS_CHOICES).get(lexeme.status),
        }

    # Row index, under the current sort order, at which the instance
    # with the given id appears.
    def row_index(self, lexeme_id):
        """Return (index, total count); (0, 0) for an empty list.

        Raises ValueError if lexeme_id is not in the current id list.
        """
        id_list = self.get_id_list()
        count = len(id_list)
        if count == 0:
            return 0, 0
        return id_list.index(lexeme_id), count

    def search_index(self):
        """Binary-search the id list for the first row matching the mask.

        Only meaningful when the primary sort rule is on the entry
        column; otherwise (or for an empty list) returns 0.
        """
        id_list = self.get_id_list()
        count = len(id_list)
        if count == 0:
            return 0
        first_rule = self.sort_rules[0]
        if first_rule['field'] != self.search_field:
            return 0

        # Python 2 bisect helper accepting a cmp function.
        index = bisect_left(id_list, self.mask,
            cmp=make_lexeme_cmp(first_rule))
        if index == count:
            index -= 1
        return index

    def cache_key(self):
        # The key covers everything the cached id list depends on:
        # sort rules, filter, the user's visible vocabularies and
        # (in filtering mode) the mask.  Python 2: md5() takes str.
        key = json_encode(self.sort_rules) + json_encode(self.filter)
        for vocabulary in visible_vocabularies(self.user):
            key += vocabulary.id
        if self.filtering_mode():
            key += self.mask
        return md5(key).hexdigest()

    def get_cached_lexemes(self):
        # Return the cached id list, or None on a cache miss.
        key = self.cache_key()
        return cache.get(key)

    def cache_lexemes(self, id_list):
        key = self.cache_key()
        cache.set(key, id_list)
        # Register the key in 'key_list' — presumably so all cached id
        # lists can be invalidated in bulk elsewhere; verify callers.
        key_list = cache.get('key_list', [])
        if key not in key_list:
            key_list.append(key)
        cache.set('key_list', key_list)

    def get_id_list(self, force_reload=False):
        """Return the sorted list of lexeme ids for the current state.

        Serves from the cache unless force_reload is set; on a miss,
        runs the sorted query and stores the result.
        """
        if not force_reload:
            id_list = self.get_cached_lexemes()
        else:
            id_list = None
        if id_list is None:
            lexemes = self.get_sorted_queryset()
            if 'rev' in lexemes.query.extra_select:
                # Include 'rev' in values_list so the extra select used
                # by the a-tergo ORDER BY survives; only ids are kept.
                id_list = list(row[0] for row in lexemes.values_list('id', 'rev'))
            else:
                id_list = list(lexemes.values_list('id', flat=True))
            self.cache_lexemes(id_list)
        return id_list

def make_lexeme_cmp(rule):
    """Build a comparator between a lexeme id and a search mask.

    The returned function loads the lexeme's entry and compares it to
    the mask with the current locale's collation (Polish), honouring
    the a-tergo and descending options of the given sort rule.
    """
    def lexeme_cmp(lexeme_id, mask):
        entry = Lexeme.objects.get(id=lexeme_id).entry
        key = mask
        if rule['a_tergo']:
            entry = reverse(entry)
            key = reverse(key)
        outcome = locale.strcoll(entry, key)
        # An empty mask keeps its natural position even when the sort
        # order is descending.
        if rule['order'] == 'desc' and key != '':
            outcome = -outcome
        return outcome

    return lexeme_cmp

# Query for the row index of the row with a given id under the given
# sort order.
@ajax(method='get')
def find_id(request, id, sort_rules, mask, filter=None):
    """Return the position and total row count of the lexeme *id*.

    Response keys: 'index' (row index under the current sort/filter
    state) and 'count' (total number of rows).
    """
    query = LexemeQuery(
        filter=filter, sort_rules=sort_rules, mask=mask, user=request.user)
    # Fix: row_index is an instance method — the old code invoked it
    # unbound with swapped arguments (LexemeQuery.row_index(id, query)),
    # which passes an int as `self` and fails.
    index, count = query.row_index(id)
    return {
        'index': index,
        'count': count,
    }

@ajax(method='get')
def search_index(request, sort_rules, filter, search=''):
    """Return the row index of the first lexeme matching *search*."""
    lexeme_query = LexemeQuery(
        filter=filter, sort_rules=sort_rules, mask=search,
        user=request.user)
    return {'index': lexeme_query.search_index()}

@ajax(method='get')
def get_lexemes(request, from_page, to_page, rows, sort_rules, filter,
                mask='', force_reload=False):
    """Return a page range of grid rows for the lexeme table.

    Stores the sort/filter state in the session, resolves the page
    range to a slice of the (cached) sorted id list, then bulk-fetches
    the lexemes with their related data and serializes them.
    """
    request.session['sort_rules'] = sort_rules
    request.session['filter'] = filter
    query = LexemeQuery(
        filter=filter, sort_rules=sort_rules, mask=mask, user=request.user)
    id_list = query.get_id_list(force_reload)
    total = len(id_list)
    start, response_rowcount = query.count_pages(from_page, to_page, rows)
    page_ids = id_list[start:start + response_rowcount]
    # One query for the page, with related objects prefetched.
    fetched = Lexeme.objects.filter(id__in=page_ids).select_related(
        'owner_vocabulary', 'part_of_speech').prefetch_related(
        'lexemeinflectionpattern_set__pattern',
        'lexemeinflectionpattern_set__inflection_characteristic',
        'vocabularies')
    # Restore the cached ordering, which the IN query does not preserve.
    by_id = dict((lexeme.id, lexeme) for lexeme in fetched)
    ordered = [by_id[lexeme_id] for lexeme_id in page_ids]
    return query.make_response(ordered, total, from_page)