# ajax_lexeme_slickgrid.py
# -*- coding: utf-8 -*-
from hashlib import md5

from django.db.models import Count
from django.core.cache import cache

from dictionary.models import Lexeme, filter_visible, visible_vocabularies, LexemeAttribute
from dictionary.ajax_slickgrid import SlickGridQuery
from common.decorators import ajax
from common.util import bisect_left, reverse, json_encode

import locale
locale.setlocale(locale.LC_ALL, 'pl_PL.UTF-8')

class LexemeQuery(SlickGridQuery):
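    """SlickGrid data source for lexemes: translates grid filter and sort
    rules into ORM lookups, caches ordered id lists, and serializes rows
    for the client. filter_field_translation maps grid filter fields to
    Django lookup paths.
    """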
    model = Lexeme
    sort_field = 'entry'

    filter_field_translation = {
        'form': 'lexemeform__form',
        'lexeme_qualifier': 'qualifiers__id',
        'lip_qualifier': 'lexemeinflectionpattern__qualifiers__id',
        'qualifier': 'qualifiers_cache__id',
        'classification_value': 'classificationvalue__id',
        'pattern_name': 'lexemeinflectionpattern__pattern__name',
        'pattern_type': 'lexemeinflectionpattern__pattern__type_id',
        'gender': 'lexemeinflectionpattern__gender_id',
        'containing_vocabulary': 'vocabularies__id',
        'owner_vocabulary': 'owner_vocabulary_id',
        'pattern_count': 'pc',
        'gender_count': 'gc',
        'cr_type': 'refs_to__type_id',
    }

    def sort_queryset(self, queryset):
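        """Order by entry and id; for a tergo sorting, order instead by the
        SQL-reversed entry column (named 'haslo' in the database)."""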
        order_list = [self.sort_field, 'id']
        if self.sort_rules[0] == 'a_tergo':
            queryset = queryset.extra(select={'rev': "reverse(haslo)"})
            order_list[0] = 'rev'
        return queryset.extra(order_by=order_list)

    def apply_filter_rule(self, queryset, rule):
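        """Translate one grid filter rule, handling the special cases: pattern
        and gender counts (annotations), qualifier id 0 ("no qualifier") and
        custom lexeme attributes, then delegate to the base implementation."""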
        lookup = self.lookup_translation[rule['op']]
        negated = (lookup[0] == '-')
        field, data = rule['field'], rule['data']
        new_rule = dict(rule)
        if field == 'pattern_count':
            queryset = queryset.annotate(
                pc=Count('lexemeinflectionpattern__pattern', distinct=True))
        elif field == 'gender_count':
            queryset = queryset.annotate(
                gc=Count('lexemeinflectionpattern__gender', distinct=True))
        elif field in ('lexeme_qualifier', 'lip_qualifier', 'qualifier'):
            if int(data) == 0:
                new_rule['op'] = 'isnull' if negated else '-isnull'
                new_rule['data'] = False
        elif field.startswith('extra'):
            attr = LexemeAttribute.objects.get(id=int(field.split('-')[1]))
            if attr.closed:
                new_rule['field'] = 'lexemeav__attribute_value'
            else:
                new_rule['field'] = 'lexemeav__attribute_value__value'
            # An empty value means "no value set": rewrite the rule to
            # (not) match any of the attribute's values.
            if data == '' or (attr.closed and int(data) == 0):
                new_rule['field'] = 'lexemeav__attribute_value'
                new_rule['op'] = 'in' if negated else '-in'
                values = attr.values.all()
                new_rule['data'] = tuple(values)
        return super(LexemeQuery, self).apply_filter_rule(queryset, new_rule)

    def get_queryset(self):
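        """Restrict the base queryset to lexemes visible to the current user."""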
        lexemes = super(LexemeQuery, self).get_queryset()
        return filter_visible(lexemes, self.user)

    def apply_mask(self, lexemes):
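        """Filter by the mask: prefix match when sorting a fronte, suffix
        match (a tergo) otherwise."""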
        if self.mask == '':
            return lexemes
        if self.sort_rules[0] == 'a_fronte':
            matching_lexemes = lexemes.filter(entry__istartswith=self.mask)
        else:
            matching_lexemes = lexemes.filter(entry__iendswith=self.mask)
        return matching_lexemes

    # unused
    def filter_from(self, queryset, from_value, upward):
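        """Restrict to entries at or after (or at or before, depending on
        `upward`) from_value in the current sort order; reversed entries
        are compared when sorting a tergo."""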
        if self.sort_rules[0] == 'a_tergo':
            if upward:
                comp = '>='
            else:
                comp = '<='
            return queryset.extra(where=["reverse(haslo) " + comp + " %s"],
                params=[reverse(from_value)])
        else:
            return super(LexemeQuery, self).filter_from(
                queryset, from_value, upward)

    def response_row(self, lexeme):
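        """Serialize a single lexeme into the row dictionary sent to the grid."""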
        lip_data = lexeme.lip_data()
        cont_vocabs = '/'.join(v.id for v in lexeme.vocabularies.all())
        return {
            'id': lexeme.id,
            'entry': lexeme.entry,
            'pos': lexeme.part_of_speech.symbol,
            'patterns': lip_data['patterns'],
            'genders': lip_data['genders'],
            'vocabs': cont_vocabs,
            'owner': lexeme.owner_vocabulary.id,
            'status': dict(Lexeme.STATUS_CHOICES).get(lexeme.status),
        }

    # Index of the row at which the record with the given id will appear
    # under the given sort order.
    def row_index(self, lexeme_id):
        id_list = self.get_id_list()
        if len(id_list) == 0:
            return None
        try:
            return id_list.index(lexeme_id)
        except ValueError:
            return None

    def search_index(self):
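        """Binary-search the ordered id list for the first entry that is not
        less than the mask (Polish collation) and return its index, clamped
        to the last row."""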
        id_list = self.get_id_list()
        count = len(id_list)
        if count == 0:
            return 0

        index = bisect_left(id_list, self.mask, cmp=self.lexeme_cmp())
        if index == count:
            index -= 1
        return index

    def cache_key(self):
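        """Build an md5 key from the sort rules, the filter, the user's
        visible vocabularies and, when filtering, the mask."""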
        key = json_encode((self.sort_rules, self.filter), ensure_ascii=True)
        for vocabulary in visible_vocabularies(self.user):
            key += vocabulary.id
        if self.filtering_mode():
            key += self.mask
        # Hash a byte string: the mask may contain non-ASCII characters.
        return md5(key.encode('utf-8')).hexdigest()

    def get_cached_lexemes(self):
        key = self.cache_key()
        cached = cache.get(key)
        if cached is not None:
            # re-set on access to refresh the entry's timeout
            cache.set(key, cached)
        return cached

    def cache_lexemes(self, id_list):
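        """Cache the id list under this query's key and record the key in
        'key_list', so all cached lists can be tracked (and presumably
        invalidated elsewhere)."""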
        key = self.cache_key()
        cache.set(key, id_list)
        key_list = cache.get('key_list', [])
        if key not in key_list:
            key_list.append(key)
        cache.set('key_list', key_list)

    def get_id_list(self, force_reload=False):
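        """Return the ordered list of lexeme ids for this query, reusing the
        cached list unless force_reload is set."""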
        if not force_reload:
            id_list = self.get_cached_lexemes()
        else:
            id_list = None
        if id_list is None:
            lexemes = self.get_sorted_queryset()
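            # The extra 'rev' column is kept in the SELECT list so the a tergo
            # ORDER BY can refer to it; fetch it and drop it afterwards.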
            if 'rev' in lexemes.query.extra_select:
                id_list = list(row[0] for row in lexemes.values_list('id', 'rev'))
            else:
                id_list = list(lexemes.values_list('id', flat=True))
            self.cache_lexemes(id_list)
        return id_list

    def lexeme_cmp(self):
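        """Comparator for bisect_left: compare a lexeme's entry against the
        mask using Polish collation, on reversed strings when sorting a tergo."""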
        def fun(lexeme_id, mask):
            e1 = Lexeme.objects.get(id=lexeme_id).entry
            e2 = mask
            if self.sort_rules[0] == 'a_tergo':
                e1 = reverse(e1)
                e2 = reverse(e2)
            result = locale.strcoll(e1, e2)
            return result

        return fun

# Query for the index of the row with a given id under the given sort order.
@ajax(method='get')
def row_index(request, id, sort_rules, filter, mask):
    query = LexemeQuery(
        filter=filter, sort_rules=sort_rules, mask=mask, user=request.user)
    return {'index': query.row_index(id)}

@ajax(method='get')
def search_index(request, sort_rules, filter, search=''):
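    """Return the index of the first row matching the search string under
    the given sort order."""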
    query = LexemeQuery(
        filter=filter, sort_rules=sort_rules, mask=search, user=request.user)
    return {'index': query.search_index()}

@ajax(method='get')
def get_lexemes(request, from_page, to_page, rows, sort_rules, filter,
                mask='', force_reload=False):
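    """Return one page of grid rows plus the total row count; lexemes are
    fetched in bulk and reordered to match the cached id list. The sort
    rules and filter are also saved in the session."""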
    request.session['sort_rules'] = sort_rules
    request.session['filter'] = filter
    query = LexemeQuery(
        filter=filter, sort_rules=sort_rules, mask=mask, user=request.user)
    id_list = query.get_id_list(force_reload)
    count = len(id_list)
    start, response_rowcount = query.count_pages(from_page, to_page, rows)
    sublist = id_list[start:start + response_rowcount]
    lexemes_qs = Lexeme.objects.filter(id__in=sublist).select_related(
        'owner_vocabulary', 'part_of_speech').prefetch_related(
        'lexemeinflectionpattern_set__pattern',
        'lexemeinflectionpattern_set__gender',
        'vocabularies')
    lexemes_dict = dict((l.id, l) for l in lexemes_qs)
    lexemes = [lexemes_dict[lexeme_id] for lexeme_id in sublist]
    return {
        'rows': query.prepare_rows(lexemes),
        'count': count,
        'page': from_page,
    }