# ajax_lexeme_slickgrid.py
# -*- coding: utf-8 -*-
from hashlib import md5

from django.db.models import Count
from django.core.cache import cache

from dictionary.models import Lexeme, filter_visible, visible_vocabularies, \
    LexemeAttribute
from dictionary.ajax_slickgrid import SlickGridQuery
from common.decorators import ajax
from common.util import bisect_left, reverse, json_encode

import locale
# Polish collation is needed by lexeme_cmp() for entry comparison;
# this raises if the pl_PL.UTF-8 locale is not installed on the host.
locale.setlocale(locale.LC_ALL, 'pl_PL.UTF-8')

# The 'aspekt' (verb aspect) attribute, fetched once at import time.
# NOTE(review): this hits the database during module import — confirm
# the attribute always exists before this module is loaded.
ASPECT = LexemeAttribute.objects.get(name=u'aspekt')


class LexemeQuery(SlickGridQuery):
    """SlickGrid query over lexemes.

    Translates grid filter rules into ORM lookups (with raw-SQL NOT IN
    subqueries for negated conditions over to-many relations), supports
    "a tergo" sorting (by reversed entry), caches sorted id lists, and
    exports the listing as tab-separated text.
    """
    model = Lexeme
    sort_field = 'entry'
    default_columns = ('entry', 'pos')
    # Column name -> function extracting the cell value from a Lexeme.
    column_data = {
        'id': lambda lexeme: lexeme.id,
        'entry': lambda lexeme: lexeme.entry,
        'pos': lambda lexeme: lexeme.part_of_speech_id,
        'patterns': lambda lexeme: lexeme.lip_data()['patterns'],
        'genders': lambda lexeme: lexeme.lip_data()['genders'],
        'vocabs': lambda lexeme:
            '/'.join(lexeme.vocabularies.values_list('id', flat=True)),
        'owner': lambda lexeme: lexeme.owner_vocabulary_id,
        'status': lambda lexeme: dict(Lexeme.STATUS_CHOICES).get(lexeme.status),
        'qualifiers': lambda lexeme:
            ', '.join(lexeme.qualifiers.values_list('label', flat=True)),
        'aspect': lambda lexeme:
            getattr(lexeme.attribute_value(ASPECT), 'value', ''),
    }

    # Grid filter field name -> ORM lookup path (or annotation alias).
    filter_field_translation = {
        'form': 'lexemeform__form',
        'lexeme_qualifier': 'qualifiers__id',
        'lip_qualifier': 'lexemeinflectionpattern__qualifiers__id',
        'qualifier': 'qualifiers_cache__id',
        'classification_value': 'classificationvalue__id',
        'pattern_name': 'lexemeinflectionpattern__pattern__name',
        'pattern_type': 'lexemeinflectionpattern__pattern__type_id',
        'gender': 'lexemeinflectionpattern__gender_id',
        'containing_vocabulary': 'vocabularies__id',
        'owner_vocabulary': 'owner_vocabulary_id',
        'pattern_count': 'pc',
        'gender_count': 'gc',
        'cr_type': 'refs_to__type_id',
        'borrowing_source': 'borrowing_source_id',
        'responsible': 'responsible_id',
    }

    def sort_queryset(self, queryset):
        """Order by the sort field with id as tie-breaker.

        For a tergo sorting, order by the reversed entry computed in SQL
        ('reverse' is a database function; 'haslo' is the entry column).
        """
        order_list = [self.sort_field, 'id']
        if self.sort_rules[0] == 'a_tergo':
            queryset = queryset.extra(select={'rev': "reverse(haslo)"})
            order_list[0] = 'rev'
        return queryset.extra(order_by=order_list)

    def apply_filter_rule(self, queryset, rule):
        """Apply a single grid filter rule to the queryset.

        Special cases handled before delegating to the base class:
        count filters (via annotations), "none" filters (id == 0 becomes
        an (is)null test), dynamic attribute filters ('extra-<attr id>'),
        and negated lookups over to-many relations, which are rewritten
        as raw-SQL NOT IN subqueries — a plain exclude over the joined
        table would not mean "has no such related row".
        """
        lookup = self.lookup_translation[rule['op']]
        negated = (lookup[0] == '-')
        field, data = rule['field'], rule['data']
        new_rule = dict(rule)
        if field == 'pattern_count':
            queryset = queryset.annotate(
                pc=Count('lexemeinflectionpattern__pattern', distinct=True))
        elif field == 'gender_count':
            queryset = queryset.annotate(
                gc=Count('lexemeinflectionpattern__gender', distinct=True))
        elif field in ('lexeme_qualifier', 'lip_qualifier', 'qualifier',
                       'borrowing_source', 'responsible'):
            # id 0 stands for "no value": translate to an (is)null test.
            if int(data) == 0:
                new_rule['op'] = 'isnull' if negated else '-isnull'
                lookup = new_rule['op']
                new_rule['data'] = False
        elif field.startswith('extra'):
            # Dynamic lexeme attribute: field is 'extra-<attribute id>'.
            attr = LexemeAttribute.objects.get(id=int(field.split('-')[1]))
            new_rule['field'] = 'lexemeav__attribute_value'
            if attr.closed and int(data) == 0:
                # "No value set": (not) in the set of all allowed values.
                new_rule['op'] = 'in' if negated else '-in'
                lookup = new_rule['op']
                values = attr.values.all()
                new_rule['data'] = tuple(values)
            elif not attr.closed:
                # Slight abuse: run the rule against the attribute-value
                # queryset first, then filter lexemes by the matches.
                new_rule['field'] = 'value'
                matching_values = super(LexemeQuery, self).apply_filter_rule(
                    attr.values, new_rule)
                new_rule = {
                    'field': 'lexemeav__attribute_value',
                    'op': 'in',
                    'data': tuple(matching_values),
                }
        # Optimization of "not equal" conditions over to-many relations:
        # rewrite as NOT IN subqueries against the join table.
        ident = lambda x: x
        # field -> (selected column, join table, lexeme FK column, coercion)
        indirect_fields = {
            'containing_vocabulary': (
                'slownik', 'leksemy_w_slownikach', 'l_id', ident),
            'lexeme_qualifier': (
                'qualifier_id', 'kwalifikatory_leksemow', 'lexeme_id', int),
            'qualifier': (
                'qualifier_id', 'dictionary_lexemeformqualifier', 'lexeme_id',
                int),
            'classification_value': (
                'classification_value_id', 'dictionary_lexemecv', 'lexeme_id',
                int),
            'gender': (
                'gender_id', 'odmieniasie', 'l_id', int),
            'cr_type': (
                'type_id', 'odsylacze', 'l_id_od', int),
            'extra': (
                'attribute_value_id', 'dictionary_lexemeav', 'lexeme_id', int),
        }
        if (field in indirect_fields or field.startswith('extra')) \
                and lookup == '-exact':
            if field.startswith('extra'):
                field1, table, field2, f = indirect_fields['extra']
            else:
                field1, table, field2, f = indirect_fields[field]
            return queryset.extra(where=[
                '''%%s NOT IN (SELECT %s FROM %s WHERE %s = leksemy.id)'''
                % (field1, table, field2)], params=[f(data)])
        if field.startswith('extra') and lookup == '-in':
            # "Attribute has some value": exclude lexemes having any of
            # the allowed values, one subquery per value.
            field1, table, field2, f = indirect_fields['extra']
            for value in new_rule['data']:
                queryset = queryset.extra(where=[
                    '''%%s NOT IN (SELECT %s FROM %s WHERE %s = leksemy.id)'''
                    % (field1, table, field2)], params=[value.id])
            return queryset
        # field -> (selected column, join table 1, join table 2,
        #           join column 1, join column 2, lexeme FK, coercion)
        indirect_fields2 = {
            'pattern_name': (
                'w_id', 'odmieniasie', 'wzory', 'w_id', 'id', 'l_id', ident),
            'pattern_type': (
                'typ', 'odmieniasie', 'wzory', 'w_id', 'id', 'l_id', int),
            'lip_qualifier': (
                'qualifier_id', 'odmieniasie', 'kwalifikatory_odmieniasiow',
                'id', 'lexemeinflectionpattern_id', 'l_id', int),
        }
        if field in indirect_fields2 and lookup == '-exact':
            field1, table1, table2, join1, join2, field2, f = \
                indirect_fields2[field]
            return queryset.extra(where=[
                '''%%s NOT IN (SELECT b.%s FROM %s a
                INNER JOIN %s b ON (a.%s = b.%s) WHERE a.%s = leksemy.id)'''
                % (field1, table1, table2, join1, join2, field2)
            ], params=[f(data)])
        return super(LexemeQuery, self).apply_filter_rule(queryset, new_rule)

    def get_queryset(self):
        """Restrict the base queryset to lexemes visible to the user."""
        lexemes = super(LexemeQuery, self).get_queryset()
        return filter_visible(lexemes, self.user)

    # unused
    def filter_from(self, queryset, from_value, upward):
        """Keep rows at or past `from_value` in the current sort order."""
        if self.sort_rules[0] == 'a_tergo':
            if upward:
                comp = '>='
            else:
                comp = '<='
            return queryset.extra(
                where=["reverse(haslo) " + comp + " %s"],
                params=[reverse(from_value)])
        else:
            return super(LexemeQuery, self).filter_from(
                queryset, from_value, upward)

    def row_index(self, lexeme_id):
        """Index of the row holding `lexeme_id` in the current sorting,
        or None when the listing is empty or the id is not listed."""
        id_list = self.get_id_list()
        if len(id_list) == 0:
            return None
        try:
            return id_list.index(lexeme_id)
        except ValueError:
            return None

    def search_index(self, mask):
        """Index of the first row whose entry is >= `mask` under the
        current collation (a tergo aware), clamped to the last row."""
        id_list = self.get_id_list()
        count = len(id_list)
        if count == 0:
            return 0

        index = bisect_left(id_list, mask, cmp=self.lexeme_cmp())
        if index == count:
            index -= 1
        return index

    def cache_key(self):
        """Cache key derived from the sort rules, the filter and the set
        of vocabularies visible to the user."""
        key = json_encode((self.sort_rules, self.filter), ensure_ascii=True)
        for vocabulary in visible_vocabularies(self.user):
            # NOTE(review): presumably vocabulary ids are string codes
            # (string concatenation) — confirm against the model.
            key += vocabulary.id
        return md5(key.encode()).hexdigest()

    def get_cached_lexemes(self, refresh=True):
        """Fetch the cached id list for this query, if any.

        With refresh=True a cache hit is re-stored to renew its TTL;
        a miss is not re-cached (get_id_list recomputes and caches it).
        """
        key = self.cache_key()
        cached = cache.get(key)
        if refresh and cached is not None:
            cache.set(key, cached)
        return cached

    def cache_lexemes(self, id_list):
        """Store the id list and register its key in the global key list
        so that all cached listings can be found later."""
        key = self.cache_key()
        cache.set(key, id_list)
        key_list = cache.get('key_list', [])
        if key not in key_list:
            key_list.append(key)
        cache.set('key_list', key_list)

    def get_id_list(self, force_reload=False, refresh=True):
        """Return the sorted list of matching lexeme ids, using the
        cache unless `force_reload` is set."""
        if not force_reload:
            id_list = self.get_cached_lexemes(refresh=refresh)
        else:
            id_list = None
        if id_list is None:
            lexemes = self.get_sorted_queryset()
            if 'rev' in lexemes.query.extra_select:
                # Keep the 'rev' column in the query so the database can
                # order by it, but return only the ids.
                id_list = list(
                    row[0] for row in lexemes.values_list('id', 'rev'))
            else:
                id_list = list(lexemes.values_list('id', flat=True))
            self.cache_lexemes(id_list)
        return id_list

    def lexeme_cmp(self):
        """Comparator for bisect: compare the entry of the lexeme with a
        given id against a search mask using Polish collation (reversed
        strings for a tergo sorting)."""
        def fun(lexeme_id, mask):
            e1 = Lexeme.objects.get(id=lexeme_id).entry
            e2 = mask
            if self.sort_rules[0] == 'a_tergo':
                e1 = reverse(e1)
                e2 = reverse(e2)
            result = locale.strcoll(e1, e2)
            return result

        return fun

    def export_list(self, columns, output_file):
        """Write the listed lexemes to `output_file` as tab-separated
        lines, one per lexeme, with the requested columns.

        Aggregated columns (patterns/genders/vocabs) are computed in SQL
        with string_agg to avoid per-row queries.
        """
        column_translation = {
            'pos': 'part_of_speech_id',
            'patterns': 'pattern_list',
            'genders': 'gender_list',
            'vocabs': 'vocab_list',
            'owner': 'owner_vocabulary_id'
        }
        lexemes = Lexeme.objects.filter(id__in=self.get_id_list())
        if 'patterns' in columns:
            lexemes = lexemes.extra(select={
                'pattern_list':
                    "select coalesce(string_agg(distinct w.w_id, '/'), '') "
                    "from wzory w join odmieniasie o on w.id = o.w_id "
                    "where o.l_id = leksemy.id",
            })
        if 'genders' in columns:
            lexemes = lexemes.extra(select={
                'gender_list':
                    "select coalesce(string_agg(distinct g.symbol, '/'), '') "
                    "from dictionary_gender g "
                    "join odmieniasie o on g.id = o.gender_id "
                    "where o.l_id = leksemy.id",
            })
        if 'vocabs' in columns:
            lexemes = lexemes.extra(select={
                'vocab_list':
                    "select coalesce(string_agg(ls.slownik, '/'), '') "
                    "from leksemy_w_slownikach ls "
                    "where ls.l_id = leksemy.id",
            })
        fields = [
            column_translation.get(column, column) for column in columns]
        for row in lexemes.values(*fields):
            print >>output_file,  '\t'.join(
                unicode(row[field]) for field in fields)


# Query for the index of the row with a given id under a given sort order
@ajax(login_required=False, method='get')
def row_index(request, id, sort_rules, filter):
    """Return the listing index of the lexeme with the given id."""
    lexeme_query = LexemeQuery(
        filter=filter, sort_rules=sort_rules, user=request.user)
    index = lexeme_query.row_index(id)
    return {'index': index}


@ajax(login_required=False, method='get')
def search_index(request, sort_rules, filter, search=''):
    """Return the listing index of the first row matching `search`."""
    lexeme_query = LexemeQuery(
        filter=filter, sort_rules=sort_rules, user=request.user)
    index = lexeme_query.search_index(search)
    return {'index': index}


@ajax(login_required=False, method='get')
def get_lexemes(request, from_page, to_page, rows, sort_rules, filter,
                force_reload=False):
    """Return one page range of lexeme rows plus the total row count.

    The active sort rules and filter are remembered in the session so
    other views can reuse them.
    """
    request.session['sort_rules'] = sort_rules
    request.session['filter'] = filter
    query = LexemeQuery(
        filter=filter, sort_rules=sort_rules, user=request.user,
        columns=request.session.get('columns'))
    id_list = query.get_id_list(force_reload)
    start, response_rowcount = query.count_pages(from_page, to_page, rows)
    page_ids = id_list[start:start + response_rowcount]
    # Fetch the page's lexemes in a single query, then restore the list
    # order — filter(id__in=...) does not preserve the order of page_ids.
    by_id = {}
    for lexeme in prefetch(Lexeme.objects.filter(id__in=page_ids)):
        by_id[lexeme.id] = lexeme
    ordered = [by_id[lexeme_id] for lexeme_id in page_ids]
    return {
        'rows': query.prepare_rows(ordered),
        'count': len(id_list),
        'page': from_page,
    }


@ajax(login_required=False, method='get')
def search_by_form(request, sort_rules, filter, exponent):
    """Return rows for visible lexemes having a form equal to `exponent`,
    each annotated with its index in the current listing.

    NOTE(review): matches not present in the cached id list (e.g. when
    excluded by the active filter) would make `list.index` raise
    ValueError — confirm callers only pass forms of listed lexemes.
    """
    query = LexemeQuery(
        filter=filter, sort_rules=sort_rules, user=request.user)
    id_list = query.get_id_list()
    matching = query.get_queryset().filter(lexemeform__form=exponent)
    rows = query.prepare_rows(matching)
    for row in rows:
        row['row'] = id_list.index(row['id'])
    return {'rows': rows}


def prefetch(queryset):
    """Attach the related objects needed to render grid rows, avoiding
    per-row queries for patterns, genders and vocabularies."""
    single_valued = ('owner_vocabulary', 'part_of_speech')
    many_valued = (
        'lexemeinflectionpattern_set__pattern',
        'lexemeinflectionpattern_set__gender',
        'vocabularies',
    )
    return queryset.select_related(*single_valued).prefetch_related(
        *many_valued)