# ajax_lexeme_jqgrid.py
#-*- coding:utf-8 -*-
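# AJAX backend for the lexeme jqGrid: sorting, filtering, paging and caching
# of the list of lexemes visible to the current user.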

from django.utils.simplejson import dumps as json_encode
from django.db.models import Count
from dictionary.models import Lexeme, filter_visible, visible_vocabularies
from dictionary.ajax_jqgrid import JqGridAjax, JqGridQuery
from common.decorators import ajax
from common.util import bisect_left, reverse
from django.core.cache import cache

class LexemeGrid(JqGridAjax):
  model = Lexeme
  search_field = 'entry'
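  # Maps grid column names to ORM lookup paths.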
  field_translation = {
    'part_of_speech': 'part_of_speech__symbol',
  }

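  # When sorting the entry a tergo (from the end of the word), sort by the
  # SQL-computed 'rev' column instead of the entry itself.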
  @staticmethod
  def sort_field_special_case(rule):
    if rule['field'] == 'entry' and rule['a_tergo']:
      return 'rev'
    else:
      return rule['field']

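  # Adds the 'rev' extra select (reversed 'haslo', i.e. the entry column)
  # needed for a tergo sorting.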
  @staticmethod
  def sort_queryset_special_case(queryset, rule):
    if rule['field'] == 'entry' and rule['a_tergo']:
      return queryset.extra(select={'rev': "reverse(haslo)"})
    else:
      return queryset

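  # Translates special filter fields into ORM lookups or annotations; the
  # 'qualifier' filter needs raw SQL, since qualifiers can be attached to the
  # lexeme itself, to its inflection patterns, or to the endings of those
  # patterns.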
  @staticmethod
  def filter_special_case(filter, lookup, negated, queryset):
    field, data = filter['field'], filter['data']
    special = False
    field_translation = {
      'form': 'lexemeform__form',
      'lexeme_qualifier': 'qualifiers__pk',
      'lip_qualifier': 'lexemeinflectionpattern__qualifiers__pk',
      'classification_value': 'classificationvalue__pk',
      'pattern_name': 'lexemeinflectionpattern__pattern__name',
      'inflection_characteristic':
        'lexemeinflectionpattern__inflection_characteristic__entry',
      'containing_vocabulary': 'vocabularies__pk',
      'owner_vocabulary': 'owner_vocabulary__pk',
      'pattern_count': 'pc',
      'ic_count': 'icc',
    }
    if field == 'pattern_count':
      queryset = queryset.annotate(
        pc=Count('lexemeinflectionpattern__pattern', distinct=True))
    elif field == 'ic_count':
      queryset = queryset.annotate(
        icc=Count('lexemeinflectionpattern__inflection_characteristic',
          distinct=True))
    elif field == 'qualifier':
      where = '''(
        exists (
          select * from kwalifikatory_leksemow where lexeme_id = leksemy.id and
            qualifier_id = %s) or
        exists (
          select * from kwalifikatory_odmieniasiow join odmieniasie o on
            lexemeinflectionpattern_id = o.id
          where
            qualifier_id = %s and o.l_id = leksemy.id) or
        exists (
          select * from
            odmieniasie o
            join wzory w on (o.w_id = w.id)
            join szablony_tabel s on (w.typ = s.wtyp and o.charfl = s.charfl)
            join klatki k on k.st_id = s.id
            join zakonczenia z on (o.w_id = z.w_id and k.efobaz = z.efobaz)
            join kwalifikatory_zakonczen kz on (z.id = kz.ending_id)
          where o.l_id = leksemy.id and s.wariant = '1' and
            kz.qualifier_id = %s)
      )'''
      if negated:
        where = 'not ' + where
      queryset = queryset.extra(where=[where], params=[data] * 3)
      special = True
    elif negated and field in (
        'containing_vocabulary', 'lexeme_qualifier', 'classification_value',
        'pattern_name', 'inflection_characteristic'):
      # TODO: add some extra tables and where clauses to avoid a subquery
      pass
    return special, field_translation.get(field, field), queryset

  @classmethod
  def get_queryset(cls, query):
    lexemes = super(LexemeGrid, cls).get_queryset(query)
    return filter_visible(lexemes, query.user)

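  # Restricts the queryset to entries matching the mask: a prefix match by
  # default, a suffix match when the entry is sorted a tergo.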
  @staticmethod
  def apply_mask(lexemes, mask, sort_rules):
    if mask == '':
      return lexemes
    for rule in sort_rules:
      if rule['field'] == 'entry':
        if not rule['a_tergo']:
          matching_lexemes = lexemes.filter(entry__istartswith=mask)
        else:
          matching_lexemes = lexemes.filter(entry__iendswith=mask)
        break
    else:
      matching_lexemes = lexemes.filter(entry__istartswith=mask)
    return matching_lexemes

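  # For a tergo sorting, the boundary comparison has to be made against the
  # reversed entry computed in SQL.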
  @staticmethod
  def filter_value_special_case(queryset, rule, from_value, greater):
    if rule['field'] == 'entry' and rule['a_tergo']:
      if greater:
        comp = '>='
      else:
        comp = '<='
      queryset = queryset.extra(where=["reverse(haslo) " + comp + " %s"],
                                params=[reverse(from_value)])
      return True, queryset
    else:
      return False, queryset

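  # part_of_speech is a related object, so its symbol has to be read
  # explicitly.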
  @staticmethod
  def get_field_special_case(field, lexeme):
    if field == 'part_of_speech':
      return True, lexeme.part_of_speech.symbol
    else:
      return False, None

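  # Builds a single grid row; the column order has to match the jqGrid column
  # model on the client side.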
  @staticmethod
  def response_row(lexeme):
    lip_data = lexeme.lip_data()
    cont_vocabs = '/'.join(lexeme.vocabularies.values_list('id', flat=True))
    return [
      lexeme.id,
      lexeme.entry,
      lexeme.part_of_speech.symbol,
      lip_data['patterns'],
      '', # no pattern count
      lip_data['inflection_characteristics'],
      '', # no inflection characteristic count
      '', # no form
      cont_vocabs,
      lexeme.owner_vocabulary.id,
      dict(Lexeme.STATUS_CHOICES).get(lexeme.status),
      '', # no comment
    ]

  # Row index at which the instance with the given id appears
  # under the given sort order.
  @classmethod
  def row_index(cls, pk, query):
    pk_list = get_pk_list(query)
    count = len(pk_list)
    if count == 0:
      return 0, 0
    return pk_list.index(pk), count

  # Id of the instance whose search_field equals mask, or of the instance that
  # would come right after one with search_field equal to mask under the given
  # sort order. If there is no 'greater' instance, or the sort rules do not
  # involve search_field, the method returns the first instance in the given
  # sort order.
  @classmethod
  def get_pk(cls, query):
    pk_list = get_pk_list(query)
    count = len(pk_list)
    if count == 0:
      return None, None, 0
    # I don't like this whole idea at all
    sort_rules = query.sort_rules
    assert len(sort_rules) > 0
    if sort_rules[0]['field'] != cls.search_field:
      selected_pk = super(LexemeGrid, cls).get_pk(query)
      index, count = cls.row_index(selected_pk, query)
      return selected_pk, index, count

    index = bisect_left(pk_list, query.mask, cmp=make_lexeme_cmp(sort_rules[0]))
    if index == count:
      index -= 1
    return pk_list[index], index, count

  @classmethod
  def get_location(cls, query):
    selected_pk, index, count = cls.get_pk(query)
    return {
      'rowIndex': index,
      'selected_id': selected_pk,
      'records': count,
    }

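# Polish collation is needed for the entry comparisons in make_lexeme_cmp.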
import locale
locale.setlocale(locale.LC_ALL, 'pl_PL.UTF-8')

def make_lexeme_cmp(rule):
  def lexeme_cmp(pk, mask):
    e1 = Lexeme.objects.get(pk=pk).entry
    e2 = mask
    if rule['a_tergo']:
      e1 = reverse(e1)
      e2 = reverse(e2)
    result = locale.strcoll(e1, e2)
    if rule['order'] == 'desc' and e2 != '':
      result = -result
    return result
  return lexeme_cmp

# Query for the index of the row with the given id under the given sort order
@ajax(method='get')
def find_id(request, id, sort_rules, mask, filters=None):
  query = JqGridQuery(
    filters=filters, sort_rules=sort_rules, mask=mask, user=request.user)
  return LexemeGrid.find_id(id, query)

# Query for the id and index of the first row, under the given sort order,
# whose entry starts with mask.
# 'selected_id' == None if there is no such row
@ajax(method='get')
def get_location(request, sort_rules, mask='', filters=None):
  query = JqGridQuery(
    filters=filters, sort_rules=sort_rules, mask=mask, user=request.user)
  return LexemeGrid.get_location(query)

# Miłosz's creation - needs fixing some day
def cache_key(query):
  key = json_encode(query.sort_rules) + json_encode(query.filters)
  for vocabulary in visible_vocabularies(query.user):
    key += vocabulary.id
  if query.filtering_mode():
    key += query.mask
  return key

def get_cached_lexemes(query):
  key = cache_key(query)
  return cache.get(key)

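# Stores the pk list under the query's cache key and records the key in
# 'key_list', presumably so that all cached lists can be invalidated together
# elsewhere.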
def cache_lexemes(pk_list, query):
  key = cache_key(query)
  cache.set(key, pk_list)
  key_list = cache.get('key_list', [])
  if key not in key_list:
    key_list.append(key)
  cache.set('key_list', key_list)

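# Returns the list of lexeme pks in the requested order, using the cache unless
# force_reload is set. 'rev' is selected alongside pk apparently so that the
# a tergo ordering on the extra column is preserved in the values query.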
def get_pk_list(query, force_reload=False):
  if not force_reload:
    pk_list = get_cached_lexemes(query)
  else:
    pk_list = None
  if pk_list is None:
    lexemes = LexemeGrid.get_sorted_queryset(query)
    if 'rev' in lexemes.query.extra_select:
      pk_list = list(row[0] for row in lexemes.values_list('pk', 'rev'))
    else:
      #print lexemes.values_list('pk', flat=True).query
      pk_list = list(lexemes.values_list('pk', flat=True))
    cache_lexemes(pk_list, query)
  return pk_list

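# Main grid data view: remembers the sort rules and filters in the session,
# pages through the (cached) pk list and builds the jqGrid response.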
@ajax(method='get')
def get_lexemes(request, page, rows, sort_rules, filters=None, mask='',
                target_page=0, totalrows=0, force_reload=False):
  request.session['sort_rules'] = json_encode(sort_rules)
  request.session['filters'] = json_encode(filters)
  page = target_page or page
  limit = totalrows or rows
  query = JqGridQuery(
    filters=filters, sort_rules=sort_rules, mask=mask, user=request.user)
  pk_list = get_pk_list(query, force_reload)
  count = len(pk_list)
  total_pages, start, response_rowcount = LexemeGrid.count_pages(
    count, page, limit)
  pk_list = pk_list[start:start + response_rowcount]
  lexemes = [Lexeme.objects.get(pk=pk) for pk in pk_list]
  return LexemeGrid.make_response(lexemes, count, page, total_pages)