# ajax_lexeme_jqgrid.py
#-*- coding:utf-8 -*-

from django.utils.simplejson import dumps as json_encode, loads as json_decode
from django.db.models import Count
from dictionary.models import Lexeme, Pattern, visible_vocabularies
from dictionary.ajax_jqgrid import JqGridAjax
from accounts.models import filtering_mode
from common.decorators import ajax, AjaxError
from common.util import bisect_left, reverse
from django.core.cache import cache

class LexemeGrid(JqGridAjax):
  """jqGrid AJAX backend for browsing Lexeme records.

  Specializes the generic JqGridAjax machinery with lexeme-specific
  behaviour: a-tergo (reversed-word) sorting of the 'entry' column,
  extra filter fields expressed as ORM lookups or raw SQL, and the row
  format the client-side grid expects.
  """
  model = Lexeme
  search_field = 'entry'
  # Grid column name -> ORM lookup path.
  field_translation = {
    'part_of_speech': 'part_of_speech__symbol',
  }

  @staticmethod
  def sort_field_special_case(rule):
    # A-tergo sort of 'entry' orders by the extra 'rev' column that
    # sort_queryset_special_case() attaches to the queryset.
    if rule['field'] == 'entry' and rule['a_tergo']:
      return 'rev'
    else:
      return rule['field']

  @staticmethod
  def sort_queryset_special_case(queryset, rule):
    # 'haslo' is presumably the DB column backing Lexeme.entry; the SQL
    # reverse() enables a-tergo ordering -- TODO confirm column name.
    if rule['field'] == 'entry' and rule['a_tergo']:
      return queryset.extra(select={'rev': "reverse(haslo)"})
    else:
      return queryset

  @staticmethod
  def filter_special_case(filter, lookup, negated, queryset):
    """Translate grid filter fields that need special handling.

    Returns (handled, field, lookup_or_arg, queryset): when handled is
    True the third element is a ready filter() kwarg dict; when False
    the caller should keep filtering on the returned field/lookup.
    NOTE(review): the fallback branch returns {} in the lookup slot --
    presumably the generic caller treats that specially; confirm there.
    """
    field, data = filter['field'], filter['data']
    # Simple renames to ORM lookup paths.
    if field == 'form':
      field = 'lexemeform__form'
    elif field == 'lexeme_qualifier':
      field = 'qualifiers__pk'
    elif field == 'lip_qualifier':
      field = 'lexemeinflectionpattern__qualifiers__pk'
    elif field == 'classification_value':
      field = 'classificationvalue__pk'
    if field in ('pattern_name', 'inflection_characteristic'):
      if field == 'pattern_name':
        lip_lookup = 'pattern__pk'
        try:
          p = Pattern.objects.get(name=data)
          data = p.pk
        except Pattern.DoesNotExist:
          data = 0  # no such pattern: pk 0 matches nothing
        # 'odmieniasie' is presumably the inflection-pattern join table
        # (l_id = lexeme, w_id = pattern) -- TODO confirm schema.
        where = ("EXISTS (SELECT odmieniasie.l_id FROM odmieniasie WHERE "
                 "odmieniasie.w_id <> %s AND leksemy.id = odmieniasie.l_id)")
      elif field == 'inflection_characteristic':
        lip_lookup = 'inflection_characteristic__entry'
        where = ("EXISTS (SELECT odmieniasie.l_id FROM odmieniasie JOIN charfle "
                 "ON odmieniasie.charfl = charfle.id WHERE "
                 "charfle.charfl <> %s AND leksemy.id = odmieniasie.l_id)")
      #if not negated or lookup == 'maybe':
      arg = {('lexemeinflectionpattern__%s' % lip_lookup): data}
      #else:
      #  arg = {}
      if negated: # or lookup == 'surely':
        #if not negated:
        #  where = 'NOT ' + where
        # NOTE(review): for negated filters an extra EXISTS clause also
        # requires some *other* pattern/characteristic row; the negation
        # of `arg` itself is presumably applied by the caller -- confirm.
        queryset = queryset.extra(where=[where], params=[data])
    elif field == 'containing_vocabulary':
      return False, 'vocabularies__id', 'id', queryset
    elif field == 'owner_vocabulary':
      arg = {'owner_vocabulary__id': data}
    elif field == 'pattern_count':
      queryset = queryset.annotate(
        pc=Count('lexemeinflectionpattern__pattern', distinct=True))
      return False, 'pc', lookup, queryset
    elif field == 'ic_count':
      queryset = queryset.annotate(
        icc=Count('lexemeinflectionpattern__inflection_characteristic',
                 distinct=True))
      return False, 'icc', lookup, queryset
    else:
      return False, field, {}, queryset
    return True, None, arg, queryset

  @staticmethod
  def get_queryset(vocabularies):
    # Non-deleted lexemes belonging to any of the visible vocabularies.
    return Lexeme.objects.filter(
      deleted=False, vocabularies__in=vocabularies).distinct()

  @staticmethod
  def apply_mask(lexemes, mask, sort_rules):
    """Restrict `lexemes` to entries matching `mask`.

    The first sort rule on 'entry' decides the match direction:
    prefix match normally, suffix match under a-tergo sorting.
    Without such a rule, prefix match is used.
    """
    if mask == '':
      return lexemes
    for rule in sort_rules:
      if rule['field'] == 'entry':
        if not rule['a_tergo']:
          matching_lexemes = lexemes.filter(entry__istartswith=mask)
        else:
          matching_lexemes = lexemes.filter(entry__iendswith=mask)
        break
    else:
      matching_lexemes = lexemes.filter(entry__istartswith=mask)
    return matching_lexemes

  @staticmethod
  def filter_value_special_case(queryset, rule, from_value, greater):
    # Seeking by value under a-tergo sorting must compare reversed
    # entries on the SQL side as well.
    if rule['field'] == 'entry' and rule['a_tergo']:
      if greater:
        comp = '>='
      else:
        comp = '<='
      queryset = queryset.extra(where=["reverse(haslo) " + comp + " %s"],
                                params=[reverse(from_value)])
      return True, queryset
    else:
      return False, queryset

  @staticmethod
  def get_field_special_case(field, lexeme):
    # 'part_of_speech' is rendered through its symbol, not the FK itself.
    if field == 'part_of_speech':
      return True, lexeme.part_of_speech.symbol
    else:
      return False, None

  @staticmethod
  def response_row(lexeme):
    """Serialize one lexeme into the flat list of cells the grid shows."""
    lip_data = lexeme.lip_data()
    cont_vocabs = '/'.join(lexeme.vocabularies.values_list('id', flat=True))
    return [
      lexeme.id,
      lexeme.entry,
      lexeme.part_of_speech.symbol,
      lip_data['patterns'],
      '', # no pattern count
      lip_data['inflection_characteristics'],
      '', # no inflection-characteristic count
      '', # no form
      cont_vocabs,
      lexeme.owner_vocabulary.id,
      dict(Lexeme.STATUS_CHOICES).get(lexeme.status),
      '', # no comment
    ]

  # Index of the row (under the given sort order) at which the instance
  # with the given pk appears.
  @classmethod
  def row_index(
      cls, pk, filters, sort_rules, filtering_mode, mask, vocabularies):
    pk_list = get_pk_list(
      sort_rules, filters, vocabularies, mask, filtering_mode)

    count = len(pk_list)
    if count == 0:
      return 0, 0
    return pk_list.index(pk), count

  # Pk of the instance whose search_field equals mask, or of the one that
  # would come right after it under the given sort order. If there is no
  # "greater" instance, or the sort rules do not involve search_field,
  # the method returns the first instance in the given ordering.
  @classmethod
  def get_pk(cls, mask, filters, sort_rules, filtering_mode, vocabularies):
    pk_list = get_pk_list(
      sort_rules, filters, vocabularies, mask, filtering_mode)
    count = len(pk_list)
    if count == 0:
      return None, None, 0
    # NOTE(review): original author disliked this whole approach.
    if len(sort_rules) == 0 or sort_rules[0]['field'] != cls.search_field:
      # Not sorted by entry: delegate to the generic implementation.
      # Fixed argument order: super(self, LexemeGrid) would raise
      # TypeError for any subclass of LexemeGrid.
      selected_pk = super(LexemeGrid, cls).get_pk(
        mask, filters, sort_rules, filtering_mode, vocabularies)
      index, count = cls.row_index(
        selected_pk, filters, sort_rules, filtering_mode, mask, vocabularies)
      return selected_pk, index, count

    # Binary search over pks, comparing entries against the mask.
    index = bisect_left(pk_list, mask, cmp=make_lexeme_cmp(sort_rules[0]))
    if index == count:
      index -= 1
    return pk_list[index], index, count

  @classmethod
  def get_location(cls, filtering_mode, sort_rules, filters, mask, *args):
    """Return grid-location data (row index, selected pk, total count)."""
    selected_pk, index, count = cls.get_pk(
      mask, filters, sort_rules, filtering_mode, *args)
    return {
      'rowIndex': index,
      'selected_id': selected_pk,
      'records': count,
    }

# Polish collation is required by locale.strcoll() in make_lexeme_cmp().
# NOTE(review): setting the process-wide locale at import time is a global
# side effect and raises locale.Error if 'pl_PL.UTF-8' is not installed --
# confirm deployment environments provide that locale.
import locale
locale.setlocale(locale.LC_ALL, 'pl_PL.UTF-8')

def make_lexeme_cmp(rule):
  """Build a cmp-style function for bisecting the pk list by entry.

  The returned function compares the entry of the lexeme with primary
  key `pk` against the search `mask`, using locale-aware collation.
  Under a-tergo sorting both strings are reversed first; a descending
  sort order flips the result (unless the mask is empty).
  """
  def lexeme_cmp(pk, mask):
    entry = Lexeme.objects.get(pk=pk).entry
    other = mask
    if rule['a_tergo']:
      entry, other = reverse(entry), reverse(other)
    outcome = locale.strcoll(entry, other)
    if rule['order'] == 'desc' and other != '':
      outcome = -outcome
    return outcome
  return lexeme_cmp

# Query: the index of the row with the given id under the given sort order.
@ajax(method='get')
def find_id(request, id, sort_rules, mask, filters=None):
  return LexemeGrid.find_id(
    filtering_mode(request.user), id, filters, sort_rules, mask,
    visible_vocabularies(request.user))

# Query: the id and index of the first row (under the given sort order)
# whose entry starts with `mask`; 'selected_id' is None if there is none.
@ajax(method='get')
def get_location(request, sort_rules, filters=None, mask=''):
  return LexemeGrid.get_location(
    filtering_mode(request.user), sort_rules, filters, mask,
    visible_vocabularies(request.user))

# Miłosz's creation -- should be cleaned up some day
def cache_key(sort_rules, filters, vocabularies, mask, filtering_mode):
  """Build the cache key for a pk-list query.

  The key combines the JSON-encoded sort rules and filters, the ids of
  the visible vocabularies (these appear to be strings -- they are
  '/'-joined elsewhere in this module), and, when filtering_mode is on,
  the entry mask.
  """
  parts = [json_encode(sort_rules), json_encode(filters)]
  parts.extend(vocabulary.id for vocabulary in vocabularies)
  if filtering_mode:
    parts.append(mask)
  return ''.join(parts)

def get_cached_lexemes(sort_rules, filters, vocabularies, mask, filtering_mode):
  """Return the cached pk list for these query parameters, or None."""
  return cache.get(
    cache_key(sort_rules, filters, vocabularies, mask, filtering_mode))

def cache_lexemes(pk_list, sort_rules, filters, vocabularies, mask,
                  filtering_mode):
  """Store `pk_list` under its cache key and record the key in 'key_list'."""
  key = cache_key(sort_rules, filters, vocabularies, mask, filtering_mode)
  cache.set(key, pk_list)
  known_keys = cache.get('key_list', [])
  if key not in known_keys:
    known_keys.append(key)
  # Written back unconditionally, matching the established behaviour.
  cache.set('key_list', known_keys)

def get_pk_list(sort_rules, filters, vocabularies, mask, filtering_mode,
                filtering=False):
  """Return the ordered list of lexeme pks for the given grid state.

  Results are cached under a key derived from all parameters; pass
  filtering=True to bypass the cache lookup (the fresh result is still
  stored afterwards).
  """
  pk_list = None
  if not filtering:
    pk_list = get_cached_lexemes(sort_rules, filters, vocabularies, mask,
                                 filtering_mode)
  if pk_list is None:  # fixed: identity check instead of '== None'
    lexemes = LexemeGrid.get_sorted_queryset(
      filtering_mode, sort_rules, filters, mask, vocabularies)
    # When a-tergo sorting added the extra 'rev' column, values_list must
    # select it too so the ORDER BY survives; keep only the pks.
    if 'rev' in lexemes.query.extra_select:
      pk_list = [row[0] for row in lexemes.values_list('pk', 'rev')]
    else:
      pk_list = list(lexemes.values_list('pk', flat=True))
    cache_lexemes(pk_list, sort_rules, filters, vocabularies, mask,
                  filtering_mode)
  return pk_list

# jqGrid data request: return one page of lexeme rows under the given
# sort order, filters and entry mask.
@ajax(method='get')
def get_lexemes(request, page, rows, sort_rules, filters=None, mask='',
                target_page=0, totalrows=0, filtering=False):
  # Remember the current grid state in the session.
  request.session['sort_rules'] = json_encode(sort_rules)
  request.session['filters'] = json_encode(filters)
  # Nonzero target_page/totalrows override page/rows.
  page = target_page or page
  limit = totalrows or rows
  vocabularies = visible_vocabularies(request.user)

  # filtering=True bypasses the pk-list cache (see get_pk_list).
  pk_list = get_pk_list(sort_rules, filters, vocabularies, mask,
                        filtering_mode(request.user), filtering)
  count = len(pk_list)
  total_pages, start, response_rowcount = LexemeGrid.count_pages(
    count, page, limit)
  pk_list = pk_list[start:start + response_rowcount]
  # NOTE(review): one SELECT per lexeme on this page (N+1 pattern);
  # consider Lexeme.objects.in_bulk(pk_list) if this becomes a hot spot.
  lexemes = [Lexeme.objects.get(pk=pk) for pk in pk_list]
  return LexemeGrid.make_response(lexemes, count, page, total_pages)