diff --git a/INSTALL_PL b/INSTALL_PL
index 582b6ce..05c0460 100644
--- a/INSTALL_PL
+++ b/INSTALL_PL
@@ -9,6 +9,8 @@ Zainstaluj Django w wersji 1.4.8:
 
 Zainstaluj Django south:
 >> sudo apt-get install python-django-south
+lub:
+>> sudo pip install South
 
 Zainstaluj Django extensions w wersji 1.6.7:
 >> sudo pip install django-extensions==1.6.7
diff --git a/dictionary/management/commands/add_nverb_entries.py b/dictionary/management/commands/add_nverb_entries.py
new file mode 100644
index 0000000..df07e6f
--- /dev/null
+++ b/dictionary/management/commands/add_nverb_entries.py
@@ -0,0 +1,207 @@
+#-*- coding:utf-8 -*-
+
+import codecs
+from operator import itemgetter
+
+from django.core.management.base import BaseCommand
+
+from dictionary.models import Lemma, Lemma_Status, POS, Vocabulary, \
+                              get_or_create_entry
+from dictionary.management.commands.load_initial_nverb_frames import add_initial_frames_by_entries
+from dictionary.management.commands.load_entries_relations import add_relations_by_nverb_entries
+
+VERBS_IN_DICT = 2000
+ORDERING = '300M'
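+# VERBS_IN_DICT caps how many new lemmas go into one vocabulary before the
+# vocabulary index is bumped; ORDERING selects the frequency column ('1M' or
+# '300M') used for sorting in get_entries_by_freq.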
+
+################# NOUNS ################################
+#VERBAL_NOUNS_PATH = 'data/nverbs/nouns/merged_nouns-freq.txt'
+#NOUNS_VAL_PATH = 'data/nverbs/nouns/merged_nouns_val-poss.txt'
+
+# loading initial entries
+NOUNS_ADDED_PATH = 'data/nverbs/nouns2consider/added-merged_nouns_val_20171102.txt'
+NOUNS_ERROR_PATH = 'data/nverbs/nouns2consider/error-merged_nouns_val_20171102.txt'
+
+NOUNS_FRAMES_PATH = 'data/nverbs/nouns2consider/merged_nouns_val-poss.txt' # entries with valence frames from the confidential resource
+SELECTED_NOUNS_PATH = 'data/nverbs/nouns2consider/nouns+verb2consider-clarin2-add.txt'
+
+# adding entries relations
+NOUN_VERB_RELATIONS_PATH = 'data/nverbs/nouns2consider/nouns+verb2consider-clarin2-add.txt'
+NOUN_ADJ_RELATIONS_PATH = 'data/nverbs/nouns2consider/nouns+adj2consider-clarin2.txt'
+
+################## ADJS ################################
+#VERBAL_ADJS_PATH = 'data/nverbs/adjs/merged_adjs-freq.txt'
+##ADJS_VAL_PATH = 'data/nverbs/adjs/merged_adjs_val-P1.txt'
+#ADJS_RELATIONS_PATH = 'data/nverbs/adjs/ver_adjs+verb-freq_cuted.txt'
+#
+## loading initial entries
+#ADJS_ADDED_PATH = 'data/nverbs/adjs/added-merged_adjs_val_20141219.txt'
+#ADJS_ERROR_PATH = 'data/nverbs/adjs/error-merged_adjs_val_20141219.txt'
+#ADJS_FRAMES_PATH = 'data/nverbs/adjs/merged_adjs_val-P1.txt'
+#
+## adding entries relations
+#ADJ_VERB_RELATIONS_PATH = 'data/nverbs/adjs/merged_adjs+verb-freq.txt'
+#
+################## ADVS ################################
+#VERBAL_ADVS_PATH = 'data/nverbs/advs/merged_advs-sel-1M-300M.txt' # only selected frequencies
+#ADVS_VAL_PATH = 'data/nverbs/advs/merged_advs_val_popr_usu_gdyby_20141113.txt'
+#
+## loading initial entries
+#ADVS_ADDED_PATH = 'data/nverbs/advs/added-merged_advs_val.txt'
+#ADVS_ERROR_PATH = 'data/nverbs/advs/error-merged_advs_val.txt'
+#ADVS_FRAMES_PATH = 'data/nverbs/advs/merged_advs_val_popr_usu_gdyby_20141113.txt'
+
+## adding entries relations # no data available yet
+#ADV_VERB_RELATIONS_PATH = 'data/nverbs/adjs/merged_adjs+verb-freq.txt'
+
+
+class Command(BaseCommand):
+    args = 'none'
+
+    def handle(self, **options):
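+        # Pipeline: pick the entries to add, create Lemma objects grouped into
+        # vocabularies, attach initial valence frames, then link the new noun
+        # entries with their related verb entries.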
+        # load nouns
+#        entries_with_val = get_entries(NOUNS_VAL_PATH)
+#        entries = get_entries_by_freq(VERBAL_NOUNS_PATH, ORDERING)
+#        load_entries(entries, B_entries, 'data/added_nouns_20140627.txt', ORDERING, 'noun', 
+#                     'clarin_nouns', 1, 1, 0)      
+
+        # load nouns
+        entries_to_add = get_entries(SELECTED_NOUNS_PATH)
+        ordered_entries = get_entries_by_freq(SELECTED_NOUNS_PATH, ORDERING)
+        #related_entries = get_related_entries(NOUNS_RELATIONS_PATH, 'noun')
+        added_entries = load_entries(ordered_entries, entries_to_add, 'data/nverbs/nouns2consider/added_nouns_20171103.txt', ORDERING, 'noun',
+                                     'clarin2_nouns', 3, 3, 0)
+        add_initial_frames_by_entries(added_entries, 
+                                      NOUNS_FRAMES_PATH, NOUNS_ADDED_PATH, NOUNS_ERROR_PATH, 
+                                      'noun')
+        add_relations_by_nverb_entries(added_entries, NOUN_VERB_RELATIONS_PATH, 'verb', 'noun')
+        #add_relations_by_nverb_entries(added_entries, NOUN_ADJ_RELATIONS_PATH, 'adj', 'noun')
+        
+#        # load adjectives
+##        entries_with_val = get_entries(ADJS_VAL_PATH)
+#        entries = get_entries_by_freq(VERBAL_ADJS_PATH, ORDERING)
+#        related_entries = get_related_entries(ADJS_RELATIONS_PATH, 'adj')
+#        added_entries = load_entries(entries, related_entries, 'data/added_adjs_20141219.txt', ORDERING, 'adj', 
+#                                     'clarin_adjs', 3, 3, 0)
+#        add_initial_frames_by_entries(added_entries, 
+#                                      ADJS_FRAMES_PATH, ADJS_ADDED_PATH, ADJS_ERROR_PATH, 
+#                                      'adj')
+#        add_relations_by_nverb_entries(added_entries, ADJ_VERB_RELATIONS_PATH, 'adj')
+
+#        # load adverbs
+#        entries_with_val = get_entries(ADVS_VAL_PATH)
+#        entries = get_entries_by_freq(VERBAL_ADVS_PATH, ORDERING)
+#        added_entries = load_entries(entries, entries_with_val, 'data/added_advs_20141114.txt', ORDERING, 'adv', 
+#                                     'clarin_advs', 1, 1, 0)
+#        add_initial_frames_by_entries(added_entries, 
+#                                      ADVS_FRAMES_PATH, ADVS_ADDED_PATH, ADVS_ERROR_PATH, 
+#                                      'adverb')
+#        add_relations_by_nverb_entries(added_entries, ADJ_VERB_RELATIONS_PATH, 'adj')
+    
+def get_entries(entries_path):
+    entries = []
+    seen = set()
+    entries_file = codecs.open(entries_path, "rt", 'utf-8')
+    try:
+        for line in entries_file:
+            entry = line.split('\t')[0].strip()
+            # dicts are not hashable, so deduplicate by the entry string itself
+            if entry not in seen:
+                seen.add(entry)
+                entries.append({'entry': entry})
+    finally:
+        entries_file.close()
+    return entries
+
+def get_entries_by_freq(entries_path, ordering):
+    entries = []
+    freq_file = codecs.open(entries_path, "rt", 'utf-8')
+    try:
+        for line in freq_file:
+            line_ls = line.split()
+            entries.append({'entry'  : line_ls[0].strip(),
+                            'freq_1M': int(line_ls[1].strip()),
+                            'freq_300M': int(line_ls[2].strip())})
+    finally:
+        freq_file.close()
+    return sorted(entries, key=itemgetter('freq_%s' % ordering), reverse=True)
+
+def load_entries(sorted_entries, entries_to_add, added_path, ordering, pos_tag,
+                 dict_basename, first_dict_idx, last_dict_idx, min_freq):
+    print 'Loading entries!!'
+    added_entries = []
+    added_file = codecs.open(added_path, "wt", 'utf-8')
+    try:
+        dict_idx = first_dict_idx
+        pos_obj = POS.objects.get(tag=pos_tag)
+        verbs_per_dict = VERBS_IN_DICT
+        initial_status = Lemma_Status.objects.order_by('priority')[0]
+        for entry in sorted_entries:
+            found_entry = next((item for item in entries_to_add if item['entry'] == entry['entry']), None)
+            if found_entry and entry['freq_%s' % ordering] >= min_freq:
+                # get_or_create avoids saving a duplicate vocabulary for every matched entry
+                if dict_idx == 0:
+                    voc_name = dict_basename
+                else:
+                    voc_name = dict_basename + str(dict_idx)
+                new_voc, _ = Vocabulary.objects.get_or_create(name=voc_name)
+
+                if not Lemma.objects.filter(entry=entry['entry']).exists():
+                    if verbs_per_dict == 0:
+                        verbs_per_dict = VERBS_IN_DICT
+                        dict_idx += 1
+                        if dict_idx > last_dict_idx:
+                            break
+                        new_voc, _ = Vocabulary.objects.get_or_create(name=dict_basename + str(dict_idx))
+                    val_entry, created = get_or_create_entry(entry['entry'], pos_obj)
+                    new_lemma = Lemma(entry=entry['entry'],
+                                      entry_obj=val_entry,
+                                      vocabulary=new_voc,
+                                      status=initial_status,
+                                      old=False,
+                                      frequency_300M=entry['freq_300M'],
+                                      frequency_1M=entry['freq_1M'])
+                    new_lemma.save()
+                    verbs_per_dict -= 1
+                    added_file.write('%s\t%s\t%s\n' % (entry['entry'],
+                                                       entry['freq_1M'],
+                                                       entry['freq_300M']))
+                    added_entries.append(entry['entry'])
+                    print entry
+    finally:
+        added_file.close()
+    return added_entries
+
+def get_related_entries(relations_path, pos_tag):
+    print 'Checking relations!!!'
+    entries = []
+    pos = POS.objects.get(tag=pos_tag)  # validates that pos_tag exists
+    relations_file = codecs.open(relations_path, "rt", 'utf-8')
+    try:
+        for line in relations_file:
+            line_ls = line.split()
+            verb = line_ls[3].lstrip('(').strip()
+            nverb = line_ls[0].strip()
+            if (not Lemma.objects.filter(entry=nverb).exists() and
+                Lemma.objects.filter(entry=verb, entry_obj__pos__tag='verb').exists()):
+                verb_active_lemma = Lemma.objects.get(entry=verb,
+                                                      entry_obj__pos__tag='verb',
+                                                      old=False)
+                lemma_status_str = verb_active_lemma.status.status
+                if (lemma_status_str != u'w obróbce' and
+                    lemma_status_str != u'do obróbki'):
+                    # skip verbs whose only frame is the trivial subj{np(str)} one
+                    if not (verb_active_lemma.frames.count() == 1 and
+                            verb_active_lemma.frames.filter(text_rep=u'subj{np(str)}').exists()):
+                        entries.append({'entry'  : nverb,
+                                        'verb'   : verb,
+                                        'freq_1M': int(line_ls[1].strip()),
+                                        'freq_300M': int(line_ls[2].strip())})
+                        print line
+    finally:
+        relations_file.close()
+    return entries
diff --git a/dictionary/management/commands/get_lemmas_list.py b/dictionary/management/commands/get_lemmas_list.py
new file mode 100644
index 0000000..c471d4a
--- /dev/null
+++ b/dictionary/management/commands/get_lemmas_list.py
@@ -0,0 +1,32 @@
+# -*- coding:utf-8 -*-
+
+import codecs
+import datetime
+import os
+
+from django.core.management.base import BaseCommand
+
+from dictionary.models import Lemma
+from settings import PROJECT_PATH
+
+
+POS = 'verb'
+OUTPATH = os.path.join(PROJECT_PATH, 'data', '%ss-%s.txt' % (POS, datetime.datetime.now().strftime('%Y%m%d')))
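+# Output path pattern: <PROJECT_PATH>/data/<POS>s-<YYYYMMDD>.txt,
+# e.g. 'verbs-20171103.txt' for POS = 'verb'.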
+
+
+class Command(BaseCommand):
+    help = 'Get lemmas existing in Walenty'
+
+    def handle(self, *args, **options):
+        lemmas = Lemma.objects.filter(old=False, entry_obj__pos__tag=POS)
+        lemmas = lemmas.exclude(status__status=u'do usunięcia').order_by('entry_obj__name')
+        write_lemmas(lemmas)
+
+
+def write_lemmas(lemmas):
+    outfile = codecs.open(OUTPATH, 'w', 'utf-8')
+    try:
+        for lemma in lemmas:
+            outfile.write('%s\n' % lemma.entry_obj.name)
+    finally:
+        outfile.close()
diff --git a/dictionary/management/commands/load_entries_relations.py b/dictionary/management/commands/load_entries_relations.py
new file mode 100644
index 0000000..1d4fb4b
--- /dev/null
+++ b/dictionary/management/commands/load_entries_relations.py
@@ -0,0 +1,112 @@
+#-*- coding:utf-8 -*-
+
+import codecs
+
+from django.core.management.base import BaseCommand
+
+from dictionary.models import Lemma, POS, get_or_create_entry
+
+NOUN_VERB_RELATIONS_PATH = 'data/nverbs/nouns/nouns+verb-freq.txt'
+
+ADJ_VERB_RELATIONS_PATH = 'data/nverbs/adjs/merged_adjs+verb-freq.txt'
+
+CHECK_PATH = 'data/nverbs/nouns/deriv_nouns-adj-freq-sel.txt'
+
+class Command(BaseCommand):
+    args = 'none'
+    help = """
+    Add relations between entries from given file.
+    """
+
+    def handle(self, **options):
+        #add_relations(NOUN_VERB_RELATIONS_PATH, 'noun')   
+        #add_relations(ADJ_VERB_RELATIONS_PATH, 'adj')
+        check_if_deriv_good_to_add('adj', 'noun', 'data/nverbs/nouns/deriv_nouns-adj-existing-20150928.txt')
+
+def add_relations(entries_path, pos_tag):
+    pos = POS.objects.get(tag=pos_tag)
+    freq_file = codecs.open(entries_path, "rt", 'utf-8')
+    try:
+        for line in freq_file:
+            line_ls = line.split()
+            verb = line_ls[3].lstrip('(').strip()
+            nverb = line_ls[0].strip()
+            try:
+                verb_obj = Lemma.objects.get(old=False, entry=verb, entry_obj__pos__tag='verb')
+                # existence check: raises Lemma.DoesNotExist when the non-verb lemma is absent
+                Lemma.objects.get(old=False, entry=nverb, entry_obj__pos=pos)
+                nverb_entry, created = get_or_create_entry(nverb, pos)
+                # link both entries symmetrically
+                verb_entry = verb_obj.entry_obj
+                verb_entry.rel_entries.add(nverb_entry)
+                nverb_entry.rel_entries.add(verb_entry)
+                print line
+            except Lemma.DoesNotExist:
+                pass
+    finally:
+        freq_file.close()
+
+def add_relations_by_nverb_entries(entries, entries_path, from_pos_tag, to_pos_tag):
+    print 'Adding relations!'
+    from_pos = POS.objects.get(tag=from_pos_tag)
+    to_pos = POS.objects.get(tag=to_pos_tag)
+    freq_file = codecs.open(entries_path, "rt", 'utf-8')
+    try:
+        for line in freq_file:
+            line_ls = line.split()
+            verb = line_ls[3].lstrip('(').strip()
+            nverb = line_ls[0].strip()
+            if nverb not in entries:
+                continue
+            try:
+                verb_obj = Lemma.objects.get(old=False, entry=verb, entry_obj__pos=from_pos)
+                nverb_obj = Lemma.objects.get(old=False, entry=nverb, entry_obj__pos=to_pos)
+                # link both entries symmetrically
+                verb_entry = verb_obj.entry_obj
+                nverb_entry = nverb_obj.entry_obj
+                verb_entry.rel_entries.add(nverb_entry)
+                nverb_entry.rel_entries.add(verb_entry)
+                print line
+            except Lemma.DoesNotExist:
+                pass
+    finally:
+        freq_file.close()
+
+def check_if_deriv_good_to_add(from_pos_tag, to_pos_tag, outpath):
+    freq_file = codecs.open(CHECK_PATH, "rt", 'utf-8')
+    good_file = codecs.open(outpath, "wt", 'utf-8')
+    try:
+        for line in freq_file:
+            line_ls = line.split()
+            to_entry = line_ls[0].strip()
+            from_entry = line_ls[3].lstrip('(').strip()
+            if not Lemma.objects.filter(old=False, entry=to_entry,
+                                        entry_obj__pos__tag=to_pos_tag).exists():
+                try:
+                    # existence check only; raises Lemma.DoesNotExist if absent
+                    Lemma.objects.get(old=False, entry=from_entry,
+                                      entry_obj__pos__tag=from_pos_tag)
+                    good_file.write(line)
+                    print line
+                except Lemma.DoesNotExist:
+                    pass
+    finally:
+        good_file.close()
+        freq_file.close()
diff --git a/dictionary/management/commands/load_initial_nverb_frames.py b/dictionary/management/commands/load_initial_nverb_frames.py
new file mode 100644
index 0000000..5cc92f3
--- /dev/null
+++ b/dictionary/management/commands/load_initial_nverb_frames.py
@@ -0,0 +1,239 @@
+#-*- coding:utf-8 -*-
+
+#Copyright (c) 2014, Bartłomiej Nitoń
+#All rights reserved.
+
+#Redistribution and use in source and binary forms, with or without modification, are permitted provided 
+#that the following conditions are met:
+
+#    Redistributions of source code must retain the above copyright notice, this list of conditions and 
+#    the following disclaimer.
+#    Redistributions in binary form must reproduce the above copyright notice, this list of conditions 
+#    and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED 
+# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 
+# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR 
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 
+# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+# POSSIBILITY OF SUCH DAMAGE.
+
+import codecs
+import itertools
+from operator import itemgetter
+
+from django.core.management.base import BaseCommand
+
+#from dictionary.common_func import arg_data_to_arg, args_to_position, \
+#                                   positions_to_frame
+from dictionary.models import Argument, Argument_Model, Frame_Opinion, \
+                              Frame_Opinion_Value, Lemma, positions_to_frame, \
+                              get_or_create_position
+                              
+
+NOUNS_ADDED_PATH = 'data/nverbs/nouns/added-merged_nouns_val.txt'
+NOUNS_ERROR_PATH = 'data/nverbs/nouns/error-merged_nouns_val.txt'
+NOUNS_FRAMES_PATH = 'data/nverbs/nouns/merged_nouns_val-poss.txt'
+
+ADJS_ADDED_PATH = 'data/nverbs/adjs/added-merged_adjs_val.txt'
+ADJS_ERROR_PATH = 'data/nverbs/adjs/error-merged_adjs_val.txt'
+ADJS_FRAMES_PATH = 'data/nverbs/adjs/merged_adjs_val-P1.txt'
+
+class Command(BaseCommand):
+    args = 'none'
+    help = """
+    Adds initial nverb frames.
+    """
+
+    def handle(self, **options):
+        #add_initial_frames(NOUNS_FRAMES_PATH, NOUNS_ADDED_PATH, NOUNS_ERROR_PATH, 'noun')   
+        add_initial_frames(ADJS_FRAMES_PATH, ADJS_ADDED_PATH, ADJS_ERROR_PATH, 'adj')    
+        
+def add_initial_frames(frames_path, added_path, error_path, pos_tag):
+    added_file = codecs.open(added_path, "wt", 'utf-8')
+    error_file = codecs.open(error_path, "wt", 'utf-8')
+    frames_file = codecs.open(frames_path, "rt", 'utf-8')
+    try:
+        for line in frames_file:
+            line = line.strip()
+            if line.startswith('%'):
+                continue
+            lemma, frames_str, pred_val = get_frames_info(line)
+            try:
+                lemma_obj = Lemma.objects.get(entry=lemma, old=False,
+                                              status__status=u'do obróbki',
+                                              entry_obj__pos__tag=pos_tag)
+                #lemma_obj.frames.clear()
+                print lemma_obj
+                try:
+                    parse_and_add_frames(lemma_obj, frames_str, pred_val)
+                    added_file.write(u'%s\n' % line)
+                except Exception:
+                    # log malformed frame descriptions instead of aborting the run
+                    error_file.write(u'%s\n' % line)
+            except Lemma.DoesNotExist:
+                pass
+    finally:
+        added_file.close()
+        error_file.close()
+        frames_file.close()
+
+def add_initial_frames_by_entries(entries, frames_path, added_path, error_path, pos_tag):
+    print 'Adding initial frames!'
+    added_file = codecs.open(added_path, "wt", 'utf-8')
+    error_file = codecs.open(error_path, "wt", 'utf-8')
+    frames_file = codecs.open(frames_path, "rt", 'utf-8')
+    try:
+        for line in frames_file:
+            line = line.strip()
+            if line.startswith('%'):
+                continue
+            lemma, frames_str, pred_val = get_frames_info(line)
+            if lemma not in entries:
+                continue
+            try:
+                lemma_obj = Lemma.objects.get(entry=lemma, old=False,
+                                              status__status=u'do obróbki',
+                                              entry_obj__pos__tag=pos_tag)
+                print lemma_obj
+                try:
+                    parse_and_add_frames(lemma_obj, frames_str, pred_val)
+                    added_file.write(u'%s\n' % line)
+                except Exception:
+                    # log malformed frame descriptions instead of aborting the run
+                    error_file.write(u'%s\n' % line)
+            except Lemma.DoesNotExist:
+                pass
+    finally:
+        added_file.close()
+        error_file.close()
+        frames_file.close()
+
+def get_frames_info(line):
+    predicativity_val = ''
+    line_parts = line.split('\t')
+    lemma = line_parts[0].strip()
+    frames_str = line_parts[1].strip()
+    if len(line_parts) == 3 and line_parts[2] == 'PRED':
+        predicativity_val = 'pred'
+    return lemma, frames_str, predicativity_val
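+# Hypothetical input line (tab-separated, third column optional):
+#   u'gotowy\tnp(gen)/ncp(gen,że)\tPRED'
+# would yield ('gotowy', u'np(gen)/ncp(gen,że)', 'pred').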
+
+def parse_and_add_frames(lemma_obj, frames_str, predicativity_val): 
+    poss_ls = []
+    valence_ls = [arg.strip() for arg in frames_str.split('+')]
+    for pos_arg in valence_ls:
+        pos_arg = pos_arg.strip()
+        possible_args = pos_arg.split('/')
+        possible_args = coordinate_arguments(possible_args)
+        poss_ls.append(possible_args)
+    confs = itertools.product(*poss_ls)
+    for frame_args in list(confs):
+        frame_args = list(set(frame_args)) # deduplicate: odd positions repeating the same argument used to be created here; this should prevent that
+        frame_obj, frame_opinion_obj = create_frame(frame_args, predicativity_val)
+        lemma_obj.frames.add(frame_obj)
+        lemma_obj.frame_opinions.add(frame_opinion_obj)
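+# Illustrative expansion (not taken from the data files): for frames_str
+# u'np(gen)/np(acc) + prepnp(o,loc)' the '/' alternatives differ in case, so
+# itertools.product yields two frames: one with np(gen) and prepnp(o,loc),
+# the other with np(acc) and prepnp(o,loc).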
+        
+def coordinate_arguments(arguments):
+    coordinated_args = []
+    for arg in arguments:
+        arg_type, attributes = arg_from_text_rep(arg)
+        case, preposition = get_arg_case_and_preposition(arg)
+        coordinated_arg = next((arg for arg in coordinated_args if (arg['case'] == case and 
+                                                                    arg['preposition'] == preposition)), None)
+        if coordinated_arg and case:
+            coordinated_arg['argument'] += ';%s' % arg
+        else:
+            coordinated_arg = {'argument': arg,
+                               'case': case,
+                               'preposition': preposition}
+            coordinated_args.append(coordinated_arg)
+        if arg_type == 'ncp':
+            additional_arg = u'np(%s)' % case
+            coordinated_arg['argument'] += ';%s' % additional_arg
+        elif arg_type == 'prepncp':
+            additional_arg = u'prepnp(%s,%s)' % (preposition, case)
+            coordinated_arg['argument'] += ';%s' % additional_arg
+    
+    return [arg['argument'] for arg in coordinated_args] 
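+# Alternatives sharing case and preposition are merged into a single
+# ';'-coordinated position; ncp/prepncp arguments additionally admit the
+# matching np/prepnp variant (e.g. np(gen) is appended after ncp(gen,że)).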
+
+def arg_from_text_rep(argument):
+    attributes = []
+    arg_parts = argument.split('(')
+    arg_type = arg_parts[0]
+    if len(arg_parts) > 1:
+        attributes = arg_parts[1].rstrip(')').replace("'", "").split(',')
+    return arg_type, attributes
+
+def get_arg_case_and_preposition(argument):
+    case = ''
+    preposition = ''
+    argument = arg_conversion(argument)
+    arg_type, attributes = arg_from_text_rep(argument)
+    argument_model = Argument_Model.objects.get(arg_model_name=arg_type)
+    attribute_models = argument_model.atribute_models.order_by('priority')
+    for attr_model, attr_text_rep in zip(attribute_models, attributes):
+        if attr_model.atr_model_name == u'PRZYPADEK':
+            case = attr_text_rep
+        elif attr_model.atr_model_name == u'PRZYIMEK':
+            preposition = attr_text_rep
+    return case, preposition  
+        
+def arg_conversion(arg_text_rep):
+    arg_text_rep = arg_text_rep.replace('!', '').replace('*', '').replace('?', '')
+    if arg_text_rep == 'advp':
+        arg_text_rep = u'xp(_)'
+    elif arg_text_rep.startswith('comprepnp'):
+        arg_text_rep = arg_text_rep.replace("'", "").replace(',gen', '')
+    return arg_text_rep
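+# e.g. arg_conversion(u'advp!') -> u'xp(_)'; opinion markers (!, * and ?) are
+# stripped so that text_rep lookups match canonical argument forms.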
+
+def create_frame(frame_args, predicativity_val):
+    positions_objs, frame_opinion_value = get_positions(frame_args)
+    frame_obj = positions_to_frame(positions_objs, 
+                                   reflex='', 
+                                   negativity='', 
+                                   predicativity=predicativity_val, 
+                                   aspect='')
+    frame_opinion_obj, created = Frame_Opinion.objects.get_or_create(frame=frame_obj,
+                                                                     value=frame_opinion_value)
+    return frame_obj, frame_opinion_obj
+
+def get_positions(args_strs):
+    poss_objs = []
+    frame_opinions = []
+    for poss_args_str in args_strs:
+        frame_opinions.append(possible_frame_opinion(poss_args_str))
+        poss_objs.append(create_position(poss_args_str))
+    frame_opinion = sorted(frame_opinions, key=itemgetter('priority'), reverse=False)[0]
+    frame_opinion_value = Frame_Opinion_Value.objects.get(value=frame_opinion['opinion'])
+    return poss_objs, frame_opinion_value
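+# The frame opinion is the most pessimistic one among the frame's positions:
+# priority orders u'zły' (1) < u'wątpliwy' (2) < u'archaiczny' (3) < 'pewny' (4).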
+
+def possible_frame_opinion(arg_str):
+    opinion = {'opinion': 'pewny',
+               'priority': '4'}
+    if '!' in arg_str:
+        opinion = {'opinion': u'zły',
+                   'priority': '1'}
+    elif '?' in arg_str:
+        opinion = {'opinion': u'wątpliwy',
+                   'priority': '2'}
+    elif '*' in arg_str:
+        opinion = {'opinion': u'archaiczny',
+                   'priority': '3'}
+    return opinion
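+# e.g. possible_frame_opinion(u'np(gen)?') yields {'opinion': u'wątpliwy', 'priority': '2'};
+# without any marker the default {'opinion': 'pewny', 'priority': '4'} applies.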
+
+def create_position(args_str):
+    arg_objs = []
+    for arg_text_rep in args_str.split(';'):
+        arg_text_rep = arg_conversion(arg_text_rep)
+#        try:
+        arg_obj = Argument.objects.get(text_rep=arg_text_rep)
+#        except Argument.DoesNotExist: # TODO: disable this fallback when entries are loaded initially
+#            arg_type, attributes = arg_from_text_rep(arg_text_rep)
+#            arg_obj = arg_data_to_arg(arg_type, attributes)
+        arg_objs.append(arg_obj)
+    pos_obj = get_or_create_position(categories=[], arguments=arg_objs)
+    return pos_obj