(*
 *  ENIAMlexSemantics is a library that assigns lexicosemantic information to tokens.
 *  Copyright (C) 2016-2017 Wojciech Jaworski <wjaworski atSPAMfree mimuw dot edu dot pl>
 *  Copyright (C) 2016-2017 Institute of Computer Science Polish Academy of Sciences
 *
 *  This library is free software: you can redistribute it and/or modify
 *  it under the terms of the GNU Lesser General Public License as published by
 *  the Free Software Foundation, either version 3 of the License, or
 *  (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public License
 *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *)

open ENIAMtokenizerTypes
open ENIAMsubsyntaxTypes
open ENIAMlexSemanticsTypes
open ENIAMwalTypes
open Xstd
(*
let string_of_lex_sems tokens lex_sems =
  String.concat "\n" (List.rev (Int.fold 0 (ExtArray.size lex_sems - 1) [] (fun l id ->
    let t = ExtArray.get lex_sems id in
    let t2 = ExtArray.get tokens id in
    let orth = t2.ENIAMtokenizerTypes.orth in
    let lemma = ENIAMtokens.string_of_token t2.ENIAMtokenizerTypes.token in
    let lroles = if snd t.lroles = "" then fst t.lroles else fst t.lroles ^ " " ^ snd t.lroles in
    let core = Printf.sprintf "%3d %s %s %s" id orth lemma lroles in
    let senses = Xlist.map t.senses (fun (sense,hipero,weight) ->
      Printf.sprintf "%s[%s]%.2f" sense (String.concat "," hipero) weight) in
    let valence = Xlist.map t.valence (ENIAMwalStringOf.fnum_frame "") in
    let simple_valence = Xlist.map t.simple_valence (ENIAMwalStringOf.fnum_frame "") in
    (* let semantics =  *)
    (String.concat "\n    " ([core] @ senses @ valence @ simple_valence)) :: l)))

let find_senses t = (* FIXME: senses containing 'się' *)
  match t.token with
    Lemma(lemma,pos,_) -> ENIAMplWordnet.find_senses lemma pos
  | Proper(_,_,_,senses) -> ENIAMplWordnet.find_proper_senses senses
  | _ -> []
*)
(* Union-find over an int array: a.(i) = max_int marks an index not seen yet,
   a.(i) = i marks a group representative, and any other value is a pointer
   towards the representative.  [find] creates a singleton group on first
   contact and path-compresses the visited indices accumulated in [l]. *)
let rec find a l i =
  if a.(i) = max_int then (
    a.(i) <- i;
    i) else
  if a.(i) = i then (
    Xlist.iter l (fun j -> a.(j) <- i);
    i) else
  find a (i :: l) a.(i)

(* Merge the groups represented by [i] and [j]; the smaller index becomes the
   common representative. *)
let union a i j =
  if i = j then i else
  let x = min i j in
  let y = max i j in
  a.(y) <- x;
  x
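
(* Illustrative sketch (not part of the original module): after a series of
   [find]/[union] calls, all connected indices share one representative.
   The concrete indices below are made up for the example.
let _union_find_example () =
  let a = Array.make 4 max_int in
  let m = find a [] 1 in
  let m = union a m (find a [] 2) in
  let _ = union a m (find a [] 3) in
  assert (find a [] 3 = find a [] 1)
*)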

let rec split_tokens_into_groups_sentence a = function
    RawSentence s -> ()
  | StructSentence([],_) -> ()
  | StructSentence((id,_,_) :: paths,_) ->
      ignore (Xlist.fold paths (find a [] id) (fun m (id,_,_) ->
        union a m (find a [] id)))
  | DepSentence(paths) ->
      if Array.length paths = 0 then () else
      let id,_,_ = paths.(0) in
      ignore (Int.fold 1 (Array.length paths - 1) (find a [] id) (fun m i ->
        let id,_,_ = paths.(i) in
        union a m (find a [] id)))
  | QuotedSentences sentences ->
      Xlist.iter sentences (fun p ->
        split_tokens_into_groups_sentence a p.sentence)
  | AltSentence l -> Xlist.iter l (fun (mode,sentence) ->
        split_tokens_into_groups_sentence a sentence)

let rec split_tokens_into_groups_paragraph a = function
    RawParagraph s -> ()
  | StructParagraph sentences ->
      Xlist.iter sentences (fun p -> split_tokens_into_groups_sentence a p.sentence)
  | AltParagraph l -> Xlist.iter l (fun (mode,paragraph) ->
      split_tokens_into_groups_paragraph a paragraph)

let rec split_tokens_into_groups_text a = function
    RawText s -> ()
  | StructText paragraphs ->
      Xlist.iter paragraphs (split_tokens_into_groups_paragraph a)
  | AltText l -> Xlist.iter l (fun (mode,text) ->
      split_tokens_into_groups_text a text)

(* Group token ids that occur in the same sentence.  A single increasing sweep
   suffices to flatten the pointers afterwards, because [find] and [union]
   always point an index at a smaller one. *)
let split_tokens_into_groups size text =
  let a = Array.make size max_int in
  split_tokens_into_groups_text a text;
  Int.iter 1 (Array.length a - 1) (fun i ->
    if a.(i) <> max_int then a.(i) <- a.(a.(i)));
  let map = Int.fold 1 (Array.length a - 1) IntMap.empty (fun map i ->
    if a.(i) = max_int then map else
    IntMap.add_inc map a.(i) [i] (fun l -> i :: l)) in
  IntMap.fold map [] (fun l _ v -> v :: l)
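
(* Illustrative sketch (assumptions: StructSentence carries (token id, left
   node, right node) triples plus a last-node index; the ids below are made
   up).  Tokens mentioned in the same sentence end up in the same group:
let _grouping_example () =
  let a = Array.make 5 max_int in
  split_tokens_into_groups_sentence a (StructSentence([1,0,1; 2,1,2], 2));
  split_tokens_into_groups_sentence a (StructSentence([3,0,1; 4,1,2], 2));
  assert (find a [] 2 = find a [] 1);
  assert (find a [] 4 = find a [] 3);
  assert (find a [] 3 <> find a [] 1)
*)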

(* For every token of a group, look up the valence dictionary entries of the
   group's lemmas, transform and simplify them, and store the rendered
   schemata and lexicalized entries in [lex_sems]. *)
let assign_valence tokens lex_sems group =
  let lexemes = Xlist.fold group StringSet.empty (fun lexemes id ->
      let lemma = ENIAMtokens.get_lemma (ExtArray.get tokens id).token in
      StringSet.add lexemes lemma) in
  let entries,schemata,connected = ENIAMwalReduce.select_entries lexemes in
  Xlist.iter group (fun id ->
      let lemma = ENIAMtokens.get_lemma (ExtArray.get tokens id).token in
      let pos = ENIAMtokens.get_pos (ExtArray.get tokens id).token in
      let pos2 = ENIAMvalence.simplify_pos pos in
      let schemata = try Entries.find schemata pos2 lemma with Not_found -> ENIAMvalence.get_default_valence pos2 in
      let entries = try Entries.find entries pos lemma with Not_found -> [] in
      let schemata = List.flatten (Xlist.map schemata (fun (opinion,neg,pred,aspect,schema) ->
          ENIAMvalence.transform_entry pos lemma neg pred aspect schema)) in (* FIXME: the opinion value is dropped here *)
      let schemata = ENIAMadjuncts.simplify_valence pos pos2 lemma schemata in
      let schemata = Xlist.map schemata (fun (selectors,schema) ->
          selectors,ENIAMvalence2.render_simple_schema schema) in
      let entries = List.flatten (Xlist.map entries (ENIAMvalence.transform_lex_entry pos lemma)) in
      let entries = Xlist.map entries (fun (selectors,entry) ->
          selectors,ENIAMvalence2.render_lex_entry entry) in
      ExtArray.set lex_sems id {(ExtArray.get lex_sems id) with
                                schemata = schemata; lex_entries=entries})
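
(* A small inspection helper (a sketch, not used by the pipeline): print how
   many schemata and lexicalized entries [assign_valence] attached to every
   token of a group.
let _dump_group tokens lex_sems group =
  Xlist.iter group (fun id ->
    let lemma = ENIAMtokens.get_lemma (ExtArray.get tokens id).token in
    Printf.printf "%d %s: %d schemata, %d lex entries\n" id lemma
      (Xlist.size (ExtArray.get lex_sems id).schemata)
      (Xlist.size (ExtArray.get lex_sems id).lex_entries))
*)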

(* TODO:
   slashes
   tests
   alignment with LCGlexicon
*)

  (* let lexemes = Xlist.fold group Entries.empty (fun lexemes id ->
      let lemma = ENIAMtokens.get_lemma (ExtArray.get tokens id).token in

      match (ExtArray.get tokens id).token with
        Lemma(lemma,pos,_) ->
        Entries.add_inc lexemes (ENIAMvalence.simplify_pos pos) lemma pos (*(StringSet.singleton pos) (fun set -> StringSet.add set pos)*)
      | Proper(lemma,pos,_,_) ->
        (* let pos = match pos with
            "subst" -> "psubst"
          | "depr" -> "pdepr"
          | _ -> pos (*failwith ("assign_valence: Proper " ^ pos ^ " " ^ lemma)*) in *)
        Entries.add_inc lexemes (ENIAMvalence.simplify_pos pos) lemma pos (*StringSet.singleton pos) (fun set -> StringSet.add set pos*)
          (* StringMap.add_inc lexemes lemma (StringSet.singleton pos) (fun set -> StringSet.add set pos) (* proper names are assigned the default noun valence *) *)
      | _ -> lexemes) in
  let lexemes = Entries.map2 (fun l -> StringSet.to_list (StringSet.of_list l)) in
  let lexemes_set = Entries.fold lexemes StringSet.empty (fun lexemes_set _ lemma _ -> StringSet.add lexemes_set lemma) in
  let schemata,entries = ENIAMvalence.prepare_selected_valence schemata entries lexemes in
  let schemata = ENIAMadjuncts.simplify_valence schemata in
  let schemata = Entries.map schemata (fun pos lemma (selectors,schema) ->
      selectors,ENIAMvalence2.render_schema schema) in
  let entries = Entries.map entries (fun pos lemma (selectors,entry) ->
      selectors,ENIAMvalence2.render_lex_entry entry) in *)


(*
let assign_valence tokens lex_sems group =
  let lexemes = Xlist.fold group StringMap.empty (fun lexemes id ->
    match (ExtArray.get tokens id).token with
      Lemma(lemma,pos,_) ->
        StringMap.add_inc lexemes lemma (StringSet.singleton pos) (fun set -> StringSet.add set pos)
    | Proper(lemma,pos,_,_) ->
        let pos = match pos with
          "subst" -> "psubst"
        | "depr" -> "pdepr"
        | _ -> pos (*failwith ("assign_valence: Proper " ^ pos ^ " " ^ lemma)*) in
        StringMap.add_inc lexemes lemma (StringSet.singleton pos) (fun set -> StringSet.add set pos) (* proper names are assigned the default noun valence *)
    | _ -> lexemes) in
  let valence = ENIAMwalenty.find_frames lexemes in
  Xlist.iter group (fun id ->
    match (ExtArray.get tokens id).token with
      Lemma(lemma,pos,_) ->
        ExtArray.set lex_sems id {(ExtArray.get lex_sems id) with
          valence=try Xlist.rev_map (StringMap.find (StringMap.find valence lemma) pos) (fun frame -> 0,frame) with Not_found -> []}
    | Proper(lemma,pos,interp,_) ->
        ExtArray.set lex_sems id {(ExtArray.get lex_sems id) with
          valence=(try Xlist.rev_map (StringMap.find (StringMap.find valence lemma)
            (if pos = "subst" || pos = "depr" then "p" ^ pos else pos)) (fun frame -> 0,frame) with Not_found -> [](*failwith ("assign_valence: Proper(" ^ lemma ^ "," ^ pos ^ ")")*))};
        ExtArray.set tokens id {(ExtArray.get tokens id) with token=Lemma(lemma,pos,interp)}
    | _ -> ())

let get_prefs_schema prefs schema =
  Xlist.fold schema prefs (fun prefs t ->
    Xlist.fold t.sel_prefs prefs StringSet.add)

let map_prefs_schema senses schema =
  Xlist.map schema (fun t ->
    if Xlist.mem t.morfs (Phrase Pro) || Xlist.mem t.morfs (Phrase ProNG) then t else
    {t with sel_prefs = Xlist.fold t.sel_prefs [] (fun l s ->
      if StringSet.mem senses s then s :: l else l)})

let disambiguate_senses lex_sems group =
  let prefs = Xlist.fold group (StringSet.singleton "ALL") (fun prefs id ->
    Xlist.fold (ExtArray.get lex_sems id).valence prefs (fun prefs -> function
      _,Frame(_,schema) -> get_prefs_schema prefs schema
    | _,LexFrame(_,_,_,schema) -> get_prefs_schema prefs schema
    | _,ComprepFrame(_,_,_,schema) -> get_prefs_schema prefs schema)) in
  let hipero = Xlist.fold group (StringSet.singleton "ALL") (fun hipero id ->
    Xlist.fold (ExtArray.get lex_sems id).senses hipero (fun hipero (_,l,_) ->
      Xlist.fold l hipero StringSet.add)) in
  let senses = StringSet.intersection prefs hipero in
  let is_zero = StringSet.mem hipero "0" in
  let senses = if is_zero then StringSet.add senses "0" else senses in
  Xlist.iter group (fun id ->
    let t = ExtArray.get lex_sems id in
    ExtArray.set lex_sems id {t with valence = if is_zero then t.valence else
        Xlist.map t.valence (function
          n,Frame(a,schema) -> n,Frame(a,map_prefs_schema senses schema)
        | n,LexFrame(s,p,r,schema) -> n,LexFrame(s,p,r,map_prefs_schema senses schema)
        | n,ComprepFrame(s,p,r,schema) -> n,ComprepFrame(s,p,r,map_prefs_schema senses schema));
      senses = Xlist.map t.senses (fun (s,l,w) ->
        s, List.rev (Xlist.fold l [] (fun l s -> if StringSet.mem senses s then s :: l else l)),w)})



(* FIXME: problem with ComprepNP and PrepNCP *)
(* FIXME: problem when the same token occurs in several paths *)
let generate_verb_prep_adjuncts preps =
  Xlist.map preps (fun (lemma,case) -> ENIAMwalFrames.verb_prep_adjunct_schema_field lemma case)

let generate_verb_comprep_adjuncts compreps =
  Xlist.map compreps (fun lemma -> ENIAMwalFrames.verb_comprep_adjunct_schema_field lemma)

let generate_verb_compar_adjuncts compars =
  Xlist.map compars (fun lemma -> ENIAMwalFrames.verb_compar_adjunct_schema_field lemma)

let generate_noun_prep_adjuncts preps =
  ENIAMwalFrames.noun_prep_adjunct_schema_field preps

let generate_noun_compar_adjuncts compars =
  ENIAMwalFrames.noun_compar_adjunct_schema_field compars

let generate_adj_compar_adjuncts compars =
  ENIAMwalFrames.noun_compar_adjunct_schema_field compars

let compars = StringSet.of_list ["jak";"jako";"niż";"niczym";"niby";"co"]

let generate_prep_adjunct_tokens tokens group =
  let map = Xlist.fold group StringMap.empty (fun map id ->
    let t = ExtArray.get tokens id in
    match t.token with
      Lemma(lemma,"prep",interp) ->
        let map = if lemma = "po" then StringMap.add map "po:postp" ("po","postp") else map in
        if StringSet.mem compars lemma then map else
        Xlist.fold interp map (fun map -> function
          [cases] -> Xlist.fold cases map (fun map case -> StringMap.add map (lemma ^ ":" ^ case) (lemma,case))
        | [cases;_] -> Xlist.fold cases map (fun map case -> StringMap.add map (lemma ^ ":" ^ case) (lemma,case))
        | _ -> map)
    | _ -> map) in
  StringMap.fold map [] (fun l _ v -> v :: l)

let generate_comprep_adjunct_tokens tokens group =
  let lemmas = Xlist.fold group StringSet.empty (fun lemmas id ->
    let t = ExtArray.get tokens id in
    match t.token with
      Lemma(lemma,_,_) -> StringSet.add lemmas lemma
    | _ -> lemmas) in
  StringMap.fold ENIAMwalFrames.comprep_reqs [] (fun compreps comprep reqs ->
    let b = Xlist.fold reqs true (fun b s -> b && StringSet.mem lemmas s) in
    if b then comprep :: compreps else compreps)

let generate_compar_adjunct_tokens tokens group =
  let set = Xlist.fold group StringSet.empty (fun set id ->
    let t = ExtArray.get tokens id in
    match t.token with
      Lemma(lemma,"prep",interp) ->
        if not (StringSet.mem compars lemma) then set else
        StringSet.add set lemma
    | _ -> set) in
  StringSet.to_list set

let is_measure = function
    NounAtrs(_,_,Common "measure") -> true
  | _ -> false

let remove_meaning = function
    DefaultAtrs(m,r,o,neg,p,a) -> DefaultAtrs([],r,o,neg,p,a)
  | EmptyAtrs m -> EmptyAtrs []
  | NounAtrs(m,nsyn,s(*,typ*)) -> NounAtrs([],nsyn,s(*,typ*))
  | AdjAtrs(m,c,adjsyn(*,adjsem,typ*)) -> AdjAtrs([],c,adjsyn(*,adjsem,typ*))
  | PersAtrs(m,le,neg,mo,t,au,a) -> PersAtrs([],le,neg,mo,t,au,a)
  | GerAtrs(m,le,neg,a) -> GerAtrs([],le,neg,a)
  | NonPersAtrs(m,le,role,role_attr,neg,a) -> NonPersAtrs([],le,role,role_attr,neg,a)
  | _ -> failwith "remove_meaning"

let assign_simplified_valence tokens lex_sems group =
  let preps = generate_prep_adjunct_tokens tokens group in
  let compreps = generate_comprep_adjunct_tokens tokens group in
  let compars = generate_compar_adjunct_tokens tokens group in
  let verb_prep_adjuncts = generate_verb_prep_adjuncts preps in
  let verb_comprep_adjuncts = generate_verb_comprep_adjuncts compreps in
  let verb_compar_adjuncts = generate_verb_compar_adjuncts compars in
  let noun_prep_adjuncts = generate_noun_prep_adjuncts preps compreps in
  let noun_compar_adjuncts = generate_noun_compar_adjuncts compars in
  let adj_compar_adjuncts = generate_adj_compar_adjuncts compars in
  let verb_adjuncts = ENIAMwalFrames.verb_adjuncts_simp @ verb_prep_adjuncts @ verb_comprep_adjuncts @ verb_compar_adjuncts in
  let noun_adjuncts = ENIAMwalFrames.noun_adjuncts_simp @ [noun_prep_adjuncts] @ [noun_compar_adjuncts] in
  let noun_measure_adjuncts = ENIAMwalFrames.noun_measure_adjuncts_simp @ [noun_prep_adjuncts] @ [noun_compar_adjuncts] in
  let adj_adjuncts = ENIAMwalFrames.adj_adjuncts_simp @ [adj_compar_adjuncts] in
  let adv_adjuncts = ENIAMwalFrames.adv_adjuncts_simp @ [adj_compar_adjuncts] in
  Xlist.iter group (fun id ->
    let t = ExtArray.get lex_sems id in
    let pos = match (ExtArray.get tokens id).token with
        Lemma(_,pos,_) -> ENIAMwalFrames.simplify_pos pos
      | _ -> "" in
    let lex_frames,frames = Xlist.fold t.valence ([],StringMap.empty) (fun (lex_frames,frames) -> function
        _,(Frame(attrs,schema) as frame) ->
          let attrs = remove_meaning attrs in
          lex_frames, StringMap.add_inc frames (ENIAMwalStringOf.frame_atrs attrs) (attrs,[schema,frame]) (fun (_,l) -> attrs, (schema,frame) :: l)
      | _,frame -> frame :: lex_frames, frames) in
    let simp_frames,full_frames,n = Xlist.fold lex_frames ([],[],1) (fun (simp_frames,full_frames,n) frame ->
      (n,frame) :: simp_frames, (n,frame) :: full_frames, n+1) in
    let simp_frames,full_frames,_ = StringMap.fold frames (simp_frames,full_frames,n) (fun (simp_frames,full_frames,n) _ (attrs,schemata) ->
      Xlist.fold (simplify_schemata pos schemata) (simp_frames,full_frames,n) (fun (simp_frames,full_frames,n) (schema,frames) ->
        let schema = match pos with
            "verb" -> schema @ verb_adjuncts
          | "noun" -> schema @ (if is_measure attrs then noun_measure_adjuncts else noun_adjuncts)
          | "adj" -> schema @ adj_adjuncts
          | "adv" -> schema @ adv_adjuncts
          | _ -> schema in
        (n,Frame(attrs,schema)) :: simp_frames,
        Xlist.fold frames full_frames (fun full_frames frame -> (n,frame) :: full_frames),
        n+1)) in
    ExtArray.set lex_sems id {t with simple_valence=simp_frames; valence=full_frames})

let translate_negation = function
    (Negation:negation) -> ["neg"]
  | Aff -> ["aff"]
  | NegationUndef -> ["aff";"neg"]
  | NegationNA -> []

let translate_aspect = function
    (Aspect s:aspect) -> [s]
  | AspectUndef -> ["imperf";"perf"]
  | AspectNA -> []

let translate_case = function
    (Case s:case) -> [s]
  | CaseUndef -> all_cases
  | _ -> failwith "translate_case"

let translate_nsem = function
    Common s -> [s]
  | Time -> ["time"]


let define_valence_selectors = function
    DefaultAtrs(m,r,o,neg,p,a) -> failwith "apply_valence_selectors"
  | EmptyAtrs m -> []
  | NounAtrs(m,nsyn,nsem) -> [LCGlexicon2.Nsyn,LCGlexicon2.Eq,[nsyn];LCGlexicon2.Nsem,LCGlexicon2.Eq,translate_nsem nsem]
  | AdjAtrs(m,c,adjsyn(*,adjsem,typ*)) -> [LCGlexicon2.Case,LCGlexicon2.Eq,translate_case c]
  | PersAtrs(m,le,neg,mo,t,au,a) -> [LCGlexicon2.Negation,LCGlexicon2.Eq,translate_negation neg;LCGlexicon2.Mood,LCGlexicon2.Eq,[mo];LCGlexicon2.Tense,LCGlexicon2.Eq,[t];LCGlexicon2.Aspect,LCGlexicon2.Eq,translate_aspect a]
  | GerAtrs(m,le,neg,a) -> [LCGlexicon2.Negation,LCGlexicon2.Eq,translate_negation neg;LCGlexicon2.Aspect,LCGlexicon2.Eq,translate_aspect a]
  | NonPersAtrs(m,le,role,role_attr,neg,a) -> [LCGlexicon2.Negation,Eq,translate_negation neg;LCGlexicon2.Aspect,LCGlexicon2.Eq,translate_aspect a]
  | ComprepAtrs _ -> failwith "apply_valence_selectors"


let render_schema schema =
  Xlist.map schema (function
        {morfs=[Multi args]} as s -> LCGrenderer.dir_of_dir s.dir, Maybe(Plus(Xlist.map args LCGrenderer.make_arg_phrase))
      | s -> LCGrenderer.dir_of_dir s.dir, Plus(Xlist.map s.morfs (LCGrenderer.make_arg [])))

let assign_very_simplified_valence tokens lex_sems group =
  let preps = generate_prep_adjunct_tokens tokens group in
  let compreps = generate_comprep_adjunct_tokens tokens group in
  let compars = generate_compar_adjunct_tokens tokens group in
  let verb_prep_adjuncts = generate_verb_prep_adjuncts preps in
  let verb_comprep_adjuncts = generate_verb_comprep_adjuncts compreps in
  let verb_compar_adjuncts = generate_verb_compar_adjuncts compars in
  let noun_prep_adjuncts = generate_noun_prep_adjuncts preps compreps in
  let noun_compar_adjuncts = generate_noun_compar_adjuncts compars in
  let adj_compar_adjuncts = generate_adj_compar_adjuncts compars in
  let verb_adjuncts = ENIAMwalFrames.verb_adjuncts_simp2 @ verb_prep_adjuncts @ verb_comprep_adjuncts @ verb_compar_adjuncts in
  let noun_adjuncts = ENIAMwalFrames.noun_adjuncts_simp @ [noun_prep_adjuncts] @ [noun_compar_adjuncts] in
  let noun_measure_adjuncts = ENIAMwalFrames.noun_measure_adjuncts_simp @ [noun_prep_adjuncts] @ [noun_compar_adjuncts] in
  let adj_adjuncts = ENIAMwalFrames.adj_adjuncts_simp @ [adj_compar_adjuncts] in
  let adv_adjuncts = ENIAMwalFrames.adv_adjuncts_simp @ [adj_compar_adjuncts] in
  Xlist.iter group (fun id ->
      let t = ExtArray.get lex_sems id in
      (* Printf.printf "lemma=%s\n" t.cats.lemma; *)
    let pos = match (ExtArray.get tokens id).token with
        Lemma(_,pos,_) -> ENIAMwalFrames.simplify_pos pos
      | _ -> "" in
    (* Printf.printf "lemma=%s pos=%s\n" t.cats.lemma pos; *)
    let frames = Xlist.fold t.valence StringMap.empty (fun frames -> function
        _,Frame(attrs,schema) ->
          let attrs = remove_meaning attrs in
          StringMap.add_inc frames (ENIAMwalStringOf.frame_atrs attrs) (attrs,[schema]) (fun (_,l) -> attrs, schema :: l)
        | _,_ -> frames) in
    (* Printf.printf "|frames|=%d\n" (StringMap.size frames); *)
    let frames = StringMap.fold frames [] (fun frames _ (attrs,schemata) ->
        let schema = simplify_schemata2 pos schemata in
        let schema = match pos with
            "verb" -> schema @ verb_adjuncts
          | "noun" -> schema @ (if is_measure attrs then noun_measure_adjuncts else noun_adjuncts)
          | "adj" -> schema @ adj_adjuncts
          | "adv" -> schema @ adv_adjuncts
          | _ -> schema in
        let selectors = define_valence_selectors attr in
        let schema = render_schema schema in
        (selectors,schema) :: frames) in
    (* Printf.printf "|frames|=%d\n" (Xlist.size frames); *)
    ExtArray.set lex_sems id {t with very_simple_valence=frames})
  *)

(* Entry point: build a lex_sems array parallel to [tokens], split the token
   ids of [text] into per-sentence groups and assign valence information to
   each group.  The remaining stages (sense disambiguation, simplified
   valence, semantics) are currently commented out. *)
let assign tokens text =
  let lex_sems = ExtArray.make (ExtArray.size tokens) empty_lex_sem in
  let _ = ExtArray.add lex_sems empty_lex_sem in
  Int.iter 1 (ExtArray.size tokens - 1) (fun i ->
    (* let token = ExtArray.get tokens i in
    (* ExtArray.set tokens i token; *)
    let senses = find_senses token in *)
    let lex_sem = {empty_lex_sem with senses=[](*senses*)} in
    let _ = ExtArray.add lex_sems lex_sem in
    ());
  let groups = split_tokens_into_groups (ExtArray.size tokens) text in
  (* Xlist.iter groups (fun group -> print_endline (String.concat " " (Xlist.map group string_of_int))); *)
  Xlist.iter groups (fun group -> assign_valence tokens lex_sems group);
  (* Xlist.iter groups (fun group -> assign_valence tokens lex_sems group);
  Xlist.iter groups (fun group -> disambiguate_senses lex_sems group);
  Xlist.iter groups (fun group -> assign_simplified_valence tokens lex_sems group);
  Xlist.iter groups (fun group -> assign_very_simplified_valence tokens lex_sems group);
  Xlist.iter groups (fun group -> ENIAMlexSemanticsData.assign_semantics tokens lex_sems group); *)
  lex_sems
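
(* Usage sketch (assumption-laden, not part of the module): [tokens] and
   [text] are expected to come from the earlier tokenization/subsyntax
   stages; the result is indexed in parallel with [tokens].
let _inspect tokens text =
  let lex_sems = assign tokens text in
  Int.iter 1 (ExtArray.size lex_sems - 1) (fun i ->
    Printf.printf "%d: %d schemata\n" i
      (Xlist.size (ExtArray.get lex_sems i).schemata))
*)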