[hltdi-l3] push by onlysk...@gmail.com - L3Lite: a lot of work on target language generation (Solution, Transla... on 2014-04-27 18:08 GMT

0 views
Skip to first unread message

hltd...@googlecode.com

unread,
Apr 27, 2014, 2:08:29 PM4/27/14
to hltdi-...@googlegroups.com
Revision: b18b787a14e7
Branch: default
Author: Michael Gasser <gas...@cs.indiana.edu>
Date: Sun Apr 27 18:07:59 2014 UTC
Log: L3Lite: a lot of work on target language generation (Solution,
Translation classes) and constraint satisfaction (Solver)
http://code.google.com/p/hltdi-l3/source/detail?r=b18b787a14e7

Added:
/l3lite/cs.py
/l3lite/languages/orm.lg
Modified:
/l3lite/constraint.py
/l3lite/entry.py
/l3lite/features.py
/l3lite/language.py
/l3lite/languages/amh.lg
/l3lite/sentence.py
/l3lite/variable.py
/l3xdg/solver.py
/lite.py

=======================================
--- /dev/null
+++ /l3lite/cs.py Sun Apr 27 18:07:59 2014 UTC
@@ -0,0 +1,126 @@
+#
+# L3Lite CS: what is needed to implement l3 style constraint satisfaction
+# using the lexicon/grammars created.
+#
+########################################################################
+#
+# This file is part of the HLTDI L^3 project
+# for parsing, generation, translation, and computer-assisted
+# human translation.
+#
+# Copyright (C) 2014, HLTDI <gas...@cs.indiana.edu>
+#
+# This program is free software: you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# =========================================================================
+
+# 2014.04.26
+# -- Created
+
+from .constraint import *
+
class Solver:
    """A solver for a constraint satisfaction problem.

    Repeatedly runs a set of constraints against a domain store
    (dstore) until a fixed point is reached: either every variable in
    the store is determined (success) or no constraints remain awake
    (the problem must be handed to a distributor for search).
    """

    # Class-level counter giving each Solver instance a unique id.
    id = 0

    # Solver status values.
    running = 0
    succeeded = 1
    failed = 2
    distributable = 3
    skipped = 4

    def __init__(self, constraints, dstore):
        """constraints: iterable of Constraint objects to propagate.
        dstore: the domain store holding the variables' bindings."""
        self.constraints = constraints
        self.dstore = dstore
        # Constraints found to be entailed (permanently satisfied).
        self.entailed = []
        # Constraints that have failed.
        # NOTE(review): this instance attribute shadows the class-level
        # status constant Solver.failed; always reference the status via
        # the class (Solver.failed), never via self.
        self.failed = []
        self.status = Solver.running
        self.id = Solver.id
        Solver.id += 1

    def __repr__(self):
        return "Solver {}".format(self.id)

    def fixed_point(self, awaken):
        """Check whether propagation has reached a fixed point.

        awaken is the set of constraints still awake.  Returns True when
        none remain, setting self.status to succeeded if the dstore is
        fully determined, otherwise to distributable (search must
        continue).  Returns False while propagation should keep going."""
        if len(awaken) == 0:
            # No more constraints are awake
            if self.dstore.is_determined():
                # All variables are determined in the dstore or
                # peripheral: success
                self.status = Solver.succeeded
            else:
                # No more constraints apply: continue search
                self.status = Solver.distributable
            return True
        # Keep propagating
        return False

    def run(self, verbosity=0, tracevar=None):
        """Propagate all constraints to a fixed point.

        tracevar: optional list of variables to trace (None avoids the
        mutable-default-argument pitfall and means 'trace nothing')."""
        tracevar = [] if tracevar is None else tracevar
        awaken = set(self.constraints)
        while not self.fixed_point(awaken):
            awaken = self.run_constraints(awaken, verbosity=verbosity,
                                          tracevar=tracevar)

    def run_constraints(self, constraints, verbosity=0, tracevar=None):
        """Run each constraint once; return the set of constraints to
        awaken on the next pass (those attached to changed variables,
        minus any that are already entailed or failed)."""
        tracevar = [] if tracevar is None else tracevar
        awaken = set()
        for constraint in constraints:
            state, changed_vars = constraint.run(dstore=self.dstore,
                                                 verbosity=verbosity,
                                                 tracevar=tracevar)
            if state == Constraint.entailed:
                # Constraint is entailed; add it to the list of those.
                self.entailed.append(constraint)
                # Delete it from awaken if it's already there
                awaken.discard(constraint)

            # Check whether any of the changed vars cannot possibly be
            # determined; if so, the constraint fails
            if state != Constraint.failed:
                for var in changed_vars:
                    try:
                        var.determined(dstore=self.dstore,
                                       verbosity=verbosity)
                    except VarError:
                        if verbosity:
                            # BUG FIX: formerly referenced the undefined
                            # name 'propagator' here (NameError).
                            print("{} CAN'T BE DETERMINED, SO {} MUST"
                                  " FAIL".format(var, constraint))
                        state = Constraint.failed
                        break

            if state == Constraint.failed:
                # Constraint fails; remove it from the entailed or
                # awaken lists if it's there
                if constraint in self.entailed:
                    self.entailed.remove(constraint)
                # BUG FIX: was awaken.remove(propagator) — undefined name.
                awaken.discard(constraint)
#                # penalize the CSpace
#                self.penalty += constraint.weight
                # and remember that it failed
                self.failed.append(constraint)

#            if self.penalty > self.max_penalty:
#                # CSpace fails without running other constraints
#                if verbosity:
#                    print('PENALTY {} EXCEEDS MAXIMUM {}!'.format(self.penalty, self.max_penalty))
#                self.status = CSpace.failed
#                return CSpace.failed

            # If the constraint succeeds, add the constraints of its
            # variables to awaken
            if state != Constraint.failed:
                for var in changed_vars:
                    # Add constraints for the changed var to awaken
                    # unless those constraints are already entailed
                    # or failed
                    update_cons = {c for c in var.constraints
                                   if c not in self.entailed
                                   and c not in self.failed}
                    # BUG FIX: tracevar is a list of variables, so
                    # membership (not equality) is the right test; also
                    # print the variable itself, not the whole list.
                    if var in tracevar and verbosity:
                        print('Adding {} constraints for changed variable'
                              ' {}'.format(len(update_cons), var))
                    awaken.update(update_cons)
        return awaken
+
=======================================
--- /dev/null
+++ /l3lite/languages/orm.lg Sun Apr 27 18:07:59 2014 UTC
@@ -0,0 +1,40 @@
+name: afaan oromoo
+abbrev: orm
+groups:
+ nyaate:
+ - words: [$food, nyaate]
+ features: [{case: acc}, False]
+ beeke:
+ - words: [$vb, hin, beeke]
+ features: [{tam: cnt}, false, {tam: prs, pol: neg}]
+ name: ^godhe_hin^beeku
+ - words: [$fact, beeke]
+ qurxummii:
+ - words: [qurxummii]
+ kalluuna:
+ - words: [kalluuna]
+forms:
+ beeka:
+ root: beeke
+ features: {tam: prs, pol: aff, prs: 3, num: 0, gen: 0}
+ cats: [$vb]
+ beeku:
+ root: beeke
+ features: {tam: prs, pol: neg, prs: 3, num: 0, gen: 0}
+ cats: [$vb]
+ nyaate:
+ root: nyaate
+ features: {tam: cnt, prs: 3, num: 0, gen: 0}
+ cats: [$vb]
+ dinnicha:
+ root: dinnicha
+ features: {num: 0, case: acc}
+ cats: [$food, $thing]
+ qurxummii:
+ root: qurxummii
+ features: {num: 0, case: acc}
+ cats: [$food, $animal, $thing]
+ kalluuna:
+ root: kalluuna
+ features: {num: 0, case: acc}
+ cats: [$food, $animal, $thing]
=======================================
--- /l3lite/constraint.py Sun Apr 20 07:07:10 2014 UTC
+++ /l3lite/constraint.py Sun Apr 27 18:07:59 2014 UTC
@@ -36,6 +36,8 @@
# -- Constraint types used so far:
# UnionSelection, PrecedenceSelection, ComplexUnionSelection,
# ComplexSetConvexity, Union, Disjoint, Inclusion
+# 2014.04.26
+# -- Fixed several bugs in SetPrecedence (needed for TL sequencing).

from .variable import *
import itertools
@@ -56,7 +58,7 @@
self.weight = weight
if record:
for var in variables:
- if isinstance(var, Determined):
+ if isinstance(var, DetVar):
if problem:
if var not in problem.detvarsD:
problem.detvarsD[var] = []
@@ -575,7 +577,7 @@
return True
return False

- def infer(self, dstore=None, verbosity=0, tracevar=None):
+ def infer(self, dstore=None, verbosity=0, tracevar=[]):
changed = set()
# Intersection of lower bound of S2 and S3 is subset of lower
bound of S1.
s1 = self.variables[0]
@@ -817,8 +819,9 @@
"""Entailed if everything that can be in set1 precedes anything
that can be in set2."""
return SetPrecedence.must_precede(self.variables[0],
self.variables[1], dstore=dstore)

- def infer(self, dstore=None, verbosity=0, tracevar=None):
+ def infer(self, dstore=None, verbosity=0, tracevar=[]):
changed = set()
+ state = Constraint.sleeping
v1 = self.variables[0]
v1_low = v1.get_lower(dstore=dstore)
v2 = self.variables[1]
@@ -830,6 +833,7 @@
if v2.strengthen_upper(v2_up_new, dstore=dstore,
constraint=(verbosity>1 or v2 in
tracevar) and self):
changed.add(v2)
+ return state, changed
# If the lower bound on v2 is not empty, v1 must be a subset of
# {0, ..., max(0, min(v2_low) - 1)}
if v2_low:
@@ -837,6 +841,18 @@
if v1.strengthen_upper(v1_up_new, dstore=dstore,
constraint=(verbosity>1 or v1 in
tracevar) and self):
changed.add(v1)
+ return state, changed
+ # Remove all elements from v1 >= highest possible element in v2
+ v1_up = v1.get_upper(dstore=dstore)
+ v2_up = v2.get_upper(dstore=dstore)
+ v2.max = max(v2_up)
+ v1_over = set(itertools.filterfalse(lambda x: x < v2.max, v1_up))
+ if v1_over:
+ if v1.discard_upper(v1_over, dstore=dstore,
+ constraint=(verbosity>1 or v1 in tracevar)
and self):
+ changed.add(v1)
+ return state, changed
+ return state, changed

# Integer domain and set domain variables

@@ -2306,7 +2322,7 @@

# A dict of DetSVars for different values so these don't get recreated
each time
# Union is instantiated
-DetVarD = dict([(n, Determined('sel' + str(n), set(range(n)))) for n in
range(1, 20)])
+DetVarD = dict([(n, DetVar('sel' + str(n), set(range(n)))) for n in
range(1, 20)])

class Union(DerivedConstraint):
"""S0 = S1 U S2 U ... :
@@ -2317,7 +2333,7 @@

def init_constraints(self):
nvar = len(self.variables) - 1
- selvar = DetVarD.get(nvar) or Determined('sel', set(range(nvar)))
+ selvar = DetVarD.get(nvar) or DetVar('sel', set(range(nvar)))
self.constraints = [UnionSelection(self.variables[0], selvar,
self.variables[1:],
problem=self.problem,
weight=self.weight)]
@@ -2328,7 +2344,7 @@

def init_constraints(self):
nvar = len(self.variables) - 1
- selvar = DetVarD.get(nvar) or Determined('sel', set(range(nvar)))
+ selvar = DetVarD.get(nvar) or DetVar('sel', set(range(nvar)))
self.constraints = [IntersectionSelection(self.variables[0],
selvar, self.variables[1:],
problem=self.problem,
weight=self.weight)]
=======================================
--- /l3lite/entry.py Sun Apr 20 19:19:51 2014 UTC
+++ /l3lite/entry.py Sun Apr 27 18:07:59 2014 UTC
@@ -43,6 +43,8 @@
# -- words attribute in Group is a list of [word, feat_dict] pairs.
# 2014.04.16
# -- Created simpler Group (with no dependency types), renamed old Group
to MWE.
+# 2014.04.20
+# -- Matching of group and sentence nodes.

import copy, itertools
import yaml
@@ -58,11 +60,11 @@
ID = 1
dflt_dep = 'dflt'

- def __init__(self, name, language, id=0):
+ def __init__(self, name, language, id=0, trans=None):
"""Initialize name and basic features: language, trans, count,
id."""
self.name = name
self.language = language
- self.trans = None
+ self.trans = trans
self.count = 1
if id:
self.id = id
@@ -108,7 +110,7 @@
### Translations (word, gram, lexeme, group entries)
###
### Translations are stored in a language-id-keyed dict.
- ### Values are dicts with target entry repr strings as ids.
+ ### Values are dicts with target entry names as ids.
### Values are dicts with correspondence ('cor'), count ('cnt'), etc.
### as keys.

@@ -135,12 +137,6 @@
raise(EntryError(s.format(trans, self.name)))
transdict[trans]['c'] += count

- def add_trans_dep(self, language, trans, typ, src_dep, targ_dep):
- """Add a translation dependency specification.
- typ is iso(morphic), reverse, sepheads (separate heads).
- src_dep is the label on the source language dependency.
- targ_dep is the label on the target language dependency."""
-
class Lex(Entry):

cloneID = 1
@@ -647,12 +643,12 @@
other languages."""

def __init__(self, tokens, head_index=-1, head='', language=None,
name='',
- features=None):
+ features=None, trans=None):
"""Either head_index or head (a string) must be specified."""
# tokens is a list of strings
# name may be specified explicitly or not
name = name or Group.make_name(tokens)
- Entry.__init__(self, name, language)
+ Entry.__init__(self, name, language, trans=trans)
self.tokens = tokens
if head:
self.head = head
@@ -663,12 +659,12 @@
# Either None or a list of feat-val dicts for tokens that require
them
# Convert dicts to Features objects
if isinstance(features, list):
- features = [Features(d) for d in features]
+ features = [Features(d) if d else None for d in features]
self.features = features

def __repr__(self):
"""Print name."""
- return '<{}:{}>'.format(self.name, self.id)
+ return '{}:{}'.format(self.name, self.id)

@staticmethod
def make_name(tokens):
@@ -698,38 +694,56 @@
"""Convert a dict (loaded from a yaml file) to a Group object."""
tokens = d['words']
features = d.get('features')
- p = Group(tokens, head=head, language=language, features=features)
+ name = d.get('name', '')
+ trans = d.get('trans')
+ p = Group(tokens, head=head, language=language, features=features,
+ name=name, trans=trans)
return p

def match_nodes(self, snodes, head_index):
- """Attempt to match the group tokens (and features) with snodes
from a sentence."""
+ """Attempt to match the group tokens (and features) with snodes
from a sentence,
+ returning the snode indices and root and unified features if
any."""
# print("Does {} match {}".format(self, snodes))
- sent_indices = []
+ match_snodes = []
for index, token in enumerate(self.tokens):
- sent_tok_indices = []
+ match_snodes1 = []
# print(" Attempting to match {}".format(token))
matched = False
- for node in snodes:# for token, feats in
zip(self.tokens, self.features):
-
+ for node in snodes:
# print(" Trying {}".format(node))
if index == node.index == head_index:
# This is the token corresponding to the group head
- sent_tok_indices.append(node.index)
+ match_snodes1.append((node.index, None))
# print(" Head matched already".format(node))
matched = True
break
else:
feats = self.features[index] if self.features else None
- if node.match(token, feats):
- sent_tok_indices.append(node.index)
+ node_match = node.match(token, feats)
+# print('Node {} match {}:{}, {}:: {}'.format(node,
token, index, feats, node_match))
+ if node_match:
+ match_snodes1.append((node.index, node_match))
# print(" Matched node {}".format(node))
matched = True
if not matched:
# print(" {} not matched; failed".format(token))
return False
else:
- sent_indices.append(sent_tok_indices)
- return sent_indices
+ match_snodes.append(match_snodes1)
+# print("Group {}, s_indices {}".format(self, match_snodes))
+ return match_snodes
+
+ ### Translations
+
+ ## Alignments: position correspondences, agreement constraints
+ ## አድርጎ አያውቅም -> godhe hin beeku
+ ## a: {positions: (1, 2),
+ ## agreements: {gen: gen},
+ ## featmaps: {((pers, 2), (num, 2)): ((pers, 3), (num, 2))}
+ ## }
+
+ def add_alignment(self, trans):
+ pass

class EntryError(Exception):
'''Class for errors encountered when attempting to update an entry.'''
=======================================
--- /l3lite/features.py Sun Apr 20 07:07:10 2014 UTC
+++ /l3lite/features.py Sun Apr 27 18:07:59 2014 UTC
@@ -26,6 +26,10 @@

# 2014.04.19
# -- Created.
+# 2014.04.23-24
+# -- Unification with one or both values sets.
+# Unification with a list/tuple of feature-value pairs.
+# Copying of agreement features: agree:().

class Features(dict):

@@ -44,12 +48,34 @@
l.sort()
return l

+ @staticmethod
+ def unify_sets(x, y):
+ """If both are sets, their intersection. If one is a set,
+ the other if it's a member of the set."""
+ if isinstance(x, set):
+ if isinstance(y, set):
+ return x & y
+ elif y in x:
+ return y
+ elif isinstance(y, set):
+ if x in y:
+ return x
+ return False
+
@staticmethod
def simple_unify(x, y):
- """Unify the expressions x and y, returning the result
or 'fail'."""
+ """Unify the values x and y, returning the result or 'fail'."""
# If they're the same, return one.
if x == y:
return x
+ # If one or the other is a set, return the intersection
+ # (a single value if one is not a set)
+ elif isinstance(x, set) or isinstance(y, set):
+ u = Features.unify_sets(x, y)
+ if u is not False:
+ return u
+ else:
+ return 'fail'
# # If both are dicts, call unify_dict
# elif isinstance(x, dict) and isinstance(y, dict):
# x.unify(y)
@@ -80,5 +106,35 @@
result[k] = other_val

return result
+
+ def agree(self, target, agrs):
+ """Make target agree with self on features specified in agrs
dict."""
+ for src_feat, targ_feat in agrs.items():
+ if src_feat in self:
+ src_value = self[src_feat]
+ if targ_feat in target and target[targ_feat] != src_value:
+ # Clash; fail!
+ return 'fail'
+ else:
+ target[targ_feat] = src_value
+
+ def match_list(self, feat_list):
+ """Does this Features object match list or tuple of feature/value
pairs?"""
+ for feat, val in feat_list:
+ if feat in self:
+ if Features.simple_unify(val, self[feat]) == 'fail':
+ return False
+ return True

+ @staticmethod
+ def unify_all(features_list):
+ """Unify all of the Features objects (or None) in the list, if
possible."""
+ result = Features({})
+ for features in features_list:
+ if not features:
+ continue
+ result = result.unify(features)
+ if result == 'fail':
+ return 'fail'
+ return result

=======================================
--- /l3lite/language.py Sun Apr 20 19:19:51 2014 UTC
+++ /l3lite/language.py Sun Apr 27 18:07:59 2014 UTC
@@ -37,7 +37,6 @@
# -- Analysis and generation dicts for particular wordforms.

from .entry import *
-from .constraint import *

import os, yaml

@@ -66,6 +65,8 @@
self.mwes = mwes or {}
self.forms = forms or {}
self.groups = groups or {}
+ # Dict of groups with names as keys
+ self.groupnames = {}
# Record possibilities for dependency labels, feature values,
order constraints
self.possible = {}
# Record whether language has changed since last loaded
@@ -166,7 +167,11 @@
if groups:
l.groups = {}
for head, v in groups.items():
- l.groups[head] = [Group.from_dict(g, l, head) for g in v]
+ group_objs = [Group.from_dict(g, l, head) for g in v]
+ l.groups[head] = group_objs
+ # Add groups to groupnames dict
+ for go in group_objs:
+ l.groupnames[go.name] = go
forms = d.get('forms')
if forms:
l.forms = {}
@@ -200,14 +205,17 @@
return Language.from_dict(dct)

@staticmethod
- def load(abbrev):
- path = os.path.join(LANGUAGE_DIR, abbrev + '.lg')
- try:
- language = Language.read(path)
- return language
- except IOError:
- print("That language doesn't seem to exist.")
- return
+ def load(*abbrevs):
+ languages = []
+ for abbrev in abbrevs:
+ path = os.path.join(LANGUAGE_DIR, abbrev + '.lg')
+ try:
+ language = Language.read(path)
+ languages.append(language)
+ except IOError:
+ print("That language doesn't seem to exist.")
+ return
+ return languages

### Basic setters. Create entries (dicts) for item. For debugging
purposes, include name
### in entry.
@@ -255,20 +263,21 @@
self.genforms[lexeme] = {}
featdict = self.genforms[lexeme]
# features is a Features object; convert it to a list of tuples
- features = features.to_list()
- feat = features.pop(0)
- self.make_featdict(featdict, feat, features, form)
+ features = tuple(features.to_list())
+ featdict[features] = form
+# feat = features.pop(0)
+# self.make_featdict(featdict, feat, features, form)

- @staticmethod
- def make_featdict(featdict, feat, features, form):
- """Make a feat-value dict with the form as final value."""
- if not features:
- featdict[feat] = form
- return
- if feat not in featdict:
- featdict[feat] = {}
- new_feat = features.pop(0)
- Language.make_featdict(featdict[feat], new_feat, features, form)
+# @staticmethod
+# def make_featdict(featdict, feat, features, form):
+# """Make a feat-value dict with the form as final value."""
+# if not features:
+# featdict[feat] = form
+# return
+# if feat not in featdict:
+# featdict[feat] = {}
+# new_feat = features.pop(0)
+# Language.make_featdict(featdict[feat], new_feat, features, form)

def add_class(self, cls):
if cls in self.words:
@@ -290,7 +299,7 @@

def add_group(self, tokens, head_index=-1, head='', name='',
features=None):
group = Group(tokens, head_index=head_index, head=head,
- language=self, name=name, features=features)
+ language=self, name=name, features=features)
# print('Group {}, head {}'.format(group, group.head))
if features:
head_i = tokens.index(group.head)
@@ -298,6 +307,7 @@
else:
head_feats = None
self.add_group_to_lexicon(group.head, group, head_feats)
+ self.groupnames[group.name] = group
self.changed = True
return group

@@ -366,7 +376,22 @@
"""Returns a single lexeme entry."""
return self.words.get(lexeme)[0]

- ### Analysis and generation of word forms.
+ ### Generation of word forms
+
+ def generate(self, root, features):
+ if root not in self.genforms:
+ print("Impossible to generate root {}".format(root))
+ return
+ gendict = self.genforms[root]
+ # List of matching forms
+ result = []
+ for feat_list, form in gendict.items():
+ if features.match_list(feat_list):
+ result.append(form)
+# print('Feat list {}, form {}'.format())
+ if not result:
+ print("No forms found for {}:{}".format(root, features))
+ return result

## Dependencies (word, lexeme, class entries)

=======================================
--- /l3lite/languages/amh.lg Sun Apr 20 19:19:51 2014 UTC
+++ /l3lite/languages/amh.lg Sun Apr 27 18:07:59 2014 UTC
@@ -3,9 +3,35 @@
groups:
በላ:
- words: [$food, በላ]
+ features: [{case: acc}, False]
+ trans:
+ orm:
+ - [$food_nyaate,
+ {alg: [0, 1],
+ agr: [false, {sp: prs, sn: num, sg: gen}]}]
አወቀ:
- words: [$vb, አወቀ]
features: [{tam: ger}, {tam: imf, pol: neg}]
+ name: ^አድርጎ^አያውቅም
+ trans:
+ orm:
+ - [^godhe_hin^beeku,
+ {alg: [0, 2],
+ agr: [{sp: prs, sn: num, sg: gen},
+ {sp: prs, sn: num, sg: gen}]}]
+ - words: [$fact, አወቀ]
+ trans:
+ orm:
+ - [$fact_beeke,
+ {alg: [0, 1],
+ agr: [False, {sp: prs, sn: num, sg: gen}]}]
+ አሳ:
+ - words: [አሳ]
+ trans:
+ orm:
+ - [qurxummii]
+ - [kalluuna]
+
forms:
ያውቃል:
root: አወቀ
@@ -19,11 +45,15 @@
root: በላ
features: {tam: ger, sp: 3, sn: 0, sg: 0}
cats: [$vb]
+ በላ:
+ root: በላ
+ features: {tam: prf, sp: 3, sn: 0, sg: 0}
+ cats: [$vb]
ድንች:
root: ድንች
- features: {num: 0, case: 0, poss: 0, def: 0}
+ features: {num: 0, poss: 0, def: 0}
cats: [$food, $thing]
አሳ:
root: አሳ
- features: {num: 0, case: 0, poss: 0, def: 0}
+ features: {num: 0, poss: 0, def: 0}
cats: [$food, $animal, $thing]
=======================================
--- /l3lite/sentence.py Sun Apr 20 19:19:51 2014 UTC
+++ /l3lite/sentence.py Sun Apr 27 18:07:59 2014 UTC
@@ -28,44 +28,67 @@
# -- Created.
# 2014.04.19-20
# -- Group matching. GInst, GNode, and SNode classes.
+# 2014.04.22
+# -- Solution class.

+import itertools
# ui.py loads language, etc.
from .ui import *
+from .cs import *

class Sentence:
"""A sentence is a list of words (or other lexical tokens) that gets
assigned a set of variables and constraints that are run during
parsing or translation."""

+ id = 0
+
def __init__(self, raw='', language=None,
tokens=None, analyses=None,
- nodes=None, groups=None):
+ nodes=None, groups=None, target=None):
+ self.set_id()
# A string representing the raw sentence
self.raw = raw
# # A list of strings tokenized words (or morphemes)
# self.tokens = tokens or []
# # A list of analyses pairs (root, features) for particular tokens
# self.analyses = analyses or []
- # A language object
+ # Source language: a language object
self.language = language
+ # Target language: a language object
+ self.target = target
# A list of SNode objects, one for each token
self.nodes = nodes or []
# A list of candidate groups found during lexicalization
self.groups = groups or []
# A list of constraints to run
self.constraints = []
+ # Root domain store for variables
+ self.dstore = DStore(name="S{}".format(self.id))
+ # A dict of sentence-level variables
+ self.variables = {}
+ # Solutions found during parsing
+ self.solutions = []
+
+ def set_id(self):
+ self.id = Sentence.id
+ Sentence.id += 1

def __repr__(self):
"""Print name."""
if self.raw:
- return '|| {} ||'.format(self.raw)
+ return '|| ({}) {} ||'.format(self.id, self.raw)
else:
- return '|| {} sentence ||'.format(self.language)
+ return '|| {} sentence {} ||'.format(self.language, self.id)

def initialize(self):
"""Things to do before running constraint satisfaction."""
self.tokenize()
self.lexicalize()
+ if not self.groups:
+ print("No groups found for {}".format(self))
+ else:
+ self.create_variables()

def tokenize(self):
"""Segment the sentence string into tokens, analyze them
morphologically,
@@ -125,14 +148,61 @@
# to the group's words
groups = []
for head_i, group in candidates:
- indices = group.match_nodes(self.nodes, head_i)
- if not indices:
+ # Matching snodes, along with root and unified features if any
+ snodes = group.match_nodes(self.nodes, head_i)
+ if not snodes:
# This group is out
continue
# print('Found indices {} for group {}, head index
{}'.format(indices, group, head_i))
- groups.append((head_i, indices, group))
+ groups.append((head_i, snodes, group))
+ # Create a GInst object and GNodes for each surviving group
+ self.groups = [GInst(group, self, head_i, snodes, index) for
index, (head_i, snodes, group) in enumerate(groups)]
+ # Assign sentence-level indices to each GNode; store gnodes in list
+ self.gnodes = []
+ sent_index = 0
+ for group in self.groups:
+ for gnode in group.nodes:
+ gnode.sent_index = sent_index
+ self.gnodes.append(gnode)
+ sent_index += 1
+ # Number of GNodes
+ self.ngnodes = sent_index

- self.groups = [GInst(group, self, head_i, indices) for head_i,
indices, group in groups]
+ def create_variables(self):
+ # All abstract (category) and instance (word or lexeme) gnodes
+ catnodes = set()
+ instnodes = set()
+ for group in self.groups:
+ for node in group.nodes:
+ if node.cat:
+ catnodes.add(node.sent_index)
+ else:
+ instnodes.add(node.sent_index)
+ # Snodes that are merged with catnodes
+ merged_snodes = set()
+ for gn_index in catnodes:
+ gn = self.gnodes[gn_index]
+ merged_snodes.update(gn.snode_indices)
+
+ self.variables['groups'] = Var('groups', set(),
set(range(len(self.groups))),
+ # At least 1, at most all groups
+ 1, len(self.groups))
+ self.variables['gnodes'] = Var('gnodes', set(),
set(range(self.ngnodes)),
+ # At least size of smallest group,
at most all
+ min([len(g.nodes) for g in
self.groups]),
+ self.ngnodes)
+ self.variables['snodes'] = DetVar('snodes',
set(range(len(self.nodes))))
+ self.variables['catgnodes'] = Var('catgnodes', set(), catnodes)
+ # Instance gnodes that are merged with catnodes
+ self.variables['merged_gnodes'] = Var('merged_gnodes', set(),
instnodes,
+ 0, len(catnodes))
+ # Snodes that involve merger of gnodes (that have two associated
gnodes)
+ self.variables['merged_snodes'] = Var('merged_snodes', set(),
merged_snodes,
+ 0, len(catnodes))
+
+ def make_solution(self, ginsts, s2gnodes, g2snodes):
+ self.solutions.append(Solution(self, ginsts, s2gnodes, g2snodes,
+ len(self.solutions)))

class SNode:
"""Sentence token and its associated analyses and variables."""
@@ -150,11 +220,18 @@
self.sentence = sentence
# We'll need these for multiple matchings
self.cats = self.get_cats()
+ # Dict of variables specific to this SNode
+ self.variables = {}
+ ## Tokens in target language for this SNode
+ self.translations = []

def __repr__(self):
"""Print name."""
return "*{}:{}".format(self.token, self.index)

+ def create_variables(self):
+ pass
+
def get_cats(self):
"""The set of categories for the node's token, or None."""
if not self.analyses:
@@ -168,10 +245,25 @@
def match(self, item, features):
"""Does this node match the group item (word, lexeme, category) and
and any features associated with it?"""
+# print(' SNode {} trying to match item {} with features
{}'.format(self, item, features))
# If item is a category, don't bother looking at token
if Entry.is_cat(item):
+# print(' Cat item, looking in {}'.format(self.cats))
if self.cats and item in self.cats:
- return True
+# print(" Token {} is in cat {}".format(self.token, item))
+ if not self.analyses or not features:
+ return True
+ else:
+ for analysis in self.analyses:
+ node_features = analysis.get('features')
+ if node_features:
+ u_features = node_features.unify(features)
+ if u_features != 'fail':
+ return analysis.get('root'), u_features
+# print(" Matching group features {} and sentence
features {}".format(features, node_features))
+# if node_features and
node_features.unify(features) != 'fail':
+# return True
+ return False
elif self.token == item:
# item matches this node's token; features are irrelevant
return True
@@ -180,43 +272,392 @@
for analysis in self.analyses:
root = analysis.get('root', '')
node_features = analysis.get('features')
+# print(" SNode features {}".format(node_features))
if root == item:
if not features:
- return True
+ return root, None
+# return True
elif not node_features:
- return True
- elif node_features.unify(features):
- return True
+ return root, None
+# return True
+ else:
+ u_features = node_features.unify(features)
+ if u_features != 'fail':
+ return root, u_features
+# elif node_features.unify(features) != 'fail':
+# return True
return False

class GInst:

"""Instantiation of a group; holds variables and GNode objects."""

- def __init__(self, group, sentence, head_index, snode_indices):
+ def __init__(self, group, sentence, head_index, snode_indices, index):
# The Group object that this "instantiates"
self.group = group
self.sentence = sentence
+ self.target = sentence.target
+ # Index of group within the sentence
+ self.index = index
# Index of SNode associated with group head
self.head_index = head_index
# List of GNodes
self.nodes = [GNode(self, index, indices) for index, indices in
enumerate(snode_indices)]
+ # Dict of variables specific to this group
+ self.variables = {}
+ # List of target language groups
+ self.translations = []

def __repr__(self):
return '<<{}:{}>>'.format(self.group.name, self.group.id)

+ def create_variables(self):
+ pass
+
+ def set_translations(self):
+ """Find the translations of the group in the target language."""
+ translations = self.group.get_translations(self.target.abbrev,
False)
+ # If alignments are missing, add default alignment
+ for i, t in enumerate(translations):
+ if len(t) == 1:
+ translations[i] = [t[0], {'alg':
list(range(len(self.nodes)))}]
+# print("Translations for {}: {}".format(self, translations))
+ ntokens = len(self.group.tokens)
+ for tgroup, alignment in translations:
+ if isinstance(tgroup, str):
+ # First find the target Group object
+ tgroup = self.target.groupnames[tgroup]
+ # Make any TNodes required
+ nttokens = len(tgroup.tokens)
+ tnodes = []
+ if nttokens > ntokens:
+ # Target group has more nodes than source group.
+ # Indices of groups that are not empty.
+ full_t_indices = set(alignment['alg'])
+ empty_t_indices = set(range(nttokens)) - full_t_indices
+ for i in empty_t_indices:
+ empty_t_token = tgroup.tokens[i]
+ empty_t_feats = tgroup.features[i] if tgroup.features
else None
+ tnodes.append(TNode(empty_t_token, empty_t_feats,
self, i))
+ # Deal with individual gnodes in the group
+ gnodes = []
+ for gn_index, gnode in enumerate(self.nodes):
+ # Align gnodes with target tokens and features
+ tokens = tgroup.tokens
+ features = tgroup.features
+ targ_index = alignment['alg'][gn_index]
+ if targ_index < 0:
+ # This means there's no target language token
+ continue
+ agrs = alignment['agr'][gn_index] if 'agr' in alignment
else None
+ token = tokens[targ_index]
+ feats = features[targ_index] if features else None
+ gnodes.append((gnode, token, feats, agrs, targ_index))
+ # Copy this information??
+# gnode.translations.append((token, feats, agrs,
targ_index))
+ self.translations.append((tgroup, gnodes, tnodes))
+ # Assign corresponding translations to gnodes
+# for gn_index, gnode in enumerate(self.nodes):
+# for tgroup, alignment, tnodes in self.translations:
+# # Align gnodes with target tokens and features
+# tokens = tgroup.tokens
+# features = tgroup.features
+# targ_index = alignment['alg'][gn_index]
+# agrs = alignment['agr'][gn_index] if 'agr' in alignment
else None
+# if targ_index < 0:
+# # This means there's no target language token
+# continue
+# token = tokens[targ_index]
+# feats = features[targ_index] if features else None
+# gnode.translations.append((token, feats, agrs,
targ_index))
+
+
class GNode:

    """Representation of a single node (word, position) within a GInst
    object."""

    def __init__(self, ginst, index, snodes):
        """ginst: the owning GInst; index: this node's position within the group;
        snodes: list of (sentence-node index, analysis) pairs for the sentence
        nodes this group node can match."""
        self.ginst = ginst
        self.index = index
        # Parallel lists: matching sentence-node indices and their analyses
        self.snode_indices = [s[0] for s in snodes]
        self.snode_anal = [s[1] for s in snodes]
        # Whether this is the head of the group
        self.head = index == ginst.group.head_index
        # Group word, etc. associated with this node
        self.token = ginst.group.tokens[index]
        # Whether the associated token is abstract (a category)
        self.cat = Entry.is_cat(self.token)
        # Features associated with this group node (None if the group has none)
        groupfeats = ginst.group.features
        if groupfeats:
            self.features = groupfeats[index]
        else:
            self.features = None
        # List of target-language token and features associated with this gnode
#        self.translations = []

    def __repr__(self):
        return "{}|{}".format(self.ginst, self.token)

    def create_variables(self):
        """Placeholder: constraint-satisfaction variables not implemented yet."""
        pass
+
class TNode:

    """A node in a target-language group that has no counterpart in the
    source-language group it translates. Such nodes contribute extra
    target-language tokens (with optional features) to the output."""

    def __init__(self, token, features, ginst, index):
        # Target-language token and its (possibly absent) features
        self.token = token
        self.features = features
        # Owning GInst and the sentence it belongs to
        self.ginst = ginst
        self.sentence = ginst.sentence
        # Position of this node within the target group
        self.index = index

    def generate(self):
        """Return the list of surface forms for this node. With no features,
        the bare token is the only form; otherwise ask the target language
        to generate forms from token + features."""
        if not self.features:
            return [self.token]
        return self.sentence.target.generate(self.token, self.features)

    def __repr__(self):
        return "~{}|{}".format(self.ginst, self.token)
+
class Solution:

    """A non-conflicting set of groups for a sentence: at most one instance
    GNode for each sentence token, exactly one sentence token for each
    obligatory GNode in a selected group. Created when a complete variable
    assignment is found for a sentence."""

    def __init__(self, sentence, ginsts, s2gnodes, g2snodes, index):
        """sentence: source Sentence; ginsts: selected GInsts;
        s2gnodes: per sentence node, the set of gnode indices assigned to it;
        g2snodes: gnode->snode assignment (recorded; not yet used here);
        index: id of this solution within the sentence."""
        self.sentence = sentence
        # List of sets of gnode indices, one set per sentence node
        self.s2gnodes = s2gnodes
        self.ginsts = ginsts
        # Was accepted but silently dropped before; keep it for callers
        self.g2snodes = g2snodes
        self.index = index
        # A list of pairs for each snode: (gnodes, features)
        self.snodes = []
        # List of Translation objects
        self.translations = []

    def __repr__(self):
        return "|< {} >|({})".format(self.sentence.raw, self.index)

    def translate(self):
        """Do everything you need to create the translation."""
        self.merge_nodes()
        for ginst in self.ginsts:
            ginst.set_translations()
        self.make_translations()

    def make_translations(self):
        """Combine GInsts for each translation in translation products, and
        separate gnodes into a dict for each translation."""
        translations = itertools.product(*[g.translations for g in self.ginsts])
        # enumerate replaces the hand-maintained index counter
        for index, translation in enumerate(translations):
            t = Translation(self, translation, index)
            t.initialize()
            self.translations.append(t)

    def merge_nodes(self):
        """Merge the source features of cat and inst GNodes associated with
        each SNode, storing (gnodes, unified features) pairs in self.snodes."""
        for snode, gn_indices in zip(self.sentence.nodes, self.s2gnodes):
            # gn_indices is either one or two ints indexing gnodes in
            # self.sentence.gnodes
            gnodes = [self.sentence.gnodes[index] for index in gn_indices]
            features = []
            for gnode in gnodes:
                snode_indices = gnode.snode_indices
                snode_index = snode_indices.index(snode.index)
                snode_anal = gnode.snode_anal[snode_index]
                if snode_anal:
                    features.append(snode_anal[1])
            # Could this fail??
            features = Features.unify_all(features)
            self.snodes.append((gnodes, features))

    def build_translations(self):
        """Generate target-language node forms for each Translation.
        NOTE(review): this looks superseded by Translation.build(), which
        contains nearly identical logic -- confirm before relying on it."""
        # Iterate over a snapshot: node lists are appended to
        # self.translations inside the loop (was: `for translation in self.t`,
        # a NameError).
        for translation in list(self.translations):
            trans_index = translation.index
            # Print after trans_index is bound (was printed before assignment)
            print('TRANSLATION {}'.format(trans_index))
            trans_nodes = []
            tginsts, tgnodes = translation.groups_tnodes, translation.gnode_dict
            # Figure out the target forms for each snode
            for snode, (gnodes, features) in zip(self.sentence.nodes, self.snodes):
                if len(gnodes) > 1:
                    # There are two gnodes for this snode; only the concrete
                    # node can have translations
                    gn0 = tgnodes[gnodes[0]]
                    gn1 = tgnodes[gnodes[1]]
                    tgroups, tokens, targ_feats, agrs, t_index = zip(gn0, gn1)
                    print('snode {}, gnodes {}'.format(snode, gnodes))
                    token = False
                    i = 0
                    # Find the token that's not a cat
                    while not token:
                        t = tokens[i]
                        if not Entry.is_cat(t):
                            token = t
                        i += 1
                    targ_feats = Features.unify_all(targ_feats)
                    # Merge the agreements; combine_agrs is defined on
                    # Translation (was: Solution.combine_agrs, AttributeError)
                    agrs = Translation.combine_agrs(agrs)
                    t_index = [(tgroups[0], gn0[-1]), (tgroups[1], gn1[-1])]
                    print('  tokens {}, targ_feats {}, agrs {}, t_index {}'.format(tokens, targ_feats, agrs, t_index))
                else:
                    gnode = gnodes[0]
                    print('snode {}, gnode {}'.format(snode, gnode))
                    tgroup, token, targ_feats, agrs, t_index = tgnodes[gnode]
                    t_index = [(tgroup, t_index)]
                    print('  tokens {}, targ_feats {}, agrs {}, t_index {}'.format(token, targ_feats, agrs, t_index))
                features.agree(targ_feats, agrs)
                gen = self.sentence.target.generate(token, targ_feats)
                print("  Generating {}/{}: {}".format(token, targ_feats, gen))
                trans_nodes.append((gen, t_index))
            # Add TNode elements (no corresponding source node)
            for ginst, tnodes in tginsts:
                if tnodes:
                    for tnode in tnodes:
                        forms = tnode.generate()
                        index = [(ginst, tnode.index)]
                        trans_nodes.append((forms, index))
            self.translations.append(trans_nodes)
+
class Translation:
    """Representation of a single translation for an input sentence.
    Multiple translations are possible with a single Solution."""

    def __init__(self, solution, attribs, index):
        """solution: owning Solution; index: id within the solution;
        attribs: iterable of (tgroup, tgnodes, tnodes) triples, where each
        tgnode entry is (tgnode, tokens, feats, agrs, t_index)."""
        self.solution = solution
        self.index = index
        self.sentence = solution.sentence
        # Create GNode dict and list of target group, gnodes and tnodes
        # from attributes
        self.gnode_dict = {}
        self.groups_tnodes = []
        for tgroup, tgnodes, tnodes in attribs:
            for tgnode, tokens, feats, agrs, t_index in tgnodes:
                self.gnode_dict[tgnode] = (tgroup, tokens, feats, agrs, t_index)
            self.groups_tnodes.append((tgroup, tnodes))
        # form list / order constraint pairs for each sentence position
        self.nodes = []
        # pairs of node indices representing order constraints
        self.order_pairs = []
        # Root domain store for variables
        self.dstore = DStore(name="T{}".format(self.index))
        # Order variables for each node
        self.variables = []
        # Order and disjunction constraints
        self.constraints = []
        # Translation needs a solver to figure out positions of words
        self.solver = Solver(self.constraints, self.dstore)

    def __repr__(self):
        return "{}[{}] ->".format(self.solution, self.index)

    def initialize(self):
        """Set up everything needed to run the constraints and generate
        the translation."""
        self.build()
        self.make_order_pairs()
        self.create_variables()
        self.create_constraints()

    def build(self):
        """Unify translation features for merged nodes, map agr features
        from source to target, and generate surface target forms from the
        resulting roots and features."""
        print('Building {}'.format(self))
        tginsts, tgnodes = self.groups_tnodes, self.gnode_dict
        # Figure out the target forms for each snode
        for snode, (gnodes, features) in zip(self.sentence.nodes, self.solution.snodes):
            t_indices = []
            if len(gnodes) > 1:
                # There are two gnodes for this snode; only the concrete node
                # can have translations
                gn0, gn1 = tgnodes[gnodes[0]], tgnodes[gnodes[1]]
                tgroups, tokens, targ_feats, agrs, t_index = zip(gn0, gn1)
                token = False
                i = 0
                # Find the token that's not a cat
                while not token:
                    t = tokens[i]
                    if not Entry.is_cat(t):
                        token = t
                    i += 1
                targ_feats = Features.unify_all(targ_feats)
                # Merge the agreements
                agrs = Translation.combine_agrs(agrs)
                # Only multi-token groups constrain word order
                if len(tgroups[0].tokens) > 1:
                    t_indices.append((tgroups[0], gn0[-1]))
                if len(tgroups[1].tokens) > 1:
                    t_indices.append((tgroups[1], gn1[-1]))
            else:
                gnode = gnodes[0]
                tgroup, token, targ_feats, agrs, t_index = tgnodes[gnode]
                if len(tgroup.tokens) > 1:
                    t_indices.append((tgroup, t_index))

            # Make target and source features agree as required
            features.agree(targ_feats, agrs)
            # Generate target forms for this SNode
            gen = self.sentence.target.generate(token, targ_feats)
            print("  Generating node form {}/{}: {}".format(token, targ_feats, gen))
            self.nodes.append((gen, t_indices))
        # Add TNode elements (target nodes with no source counterpart)
        # (removed unused local `tgnode_elements`)
        for ginst, tnodes in tginsts:
            if tnodes:
                for tnode in tnodes:
                    forms = tnode.generate()
                    print('  Generating tnode form {}/{}: {}'.format(tnode.token, tnode.features, forms))
                    index = [(ginst, tnode.index)]
                    self.nodes.append((forms, index))

    @staticmethod
    def combine_agrs(agr_list):
        """Merge agr dicts in agr_list into a single agr dict; empty or None
        entries are skipped. Return 'fail' on a value conflict."""
        result = {}
        for agr in agr_list:
            if not agr:
                continue
            for k, v in agr.items():
                if k in result:
                    if result[k] != v:
                        print("Warning: agrs in {} failed to merge".format(agr_list))
                        return 'fail'
                else:
                    result[k] = v
        return result

    def make_order_pairs(self):
        """Convert group/index pairs to integer (node index) order pairs."""
        # Map each target group to the (node index, target index) pairs it covers
        tgroup_dict = {}
        for index, (forms, constraints) in enumerate(self.nodes):
            for tgroup, tg_index in constraints:
                if tgroup not in tgroup_dict:
                    tgroup_dict[tgroup] = []
                tgroup_dict[tgroup].append((index, tg_index))
        for pairs in tgroup_dict.values():
            for pairpair in itertools.combinations(pairs, 2):
                pairpair = list(pairpair)
                # Sort by the target index so earlier target position comes first
                pairpair.sort(key=lambda x: x[1])
                self.order_pairs.append([x[0] for x in pairpair])

    def create_variables(self):
        """Create an IVar (position variable) for each translation node."""
        nnodes = len(self.nodes)
        self.variables = [IVar("o{}".format(i), set(range(nnodes)), rootDS=self.dstore)
                          for i in range(nnodes)]

    def create_constraints(self):
        """Make order (precedence) and disjunction constraints over the
        position variables."""
        for first, second in self.order_pairs:
            self.constraints.append(SetPrecedence([self.variables[first],
                                                   self.variables[second]]))
        self.constraints.extend(Disjoint(self.variables).constraints)
+
=======================================
--- /l3lite/variable.py Wed Apr 16 06:00:17 2014 UTC
+++ /l3lite/variable.py Sun Apr 27 18:07:59 2014 UTC
@@ -47,10 +47,25 @@
self.level = level
# Undetermined variables
self.undetermined = []
+ # Essential undetermined variables
+ self.ess_undet = []

def __repr__(self):
- return '<DS {}/{}>'.format(self.name, self.level)
+ return '@ {}/{}'.format(self.name, self.level)

+ def is_determined(self, essential=True):
+ """Are all variables in dstore determined that need to be
determined?"""
+ if essential:
+ if self.ess_undet:
+ return False
+ else:
+ print('{} has {} undetermined variables'.format(self,
len(self.ess_undet)))
+ elif self.undetermined:
+ return False
+ else:
+ print('{} has {} undetermined variables'.format(self,
len(self.undetermined)))
+ return True
+
def clone(self, constraint=None, name='', project=False, verbosity=0):
"""Create a new dstore by applying the basic constraint
to the bindings in this store."""
@@ -58,6 +73,7 @@
problem=self.problem, parent=self)
self.children.append(new_store)
new_store.undetermined = self.undetermined[:]
+ new_store.ess_undet = self.ess_undet[:]
constraint.infer(dstore=new_store, verbosity=0, tracevar=[])
for var in constraint.variables:
# See if the new variable(?s) is now determined
@@ -76,6 +92,8 @@
lower_card=0, upper_card=MAX,
problem=None, dstores=None, rootDS=None,
constraint=None,
+ # Whether a complete solution depends on a single value
for this variable
+ essential=True,
# Vars with low weights are "peripheral".
weight=1):
self.name = name
@@ -83,6 +101,7 @@
if problem:
self.problem.add_variable(self)
self.constraints = [constraint] if constraint else []
+ self.essential = essential
self.value = None
# Normally initialize with a top-level domain store
self.rootDS = rootDS or DS0
@@ -91,6 +110,8 @@
# Add the variable to the list of undetermined variables for
# the dstore
self.rootDS.undetermined.append(self)
+ if essential:
+ self.rootDS.ess_undet.append(self)
self.weight = weight
if lower_domain != None:
self.lower_domain = lower_domain
@@ -102,6 +123,7 @@
self.upper_domain = ALL.copy()
self.init_lower_card = max(lower_card, len(self.lower_domain))
self.init_upper_card = min(upper_card, len(self.upper_domain))
+ self.max = MAX
self.init_values(dstore=self.rootDS)

def __repr__(self):
@@ -240,6 +262,12 @@
dstore = dstore or self.rootDS
return self.get_upper(dstore=dstore) -
self.get_lower(dstore=dstore)

+ def det_update(self, dstore=None):
+ if dstore:
+ dstore.undetermined.remove(self)
+ if self.essential:
+ dstore.ess_undet.remove(self)
+
def determined(self, dstore=None, constraint=None, verbosity=0):
"""Attempt to determine the variable, returning the value if this
is possible,
False if it's not."""
@@ -263,8 +291,7 @@
self.set_upper_card(value_card, dstore=dst)
if verb > 1:
print(' {} is determined at {}'.format(self, value))
- if dst:
- dst.undetermined.remove(self)
+ self.det_update(dstore=dst)
return value
lower = self.get_lower(dstore=dstore)
upper = self.get_upper(dstore=dstore)
@@ -304,8 +331,8 @@
self.set_value(value, dstore=dstore)
self.set_lower_card(val_card, dstore=dstore)
self.set_upper_card(val_card, dstore=dstore)
- if dstore and self in dstore.undetermined:
- dstore.undetermined.remove(self)
+ if self in dstore.undetermined:
+ self.det_update(dstore)
if orig_upper != value or orig_lower != value:
return True
return False
@@ -341,15 +368,15 @@
self.set_value(lower, dstore=dstore)
self.set_lower_card(val_len, dstore=dstore)
self.set_upper_card(val_len, dstore=dstore)
- if dstore and self in dstore.undetermined:
- dstore.undetermined.remove(self)
+ if self in dstore.undetermined:
+ self.det_update(dstore)
elif len(new_upper) == lower_card:
val_len = lower_card
self.set_lower(new_upper, dstore=dstore)
self.set_value(new_upper, dstore=dstore)
self.set_upper_card(val_len, dstore=dstore)
- if dstore and self in dstore.undetermined:
- dstore.undetermined.remove(self)
+ if self in dstore.undetermined:
+ self.det_update(dstore)
return True
return False

@@ -395,8 +422,8 @@
if det:
if new_lower == upper and upper_card ==
self.lower_card(dstore=dstore):
self.set_value(upper, dstore=dstore)
- if dstore and self in dstore.undetermined:
- dstore.undetermined.remove(self)
+ if self in dstore.undetermined:
+ self.det_update(dstore)
return True
return False

@@ -414,8 +441,8 @@
# Determine
self.set_lower(upper, dstore=dstore)
self.set_value(upper, dstore=dstore)
- if dstore and self in dstore.undetermined:
- dstore.undetermined.remove(self)
+ if self in dstore.undetermined:
+ self.det_update(dstore)
return True
return False

@@ -433,8 +460,8 @@
# Determine
self.set_upper(lower, dstore=dstore)
self.set_value(lower, dstore=dstore)
- if dstore and self in dstore.undetermined:
- dstore.undetermined.remove(self)
+ if self in dstore.undetermined:
+ self.det_update(dstore)
return True
return False

@@ -504,7 +531,7 @@
if verbosity > 1:
print(' {} is determined at {}'.format(self, upper))
if dstore:
- dstore.undetermined.remove(self)
+ self.det_update(dstore)
return upper
return False

@@ -515,7 +542,7 @@

### Variables that are pre-determined.

-class Determined(Var):
+class DetVar(Var):
"""Pre-determined variable. If DStore is not specified in constructor,
the variable is determined in all DStores. Should not be modified."""

@@ -570,14 +597,14 @@
if feature in {'lower_card', 'upper_card'}:
return len(self.value)

-class DetIVar(Determined, IVar):
+class DetIVar(DetVar, IVar):

def __init__(self, name='', value=0, dstore=None):
IVar.__init__(self, name, rootDS=dstore)
# value could be the empty set
if not isinstance(value, set):
value = {value}
- Determined.__init__(self, name, value, dstore)
+ DetVar.__init__(self, name, value, dstore)
self.init_domain = value
self.default_value = value

@@ -618,4 +645,4 @@
return repr(self.value)

# Constant variables, determined in all DStores
-EMPTY = Determined("empty", set())
+EMPTY = DetVar("empty", set())
=======================================
--- /l3xdg/solver.py Wed Feb 5 00:32:05 2014 UTC
+++ /l3xdg/solver.py Sun Apr 27 18:07:59 2014 UTC
@@ -338,187 +338,6 @@
if verbosity and self.status == CSpace.succeeded:
print('SUCCEEDED at {} iterations'.format(n))

-## def check_projector(self, projector, verbosity=0, tracevar=None):
-## if projector in self.dead_projectors:
-## return CSpace.skipped
-## if self.newly_determined & projector.right.get_variables():
-## vrbs = 2 if (projector.variable in tracevar) else verbosity
-## fail = not projector.check(dstore=self.dstore, runner=self,
verbosity=vrbs)
-## if fail:
-## if verbosity:
-## print()
-## print('FAILED (check)')
-## print(' Projector {}'.format(projector))
-## if projector.constraint:
-## print(' Constraint
{}'.format(projector.constraint))
-### print(' RHS',
projector.eval_right(dstore=self.dstore))
-## return CSpace.failed
-## self.dead_projectors.add(projector)
-
-## def run_projector(self, projector, verbosity=0, tracevar=None,
-## checkdead=True, checkvar=False, kill=False):
-## if checkdead and projector in self.dead_projectors:
-## return CSpace.skipped
-## if checkvar and projector.variable in self.newly_determined:
-## return CSpace.skipped
-## vrbs = 2 if (projector.variable in tracevar) else verbosity
-## # Later save time by calling check_projector if variable is
newly determined
-## changed, failed = projector.update(dstore=self.dstore,
runner=self, verbosity=vrbs)
-## if failed:
-## if verbosity:
-## print()
-## print('FAILED (run)')
-## print(' Projector {}'.format(projector))
-## if projector.constraint:
-## print(' Constraint {}'.format(projector.constraint))
-### print(' RHS', projector.eval_right(dstore=self.dstore))
-### print('{} projectors evaluated'.format(evalpred))
-## return CSpace.failed
-## if kill:
-## self.dead_projectors.add(projector)
-## if changed:
-## # p's variable was updated
-## if verbosity > 1:
-## print()
-## print('{} SUCCEEDED'.format(projector))
-## return CSpace.succeeded
-##
-## def run_projectors(self, verbosity=0, tracevar=None, traceprojs=[],
cutoff=100):
-## active_projectors = self.projectors
-## it = 0
-## variables = set()
-## if verbosity:
-## print('# Projectors: {}'.format(len(active_projectors)))
-## print('PROPAGATION: Iterations (active projectors)...')
-## # For evaluating different projector orders, keep track of the
number that are
-## # evaluated
-## evalpred = 0
-## n_det = 0
-## if self.depth == 0:
-## # For the first CS, do determined projectors first
-## for p in active_projectors:
-## if p.right.is_determined():
-## run1 = self.run_projector(p, verbosity=verbosity,
tracevar=tracevar,
-## checkdead=False,
checkvar=False, kill=True)
-## if run1 == CSpace.failed:
-## return CSpace.failed
-## else:
-## n_det += 1
-## evalpred += 1
-## if verbosity > 1:
-## print('{} determined projectors'.format(n_det))
-## print('{} determined
variables'.format(len(self.newly_determined)))
-### for v in self.newly_determined:
-### vproj = v.all_projectors()
-### for p in vproj:
-### run1 = self.run_projector(p, checkdead=True,
checkvar=False,
-### verbosity=verbosity,
tracevar=tracevar)
-### if run1 == CSpace.failed:
-### return CSpace.failed
-### print('{} newly determined'.format(len(self.newly_determined)))
-## while not self.fixed_point(active_projectors):
-### t1 = time.time()
-## if verbosity:
-## if (it + 1) % 10 == 0:
-## print()
-## print('{}({})'.format(it+1, len(active_projectors)),
end=' ')
-### print('Iteration {}, {} active projectors'.format(it+1,
len(active_projectors)))
-## if tracevar:
-## if verbosity:
-## print()
-## print('Traced variables: ', end='')
-## print(tracevar)
-## for v in tracevar:
-## v.pprint(dstore=self.dstore)
-## if not isinstance(active_projectors, list):
-## active_projectors = list(active_projectors)
-## self.sort_projectors(active_projectors)
-## n_skipped = 0
-## for p in active_projectors:
-## run1 = self.run_projector(p, checkdead=True,
checkvar=False,
-## verbosity=verbosity,
tracevar=tracevar)
-## if run1 == CSpace.failed:
-## return CSpace.failed
-## elif run1 == CSpace.skipped:
-## n_skipped += 1
-## elif run1 == CSpace.succeeded:
-## evalpred += 1
-## if verbosity > 1:
-## print()
-## print(' Skipped {} dead projectors'.format(n_skipped))
-### t2 = time.time()
-### print('{} newly determined
variables'.format(len(self.newly_determined)))
-## # For all variables that have been determined on this
iteration, run those
-## # of their projectors whose right-hand sides contain any
newly determined
-## # variables one more time.
-### n_checked = 0
-### n_dying = 0
-### n_skipped = 0
-## if verbosity > 1:
-## print()
-## print(' {} newly determined
vars'.format(len(self.newly_determined)))
-## for v in self.newly_determined:
-## vproj = v.all_projectors()
-## for p in vproj:
-## check1 = self.check_projector(p,
verbosity=verbosity, tracevar=tracevar)
-## if check1 == CSpace.failed:
-## return CSpace.failed
-### n_dying += 1
-## evalpred += 1
-### if verbosity > 1:
-### print(' Checked {} of {} dying
projectors'.format(n_checked, n_dying))
-### print(' {} dead
projectors'.format(len(self.dead_projectors)))
-## active_projectors = set()
-## self.newly_determined = set()
-## if self.executed_events:
-## for e in self.executed_events:
-## next_projectors = {p for p in e.reexec_projs if p
not in self.dead_projectors}
-## active_projectors.update(next_projectors)
-### elif verbosity:
-### print()
-### print('FIXED POINT REACHED AT {} iterations'.format(it))
-## self.executed_events = set()
-### t3 = time.time()
-### print('Times {}, {}'.format(t2-t1, t3-t2))
-## it += 1
-## if it >= cutoff:
-## if verbosity:
-## print()
-## print('HALTING, wakeful propagators')
-## for p in awaken:
-## print(p)
-## break
-## if verbosity > 1:
-## print()
-## print('Variables')
-## for v in variables:
-## v.pprint(dstore=self.dstore)
-## if verbosity:
-## print()
-## print('FIXED POINT REACHED at {} iterations, {} projectors
evaluated'.format(it-1, evalpred))
-## return active_projectors
-##
-## def sort_projectors(self, projectors):
-## """Sort the projectors for a given iteration."""
-### print('Sorting projectors by cost of determining variables')
-### projectors.sort(key=lambda p:
p.variable.cost(dstore=self.dstore))
-## # Sort projectors by the frequency of the corresponding
expression
-## # on the right-hand side of rules.
-### projectors.sort(key=lambda p: p.get_right_counts())
-## projectors.sort(key=lambda p: self.proj_princ_score(p),
reverse=True)
-##
-## def proj_princ(self, projector):
-## """Get the principle ID associated with a projector."""
-## var = projector.variable
-## princ = var.get_princ(var)
-## return princ
-##
-## def proj_princ_score(self, projector):
-## princ = self.proj_princ(projector)
-## if princ in PRINCS:
-## return PRINCS.index(princ)
-## return 20
-
def propagate(self, propagators, verbosity=0, tracevar=None,
traceprops=[],
show_changed=False):
"""Run all of the awake propagators once.
=======================================
--- /lite.py Sun Apr 20 07:07:10 2014 UTC
+++ /lite.py Sun Apr 27 18:07:59 2014 UTC
@@ -36,20 +36,46 @@
#import cProfile
#import pstats

def eat_fish():
    """Run the small 'eat fish' example ('amh' source, 'orm' target) and
    return its first solution."""
    source_lang, target_lang = l3lite.Language.load('amh', 'orm')
    sentence = l3lite.Sentence(raw="አሳ በላ", language=source_lang, target=target_lang)
    sentence.initialize()
    sentence.make_solution(sentence.groups, [{0, 1}, {2}])
    return sentence.solutions[0]
+
def not_eaten_fish():
    """Run a second 'amh'->'orm' example sentence (three groups, with a
    gnode-to-group assignment) and return its first solution."""
    source_lang, target_lang = l3lite.Language.load('amh', 'orm')
    sentence = l3lite.Sentence(raw="አሳ በልቶ አያውቅም", language=source_lang, target=target_lang)
    sentence.initialize()
    sentence.make_solution(sentence.groups, [{0, 1}, {2, 3}, {4}], [0, 0, 1, 1, 2])
    return sentence.solutions[0]
+
def order():
    """Build a small ordering test: four IVars over {0..3}, SetPrecedence
    constraints for the listed pairs plus Disjoint constraints.
    Return (constraints, variables)."""
    precedences = [(0, 1), (1, 2), (1, 3), (3, 2)]
    vrs = [l3lite.IVar('v0', {0, 1, 2, 3}),
           l3lite.IVar('v1', {0, 1, 2, 3}),
           l3lite.IVar('v2', {0, 1, 2, 3}),
           l3lite.IVar('v3', {0, 1, 2, 3})]
    cs = [l3lite.SetPrecedence([vrs[a], vrs[b]]) for a, b in precedences]
    cs.extend(l3lite.Disjoint(vrs).constraints)
    return cs, vrs
+
def language(name, abbrev):
    """Create an l3lite Language with the given name and abbreviation."""
    return l3lite.Language(name, abbrev)

-def phrases():
- eng = language('English', 'eng')
- esp = language('español', 'spa')
- amh = language('አማርኛ', 'amh')
- rra = eng.add_phrase(['%read', '$sbd', 'the', 'riot', 'act'],
head='%read')
-# clc = esp.add_phrase(['%cantar', 'a', '$algn', 'las', 'cuarenta'], 0)
- vo_awq = amh.add_phrase(['$vb', "%'wq"],
- head="%'wq",
- features=[{'tam': 'ger'},
{'tam': 'imf', 'pol': 'neg'}],
- name="%%በልቶ_አያቅም")
- return rra, vo_awq
+##def phrases():
+## eng = language('English', 'eng')
+## esp = language('español', 'spa')
+## amh = language('አማርኛ', 'amh')
+## rra = eng.add_phrase(['%read', '$sbd', 'the', 'riot', 'act'],
head='%read')
+### clc = esp.add_phrase(['%cantar', 'a', '$algn', 'las', 'cuarenta'],
0)
+## vo_awq = amh.add_phrase(['$vb', "%'wq"],
+## head="%'wq",
+## features=[{'tam': 'ger'},
{'tam': 'imf', 'pol': 'neg'}],
+## name="%%በልቶ_አያቅም")
+## return rra, vo_awq

##def eg():
## ### Spanish
@@ -119,102 +145,102 @@
e, s = l3lite.Language("English", 'eng'),
l3lite.Language("español", 'spa')
return u, e, s

-def t1():
- # broke the window
- g0_nodes = {0, 1, 2}
- # the boy
- g1_nodes = {3, 4}
- # the window
- g2_nodes = {5, 6}
- # broke
- g3_nodes = {7}
- # .
- g4_nodes = {8}
- # the window broke
- g5_nodes = {9, 10, 11}
- # the boy broke the window .
- w = {0, 1, 2, 3, 4, 5}
- g = {0, 1, 2, 3, 4, 5}
- w2gnodes = [l3lite.IVar('the0', {1, 3, 5, 9}), # {3}
- l3lite.DetIVar('boy', 4),
- l3lite.IVar('broke', {0, 7, 11}), # {0, 7}
- l3lite.IVar('the3', {1, 3, 5, 9}), # {1, 5}
- l3lite.IVar('window', {2, 6, 10}), # {2, 6}
- l3lite.DetIVar('.', 8)]
- groups = l3lite.Var('groups', set(), g, 2, 6)
- gdetvars = [l3lite.Determined('g0', {0, 1, 2}),
- l3lite.Determined('g1', {3, 4}),
- l3lite.Determined('g2', {5, 6}),
- l3lite.Determined('g3', {7}),
- l3lite.Determined('g4', {8}),
- l3lite.Determined('g5', {9, 10, 11})]
- nodes = l3lite.Determined('nodes', w)
- ## Union selection on the gnodes of all words
- # Union of all group nodes used
- gnodeU = l3lite.Var('gnodeU', set(),
- {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 6, 6)
- w_usel = l3lite.UnionSelection(gnodeU, nodes, w2gnodes)
- ## Union selection on the gnodes of all groups
- g_usel = l3lite.UnionSelection(gnodeU, groups, gdetvars)
- ## Position constraints
- # Positions (word indices) of gnodes
- # Group 0
- gno0 = l3lite.DetIVar('gno0', 2) #broke
- gno1 = l3lite.IVar('gno1', {0, 3}) #the {3}
- gno2 = l3lite.DetIVar('gno2', 4) #window
- # Group 1
- gno3 = l3lite.IVar('gno3', {0, 3}) #the {0}
- gno4 = l3lite.DetIVar('gno4', 1) #boy
- # Group 2
- gno5 = l3lite.IVar('gno5', {0, 3}) #the {3}
- gno6 = l3lite.DetIVar('gno6', 4) #window
- # Group 3
- gno7 = l3lite.DetIVar('gno7', 2) #broke
- # Group 4
- gno8 = l3lite.DetIVar('gno8', 5) #.
- # Group 5 (impossible)
- gno9 = l3lite.IVar('gno9', {0, 3}) #the
- gno10 = l3lite.DetIVar('gno10', 4) #window
- gno11 = l3lite.DetIVar('gno11', 2) #broke
- # Position pair constraints
- gposcons = l3lite.Var('gposcons', set(),
- {(0, 1), (1, 2), (0, 2), (3, 4), (5, 6),
- (9, 10), (10, 11), (9, 11)})
- # Precedence selection
- precsel = l3lite.PrecedenceSelection(gposcons,
- [gno0, gno1, gno2, gno3, gno4,
gno5,
- gno6, gno7, gno8, gno9, gno10,
gno11])
- # Union selection on position pair constraints
- gp0 = l3lite.Determined('gp0', {(0, 1), (1, 2), (0, 2)})
- gp1 = l3lite.Determined('gp1', {(3, 4)})
- gp2 = l3lite.Determined('gp2', {(5, 6)})
- gp3 = l3lite.Determined('gp3', set())
- gp4 = l3lite.Determined('gp4', set())
- gp5 = l3lite.Determined('gp5', {(9, 10), (10, 11), (9, 11)})
- gpos = [gp0, gp1, gp2, gp3, gp4, gp5]
- gp_usel = l3lite.UnionSelection(gposcons, groups, gpos)
- ## Projectivity
- # Positions of gnodes in each group
- g0snode = l3lite.Var('g0snode', set(), {0, 1, 2, 3, 4}, 3, 3) # {2,
3, 4}
- g1snode = l3lite.Var('g1snode', set(), {0, 1, 2, 3, 4}, 2, 2) # {0, 1}
- g2snode = l3lite.Var('g2snode', set(), {0, 1, 2, 3, 4}, 2, 2) # {3, 4}
- g3snode = l3lite.DetIVar('g3snode', 2) # {2}
- g4snode = l3lite.DetIVar('g4snode', 5) # {5}
- g5snode = l3lite.Var('g5snode', set(), {0, 1, 2, 3, 4}, 3, 3) #
impossible
- # Set convexity constraint for each of these
- setconv = l3lite.ComplexSetConvexity(groups, [g0snode, g1snode,
g2snode,
- g3snode, g4snode,
g5snode])
- ## ComplexUnionSelection
- ## mainvars: [{0, 1, 2}, {3, 4}, ...]
- ## selvars: [g0snode, g1snode, ...]
- ## seqvars: w2gnodes
- ## selvar: groups
- complexU = l3lite.ComplexUnionSelection(selvar=groups,
- selvars=[g0snode, g1snode,
g2snode, g3snode,
- g4snode, g5snode],
- seqvars=w2gnodes,
- mainvars=gdetvars)
- return w_usel, g_usel, precsel, gp_usel, setconv, complexU
+##def t1():
+## # broke the window
+## g0_nodes = {0, 1, 2}
+## # the boy
+## g1_nodes = {3, 4}
+## # the window
+## g2_nodes = {5, 6}
+## # broke
+## g3_nodes = {7}
+## # .
+## g4_nodes = {8}
+## # the window broke
+## g5_nodes = {9, 10, 11}
+## # the boy broke the window .
+## w = {0, 1, 2, 3, 4, 5}
+## g = {0, 1, 2, 3, 4, 5}
+## w2gnodes = [l3lite.IVar('the0', {1, 3, 5, 9}), # {3}
+## l3lite.DetIVar('boy', 4),
+## l3lite.IVar('broke', {0, 7, 11}), # {0, 7}
+## l3lite.IVar('the3', {1, 3, 5, 9}), # {1, 5}
+## l3lite.IVar('window', {2, 6, 10}), # {2, 6}
+## l3lite.DetIVar('.', 8)]
+## groups = l3lite.Var('groups', set(), g, 2, 6)
+## gdetvars = [l3lite.DetVar('g0', {0, 1, 2}),
+## l3lite.DetVar('g1', {3, 4}),
+## l3lite.DetVar('g2', {5, 6}),
+## l3lite.DetVar('g3', {7}),
+## l3lite.DetVar('g4', {8}),
+## l3lite.DetVar('g5', {9, 10, 11})]
+## nodes = l3lite.DetVar('nodes', w)
+## ## Union selection on the gnodes of all words
+## # Union of all group nodes used
+## gnodeU = l3lite.Var('gnodeU', set(),
+## {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 6, 6)
+## w_usel = l3lite.UnionSelection(gnodeU, nodes, w2gnodes)
+## ## Union selection on the gnodes of all groups
+## g_usel = l3lite.UnionSelection(gnodeU, groups, gdetvars)
+## ## Position constraints
+## # Positions (word indices) of gnodes
+## # Group 0
+## gno0 = l3lite.DetIVar('gno0', 2) #broke
+## gno1 = l3lite.IVar('gno1', {0, 3}) #the {3}
+## gno2 = l3lite.DetIVar('gno2', 4) #window
+## # Group 1
+## gno3 = l3lite.IVar('gno3', {0, 3}) #the {0}
+## gno4 = l3lite.DetIVar('gno4', 1) #boy
+## # Group 2
+## gno5 = l3lite.IVar('gno5', {0, 3}) #the {3}
+## gno6 = l3lite.DetIVar('gno6', 4) #window
+## # Group 3
+## gno7 = l3lite.DetIVar('gno7', 2) #broke
+## # Group 4
+## gno8 = l3lite.DetIVar('gno8', 5) #.
+## # Group 5 (impossible)
+## gno9 = l3lite.IVar('gno9', {0, 3}) #the
+## gno10 = l3lite.DetIVar('gno10', 4) #window
+## gno11 = l3lite.DetIVar('gno11', 2) #broke
+## # Position pair constraints
+## gposcons = l3lite.Var('gposcons', set(),
+## {(0, 1), (1, 2), (0, 2), (3, 4), (5, 6),
+## (9, 10), (10, 11), (9, 11)})
+## # Precedence selection
+## precsel = l3lite.PrecedenceSelection(gposcons,
+## [gno0, gno1, gno2, gno3, gno4,
gno5,
+## gno6, gno7, gno8, gno9, gno10,
gno11])
+## # Union selection on position pair constraints
+## gp0 = l3lite.DetVar('gp0', {(0, 1), (1, 2), (0, 2)})
+## gp1 = l3lite.DetVar('gp1', {(3, 4)})
+## gp2 = l3lite.DetVar('gp2', {(5, 6)})
+## gp3 = l3lite.DetVar('gp3', set())
+## gp4 = l3lite.DetVar('gp4', set())
+## gp5 = l3lite.DetVar('gp5', {(9, 10), (10, 11), (9, 11)})
+## gpos = [gp0, gp1, gp2, gp3, gp4, gp5]
+## gp_usel = l3lite.UnionSelection(gposcons, groups, gpos)
+## ## Projectivity
+## # Positions of gnodes in each group
+## g0snode = l3lite.Var('g0snode', set(), {0, 1, 2, 3, 4}, 3, 3) # {2,
3, 4}
+## g1snode = l3lite.Var('g1snode', set(), {0, 1, 2, 3, 4}, 2, 2) # {0,
1}
+## g2snode = l3lite.Var('g2snode', set(), {0, 1, 2, 3, 4}, 2, 2) # {3,
4}
+## g3snode = l3lite.DetIVar('g3snode', 2) # {2}
+## g4snode = l3lite.DetIVar('g4snode', 5) # {5}
+## g5snode = l3lite.Var('g5snode', set(), {0, 1, 2, 3, 4}, 3, 3) #
impossible
+## # Set convexity constraint for each of these
+## setconv = l3lite.ComplexSetConvexity(groups, [g0snode, g1snode,
g2snode,
+## g3snode, g4snode,
g5snode])
+## ## ComplexUnionSelection
+## ## mainvars: [{0, 1, 2}, {3, 4}, ...]
+## ## selvars: [g0snode, g1snode, ...]
+## ## seqvars: w2gnodes
+## ## selvar: groups
+## complexU = l3lite.ComplexUnionSelection(selvar=groups,
+## selvars=[g0snode, g1snode,
g2snode, g3snode,
+## g4snode, g5snode],
+## seqvars=w2gnodes,
+## mainvars=gdetvars)
+## return w_usel, g_usel, precsel, gp_usel, setconv, complexU

def t2():
constraints = []
@@ -228,21 +254,21 @@
# 6: .(14)
groups = l3lite.Var('groups', set(), {0, 1, 2, 3, 4, 5, 6}, 2, 7)
## Group node indices
- gdetvars = [l3lite.Determined('g0', {0, 1, 2, 3, 4}), # 1 is abstract
- l3lite.Determined('g1', {5, 6}), # 6 is abstract
- l3lite.Determined('g2', {7, 8}),
- l3lite.Determined('g3', {9}),
- l3lite.Determined('g4', {10, 11}),
- l3lite.Determined('g5', {12, 13}),
- l3lite.Determined('g6', {14})]
+ gdetvars = [l3lite.DetVar('g0', {0, 1, 2, 3, 4}), # 1 is abstract
+ l3lite.DetVar('g1', {5, 6}), # 6 is abstract
+ l3lite.DetVar('g2', {7, 8}),
+ l3lite.DetVar('g3', {9}),
+ l3lite.DetVar('g4', {10, 11}),
+ l3lite.DetVar('g5', {12, 13}),
+ l3lite.DetVar('g6', {14})]
## Concrete group node indices
- gc_detvars = [l3lite.Determined('g0', {0, 2, 3, 4}), # 1 is abstract
- l3lite.Determined('g1', {5}), # 6 is abstract
- l3lite.Determined('g2', {7, 8}),
- l3lite.Determined('g3', {9}),
- l3lite.Determined('g4', {10, 11}),
- l3lite.Determined('g5', {12, 13}),
- l3lite.Determined('g6', {14})]
+ gc_detvars = [l3lite.DetVar('g0', {0, 2, 3, 4}), # 1 is abstract
+ l3lite.DetVar('g1', {5}), # 6 is abstract
+ l3lite.DetVar('g2', {7, 8}),
+ l3lite.DetVar('g3', {9}),
+ l3lite.DetVar('g4', {10, 11}),
+ l3lite.DetVar('g5', {12, 13}),
+ l3lite.DetVar('g6', {14})]
## Words
sentence = ['the', 'boy', 'read', 'us', 'the', 'riot', 'act']
## Word positions associated with group nodes (what about unused gnodes?)
@@ -295,7 +321,7 @@
w6ga = l3lite.Var('w6ga', set(), {6}, 0, 1)
w2ga_nodes = [l3lite.EMPTY, w1ga, l3lite.EMPTY, w3ga, l3lite.EMPTY,
w5ga, w6ga, l3lite.EMPTY]
## All word nodes
- nodes = l3lite.Determined('nodes', {0, 1, 2, 3, 4, 5, 6, 7})
+ nodes = l3lite.DetVar('nodes', {0, 1, 2, 3, 4, 5, 6, 7})
## Union selection on the gnodes of all words
# Words 1 (boy), 3 (us), 5 (riot), 6 (act) can have abstract nodes
constraints.extend(l3lite.Union([w1gn, w1cgn, w1ga]).constraints)
@@ -321,7 +347,7 @@
gsnodes = [g0snode, g1snode, g2snode, g3snode, g4snode, g5snode, g6snode]
# Positions of concrete gnodes in each group
g0cw = l3lite.Var('g0cw', set(), {0, 1, 2, 3, 4, 5, 6}, 4, 4)
- g1cw = l3lite.Determined('g1cw', {2})
+ g1cw = l3lite.DetVar('g1cw', {2})
g2cw = g2snode
g3cw = g3snode
g4cw = g4snode
@@ -365,8 +391,8 @@
# gn1w = gn8w | gn9w ; gn6w = gn11w | gn13w
# Abstract nodes in used groups
anodes = l3lite.Var('anodes', set(), {1, 6})
- constraints.append(l3lite.UnionSelection(anodes, groups, [l3lite.Determined('g0a', {1}),
- l3lite.Determined('g1a', {6}),
+ constraints.append(l3lite.UnionSelection(anodes, groups, [l3lite.DetVar('g0a', {1}),
+ l3lite.DetVar('g1a', {6}),
 l3lite.EMPTY, l3lite.EMPTY,
 l3lite.EMPTY, l3lite.EMPTY,
 l3lite.EMPTY]))
@@ -399,11 +425,11 @@
## Groups merged with groups, including the group itself
g0gm = l3lite.Var('g0gm', {0}, {0, 2, 3}, 1, 2)
g1gm = l3lite.Var('g1gm', {1}, {1, 4, 5}, 1, 2)
- g2gm = l3lite.Determined('g2gm', {2})
- g3gm = l3lite.Determined('g3gm', {3})
- g4gm = l3lite.Determined('g4gm', {4})
- g5gm = l3lite.Determined('g5gm', {5})
- g6gm = l3lite.Determined('g6gm', {6})
+ g2gm = l3lite.DetVar('g2gm', {2})
+ g3gm = l3lite.DetVar('g3gm', {3})
+ g4gm = l3lite.DetVar('g4gm', {4})
+ g5gm = l3lite.DetVar('g5gm', {5})
+ g6gm = l3lite.DetVar('g6gm', {6})
## Trees under gnode heads
g0tree = l3lite.Var('g0tree', set(), {0, 1, 2, 3, 4, 5, 6}, 5)
g1tree = l3lite.Var('g1tree', set(), {0, 1, 2, 3, 4, 5, 6}, 2)
@@ -423,33 +449,33 @@
# Select one or the other
# UnionSelection(choice, IVar('chooser', {0, 1}), [choice1, choice2])
g0merge_sel = l3lite.IVar('g0m_sel', {0, 1, 2})
- constraints.append(l3lite.UnionSelection(g0gm, g0merge_sel, [l3lite.Determined('g0g2', {0, 2}),
- l3lite.Determined('g0g3', {0, 3}),
- l3lite.Determined('g0_', {0})]))
+ constraints.append(l3lite.UnionSelection(g0gm, g0merge_sel, [l3lite.DetVar('g0g2', {0, 2}),
+ l3lite.DetVar('g0g3', {0, 3}),
+ l3lite.DetVar('g0_', {0})]))
 constraints.append(l3lite.UnionSelection(gan1m, g0merge_sel,
- [l3lite.Determined('gan1g8', {8}),
- l3lite.Determined('gan1g9', {9}),
+ [l3lite.DetVar('gan1g8', {8}),
+ l3lite.DetVar('gan1g9', {9}),
 l3lite.EMPTY]))
g1merge_sel = l3lite.IVar('g1m_sel', {0, 1, 2})
- constraints.append(l3lite.UnionSelection(g1gm, g1merge_sel, [l3lite.Determined('g1g4', {1, 4}),
- l3lite.Determined('g1g5', {1, 5}),
- l3lite.Determined('g1_', {1})]))
+ constraints.append(l3lite.UnionSelection(g1gm, g1merge_sel, [l3lite.DetVar('g1g4', {1, 4}),
+ l3lite.DetVar('g1g5', {1, 5}),
+ l3lite.DetVar('g1_', {1})]))
 constraints.append(l3lite.UnionSelection(gan6m, g1merge_sel,
- [l3lite.Determined('gan6g11', {11}),
- l3lite.Determined('gan6g13', {13}),
+ [l3lite.DetVar('gan6g11', {11}),
+ l3lite.DetVar('gan6g13', {13}),
 l3lite.EMPTY]))
# Position pair constraints
gposcons = l3lite.Var('gposcons', set(),
{(0, 1), (1, 2), (2, 3), (3, 4), (0, 2), (1, 3),
(2, 4), (0, 3), (1, 4),
(5, 6), (7, 8), (10, 11), (12, 13)})
# Union selection on position pair constraints
- gp0 = l3lite.Determined('gp0', {(0, 1), (1, 2), (2, 3), (3, 4), (0, 2), (1, 3), (2, 4), (0, 3), (1, 4)})
- gp1 = l3lite.Determined('gp1', {(5, 6)})
- gp2 = l3lite.Determined('gp2', {(7, 8)})
- gp3 = l3lite.Determined('gp3', set())
- gp4 = l3lite.Determined('gp4', {(10, 11)})
- gp5 = l3lite.Determined('gp5', {(12, 13)})
- gp6 = l3lite.Determined('gp6', set())
+ gp0 = l3lite.DetVar('gp0', {(0, 1), (1, 2), (2, 3), (3, 4), (0, 2), (1, 3), (2, 4), (0, 3), (1, 4)})
+ gp1 = l3lite.DetVar('gp1', {(5, 6)})
+ gp2 = l3lite.DetVar('gp2', {(7, 8)})
+ gp3 = l3lite.DetVar('gp3', set())
+ gp4 = l3lite.DetVar('gp4', {(10, 11)})
+ gp5 = l3lite.DetVar('gp5', {(12, 13)})
+ gp6 = l3lite.DetVar('gp6', set())
gpos = [gp0, gp1, gp2, gp3, gp4, gp5, gp6]
constraints.append(l3lite.PrecedenceSelection(gposcons,
[gn0w, gn1w, gn2w, gn3w,
gn4w, gn5w,
Reply all
Reply to author
Forward
0 new messages