from __future__ import annotations

import collections
import hashlib
import itertools
import os
import pickle
import typing

from . import types
from .utils import consume, keep_until, split, default_id_dict, default_fwd_dict
from .ordered import OrderedSet, OrderedFrozenSet
from .actions import Action, Replay, Reduce, FilterStates, Seq
from .grammar import End, ErrorSymbol, InitNt, Nt
from .rewrites import CanonicalGrammar
from .lr0 import LR0Generator, Term
from .aps import APS, Edge, Path
# StateAndTransitions objects are indexed using a StateId which is an integer.
StateId = int
# An Action, or an ordered sequence of Actions, which has to be performed.
DelayedAction = typing.Union[Action, typing.Tuple[Action, ...]]
class StateAndTransitions:
    """This is one state of the parse table, which has transitions based on
    terminals (text), non-terminals (grammar rules) and epsilon (reduce).

    In this model, epsilon transitions are used to represent code to be
    executed, such as reduce actions and any other actions."""

    # Numerical index of this state in the parse table.
    index: StateId
    # The stable_str of each LRItem we could be parsing in this state: the
    # places in grammar productions that tell what we've already parsed,
    # i.e. how we got to this state.
locations: OrderedFrozenSet[str]
    # Ordered set of Actions which are pushed to the next state after a
    # conflict.
delayed_actions: OrderedFrozenSet[DelayedAction]
    # Number of arguments of an action state.
    #
    # Instead of having action states with a non-empty replay list of terms,
    # we have a non-empty list of arguments, whose size is described by this
    # variable.
arguments: int
# Outgoing edges taken when shifting terminals.
terminals: typing.Dict[str, StateId]
# Outgoing edges taken when shifting nonterminals after reducing.
nonterminals: typing.Dict[Nt, StateId]
# Error symbol transitions.
errors: typing.Dict[ErrorSymbol, StateId]
# List of epsilon transitions with associated actions.
epsilon: typing.List[typing.Tuple[Action, StateId]]
# Set of edges that lead to this state.
backedges: OrderedSet[Edge]
    # Cached hash code. This class implements __hash__ and __eq__ in order to
    # help detect equivalent states (which must be merged, for correctness).
_hash: int
    # A hash code computed the same way as _hash, but used only for
    # human-readable output. The stability is useful for debugging, to match
    # states across multiple runs of the parser generator.
stable_hash: str
    def __init__(
            self,
            index: StateId,
            locations: OrderedFrozenSet[str],
            delayed_actions: OrderedFrozenSet[DelayedAction] = OrderedFrozenSet(),
            arguments: int = 0
    ) -> None:
        self.index = index
        self.terminals = {}
        self.nonterminals = {}
        self.errors = {}
        self.epsilon = []
        self.locations = locations
        self.delayed_actions = delayed_actions
        self.arguments = arguments
        self.backedges = OrderedSet()

        # NOTE: The hash of a state depends on its location in the LR0
        # parse-table, as well as the actions which have not yet been
        # executed.
        def hashed_content() -> typing.Iterator[object]:
            for item in sorted(self.locations):
                yield item
                yield "\n"
            yield "delayed_actions"
            for action in self.delayed_actions:
                yield hash(action)
            yield "arguments"
            yield arguments

        self._hash = hash(tuple(hashed_content()))
        h = hashlib.md5()
        h.update("".join(map(str, hashed_content())).encode())
        self.stable_hash = h.hexdigest()[:6]
    def is_inconsistent(self) -> bool:
        "Returns True if the state transitions are inconsistent."
        # TODO: We could easily allow having a state with non-terminal
        # transitions and other epsilon transitions, as the non-terminal shift
        # transitions are a form of condition based on the fact that a
        # non-terminal, produced by a reduce action, is consumed by the
        # automaton.
        if len(self.terminals) + len(self.nonterminals) + len(self.errors) > 0 \
           and len(self.epsilon) > 0:
            return True
        elif len(self.epsilon) == 1:
            if any(k.is_inconsistent() for k, s in self.epsilon):
                return True
        elif len(self.epsilon) > 1:
            if any(k.is_inconsistent() for k, s in self.epsilon):
                return True
            # NOTE: We can accept multiple conditions as epsilon transitions
            # iff they are checking the same variable with non-overlapping
            # values. This implies that we can implement these conditions as a
            # deterministic switch statement in the code emitter.
            if any(not k.is_condition() for k, s in self.epsilon):
                return True
            iterator = iter(self.epsilon)
            first, _ = next(iterator)
            if any(not first.check_same_variable(k) for k, s in iterator):
                return True
            # "type: ignore" because mypy does not see that the preceding
            # if-statement means all k.condition() actions are FilterFlags.
            pairs = itertools.combinations((k for k, s in self.epsilon), 2)
            if any(not k1.check_different_values(k2) for k1, k2 in pairs):
                return True
        else:
            try:
                self.get_error_symbol()
            except ValueError:
                return True
        return False
def shifted_edges(self) -> typing.Iterator[
typing.Tuple[typing.Union[str, Nt, ErrorSymbol], StateId]
]:
k: Term
        s: StateId
        for k, s in self.terminals.items():
            yield (k, s)
        for k, s in self.nonterminals.items():
            yield (k, s)
        for k, s in self.errors.items():
            yield (k, s)
def edges(self) -> typing.Iterator[typing.Tuple[Term, StateId]]:
k: Term
        s: StateId
        for k, s in self.terminals.items():
            yield (k, s)
        for k, s in self.nonterminals.items():
            yield (k, s)
        for k, s in self.errors.items():
            yield (k, s)
        for k, s in self.epsilon:
            yield (k, s)
def rewrite_state_indexes(
self,
state_map: typing.Dict[StateId, StateId]
    ) -> None:
        def apply_on_term(term: typing.Union[Term, None]) -> Term:
            assert term is not None
            if isinstance(term, Action):
                return term.rewrite_state_indexes(state_map)
            return term
self.index = state_map[self.index]
self.terminals = {
k: state_map[s] for k, s in self.terminals.items()
}
self.nonterminals = {
k: state_map[s] for k, s in self.nonterminals.items()
}
self.errors = {
k: state_map[s] for k, s in self.errors.items()
}
self.epsilon = [
(k.rewrite_state_indexes(state_map), state_map[s]) for k, s in self.epsilon
        ]
        # We cannot have multiple identical actions jumping to different
        # locations.
        assert len(self.epsilon) == len(set(k for k, _ in self.epsilon))
self.backedges = OrderedSet(
Edge(state_map[edge.src], apply_on_term(edge.term)) for edge in self.backedges
)
    def get_error_symbol(self) -> typing.Optional[ErrorSymbol]:
        if len(self.errors) > 1:
            raise ValueError("More than one error symbol on the same state.")
        else:
            return next(iter(self.errors), None)
    def __contains__(self, term: object) -> bool:
        if isinstance(term, Action):
            for t, s in self.epsilon:
                if t == term:
                    return True
            return False
        elif isinstance(term, Nt):
            return term in self.nonterminals
        elif isinstance(term, ErrorSymbol):
            return term in self.errors
        else:
            return term in self.terminals
    def __getitem__(self, term: Term) -> StateId:
        if isinstance(term, Action):
            for t, s in self.epsilon:
                if t == term:
                    return s
            raise KeyError(term)
        elif isinstance(term, Nt):
            return self.nonterminals[term]
        elif isinstance(term, ErrorSymbol):
            return self.errors[term]
        else:
            return self.terminals[term]
    def stable_str(self, states: typing.List[StateAndTransitions]) -> str:
        conflict = ""
        if self.is_inconsistent():
            conflict = " (inconsistent)"
        return "{}{}:\n{}".format(self.stable_hash, conflict, "\n".join([
            "\t{} --> {}".format(k, states[s].stable_hash) for k, s in self.edges()]))
    def __str__(self) -> str:
        conflict = ""
        if self.is_inconsistent():
            conflict = " (inconsistent)"
        return "{}{}:\n{}".format(self.index, conflict, "\n".join([
            "\t{} --> {}".format(k, s) for k, s in self.edges()]))
    def __eq__(self, other: object) -> bool:
        return (isinstance(other, StateAndTransitions)
                and sorted(self.locations) == sorted(other.locations)
                and sorted(self.delayed_actions) == sorted(other.delayed_actions)
                and self.arguments == other.arguments)
    def __hash__(self) -> int:
        return self._hash
DebugInfo = typing.Dict[StateId, int]
class ParseTable:
    """The parser can be represented as a matrix of state transitions where on
    one side we have the current state, and on the other we have the expected
    terminal, non-terminal or epsilon transition.

    The terminals `a` are the tokens which are read from the input. The
    non-terminals `A` are the tokens which are pushed by the reduce actions of
    the epsilon transitions. The epsilon transitions `#1` are the actions
    which have to be executed as code by the parser.

    A parse table is inconsistent if there is any state which has both an
    epsilon transition and terminal/non-terminal transitions (shift-reduce
    conflict), or a state with more than one epsilon transition
    (reduce-reduce conflict). This is equivalent to having a non-deterministic
    state machine."""
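    # A hypothetical sketch of such a matrix (not derived from a real
    # grammar), using the notation of the docstring above: terminals `a`/`b`,
    # non-terminal `A`, and one epsilon transition `#1`; blank cells are parse
    # errors:
    #
    #            a    b    A    #1
    #         +----+----+----+----+
    #     s1  | s2 |    | s4 |    |
    #     s2  |    |    |    | s3 |
    #     s3  |    | s1 |    |    |
    #
    # In s1, shifting `a` goes to s2; s2 unconditionally executes action #1
    # and jumps to s3; reducing to `A` while in s1 shifts the non-terminal
    # and goes to s4. This table is consistent: no row mixes shift columns
    # with `#1`, and no row has two `#1` entries.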
    # Map of action identifiers (indexes) to the corresponding objects.
actions: typing.List[Action]
# Map of state identifier to the corresponding object.
states: typing.List[StateAndTransitions]
# Hash table of state objects, ensuring we never have two equal states.
state_cache: typing.Dict[StateAndTransitions, StateAndTransitions]
    # List of (Nt, StateId) tuples which are the entry points of the state
    # machine.
named_goals: typing.List[typing.Tuple[Nt, StateId]]
# Set of all terminals.
terminals: OrderedFrozenSet[typing.Union[str, End]]
# List of non-terminals.
nonterminals: typing.List[Nt]
    # Carry the info to be used when generating debug_context. If False,
    # then no debug_context is ever produced.
debug_info: typing.Union[bool, DebugInfo]
    # Execution modes are used by the code generator to decide which
    # function is executed when. This is a dictionary of OrderedSets, where
    # the keys are the various parsing modes, and the mapped set contains
    # the list of traits which have to be implemented, and consequently
    # which functions would be encoded.
exec_modes: typing.Optional[typing.DefaultDict[str, OrderedSet[types.Type]]]
    # True if the parse table might be inconsistent. When this is False, we
    # add extra assertions when computing the reduce path.
assume_inconsistent: bool
    def __init__(self,
                 grammar: CanonicalGrammar,
                 verbose: bool = False,
                 progress: bool = False,
                 debug: bool = False) -> None:
        self.actions = []
        self.states = []
        self.state_cache = {}
        self.named_goals = []
        self.terminals = grammar.grammar.terminals
        self.nonterminals = typing.cast(
            typing.List[Nt],
            list(grammar.grammar.nonterminals.keys()))
        # typing.cast() doesn't actually check at run time, so let's do that:
        assert all(isinstance(nt, Nt) for nt in self.nonterminals)
self.debug_info = debug
self.exec_modes = grammar.grammar.exec_modes
self.assume_inconsistent = True
self.create_lr0_table(grammar, verbose, progress)
        self.fix_inconsistent_table(verbose, progress)
        # TODO: Optimize chains of actions into sequences.
        # Optimize by removing unused states.
        self.remove_all_unreachable_state(verbose, progress)
        # TODO: Statically compute replayed terms. (maybe?)
        # Replace reduce actions by programmatic stack manipulation.
        self.lower_reduce_actions(verbose, progress)
        # Fold Replay followed by Unwind instructions.
        self.fold_replay_unwind(verbose, progress)
        # Fold paths which have the same ending.
        self.fold_identical_endings(verbose, progress)
        # Group states with similar non-terminal edges close by, to improve
        # the generated Rust code by grouping matched state numbers.
        self.group_nonterminal_states(verbose, progress)
        # Split shift states from epsilon states.
        # self.group_epsilon_states(verbose, progress)
    def save(self, filename: os.PathLike) -> None:
        with open(filename, 'wb') as f:
pickle.dump(self, f)
    @classmethod
    def load(cls, filename: os.PathLike) -> ParseTable:
        with open(filename, 'rb') as f:
            obj = pickle.load(f)
            if len(f.read()) != 0:
                raise ValueError("file has unexpected extra bytes at end")
        if not isinstance(obj, cls):
            raise TypeError("file contains wrong kind of object: expected {}, got {}"
                            .format(cls.__name__, obj.__class__.__name__))
        return obj
    def is_inconsistent(self) -> bool:
        "Returns True if the grammar contains any inconsistent state."
        for s in self.states:
            if s is not None and s.is_inconsistent():
                return True
        return False
    def rewrite_state_indexes(self, state_map: typing.Dict[StateId, StateId]) -> None:
        for s in self.states:
            if s is not None:
s.rewrite_state_indexes(state_map)
self.named_goals = [
(nt, state_map[s]) for nt, s in self.named_goals
]
        # After a rewrite, multiple actions (conditions) might jump to the
        # same target. Attempt to fold these conditions based on having the
        # same target. If we can merge them, then remove the previous edges
        # (updating the backedges of successor states) and replace them by the
        # newly created edges.
        for s in self.states:
            if s is not None and len(s.epsilon) != 0:
                epsilon_by_dest = collections.defaultdict(list)
                for k, d in s.epsilon:
                    epsilon_by_dest[d].append(k)
                for d, ks in epsilon_by_dest.items():
                    if len(ks) == 1:
                        continue
                    new_ks = ks[0].fold_by_destination(ks)
                    if new_ks == ks:
                        continue
                    # This collection is required by `remove_edge`, but in
                    # this particular case we know for sure that at least one
                    # edge would be added back. Therefore no need to use the
                    # content of the set.
                    maybe_unreachable_set: OrderedSet[StateId] = OrderedSet()
                    assert len(new_ks) > 0
                    for k in ks:
                        self.remove_edge(s, k, maybe_unreachable_set)
                    for k in new_ks:
                        self.add_edge(s, k, d)
self.assert_table_invariants()
def rewrite_reordered_state_indexes(self) -> None:
state_map = {
            s.index: i for i, s in enumerate(self.states) if s is not None
}
self.rewrite_state_indexes(state_map)
def new_state(
self,
locations: OrderedFrozenSet[str],
delayed_actions: OrderedFrozenSet[DelayedAction] = OrderedFrozenSet(),
arguments: int = 0
    ) -> typing.Tuple[bool, StateAndTransitions]:
        """Get or create a state with the given LR0 locations and delayed
        actions. Returns a tuple where the first element is whether the state
        is newly created, and the second element is the State object."""
        index = len(self.states)
        state = StateAndTransitions(index, locations, delayed_actions, arguments)
        try:
            return False, self.state_cache[state]
        except KeyError:
            self.state_cache[state] = state
            self.states.append(state)
            return True, state
def get_state(
self,
locations: OrderedFrozenSet[str],
delayed_actions: OrderedFrozenSet[DelayedAction] = OrderedFrozenSet(),
arguments: int = 0
    ) -> StateAndTransitions:
        """Like new_state(), but only returns the state, without telling
        whether it is newly created or not."""
        _, state = self.new_state(locations, delayed_actions, arguments)
        return state
def remove_state(self, s: StateId, maybe_unreachable_set: OrderedSet[StateId]) -> None:
state = self.states[s]
        self.clear_edges(state, maybe_unreachable_set)
        del self.state_cache[state]
# "type: ignore" because the type annotation on `states` doesn't allow # entries to be `None`.
        self.states[s] = None  # type: ignore
def add_edge(
self,
src: StateAndTransitions,
term: Term,
dest: StateId
    ) -> None:
        assert term not in src
        assert dest < len(self.states)
        if isinstance(term, Action):
            src.epsilon.append((term, dest))
        elif isinstance(term, Nt):
            src.nonterminals[term] = dest
        elif isinstance(term, ErrorSymbol):
            src.errors[term] = dest
        else:
            src.terminals[term] = dest
self.states[dest].backedges.add(Edge(src.index, term))
    def replace_edge(
            self,
            src: StateAndTransitions,
            term: Term,
            dest: StateId,
            maybe_unreachable_set: OrderedSet[StateId]
    ) -> None:
        assert isinstance(dest, int) and dest < len(self.states)

        edge_existed = term in src
        if edge_existed:
            old_dest = src[term]
            self.remove_backedge(src, term, old_dest, maybe_unreachable_set)
if isinstance(term, Action):
src.epsilon = [(t, d) for t, d in src.epsilon if t != term]
            src.epsilon.append((term, dest))
        elif isinstance(term, Nt):
            src.nonterminals[term] = dest
        elif isinstance(term, ErrorSymbol):
            src.errors[term] = dest
        else:
src.terminals[term] = dest
self.states[dest].backedges.add(Edge(src.index, term))
self.assert_state_invariants(src)
        self.assert_state_invariants(dest)
        if edge_existed:
self.assert_state_invariants(old_dest)
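    # Sketch of the backedge-removal helper assumed by replace_edge,
    # remove_edge and clear_edges; its signature and body here are
    # reconstructed from those call sites.
    def remove_backedge(
            self,
            src: StateAndTransitions,
            term: Term,
            dest: StateId,
            maybe_unreachable_set: OrderedSet[StateId]
    ) -> None:
        # Drop the reverse edge, and remember that `dest` might have become
        # unreachable so that remove_unreachable_states can check it later.
        self.states[dest].backedges.remove(Edge(src.index, term))
        maybe_unreachable_set.add(dest)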
def remove_edge(
self,
src: StateAndTransitions,
term: Term,
maybe_unreachable_set: OrderedSet[StateId]
) -> None:
        edge_existed = term in src
        if edge_existed:
            old_dest = src[term]
            self.remove_backedge(src, term, old_dest, maybe_unreachable_set)
        if isinstance(term, Action):
            src.epsilon = [(t, d) for t, d in src.epsilon if t != term]
        elif isinstance(term, Nt):
            del src.nonterminals[term]
        elif isinstance(term, ErrorSymbol):
            del src.errors[term]
        else:
            del src.terminals[term]
        self.assert_state_invariants(src)
        if edge_existed:
            self.assert_state_invariants(old_dest)
def clear_edges(
self,
src: StateAndTransitions,
maybe_unreachable_set: OrderedSet[StateId]
) -> None: """Remove all existing edges, in order to replace them by new one. This is used
when resolving shift-reduce conflicts.""" assert isinstance(src, StateAndTransitions)
old_dest = [] for term, dest in src.edges():
self.remove_backedge(src, term, dest, maybe_unreachable_set)
old_dest.append(dest)
src.terminals = {}
src.nonterminals = {}
src.errors = {}
src.epsilon = []
        self.assert_state_invariants(src)
        for dest in old_dest:
self.assert_state_invariants(dest)
    def assert_table_invariants(self) -> None:
        for s in self.states:
            if s is not None:
self.assert_state_invariants(s)
    def assert_state_invariants(self, src: typing.Union[StateId, StateAndTransitions]) -> None:
        if not self.debug_info:
            return
        if isinstance(src, int):
            src = self.states[src]
        assert isinstance(src, StateAndTransitions)
        try:
            for term, dest in src.edges():
                assert Edge(src.index, term) in self.states[dest].backedges
            for e in src.backedges:
                assert e.term is not None
                assert self.states[e.src][e.term] == src.index
            if not self.assume_inconsistent:
                assert not src.is_inconsistent()
        except AssertionError as exc:
            print("assert_state_invariants for {}\n".format(src))
            for e in src.backedges:
                print("backedge {} from {}\n".format(e, self.states[e.src]))
            raise exc
def remove_unreachable_states(
self,
maybe_unreachable_set: OrderedSet[StateId]
    ) -> None:
        # TODO: This function is incomplete in case of loops: some cycles
        # might remain isolated while not being reachable from the init
        # states. We should maintain a notion of depth per-state, such that we
        # can identify loops by noticing that all backedges have a larger
        # depth than the current state.
        init: OrderedSet[StateId]
        init = OrderedSet(goal for name, goal in self.named_goals)
        while maybe_unreachable_set:
            next_set: OrderedSet[StateId] = OrderedSet()
            for s in maybe_unreachable_set:
                # Check whether the state is reachable; if not, remove the
                # state and fill next_set with all outgoing edges.
                if len(self.states[s].backedges) == 0 and s not in init:
                    self.remove_state(s, next_set)
            maybe_unreachable_set = next_set
    def is_reachable_state(self, s: StateId) -> bool:
        """Check whether the given state is reachable or not."""
        if self.states[s] is None:
            return False
        reachable_back: OrderedSet[StateId] = OrderedSet()
        todo = [s]
        while todo:
            s = todo.pop()
            reachable_back.add(s)
            for edge in self.states[s].backedges:
                if edge.src not in reachable_back:
                    todo.append(edge.src)
        for _, s in self.named_goals:
            if s in reachable_back:
                return True
        return False
    def debug_dump(self) -> None:
        # Sort the grammar by state hash, such that it can be compared
        # before/after grammar modifications.
        temp = [s for s in self.states if s is not None]
        temp = sorted(temp, key=lambda s: s.stable_hash)
        for s in temp:
print(s.stable_str(self.states))
def create_lr0_table(
self,
grammar: CanonicalGrammar,
verbose: bool,
progress: bool
    ) -> None:
        if verbose or progress:
print("Create LR(0) parse table.")
# Temporary work queue.
todo: typing.Deque[typing.Tuple[LR0Generator, StateAndTransitions]]
todo = collections.deque()
        # Goal non-terminals of this grammar.
        goals = grammar.grammar.goals()

        # Record the starting goals in the todo list.
        for nt in goals:
init_nt = Nt(InitNt(nt), ())
it = LR0Generator.start(grammar, init_nt)
s = self.get_state(it.stable_locations())
todo.append((it, s))
self.named_goals.append((nt, s.index))
        # Iterate the grammar with sets of LR Items abstracted by the
        # LR0Generator, and create new states in the parse table as long as
        # new sets of LR Items are discovered.
        def visit_grammar() -> typing.Iterator[None]:
            while todo:
                yield  # progress bar.
                # TODO: Compare stack / queue, for the traversal of the states.
                s_it, s = todo.popleft()
                if verbose:
                    print("\nMapping state {} to LR0:\n{}".format(s.stable_hash, s_it))
                for k, sk_it in s_it.transitions().items():
                    locations = sk_it.stable_locations()
                    if not self.term_is_shifted(k):
                        locations = OrderedFrozenSet()
                    is_new, sk = self.new_state(locations)
                    if is_new:
                        todo.append((sk_it, sk))
                    # Add the edge from s to sk with k.
                    self.add_edge(s, k, sk.index)
consume(visit_grammar(), progress)
if verbose:
print("Create LR(0) Table Result:")
self.debug_dump()
def is_valid_path(
self,
path: typing.Sequence[Edge],
state: typing.Optional[StateId] = None
) -> bool: """This function is used to check a list of edges and returns whether it
corresponds to a valid path within the parse table. This is useful when
merging sequences of edges from various locations.""" ifnot state and path != []:
state = path[0].src while path:
edge = path[0]
path = path[1:] if state != edge.src: returnFalse assert isinstance(state, StateId)
term = edge.term if term isNoneand len(path) == 0: returnTrue
row = self.states[state] if term notin row: returnFalse assert term isnotNone
state = row[term] returnTrue
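    # Sketch of the shifted-term predicate used by create_lr0_table and the
    # lane computations below; reconstructed under the assumption that a term
    # is shifted unless it is an Action which does not follow its edge
    # (i.e. Action is assumed to expose a follow_edge() method).
    def term_is_shifted(self, term: typing.Optional[Term]) -> bool:
        return not isinstance(term, Action) or term.follow_edge()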
    def term_is_stacked(self, term: typing.Optional[Term]) -> bool:
        # The `term` argument is annotated as Optional because `Edge.term` is
        # a common argument. If it's ever None in practice, the caller has a
        # bug.
        assert term is not None
        return not isinstance(term, Action)
    def aps_visitor(self, aps: APS, visit: typing.Callable[[APS], bool]) -> None:
        """Visit all the states of the parse table, as if we were running a
        Generalized LR parser.

        However, instead of parsing content, we use this algorithm to generate
        both the content which remains to be parsed as well as the context
        which might have led us to the state from which we started.

        This algorithm takes an APS (Abstract Parser State) and a callback,
        and considers all edges of the parse table, unless restricted by one
        of the previously encountered actions. These restrictions, such as
        replayed lookahead or the path which might be reduced, are used to
        filter out states which are not handled by this parse table.

        For each edge, this function calls the visit function to know whether
        to stop or continue. The visit function might capture the APS given as
        argument, to be used for other analyses.
"""
        todo = [aps]
        while todo:
            aps = todo.pop()
            cont = visit(aps)
            if not cont:
                continue
            todo.extend(aps.shift_next(self))
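    # A minimal usage sketch for aps_visitor (with hypothetical `table` and
    # `state_id` values): collect every APS which has accumulated at least one
    # lookahead token, and stop exploring each lane at that point. This
    # mirrors the pattern used by lookahead_lanes below.
    #
    #     found = []
    #
    #     def visit(aps: APS) -> bool:
    #         if len(aps.lookahead) >= 1:
    #             found.append(aps)
    #             return False  # Stop exploring this lane.
    #         return True       # Keep shifting the next edges.
    #
    #     table.aps_visitor(APS.start(state_id), visit)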
    def context_lanes(self, state: StateId) -> typing.Tuple[bool, typing.List[APS]]:
        """Compute lanes, such that each reduce action can have a set of
        unique stacks which reach the given state. The stacks are assumed to
        be loop-free, by reducing edges at most once.

        In order to avoid attempting to eagerly solve everything using context
        information, we break this loop as soon as we have one token of
        lookahead in a case which does not have enough context.

        The return value is a tuple where the first element is a boolean which
        is True if we should fall back on solving this issue with more
        lookahead, and the second is the list of APS lanes which provide
        enough context to disambiguate the inconsistency of the given state."""
def not_interesting(aps: APS) -> bool:
reduce_list = [e for e in aps.history if self.term_is_shifted(e.term)]
            has_reduce_loop = len(reduce_list) != len(set(reduce_list))
            return has_reduce_loop
        # The context is a dictionary which maps all stack suffixes from an
        # APS stack. It is mapped to a list of tuples, where each tuple is the
        # index within the APS stack and the APS action used to follow this
        # path.
context: typing.DefaultDict[typing.Tuple[Edge, ...], typing.List[Edge]]
context = collections.defaultdict(lambda: [])
        def has_enough_context(aps: APS) -> bool:
            try:
                assert aps.history[0] in context[tuple(aps.stack)]
                # Check the number of different actions which can reach this
                # location. If there is more than 1, then we do not have
                # enough context.
                return len(set(context[tuple(aps.stack)])) <= 1
            except IndexError:
                return False
collect = [APS.start(state)]
        enough_context = False
        while not enough_context:
            # print("collect.len = {}".format(len(collect)))
            # Fill the context dictionary with all the sub-stacks which might
            # be encountered by other APS.
            recurse = []
            context = collections.defaultdict(lambda: [])
            while collect:
                aps = collect.pop()
                recurse.append(aps)
                if aps.history == []:
                    continue
                for i in range(len(aps.stack)):
                    context[tuple(aps.stack[i:])].append(aps.history[0])
            assert collect == []

            # print("recurse.len = {}".format(len(recurse)))
            # Iterate over APS which do not yet have enough context
            # information to uniquely identify a single action.
            enough_context = True
            while recurse:
                aps = recurse.pop()
                if not_interesting(aps):
                    # print("discard uninteresting context lane:")
                    # print(aps.string("\tcontext"))
                    continue
                if has_enough_context(aps):
                    collect.append(aps)
                    continue
                # If we do not have enough context but some lookahead is
                # available, attempt to first solve this issue using more
                # lookahead before attempting to use context information.
                if len(aps.lookahead) >= 1:
                    # print("discard context_lanes due to lookahead:")
                    # for aps in itertools.chain(collect, recurse, [aps]):
                    #     print(aps.string("\tcontext"))
                    return True, []
                enough_context = False
                # print("extend starting at:\n{}".format(aps.string("\tcontext")))
                collect.extend(aps.shift_next(self))
            assert recurse == []

        # print("context_lanes:")
        # for aps in collect:
        #     print(aps.string("\tcontext"))
        return False, collect
    def lookahead_lanes(self, state: StateId) -> typing.List[APS]:
        """Compute lanes to collect all lookahead symbols available. After
        each reduce action, there is no need to consider the same non-terminal
        multiple times; we are only interested in the lookahead tokens, and
        not in the context information provided by the reducing action."""
        record = []
        # After the first reduce action, we do not want to spend too many
        # resources visiting edges which would give us the same information.
        # Therefore, if we have already reduced an action to a given state,
        # then we skip looking for lookahead tokens that we already visited.
        #
        # Set of (first-reduce-edge, reducing-base, last-reduce-edge)
seen_edge_after_reduce: typing.Set[typing.Tuple[Edge, StateId, typing.Optional[Term]]]
seen_edge_after_reduce = set()
        def find_first_reduce(
                edges: Path
        ) -> typing.Tuple[int, typing.Optional[Edge]]:
            for i, edge in enumerate(edges):
                if not self.term_is_shifted(edge.term):
                    return i, edge
            return 0, None

        def find_last_reduce(
                edges: Path
        ) -> typing.Tuple[int, typing.Optional[Edge]]:
            for i, edge in zip(reversed(range(len(edges))), reversed(edges)):
                if not self.term_is_shifted(edge.term):
                    return i, edge
            return 0, None
        def visit(aps: APS) -> bool:
            # Note: this assumes that we are not considering flags when
            # computing, as flags might prevent some lookahead investigations.
            reduce_key = None
            first_index, first_reduce = find_first_reduce(aps.history)
            last_index, last_reduce = find_last_reduce(aps.history)
            if first_index != last_index and first_reduce and last_reduce:
                if not isinstance(aps.history[-1].term, Action):
                    reduce_key = (first_reduce, aps.shift[0].src, last_reduce.term)
            has_seen_edge_after_reduce = reduce_key and reduce_key in seen_edge_after_reduce
            has_lookahead = len(aps.lookahead) >= 1
            stop = has_seen_edge_after_reduce or has_lookahead
            # print("stop: {}, size lookahead: {}, seen_edge_after_reduce: {}".format(
            #     stop, len(aps.lookahead), repr(reduce_key)))
            # print(aps.string("\tvisitor"))
            if stop:
                if has_lookahead:
                    record.append(aps)
            if reduce_key:
                seen_edge_after_reduce.add(reduce_key)
            return not stop
        self.aps_visitor(APS.start(state), visit)
        return record
    def fix_with_context(self, s: StateId, aps_lanes: typing.List[APS]) -> None:
        raise ValueError("fix_with_context: Not Implemented")
        # # This strategy is about using context information. By using chains
        # # of reduce actions, we are able to increase the knowledge of the
        # # stack content. The stack content is the context which can be used
        # # to determine how to consider a reduction. The stack content is
        # # also called a lane, as defined in the Lane Table algorithm.
        # #
        # # To add context information to the current graph, we add flags
        # # manipulation actions.
        # #
        # # Consider each edge as having an implicit function which can map
        # # one flag value to another. The following implements a unification
        # # algorithm which is attempting to solve the question of what is the
        # # flag value, and where it should be changed.
        # #
        # # NOTE: (nbp) I would not be surprised if there is a more
        # # specialized algorithm, but I failed to find one so far, and this
        # # problem definitely looks like a unification problem.
        # Id = collections.namedtuple("Id", "edge")
        # Eq = collections.namedtuple("Eq", "flag_in edge flag_out")
        # Var = collections.namedtuple("Var", "n")
        # SubSt = collections.namedtuple("SubSt", "var by")
        #
        # # Unify expressions, and return one substitution if both expressions
        # # can be unified.
        # def unify_expr(expr1, expr2, swapped=False):
        #     if isinstance(expr1, Eq) and isinstance(expr2, Id):
        #         if expr1.edge != expr2.edge:
        #             # Different edges are ok, but produce no substitutions.
        #             return True
        #         if isinstance(expr1.flag_in, Var):
        #             return SubSt(expr1.flag_in, expr1.flag_out)
        #         if isinstance(expr1.flag_out, Var):
        #             return SubSt(expr1.flag_out, expr1.flag_in)
        #         # We are unifying with a relation which considers the current
        #         # function as an identity function. Having different values
        #         # as input and output fails the unification rule.
        #         return expr1.flag_out == expr1.flag_in
        #     if isinstance(expr1, Eq) and isinstance(expr2, Eq):
        #         if expr1.edge != expr2.edge:
        #             # Different edges are ok, but produce no substitutions.
        #             return True
        #         if expr1.flag_in is None and isinstance(expr2.flag_in, Var):
        #             return SubSt(expr2.flag_in, None)
        #         if expr1.flag_out is None and isinstance(expr2.flag_out, Var):
        #             return SubSt(expr2.flag_out, None)
        #         if expr1.flag_in == expr2.flag_in:
        #             if isinstance(expr1.flag_out, Var):
        #                 return SubSt(expr1.flag_out, expr2.flag_out)
        #             elif isinstance(expr2.flag_out, Var):
        #                 return SubSt(expr2.flag_out, expr1.flag_out)
        #             # Reject solutions which are not deterministic. We do not
        #             # want the same input flag to have multiple outputs.
        #             return expr1.flag_out == expr2.flag_out
        #         if expr1.flag_out == expr2.flag_out:
        #             if isinstance(expr1.flag_in, Var):
        #                 return SubSt(expr1.flag_in, expr2.flag_in)
        #             elif isinstance(expr2.flag_in, Var):
        #                 return SubSt(expr2.flag_in, expr1.flag_in)
        #             return True
        #     if not swapped:
        #         return unify_expr(expr2, expr1, True)
        #     return True
        #
        # # Apply a substitution rule to an expression.
        # def subst_expr(subst, expr):
        #     if expr == subst.var:
        #         return True, subst.by
        #     if isinstance(expr, Eq):
        #         subst1, flag_in = subst_expr(subst, expr.flag_in)
        #         subst2, flag_out = subst_expr(subst, expr.flag_out)
        #         return subst1 or subst2, Eq(flag_in, expr.edge, flag_out)
        #     return False, expr
        #
        # # Add an expression to an existing knowledge base which is relying
        # # on a set of free variables.
        # def unify_with(expr, knowledge, free_vars):
        #     old_knowledge = knowledge
        #     old_free_vars = free_vars
        #     while True:
        #         subst = None
        #         for rel in knowledge:
        #             subst = unify_expr(rel, expr)
        #             if subst is False:
        #                 raise Error("Failed to find a coherent solution")
        #             if subst is True:
        #                 continue
        #             break
        #         else:
        #             return knowledge + [expr], free_vars
        #         free_vars = [fv for fv in free_vars if fv != subst.var]
        #         # Substitute variables, and re-add rules which have
        #         # substituted vars to check the changes to other rules, in
        #         # case 2 rules are now in conflict or in case we can
        #         # propagate more variable changes.
        #         subst_rules = [subst_expr(subst, k) for k in knowledge]
        #         knowledge = [rule for changed, rule in subst_rules if not changed]
        #         for changed, rule in subst_rules:
        #             if not changed:
        #                 continue
        #             knowledge, free_vars = unify_with(rule, knowledge, free_vars)
        #
        # # Register boundary conditions as part of the knowledge base, i.e.
        # # that reduce actions are expecting to see the flag value matching
        # # the reduced non-terminal, and that we have no flag value at the
        # # start of every lane head.
        # #
        # # TODO: Catch exceptions from the unify function in case we do not
        # # yet have enough context to disambiguate.
        # rules = []
        # free_vars = []
        # last_free = 0
        # maybe_id_edges = set()
        # nts = set()
        # for aps in aps_lanes:
        #     assert len(aps.stack) >= 1
        #     flag_in = None
        #     for edge in aps.stack[-1]:
        #         i = last_free
        #         last_free += 1
        #         free_vars.append(Var(i))
        #         rule = Eq(flag_in, edge, Var(i))
        #         rules, free_vars = unify_with(rule, rules, free_vars)
        #         flag_in = Var(i)
        #         if flag_in is not None:
        #             maybe_id_edges.add(Id(edge))
        #     edge = aps.stack[-1]
        #     nt = edge.term.update_stack_with().nt
        #     rule = Eq(nt, edge, None)
        #     rules, free_vars = unify_with(rule, rules, free_vars)
        #     nts.add(nt)
        #
        # # We want to produce a parse table where most of the nodes are
        # # ignoring the content of the flag which is being added. Thus we
        # # want to find a solution where most edges are the identity
        # # function.
        # def fill_with_id_functions(rules, free_vars, maybe_id_edges):
        #     min_rules, min_vars = rules, free_vars
        #     for num_id_edges in reversed(range(len(maybe_id_edges))):
        #         for id_edges in itertools.combinations(edges, num_id_edges):
        #             for edge in id_edges:
        #                 new_rules, new_free_vars = unify_with(rule, rules, free_vars)
        #                 if new_free_vars == []:
        #                     return new_rules, new_free_vars
        #                 if len(new_free_vars) < len(min_free_vars):
        #                     min_vars = new_free_vars
        #                     min_rules = new_rules
        #     return rules, free_vars
        #
        # rules, free_vars = fill_with_id_functions(rules, free_vars, maybe_id_edges)
        # if free_vars != []:
        #     raise Error("Hum … maybe we can try to iterate over the remaining free-variable.")
        # print("debug: Great we found a solution for a reduce-reduce conflict")
        #
        # # The set of rules describes the function that each edge is expected
        # # to support. If there is an Id(edge), then we know that we do not
        # # have to change the graph for the given edge. If the rule is
        # # Eq(A, edge, B), then we have to (filter A & pop) and push B,
        # # except if A or B is None.
        # #
        # # For each edge, collect the set of rules concerning the edge to
        # # determine which edges have to be transformed to add the filter&pop
        # # and push actions.
        # edge_rules = collections.defaultdict(lambda: [])
        # for rule in rules:
        #     if isinstance(rule, Id):
        #         edge_rules[rule.edge] = None
        #     elif isinstance(rule, Eq):
        #         if edge_rules[rule.edge] is not None:
        #             edge_rules[rule.edge].append(rule)
        #
        # maybe_unreachable_set = set()
        # flag_name = self.get_flag_for(nts)
        # for edge, rules in edge_rules.items():
        #     # If the edge is an identity function, then skip doing any
        #     # modifications on it.
        #     if rules is None:
        #         continue
        #     # Otherwise, create a new state and transition for each mapping.
        #     src = self.states[edge.src]
        #     dest = src[edge.term]
        #     dest_state = self.states[dest]
        #     # TODO: Add some information to avoid having identical hashes as
        #     # the destination.
        #     actions = []
        #     for rule in OrderedFrozenSet(rules):
        #         assert isinstance(rule, Eq)
        #         seq = []
        #         if rule.flag_in is not None:
        #             seq.append(FilterFlag(flag_name, True))
        #             if rule.flag_in != rule.flag_out:
        #                 seq.append(PopFlag(flag_name))
        #         if rule.flag_out is not None and rule.flag_in != rule.flag_out:
        #             seq.append(PushFlag(flag_name, rule.flag_out))
        #         actions.append(Seq(seq))
        #     # Assert that we do not map flag_in more than once.
        #     assert len(set(eq.flag_in for eq in rules)) < len(rules)
        #     # Create the new state and add edges.
        #     is_new, switch = self.new_state(dest.locations, OrderedFrozenSet(actions))
        #     assert is_new
        #     for seq in actions:
        #         self.add_edge(switch, seq, dest)
        #
        #     # Replace the edge from src to dest, by an edge from src to the
        #     # newly created switch state, which then decides which flag to
        #     # set before going to the destination target.
        #     self.replace_edge(src, edge.term, switch, maybe_unreachable_set)
        #
        # self.remove_unreachable_states(maybe_unreachable_set)
        # pass
    def fix_with_lookahead(self, s: StateId, aps_lanes: typing.List[APS]) -> None:
        # Find the list of terminals following each action (even reduce
        # actions).
        assert all(len(aps.lookahead) >= 1 for aps in aps_lanes)
        if self.debug_info:
            for aps in aps_lanes:
print(str(aps))
maybe_unreachable_set: OrderedSet[StateId] = OrderedSet()
        # For each shifted term, associate the set of states and actions which
        # would have to be executed.
shift_map: typing.DefaultDict[
Term,
typing.List[typing.Tuple[StateAndTransitions, typing.List[Edge]]]
]
        shift_map = collections.defaultdict(lambda: [])
        for aps in aps_lanes:
            actions = aps.history
            assert isinstance(actions[-1], Edge)
            src = actions[-1].src
            term = actions[-1].term
            assert term == aps.lookahead[0]
            assert isinstance(term, (str, End, ErrorSymbol, Nt))

            # No need to consider any action beyond the first reduced action,
            # since the reduced action is in charge of replaying the lookahead
            # terms.
            actions = list(keep_until(actions[:-1],
                                      lambda edge: not self.term_is_shifted(edge.term)))
            assert all(isinstance(edge.term, Action) for edge in actions)
            # Change the order of the shifted term: shift all actions by 1
            # with the given lookahead term, in order to match the newly
            # generated state machine.
            #
            # Shifting actions with the list of shifted terms is used to
            # record the number of terms to be replayed, as well as verifying
            # whether Lookahead filter actions should accept or reject this
            # lane.
            new_actions = []
            accept = True
            for edge in actions:
                edge_term = edge.term
                assert isinstance(edge_term, Action)
                new_term = edge_term.shifted_action(term)
                if isinstance(new_term, bool):
                    if new_term is False:
                        accept = False
                        break
                    else:
                        continue
                new_actions.append(Edge(edge.src, new_term))
            if accept:
                target_id = self.states[src][term]
                target = self.states[target_id]
                shift_map[term].append((target, new_actions))
        # Restore the new state machine based on a given state to use as a
        # base and the shift_map corresponding to edges.
        def restore_edges(
                state: StateAndTransitions,
                shift_map: typing.DefaultDict[
                    Term,
                    typing.List[typing.Tuple[StateAndTransitions, typing.List[Edge]]]
                ],
                depth: str
        ) -> None:
            # print("{}starting with {}\n".format(depth, state))
            edges = {}
            for term, actions_list in shift_map.items():
                # print("{}term: {}, lists: {}\n".format(depth, repr(term), repr(actions_list)))
                # Collect all the states reachable after shifting the term.
                # Compute the unique name, based on the locations and actions
                # which are delayed.
locations: OrderedSet[str] = OrderedSet()
delayed: OrderedSet[DelayedAction] = OrderedSet()
new_shift_map: typing.DefaultDict[
Term,
typing.List[typing.Tuple[StateAndTransitions, typing.List[Edge]]]
]
new_shift_map = collections.defaultdict(lambda: [])
recurse = False ifnot self.term_is_shifted(term): # There is no more target after a reduce action.
actions_list = [] for target, actions in actions_list: assert isinstance(target, StateAndTransitions)
locations |= target.locations
delayed |= target.delayed_actions if actions != []: # Pull edges, with delayed actions.
edge = actions[0] assert isinstance(edge, Edge) for action in actions:
action_term = action.term assert isinstance(action_term, Action)
delayed.add(action_term)
edge_term = edge.term assert edge_term isnotNone
new_shift_map[edge_term].append((target, actions[1:]))
recurse = True else: # Pull edges, as a copy of existing edges. for next_term, next_dest_id in target.edges():
next_dest = self.states[next_dest_id]
new_shift_map[next_term].append((next_dest, []))
is_new, new_target = self.new_state(
OrderedFrozenSet(locations), OrderedFrozenSet(delayed))
                edges[term] = new_target.index
                if self.debug_info:
                    print("{}is_new = {}, index = {}".format(depth, is_new, new_target.index))
                    print("{}Add: {} -- {} --> {}".format(
                        depth, state.index, str(term), new_target.index))
                    print("{}continue: (is_new: {}) or (recurse: {})".format(
                        depth, is_new, recurse))
                if is_new or recurse:
restore_edges(new_target, new_shift_map, depth + " ")
            self.clear_edges(state, maybe_unreachable_set)
            for term, target_id in edges.items():
                self.add_edge(state, term, target_id)
            if self.debug_info:
print("{}replaced by {}\n".format(depth, state))
state = self.states[s]
restore_edges(state, shift_map, "")
self.remove_unreachable_states(maybe_unreachable_set)
    def fix_inconsistent_state(self, s: StateId, verbose: bool) -> bool:
        # Fixing inconsistent states works one state at a time. The goal is to
        # achieve the same result as the Lane Tracer, but instead of building
        # a table and then mutating the parse state, we mutate the parse state
        # directly.
        #
        # This strategy is simpler, and should be able to reproduce the same
        # graph mutations as seen with the Lane Table algorithm. One of the
        # problems with the Lane Table algorithm is that it assumes reduce
        # operations, and as such it does not apply simply to epsilon
        # transitions which are used as conditions on the parse table.
        #
        # By using push-flag and filter-flag actions, we are capable of
        # decomposing the Lane Table transformation of the parse table into
        # multiple steps which are working one step at a time, and with less
        # table state duplication.

        state = self.states[s]
        if state is None or not state.is_inconsistent():
            return False
all_reduce = all(a.update_stack() for a, _ in state.epsilon)
any_shift = (len(state.terminals) + len(state.nonterminals) + len(state.errors)) > 0
        try_with_context = all_reduce and not any_shift
        try_with_lookahead = not try_with_context
        # if verbose:
        #     print(aps_lanes_str(aps_lanes, "fix_inconsistent_state:", "\taps"))
        if try_with_context:
            if verbose:
                print("\tFix with context.")
            try_with_lookahead, aps_lanes = self.context_lanes(s)
            if not try_with_lookahead:
                assert aps_lanes != []
                self.fix_with_context(s, aps_lanes)
            elif verbose:
                print("\tFallback on fixing with lookahead.")
        if try_with_lookahead:
            if verbose:
                print("\tFix with lookahead.")
            aps_lanes = self.lookahead_lanes(s)
            assert aps_lanes != []
            self.fix_with_lookahead(s, aps_lanes)
        return True
    def fix_inconsistent_table(self, verbose: bool, progress: bool) -> None:
        """The parse table might be inconsistent. We fix the parse table by
        looking around the inconsistent states for more context: either by
        looking at the potential stack states which might lead to the
        inconsistent state, or by increasing the lookahead."""
        self.assume_inconsistent = True
        if verbose or progress:
            print("Fix parse table inconsistencies.")

        todo: typing.Deque[StateId] = collections.deque()
        for state in self.states:
            if state.is_inconsistent():
                todo.append(state.index)

        if verbose and todo:
            print("\n".join([
                "\nGrammar is inconsistent.",
                "\tNumber of States = {}",
                "\tNumber of inconsistencies found = {}"]).format(
len(self.states), len(todo)))
count = 0
        def visit_table() -> typing.Iterator[None]:
            nonlocal count
            unreachable = []
            while todo:
                while todo:
                    yield  # progress bar.
                    # TODO: Compare stack / queue, for the traversal of the states.
                    s = todo.popleft()
                    if not self.is_reachable_state(s):
                        # NOTE: We do not fix unreachable states, as we might
                        # not be able to compute the reduce actions. However,
                        # we should not clean edges nor backedges, as the
                        # state might become reachable later on, since states
                        # are shared if they have the same locations.
                        unreachable.append(s)
                        continue
                    assert self.states[s].is_inconsistent()
                    start_len = len(self.states)
                    if verbose:
                        count = count + 1
                        print("Fixing state {}\n".format(
                            self.states[s].stable_str(self.states)))
                    try:
                        self.fix_inconsistent_state(s, verbose)
                    except Exception as exc:
                        self.debug_info = True
                        raise ValueError(
                            "Error while fixing conflict in state {}\n\n"
                            "In the following grammar productions:\n{}"
                            .format(self.states[s].stable_str(self.states),
                                    self.debug_context(s, "\n", "\t"))
                        ) from exc
new_inconsistent_states = [
s.index for s in self.states[start_len:] if s.is_inconsistent()
                    ]
                    if verbose:
print("\tAdding {} states".format(len(self.states[start_len:])))
print("\tWith {} inconsistent states".format(len(new_inconsistent_states)))
todo.extend(new_inconsistent_states)
                # Check whether any of the previously inconsistent and
                # unreachable states became reachable. If so, add them back to
                # the todo list.
                still_unreachable = []
                for s in unreachable:
                    if self.is_reachable_state(s):
                        todo.append(s)
                    else:
                        still_unreachable.append(s)
unreachable = still_unreachable
        consume(visit_table(), progress)
        if verbose:
            print("\n".join([
                "\nGrammar is now consistent.",
                "\tNumber of States = {}",
                "\tNumber of inconsistencies solved = {}"]).format(
                    len(self.states), count))
        assert not self.is_inconsistent()
        self.assume_inconsistent = False
if verbose:
print("Fix Inconsistent Table Result:")
self.debug_dump()
def remove_all_unreachable_state(self, verbose: bool, progress: bool) -> None:
        self.states = [s for s in self.states if s is not None]
self.rewrite_reordered_state_indexes()
    def lower_reduce_actions(self, verbose: bool, progress: bool) -> None:
        # Remove Reduce actions and replace them by their programmatic
        # equivalent.
        #
        # This transformation preserves the stack manipulations of the parse
        # table. It only changes them from being implicitly executed by the LR
        # parser to being explicitly executed with actions.
        #
        # This transformation converts the hard-to-predict load of the shift
        # table into a branch which is potentially easier for the processor to
        # predict.
        #
        # A side-effect of this transformation is that it removes the need for
        # replaying non-terminals, thus the backends can safely ignore the
        # ability of the shift function to handle non-terminals.
        if verbose or progress:
print("Lower Reduce actions.")
        def transform() -> typing.Iterator[None]:
            for s in self.states:
                term, _ = next(iter(s.epsilon), (None, None))
                if self.term_is_shifted(term):
                    continue
                assert len(s.epsilon) == 1
                yield  # progress bar.
                # States which might become unreachable once the Reduce edge
                # is removed below; assumed to be collected per reduce state.
                maybe_unreachable_set: OrderedSet[StateId] = OrderedSet()
                reduce_state = s
                if verbose:
                    print("Inlining shift-operation for state {}".format(str(reduce_state)))
                # The reduced_aps should contain all reduced paths of the
                # single Reduce action which is present on this state.
                # However, as states of the graph are shared, some reduced
                # paths might follow the same path and reach the same state.
                #
                # This code collects, for each replayed path, the tops of the
                # stack on top of which these states are replayed.
                aps = APS.start(s.index)
                states_by_replay_term = collections.defaultdict(list)
                # print("Start:\n{}".format(aps.string(name="\titer_aps")))
                # print(s.stable_str(self.states))
                for reduced_aps in aps.shift_next(self):
                    # As long as we have elements to replay, we should only
                    # have a single path for each reduced path. If the next
                    # state contains an action, then we stop here.
iter_aps = reduced_aps
next_is_action = self.states[iter_aps.state].epsilon != []
                    has_replay = iter_aps.replay != []
                    assert next_is_action is False and has_replay is True
                    while (not next_is_action) and has_replay:
                        # print("Step {}:\n{}".format(len(iter_aps.history),
                        #                             iter_aps.string(name="\titer_aps")))
                        next_aps = list(iter_aps.shift_next(self))
                        if len(next_aps) == 0:
                            # Note: this might happen as we are adding
                            # lookahead tokens from any successor; we might not
                            # always have a way to replay all tokens. In such a
                            # case an error should be produced, but in the
                            # meantime, let's use the shift function as usual.
                            break
                        assert len(next_aps) == 1
iter_aps = next_aps[0]
next_is_action = self.states[iter_aps.state].epsilon != []
                        has_replay = iter_aps.replay != []
                    # print("End at {}:\n{}".format(len(iter_aps.history),
                    #                               iter_aps.string(name="\titer_aps")))
                    replay_list = [e.src for e in iter_aps.shift]
                    assert len(replay_list) >= 2
replay_term = Replay(replay_list[1:])
states_by_replay_term[replay_term].append(replay_list[0])
# Create FilterStates actions.
filter_by_replay_term = {
                    replay_term: FilterStates(states)
                    for replay_term, states in states_by_replay_term.items()
}
# Convert the Reduce action to an Unwind action.
                reduce_term, _ = next(iter(s.epsilon))
                if isinstance(reduce_term, Reduce):
                    unwind_term: Action = reduce_term.unwind
                else:
                    assert isinstance(reduce_term, Seq)
                    assert isinstance(reduce_term.actions[-1], Reduce)
                    unwind_term = Seq(list(reduce_term.actions[:-1])
                                      + [reduce_term.actions[-1].unwind])
                # Remove the old Reduce edge if still present.
                # print("Before:\n{}".format(reduce_state.stable_str(self.states)))
self.remove_edge(reduce_state, reduce_term, maybe_unreachable_set)