bison-3.8.2/src/AnnotationList.c
/* IELR's inadequacy annotation list.

   Copyright (C) 2009-2015, 2018-2021 Free Software Foundation, Inc.

   This file is part of Bison, the GNU Compiler Compiler.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <https://www.gnu.org/licenses/>.  */

#include <config.h>

#include "AnnotationList.h"

#include "system.h"

#include "ielr.h"
#include "lalr.h"

/**
 * \pre
 *   - <tt>annotations_obstackp != NULL</tt>.
 * \post
 *   - \c result is a new \c AnnotationList with one node whose:
 *     - \c inadequacyNode member is \c NULL.
 *     - \c contributions member is allocated with \c contribution_count
 *       uninitialized elements.
 *   - All memory was allocated on \c annotations_obstackp.
 */
static AnnotationList*
AnnotationList__alloc_on_obstack (ContributionIndex contribution_count,
                                  struct obstack *annotations_obstackp)
{
  AnnotationList *res;
  size_t contributions_size = contribution_count * sizeof res->contributions[0];
  res = obstack_alloc (annotations_obstackp,
                       offsetof (AnnotationList, contributions)
                       + contributions_size);
  res->next = NULL;
  res->inadequacyNode = NULL;
  return res;
}
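
/* Note on the allocation above: the node is taken from the obstack in one
   piece, sized as the fixed header plus a variable-length tail.  This
   works because `contributions' is the last member of AnnotationList.
   For example, with contribution_count == 2, the request is
   offsetof (AnnotationList, contributions) + 2 * sizeof (Sbitset), which
   leaves two uninitialized Sbitset slots directly after the header.  */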

/**
 * \pre
 *   - <tt>self != NULL</tt>.
 *   - <tt>0 <= ci < self->inadequacyNode->contributionCount</tt>.
 * \post
 *   - \c result = true iff contribution \c ci in \c self represents an
 *     "always" contribution.
 */
static bool
AnnotationList__isContributionAlways (AnnotationList const *self,
                                      ContributionIndex ci)
{
  aver (0 <= ci && ci < self->inadequacyNode->contributionCount);
  return self->contributions[ci] == NULL;
}

/**
 * \pre
 *   - \c self is a single node.
 *   - \c self annotates the same state as every other node in \c list, and
 *     that state has \c nitems kernel items.
 * \post
 *   - If the list \c list already contains an identical annotation to \c self,
 *     \c self was discarded, \c result is false, and the caller is responsible
 *     for the memory of \c self.
 *   - Otherwise, \c list now contains the node \c self, \c result is true, and
 *     \c list assumes responsibility for the memory of \c self.
 *   - The sort in \c list is:
 *     - Sort in reverse order on the unique ID of the associated
 *       inadequacy node.  Because these IDs are assigned in ascending
 *       order, this should mean that the insertion position within an
 *       annotation list is usually near the beginning with other
 *       annotations associated with the same inadequacy.
 *     - Next, sort on the first contribution that is different as follows:
 *       - Sort an always-contribution before a never-contribution before a
 *         potential-contribution.
 *       - Two always-contributions are identical.
 *       - Two never-contributions are identical.
 *       - For two potential-contributions, sort on the contributions' kernel
 *         item bitsets interpreted as binary numbers.
 *   - The sorting has a few effects:
 *     - It accelerates elimination of identical annotations during insertion.
 *     - It determines how the output of \c AnnotationList__debug is sorted.
 *     - Other than that, it's probably not important.
 */
static bool
AnnotationList__insertInto (AnnotationList *self, AnnotationList **list,
                            size_t nitems)
{
  AnnotationList **node;
  for (node = list; *node; node = &(*node)->next)
    {
      int cmp = 0;
      if (self->inadequacyNode->id < (*node)->inadequacyNode->id)
        cmp = 1;
      else if ((*node)->inadequacyNode->id < self->inadequacyNode->id)
        cmp = -1;
      else
        for (ContributionIndex ci = 0;
             cmp == 0 && ci < self->inadequacyNode->contributionCount;
             ++ci)
          {
            if (AnnotationList__isContributionAlways (self, ci))
              {
                if (!AnnotationList__isContributionAlways (*node, ci))
                  cmp = -1;
              }
            else if (AnnotationList__isContributionAlways (*node, ci))
              cmp = 1;
            else
              for (size_t item = 0; cmp == 0 && item < nitems; ++item)
                {
                  if (!Sbitset__test (self->contributions[ci], item))
                    {
                      if (Sbitset__test ((*node)->contributions[ci], item))
                        cmp = -1;
                    }
                  else if (!Sbitset__test ((*node)->contributions[ci], item))
                    cmp = 1;
                }
          }
      if (cmp < 0)
        {
          self->next = *node;
          *node = self;
          break;
        }
      else if (cmp == 0)
        {
          self = NULL;
          break;
        }
    }
  if (!*node)
    *node = self;
  return self != NULL;
}

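/* An illustration of the ordering above (the IDs and contributions are
   invented): given annotations A and B on the same state where A's
   inadequacy node has ID 7 and B's has ID 3, A sorts before B because IDs
   compare in reverse.  If instead both have ID 7 and first differ at
   contribution 2, where A's is an "always" contribution (NULL) and B's is
   a potential contribution (a kernel item bitset), A again sorts first.  */

/**
 * \pre
 *   - \c trans is the transition set of some state.
 * \post
 *   - \c result is a new bitset of all tokens on which that state has a
 *     shift.  The caller is responsible for freeing it.
 */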
static bitset
AnnotationList__compute_shift_tokens (transitions *trans)
{
  bitset shift_tokens = bitset_create (ntokens, BITSET_FIXED);
  int i;
  FOR_EACH_SHIFT (trans, i)
    bitset_set (shift_tokens, TRANSITION_SYMBOL (trans, i));
  return shift_tokens;
}

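/**
 * \pre
 *   - \c shift_tokens is the result of
 *     \c AnnotationList__compute_shift_tokens, and \c reds contains the
 *     state's reductions sorted on rule number.
 * \post
 *   - \c result is a new bitset of all tokens on which the state has two
 *     conflicting actions: each token that is a lookahead of some
 *     reduction and also either a shift token or a lookahead of an
 *     earlier reduction.  For example, with shift tokens {a},
 *     LA(r1) = {a, b}, and LA(r2) = {b}, the result is {a, b}.  The
 *     caller is responsible for freeing it.
 */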
static bitset
AnnotationList__compute_conflicted_tokens (bitset shift_tokens,
                                           reductions *reds)
{
  bitset conflicted_tokens = bitset_create (ntokens, BITSET_FIXED);
  bitset conflicted_tokens_rule = bitset_create (ntokens, BITSET_FIXED);
  bitset tokens = bitset_create (ntokens, BITSET_FIXED);

  bitset_copy (tokens, shift_tokens);
  for (int i = 0; i < reds->num; ++i)
    {
      bitset_and (conflicted_tokens_rule, tokens, reds->lookaheads[i]);
      bitset_or (conflicted_tokens,
                 conflicted_tokens, conflicted_tokens_rule);
      bitset_or (tokens, tokens, reds->lookaheads[i]);
      /* Check that rules are sorted on rule number or the next step in
         AnnotationList__compute_from_inadequacies will misbehave.  */
      aver (i == 0 || reds->rules[i-1] < reds->rules[i]);
    }

  bitset_free (tokens);
  bitset_free (conflicted_tokens_rule);

  return conflicted_tokens;
}

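/**
 * \pre
 *   - State \c s has a goto on the LHS of \c the_rule.
 * \post
 *   - If \c conflicted_token is an always-follow of that goto,
 *     \c result = true, and \c *items is untouched.
 *   - Otherwise, \c result = false, and \c *items is a new \c Sbitset on
 *     \c annotations_obstackp marking exactly the kernel items of \c s
 *     whose lookahead sets both influence the goto's follows and contain
 *     \c conflicted_token.
 */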
static bool
AnnotationList__compute_lhs_contributions (state *s, const rule *the_rule,
                                           symbol_number conflicted_token,
                                           bitsetv follow_kernel_items,
                                           bitsetv always_follows,
                                           state ***predecessors,
                                           bitset **item_lookahead_sets,
                                           Sbitset *items,
                                           struct obstack
                                             *annotations_obstackp)
{
  goto_number lhs_goto = map_goto (s->number, the_rule->lhs->number);
  if (bitset_test (always_follows[lhs_goto], conflicted_token))
    return true;
  *items = Sbitset__new_on_obstack (s->nitems, annotations_obstackp);
  {
    bitset_iterator biter_item;
    bitset_bindex item;
    BITSET_FOR_EACH (biter_item, follow_kernel_items[lhs_goto], item, 0)
      if (ielr_item_has_lookahead (s, 0, item, conflicted_token,
                                   predecessors, item_lookahead_sets))
        Sbitset__set (*items, item);
  }
  return false;
}

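/**
 * \pre
 *   - \c self annotates state \c s and has already been inserted into
 *     <tt>annotation_lists[s->number]</tt>.
 * \post
 *   - For each predecessor of \c s, the annotation \c self was translated
 *     into an annotation on the predecessor's kernel items and, unless its
 *     dominant contribution proved split-stable or an identical annotation
 *     was already present, inserted into \c annotation_lists and
 *     propagated to that predecessor's own predecessors in turn.  The
 *     recursion terminates because \c AnnotationList__insertInto discards
 *     duplicates and each state admits only finitely many distinct
 *     annotations.
 */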
static void
AnnotationList__computePredecessorAnnotations (
  AnnotationList *self, state *s,
  bitsetv follow_kernel_items,
  bitsetv always_follows,
  state ***predecessors,
  bitset **item_lookahead_sets,
  AnnotationList **annotation_lists,
  AnnotationIndex *annotation_counts,
  struct obstack *annotations_obstackp)
{
  for (state **predecessor = predecessors[s->number]; *predecessor; ++predecessor)
    {
      AnnotationList *annotation_node =
        AnnotationList__alloc_on_obstack (
          self->inadequacyNode->contributionCount, annotations_obstackp);
      annotation_node->inadequacyNode = self->inadequacyNode;
      bool potential_contribution = false;
      bitset *lookaheads = NULL;

      for (ContributionIndex ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
        {
          symbol_number contribution_token =
            InadequacyList__getContributionToken (self->inadequacyNode, ci)
            ->content->number;
          if (AnnotationList__isContributionAlways (self, ci))
            {
              annotation_node->contributions[ci] = NULL;
              continue;
            }
          annotation_node->contributions[ci] =
            Sbitset__new_on_obstack ((*predecessor)->nitems,
                                     annotations_obstackp);
          {
            size_t predecessor_item = 0;
            Sbitset sbiter_item;
            Sbitset__Index self_item;
            SBITSET__FOR_EACH (self->contributions[ci], s->nitems,
                               sbiter_item, self_item)
              {
                /* If this kernel item is the beginning of an RHS, it must be
                   the kernel item in the start state, and so it has an empty
                   lookahead set.  Thus, it can't contribute to inadequacies,
                   and so it should never have been identified as a
                   contribution.  If, instead, this kernel item is the
                   successor of the start state's kernel item, the lookahead
                   set is still empty, and so it also should never have been
                   identified as a contribution.  This situation is fortunate
                   because we want to avoid the - 2 below in both cases.  */
                aver (s->items[self_item] > 1);
                /* If this kernel item is next to the beginning of the RHS,
                   then check all of the predecessor's goto follows for the
                   LHS.  */
                if (item_number_is_rule_number (ritem[s->items[self_item] - 2]))
                  {
                    Sbitset items;
                    if (AnnotationList__compute_lhs_contributions (
                          *predecessor,
                          item_rule (&ritem[s->items[self_item]]),
                          contribution_token,
                          follow_kernel_items, always_follows, predecessors,
                          item_lookahead_sets, &items, annotations_obstackp))
                      {
                        obstack_free (annotations_obstackp,
                                      annotation_node->contributions[ci]);
                        annotation_node->contributions[ci] = NULL;
                        // "Break" out of SBITSET__FOR_EACH.
                        goto after_sbitset__for_each;
                      }
                    else
                      {
                        Sbitset__or (annotation_node->contributions[ci],
                                     annotation_node->contributions[ci],
                                     items, (*predecessor)->nitems);
                        obstack_free (annotations_obstackp, items);
                      }
                  }
                /* If this kernel item is later in the RHS, then check the
                   predecessor item's lookahead set.  */
                else
                  {
                    /* We don't have to start the predecessor item search at
                       the beginning every time because items from both
                       states are sorted by their indices in ritem.  */
                    for (;
                         predecessor_item < (*predecessor)->nitems;
                         ++predecessor_item)
                      if ((*predecessor)->items[predecessor_item]
                          == s->items[self_item] - 1)
                        break;
                    aver (predecessor_item != (*predecessor)->nitems);
                    if (ielr_item_has_lookahead (*predecessor, 0,
                                                 predecessor_item,
                                                 contribution_token,
                                                 predecessors,
                                                 item_lookahead_sets))
                      Sbitset__set (annotation_node->contributions[ci],
                                    predecessor_item);
                  }
              }
          after_sbitset__for_each:;
          }
          if (annotation_node->contributions[ci])
            {
              Sbitset biter;
              Sbitset__Index i;
              SBITSET__FOR_EACH (annotation_node->contributions[ci],
                                 (*predecessor)->nitems, biter, i)
                {
                  potential_contribution = true;
                  if (!lookaheads)
                    {
                      lookaheads = xnmalloc ((*predecessor)->nitems,
                                             sizeof *lookaheads);
                      for (size_t j = 0; j < (*predecessor)->nitems; ++j)
                        lookaheads[j] = NULL;
                    }
                  if (!lookaheads[i])
                    lookaheads[i] = bitset_create (ntokens, BITSET_FIXED);
                  bitset_set (lookaheads[i], contribution_token);
                }
            }
        }

      /* If the predecessor has any contributions besides just "always" and
         "never" contributions:
         - If the dominant contribution is split-stable, the annotation could
           not affect merging on this predecessor state or its eventual
           predecessor states.  Moreover, all contributions that affect
           whether the dominant contribution remains dominant must be "always"
           or "never" contributions in order for the dominant contribution to
           be split-stable.  Thus, the dominant contribution computation result
           in eventual successor states will not be affected by lookaheads
           tracked for this predecessor state.  (Also, as in the isocore
           compatibility test, we depend on the fact that isocores with equal
           dominant contributions will have the same dominant contribution when
           merged.  Otherwise, we might have to worry that the presence of a
           potential contribution might somehow be the culprit of that behavior
           and thus need to be tracked regardless of the split stability of the
           dominant contribution.)  Thus, go ahead and discard the annotation
           to save space now plus time during state splitting.
         - Otherwise, record the annotation, and compute any resulting
           annotations needed on predecessor states.  */
      if (potential_contribution)
        {
          if (ContributionIndex__none
              != AnnotationList__computeDominantContribution (
                   annotation_node, (*predecessor)->nitems, lookaheads, true))
            {
              obstack_free (annotations_obstackp, annotation_node);
              annotation_node = NULL;
            }
          {
            for (size_t i = 0; i < (*predecessor)->nitems; ++i)
              if (lookaheads[i])
                bitset_free (lookaheads[i]);
            free (lookaheads);
          }
          if (annotation_node)
            {
              if (AnnotationList__insertInto (annotation_node,
                                              &annotation_lists[(*predecessor)
                                                                ->number],
                                              (*predecessor)->nitems))
                {
                  ++annotation_counts[(*predecessor)->number];
                  AnnotationList__computePredecessorAnnotations (
                    annotation_node, *predecessor,
                    follow_kernel_items, always_follows, predecessors,
                    item_lookahead_sets, annotation_lists, annotation_counts,
                    annotations_obstackp);
                }
              else
                obstack_free (annotations_obstackp, annotation_node);
            }
        }
      else
        obstack_free (annotations_obstackp, annotation_node);
    }
}

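/* Annotate state s for all inadequacies that manifest in it.  For each
   token on which s has conflicting actions, record the conflict in
   inadequacy_lists[s->number], record which kernel items and lookaheads
   contribute to each action in annotation_lists[s->number], and propagate
   the resulting annotations to predecessor states; annotations whose
   dominant contribution is already split-stable are discarded instead, to
   save space now plus time during state splitting.  *max_contributionsp
   is updated with the largest contribution count seen, and all annotations
   are allocated on annotations_obstackp.  */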
void
AnnotationList__compute_from_inadequacies (
  state *s, bitsetv follow_kernel_items, bitsetv always_follows,
  state ***predecessors, bitset **item_lookahead_sets,
  InadequacyList **inadequacy_lists, AnnotationList **annotation_lists,
  AnnotationIndex *annotation_counts,
  ContributionIndex *max_contributionsp,
  struct obstack *annotations_obstackp,
  InadequacyListNodeCount *inadequacy_list_node_count)
{
  /* Leave the annotation list empty: a consistent state has no lookaheads
     and thus no inadequacies to annotate.  */
  if (s->consistent)
    return;

  bitsetv all_lookaheads = bitsetv_create (s->nitems, ntokens, BITSET_FIXED);
  bitsetv_ones (all_lookaheads);
  bitset shift_tokens = AnnotationList__compute_shift_tokens (s->transitions);
  bitset conflicted_tokens =
    AnnotationList__compute_conflicted_tokens (shift_tokens, s->reductions);

  /* Add an inadequacy annotation for each conflicted_token.  */
  bitset_iterator biter_conflict;
  bitset_bindex conflicted_token;
  BITSET_FOR_EACH (biter_conflict, conflicted_tokens, conflicted_token, 0)
    {
      AnnotationList *annotation_node;
      ContributionIndex contribution_count = 0;

      /* Allocate the annotation node.  */
      {
        for (int rule_i = 0; rule_i < s->reductions->num; ++rule_i)
          if (bitset_test (s->reductions->lookaheads[rule_i],
                           conflicted_token))
            ++contribution_count;
        if (bitset_test (shift_tokens, conflicted_token))
          ++contribution_count;
        annotation_node =
          AnnotationList__alloc_on_obstack (contribution_count,
                                            annotations_obstackp);
      }

      /* FIXME: Would a BITSET_FRUGAL or BITSET_SPARSE be more efficient?
         Should we convert now, or inside InadequacyList__new_conflict?  */
      bitset actions = bitset_create (s->reductions->num + 1, BITSET_FIXED);
      bool potential_contribution = false;

      /* Add a contribution for each reduction that has conflicted_token as a
         lookahead.  */
      {
        ContributionIndex ci = 0;
        int item_i = 0;
        for (int rule_i = 0; rule_i < s->reductions->num; ++rule_i)
          {
            rule *the_rule = s->reductions->rules[rule_i];
            if (bitset_test (s->reductions->lookaheads[rule_i],
                             conflicted_token))
              {
                bitset_set (actions, rule_i);
                /* If this reduction is on a kernel item, just add it.  */
                if (!item_number_is_rule_number (the_rule->rhs[0]))
                  {
                    annotation_node->contributions[ci] =
                      Sbitset__new_on_obstack (s->nitems,
                                               annotations_obstackp);
                    /* Catch item_i up to rule_i.  This works because both are
                       sorted on rule number.  */
                    while (!item_number_is_rule_number (ritem[s->items[item_i]])
                           || item_number_as_rule_number (ritem[s->items[item_i]]) != the_rule->number)
                      {
                        ++item_i;
                        aver (item_i < s->nitems);
                      }
                    Sbitset__set (annotation_node->contributions[ci], item_i);
                  }
                /* Otherwise, add the kernel items whose lookahead sets
                   contribute the conflicted token to this reduction's
                   lookahead set.  */
                else if (AnnotationList__compute_lhs_contributions (
                           s, the_rule, conflicted_token, follow_kernel_items,
                           always_follows, predecessors, item_lookahead_sets,
                           &annotation_node->contributions[ci],
                           annotations_obstackp))
                  {
                    annotation_node->contributions[ci++] = NULL;
                    continue;
                  }
                /* The lookahead token has to come from somewhere.  */
                aver (!Sbitset__isEmpty (annotation_node->contributions[ci],
                                         s->nitems));
                ++ci;
                potential_contribution = true;
              }
          }
      }

      /* If there are any contributions besides just "always" contributions:
         - If there's also a shift contribution, record it.
         - If the dominant contribution is split-stable, then the annotation
           could not affect merging, so go ahead and discard the annotation and
           the inadequacy to save space now plus time during state splitting.
         - Otherwise, record the annotation and the inadequacy, and compute any
           resulting annotations needed on predecessor states.  */
      if (potential_contribution)
        {
          if (bitset_test (shift_tokens, conflicted_token))
            {
              bitset_set (actions, s->reductions->num);
              annotation_node->contributions[contribution_count - 1] = NULL;
            }
          {
            InadequacyList *conflict_node =
              InadequacyList__new_conflict (
                s, symbols[conflicted_token], actions,
                inadequacy_list_node_count);
            actions = NULL;
            annotation_node->inadequacyNode = conflict_node;
            if (ContributionIndex__none
                != AnnotationList__computeDominantContribution (
                     annotation_node, s->nitems, all_lookaheads, true))
              {
                obstack_free (annotations_obstackp, annotation_node);
                InadequacyList__delete (conflict_node);
              }
            else
              {
                InadequacyList__prependTo (conflict_node,
                                           &inadequacy_lists[s->number]);
                {
                  bool b =
                    AnnotationList__insertInto (annotation_node,
                                                &annotation_lists[s->number],
                                                s->nitems);
                  aver (b); (void) b;
                }
                /* This aver makes sure the
                   AnnotationList__computeDominantContribution check above
                   does discard annotations in the simplest case of an S/R
                   conflict with no token precedence.  */
                aver (!bitset_test (shift_tokens, conflicted_token)
                      || symbols[conflicted_token]->content->prec);
                ++annotation_counts[s->number];
                if (contribution_count > *max_contributionsp)
                  *max_contributionsp = contribution_count;
                AnnotationList__computePredecessorAnnotations (
                  annotation_node, s,
                  follow_kernel_items, always_follows, predecessors,
                  item_lookahead_sets, annotation_lists, annotation_counts,
                  annotations_obstackp);
              }
          }
        }
      else
        {
          bitset_free (actions);
          obstack_free (annotations_obstackp, annotation_node);
        }
    }

  bitsetv_free (all_lookaheads);
  bitset_free (shift_tokens);
  bitset_free (conflicted_tokens);
}

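/* For orientation, a sketch of the expected driving loop (modeled on the
   IELR pass in ielr.c; abbreviated, not verbatim): the pass invokes
   AnnotationList__compute_from_inadequacies once per LR(0) state and then
   uses the per-state annotation lists and counts to decide how states
   must split:

     for (state_number i = 0; i < nstates; ++i)
       AnnotationList__compute_from_inadequacies (
         states[i], follow_kernel_items, always_follows, predecessors,
         item_lookahead_sets, inadequacy_lists, annotation_lists,
         annotation_counts, &max_contributions, &annotations_obstack,
         &inadequacy_list_node_count);  */

/* Print a human-readable rendering of the annotation list self, whose
   annotated state has nitems kernel items, to stderr, indented by spaces
   columns.  Intended for debugging.  */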
void
AnnotationList__debug (AnnotationList const *self, size_t nitems, int spaces)
{
  AnnotationList const *a;
  AnnotationIndex ai;
  for (a = self, ai = 0; a; a = a->next, ++ai)
    {
      fprintf (stderr, "%*sAnnotation %d (manifesting state %d):\n",
               spaces, "",
               ai, a->inadequacyNode->manifestingState->number);
      bitset_bindex rulei
        = bitset_first (a->inadequacyNode->inadequacy.conflict.actions);
      for (ContributionIndex ci = 0; ci < a->inadequacyNode->contributionCount; ++ci)
        {
          symbol_number token =
            InadequacyList__getContributionToken (a->inadequacyNode, ci)
            ->content->number;
          fprintf (stderr, "%*s", spaces+2, "");
          if (ci == InadequacyList__getShiftContributionIndex (
                                                               a->inadequacyNode))
            fprintf (stderr, "Contributes shift of token %d.\n", token);
          else
            {
              fprintf (stderr, "Contributes token %d", token);
              aver (rulei != BITSET_BINDEX_MAX);
              fprintf (stderr, " as lookahead, rule number %d",
                       a->inadequacyNode->manifestingState
                       ->reductions->rules[rulei]->number);
              rulei =
                bitset_next (a->inadequacyNode->inadequacy.conflict.actions,
                             rulei+1);
              if (AnnotationList__isContributionAlways (a, ci))
                fprintf (stderr, " always.");
              else
                {
                  fprintf (stderr, ", items: ");
                  Sbitset__fprint (a->contributions[ci], nitems, stderr);
                }
              fprintf (stderr, "\n");
            }
        }
    }
}

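/* Zero lookahead_filter, then set bit (item, token) for every potential
   contribution in the annotation list self.  The result marks exactly the
   lookaheads that must be tracked on each kernel item to resolve the
   annotated inadequacies; "always" contributions need no lookahead
   tracking and are skipped.  */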
void
AnnotationList__computeLookaheadFilter (AnnotationList const *self,
                                        size_t nitems,
                                        bitsetv lookahead_filter)
{
  bitsetv_zero (lookahead_filter);
  for (; self; self = self->next)
    for (ContributionIndex ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
      if (!AnnotationList__isContributionAlways (self, ci))
        {
          symbol_number token =
            InadequacyList__getContributionToken (self->inadequacyNode, ci)
            ->content->number;
          Sbitset__Index item;
          Sbitset biter;
          SBITSET__FOR_EACH (self->contributions[ci], nitems, biter, item)
            bitset_set (lookahead_filter[item], token);
        }
}

/**
 * \pre
 *   - <tt>self != NULL</tt>.
 *   - \c nitems is the number of kernel items in the LR(0) state that \c self
 *     annotates.
 *   - \c lookaheads describes the lookahead sets on the kernel items of some
 *     isocore of the LR(0) state that \c self annotates.  Either:
 *     - <tt>lookaheads = NULL</tt> only if the lookahead set on every kernel
 *       item is empty.
 *     - For any <tt>0 <= i < nitems</tt>, <tt>lookaheads[i]</tt> is either:
 *       - \c NULL only if the lookahead set on kernel item \c i is empty.
 *       - The (possibly empty) lookahead set on kernel item \c i.
 *   - <tt>0 <= ci < self->inadequacyNode->contributionCount</tt>.
 * \post
 *   - \c result = true iff contribution \c ci in \c self is made by the state
 *     described by \c lookaheads.
 */
static bool
AnnotationList__stateMakesContribution (AnnotationList const *self,
                                        size_t nitems, ContributionIndex ci,
                                        bitset *lookaheads)
{
  if (AnnotationList__isContributionAlways (self, ci))
    return true;
  if (!lookaheads)
    return false;
  {
    symbol_number token =
      InadequacyList__getContributionToken (self->inadequacyNode, ci)
      ->content->number;
    Sbitset__Index item;
    Sbitset biter;
    SBITSET__FOR_EACH (self->contributions[ci], nitems, biter, item)
      if (lookaheads[item] && bitset_test (lookaheads[item], token))
        return true;
  }
  return false;
}

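/* Return the index of the contribution that dominates the inadequacy
   annotated by self in the isocore described by lookaheads (nitems kernel
   items), ContributionIndex__error_action if a %nonassoc token yields an
   error action, or ContributionIndex__none if no contribution dominates
   or, when require_split_stable, if domination might not survive state
   splitting.  For example, in an S/R conflict, a shift on a token with no
   precedence always dominates, while under equal precedence a %left token
   selects the reduction and a %right token the shift.  */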
ContributionIndex
AnnotationList__computeDominantContribution (AnnotationList const *self,
                                             size_t nitems, bitset *lookaheads,
                                             bool require_split_stable)
{
  ContributionIndex const ci_shift =
    InadequacyList__getShiftContributionIndex (self->inadequacyNode);

  symbol *token = self->inadequacyNode->inadequacy.conflict.token;

  /* S/R conflict.  */
  if (ci_shift != ContributionIndex__none)
    {
      bool find_stable_domination_over_shift = false;
      bool find_stable_error_action_domination = false;
      {
        int shift_precedence = token->content->prec;

        /* If the token has no precedence set, shift is always chosen.  */
        if (!shift_precedence)
          return ci_shift;

        /* Figure out which reductions contribute, which of those would
           dominate in an R/R comparison, and whether any reduction dominates
           the shift so that the R/R comparison is actually needed.  */
        ContributionIndex ci_rr_dominator = ContributionIndex__none;
        int actioni;
        ContributionIndex ci;
        for (ci = 0,
               actioni = bitset_first (self->inadequacyNode->inadequacy
                                       .conflict.actions);
             ci < self->inadequacyNode->contributionCount;
             ++ci,
               actioni = bitset_next (self->inadequacyNode->inadequacy
                                      .conflict.actions, actioni+1))
          {
            int reduce_precedence = 0;
            if (ci == ci_shift)
              continue;
            {
              rule *r = self->inadequacyNode->manifestingState
                          ->reductions->rules[actioni];
              if (r->prec)
                reduce_precedence = r->prec->prec;
            }
            /* If there's no need to check whether this reduction actually
               contributes because the shift eliminates it from the R/R
               comparison anyway, continue to the next reduction.  */
            if (reduce_precedence
                && (reduce_precedence < shift_precedence
                    || (reduce_precedence == shift_precedence
                        && token->content->assoc == right_assoc)))
              continue;
            if (!AnnotationList__stateMakesContribution (self, nitems, ci,
                                                         lookaheads))
              continue;
            /* This uneliminated reduction contributes, so see if it can cause
               an error action.  */
            if (reduce_precedence == shift_precedence
                && token->content->assoc == non_assoc)
              {
                /* It's not possible to find split-stable domination over
                   shift after a potential %nonassoc.  */
                if (find_stable_domination_over_shift)
                  return ContributionIndex__none;
                if (!require_split_stable
                    || AnnotationList__isContributionAlways (self, ci))
                  return ContributionIndex__error_action;
                find_stable_error_action_domination = true;
              }
            /* Consider this uneliminated contributing reduction in the R/R
               comparison.  */
            if (ci_rr_dominator == ContributionIndex__none)
              ci_rr_dominator = ci;
            /* If precedence is set for this uneliminated contributing
               reduction, it dominates the shift, so try to figure out which
               reduction dominates the R/R comparison.  */
            if (reduce_precedence)
              {
                /* It's not possible to find split-stable error action
                   domination after a potential reduction.  */
                if (find_stable_error_action_domination)
                  return ContributionIndex__none;
                if (!require_split_stable)
                  return ci_rr_dominator;
                if (!AnnotationList__isContributionAlways (self,
                                                           ci_rr_dominator))
                  return ContributionIndex__none;
                if (AnnotationList__isContributionAlways (self, ci))
                  return ci_rr_dominator;
                find_stable_domination_over_shift = true;
              }
          }
      }
      if (find_stable_domination_over_shift
          || find_stable_error_action_domination)
        return ContributionIndex__none;
      /* No reduce or error action domination found, so shift dominates.  */
      return ci_shift;
    }

  /* R/R conflict, so the reduction with the lowest rule number dominates.
     Fortunately, contributions are sorted by rule number.  */
  for (ContributionIndex ci = 0; ci < self->inadequacyNode->contributionCount; ++ci)
    if (AnnotationList__stateMakesContribution (self, nitems, ci, lookaheads))
      {
        if (require_split_stable
            && !AnnotationList__isContributionAlways (self, ci))
          return ContributionIndex__none;
        return ci;
      }
  return ContributionIndex__none;
}