gcc-13.2.0/gcc/config/aarch64/aarch64-sve-builtins-functions.h
/* ACLE support for AArch64 SVE (function_base classes)
   Copyright (C) 2018-2023 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#ifndef GCC_AARCH64_SVE_BUILTINS_FUNCTIONS_H
#define GCC_AARCH64_SVE_BUILTINS_FUNCTIONS_H

namespace aarch64_sve {

/* Wrap T, which is derived from function_base, and indicate that the
   function never has side effects.  It is only necessary to use this
   wrapper on functions that might have floating-point suffixes, since
   otherwise we assume by default that the function has no side effects.  */
template<typename T>
class quiet : public T
{
public:
  using T::T;

  unsigned int
  call_properties (const function_instance &) const override
  {
    return 0;
  }
};
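
/* For example, an operation that never raises FP exceptions could be
   wrapped as something like quiet<rtx_code_function>, giving a class
   whose call_properties is 0 even for floating-point suffixes:

     quiet<rtx_code_function> fn (NEG, NEG, UNSPEC_COND_FNEG);

   This is only an illustrative sketch; the real instances are created
   through the FUNCTION macro at the end of this file.  */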

/* A function_base that sometimes or always operates on tuples of
   vectors.  */
class multi_vector_function : public function_base
{
public:
  CONSTEXPR multi_vector_function (unsigned int vectors_per_tuple)
    : m_vectors_per_tuple (vectors_per_tuple) {}

  unsigned int
  vectors_per_tuple () const override
  {
    return m_vectors_per_tuple;
  }

  /* The number of vectors in a tuple, or 1 if the function only operates
     on single vectors.  */
  unsigned int m_vectors_per_tuple;
};

/* A function_base that loads or stores contiguous memory elements
   without extending or truncating them.  */
class full_width_access : public multi_vector_function
{
public:
  CONSTEXPR full_width_access (unsigned int vectors_per_tuple = 1)
    : multi_vector_function (vectors_per_tuple) {}

  tree
  memory_scalar_type (const function_instance &fi) const override
  {
    return fi.scalar_type (0);
  }

  machine_mode
  memory_vector_mode (const function_instance &fi) const override
  {
    machine_mode mode = fi.vector_mode (0);
    if (m_vectors_per_tuple != 1)
      mode = targetm.array_mode (mode, m_vectors_per_tuple).require ();
    return mode;
  }
};
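
/* As an illustration of the class above: a tuple load such as svld2_f64
   would have vectors_per_tuple 2, memory_scalar_type would be the f64
   scalar type, and memory_vector_mode would be the target's array mode
   for two VNx2DF vectors (for plain single-vector loads and stores it is
   simply the vector mode of type suffix 0).  This is only an illustrative
   reading of the code, not an additional definition.  */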

/* A function_base that loads elements from memory and extends them
   to a wider element.  The memory element type is a fixed part of
   the function base name.  */
class extending_load : public function_base
{
public:
  CONSTEXPR extending_load (type_suffix_index memory_type)
    : m_memory_type (memory_type) {}

  unsigned int
  call_properties (const function_instance &) const override
  {
    return CP_READ_MEMORY;
  }

  tree
  memory_scalar_type (const function_instance &) const override
  {
    return scalar_types[type_suffixes[m_memory_type].vector_type];
  }

  machine_mode
  memory_vector_mode (const function_instance &fi) const override
  {
    machine_mode mem_mode = type_suffixes[m_memory_type].vector_mode;
    machine_mode reg_mode = fi.vector_mode (0);
    return aarch64_sve_data_mode (GET_MODE_INNER (mem_mode),
				  GET_MODE_NUNITS (reg_mode)).require ();
  }

  /* Return the rtx code associated with the kind of extension that
     the load performs.  */
  rtx_code
  extend_rtx_code () const
  {
    return (type_suffixes[m_memory_type].unsigned_p
	    ? ZERO_EXTEND : SIGN_EXTEND);
  }

  /* The type of the memory elements.  This is part of the function base
     name rather than a true type suffix.  */
  type_suffix_index m_memory_type;
};
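
/* As an illustration: for an extending load such as svld1sb_s32,
   m_memory_type identifies the s8 suffix, so memory_scalar_type returns
   the 8-bit scalar type, memory_vector_mode returns the SVE data mode
   with 8-bit elements but the same number of elements as the s32
   register mode, and extend_rtx_code returns SIGN_EXTEND (ZERO_EXTEND
   for the unsigned svld1ub forms).  */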

/* A function_base that truncates vector elements and stores them to memory.
   The memory element width is a fixed part of the function base name.  */
class truncating_store : public function_base
{
public:
  CONSTEXPR truncating_store (scalar_int_mode to_mode) : m_to_mode (to_mode) {}

  unsigned int
  call_properties (const function_instance &) const override
  {
    return CP_WRITE_MEMORY;
  }

  tree
  memory_scalar_type (const function_instance &fi) const override
  {
    /* In truncating stores, the signedness of the memory element is defined
       to be the same as the signedness of the vector element.  The signedness
       doesn't make any difference to the behavior of the function.  */
    type_class_index tclass = fi.type_suffix (0).tclass;
    unsigned int element_bits = GET_MODE_BITSIZE (m_to_mode);
    type_suffix_index suffix = find_type_suffix (tclass, element_bits);
    return scalar_types[type_suffixes[suffix].vector_type];
  }

  machine_mode
  memory_vector_mode (const function_instance &fi) const override
  {
    poly_uint64 nunits = GET_MODE_NUNITS (fi.vector_mode (0));
    return aarch64_sve_data_mode (m_to_mode, nunits).require ();
  }

  /* The mode of a single memory element.  */
  scalar_int_mode m_to_mode;
};
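
/* As an illustration: for a truncating store such as svst1b_s32,
   m_to_mode would be QImode, so memory_scalar_type returns the 8-bit
   scalar type in the same type class as the s32 suffix, and
   memory_vector_mode returns an SVE data mode with one 8-bit element
   per element of the s32 register mode.  */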

/* An incomplete function_base for functions that have an associated rtx code.
   It simply records information about the mapping for derived classes
   to use.  */
class rtx_code_function_base : public function_base
{
public:
  CONSTEXPR rtx_code_function_base (rtx_code code_for_sint,
				    rtx_code code_for_uint,
				    int unspec_for_fp = -1)
    : m_code_for_sint (code_for_sint), m_code_for_uint (code_for_uint),
      m_unspec_for_fp (unspec_for_fp) {}

  /* The rtx code to use for signed and unsigned integers respectively.
     Can be UNKNOWN for functions that don't have integer forms.  */
  rtx_code m_code_for_sint;
  rtx_code m_code_for_uint;

  /* The UNSPEC_COND_* to use for floating-point operations.  Can be -1
     for functions that only operate on integers.  */
  int m_unspec_for_fp;
};

/* A function_base for functions that have an associated rtx code.
   It supports all forms of predication except PRED_implicit.  */
class rtx_code_function : public rtx_code_function_base
{
public:
  using rtx_code_function_base::rtx_code_function_base;

  rtx
  expand (function_expander &e) const override
  {
    return e.map_to_rtx_codes (m_code_for_sint, m_code_for_uint,
			       m_unspec_for_fp);
  }
};
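
/* As an illustrative sketch, a simple arithmetic function could be
   registered with this class roughly as:

     FUNCTION (svadd, rtx_code_function, (PLUS, PLUS, UNSPEC_COND_FADD))

   using the FUNCTION macro defined at the end of this file; PLUS would
   then handle the signed and unsigned integer forms and UNSPEC_COND_FADD
   the floating-point forms.  The actual registrations live in
   aarch64-sve-builtins-base.cc rather than here.  */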

/* Like rtx_code_function, but for functions that take what is normally
   the final argument first.  One use of this class is to handle binary
   reversed operations; another is to handle MLA-style operations that
   are normally expressed in GCC as MAD-style operations.  */
class rtx_code_function_rotated : public rtx_code_function_base
{
public:
  using rtx_code_function_base::rtx_code_function_base;

  rtx
  expand (function_expander &e) const override
  {
    /* Rotate the inputs into their normal order, but continue to make _m
       functions merge with what was originally the first vector argument.  */
    unsigned int nargs = e.args.length ();
    e.rotate_inputs_left (e.pred != PRED_none ? 1 : 0, nargs);
    return e.map_to_rtx_codes (m_code_for_sint, m_code_for_uint,
			       m_unspec_for_fp, nargs - 1);
  }
};

/* An incomplete function_base for functions that have an associated
   unspec code, with separate codes for signed integers, unsigned
   integers and floating-point values.  The class simply records
   information about the mapping for derived classes to use.  */
class unspec_based_function_base : public function_base
{
public:
  CONSTEXPR unspec_based_function_base (int unspec_for_sint,
					int unspec_for_uint,
					int unspec_for_fp)
    : m_unspec_for_sint (unspec_for_sint),
      m_unspec_for_uint (unspec_for_uint),
      m_unspec_for_fp (unspec_for_fp)
  {}

  /* Return the unspec code to use for INSTANCE, based on type suffix 0.  */
  int
  unspec_for (const function_instance &instance) const
  {
    return (!instance.type_suffix (0).integer_p ? m_unspec_for_fp
	    : instance.type_suffix (0).unsigned_p ? m_unspec_for_uint
	    : m_unspec_for_sint);
  }

  /* The unspec code associated with signed-integer, unsigned-integer
     and floating-point operations respectively.  */
  int m_unspec_for_sint;
  int m_unspec_for_uint;
  int m_unspec_for_fp;
};

/* A function_base for functions that have an associated unspec code.
   It supports all forms of predication except PRED_implicit.  */
class unspec_based_function : public unspec_based_function_base
{
public:
  using unspec_based_function_base::unspec_based_function_base;

  rtx
  expand (function_expander &e) const override
  {
    return e.map_to_unspecs (m_unspec_for_sint, m_unspec_for_uint,
			     m_unspec_for_fp);
  }
};

/* Like unspec_based_function, but for functions that take what is normally
   the final argument first.  One use of this class is to handle binary
   reversed operations; another is to handle MLA-style operations that
   are normally expressed in GCC as MAD-style operations.  */
class unspec_based_function_rotated : public unspec_based_function_base
{
public:
  using unspec_based_function_base::unspec_based_function_base;

  rtx
  expand (function_expander &e) const override
  {
    /* Rotate the inputs into their normal order, but continue to make _m
       functions merge with what was originally the first vector argument.  */
    unsigned int nargs = e.args.length ();
    e.rotate_inputs_left (e.pred != PRED_none ? 1 : 0, nargs);
    return e.map_to_unspecs (m_unspec_for_sint, m_unspec_for_uint,
			     m_unspec_for_fp, nargs - 1);
  }
};
/* Like unspec_based_function, but map the function directly to
   CODE (UNSPEC, M) instead of using the generic predication-based
   expansion, where M is the vector mode associated with type suffix 0.
   This is useful if the unspec doesn't describe the full operation or
   if the usual predication rules don't apply for some reason.  */
template<insn_code (*CODE) (int, machine_mode)>
class unspec_based_function_exact_insn : public unspec_based_function_base
{
public:
  using unspec_based_function_base::unspec_based_function_base;

  rtx
  expand (function_expander &e) const override
  {
    return e.use_exact_insn (CODE (unspec_for (e), e.vector_mode (0)));
  }
};

/* A function that performs an unspec and then adds it to another value.  */
typedef unspec_based_function_exact_insn<code_for_aarch64_sve_add>
  unspec_based_add_function;
typedef unspec_based_function_exact_insn<code_for_aarch64_sve_add_lane>
  unspec_based_add_lane_function;

/* Generic unspec-based _lane function.  */
typedef unspec_based_function_exact_insn<code_for_aarch64_sve_lane>
  unspec_based_lane_function;

/* A function that uses aarch64_pred* patterns regardless of the
   predication type.  */
typedef unspec_based_function_exact_insn<code_for_aarch64_pred>
  unspec_based_pred_function;

/* Like unspec_based_add_function and unspec_based_add_lane_function,
   but using saturating addition.  */
typedef unspec_based_function_exact_insn<code_for_aarch64_sve_qadd>
  unspec_based_qadd_function;
typedef unspec_based_function_exact_insn<code_for_aarch64_sve_qadd_lane>
  unspec_based_qadd_lane_function;

/* Like unspec_based_sub_function and unspec_based_sub_lane_function,
   but using saturating subtraction.  */
typedef unspec_based_function_exact_insn<code_for_aarch64_sve_qsub>
  unspec_based_qsub_function;
typedef unspec_based_function_exact_insn<code_for_aarch64_sve_qsub_lane>
  unspec_based_qsub_lane_function;

/* A function that performs an unspec and then subtracts it from
   another value.  */
typedef unspec_based_function_exact_insn<code_for_aarch64_sve_sub>
  unspec_based_sub_function;
typedef unspec_based_function_exact_insn<code_for_aarch64_sve_sub_lane>
  unspec_based_sub_lane_function;

/* A function that acts like unspec_based_function_exact_insn<INT_CODE>
   when operating on integers, but that expands to an (fma ...)-style
   aarch64_sve* operation when applied to floats.  */
template<insn_code (*INT_CODE) (int, machine_mode)>
class unspec_based_fused_function : public unspec_based_function_base
{
public:
  using unspec_based_function_base::unspec_based_function_base;

  rtx
  expand (function_expander &e) const override
  {
    int unspec = unspec_for (e);
    insn_code icode;
    if (e.type_suffix (0).float_p)
      {
	/* Put the operands in the normal (fma ...) order, with the accumulator
	   last.  This fits naturally since that's also the unprinted operand
	   in the asm output.  */
	e.rotate_inputs_left (0, e.pred != PRED_none ? 4 : 3);
	icode = code_for_aarch64_sve (unspec, e.vector_mode (0));
      }
    else
      icode = INT_CODE (unspec, e.vector_mode (0));
    return e.use_exact_insn (icode);
  }
};
typedef unspec_based_fused_function<code_for_aarch64_sve_add>
  unspec_based_mla_function;
typedef unspec_based_fused_function<code_for_aarch64_sve_sub>
  unspec_based_mls_function;

/* Like unspec_based_fused_function, but for _lane functions.  */
template<insn_code (*INT_CODE) (int, machine_mode)>
class unspec_based_fused_lane_function : public unspec_based_function_base
{
public:
  using unspec_based_function_base::unspec_based_function_base;

  rtx
  expand (function_expander &e) const override
  {
    int unspec = unspec_for (e);
    insn_code icode;
    if (e.type_suffix (0).float_p)
      {
	/* Put the operands in the normal (fma ...) order, with the accumulator
	   last.  This fits naturally since that's also the unprinted operand
	   in the asm output.  */
	e.rotate_inputs_left (0, e.pred != PRED_none ? 5 : 4);
	icode = code_for_aarch64_lane (unspec, e.vector_mode (0));
      }
    else
      icode = INT_CODE (unspec, e.vector_mode (0));
    return e.use_exact_insn (icode);
  }
};
typedef unspec_based_fused_lane_function<code_for_aarch64_sve_add_lane>
  unspec_based_mla_lane_function;
typedef unspec_based_fused_lane_function<code_for_aarch64_sve_sub_lane>
  unspec_based_mls_lane_function;

/* A function_base that uses CODE_FOR_MODE (M) to get the associated
   instruction code, where M is the vector mode associated with type
   suffix N.  */
template<insn_code (*CODE_FOR_MODE) (machine_mode), unsigned int N>
class code_for_mode_function : public function_base
{
public:
  rtx
  expand (function_expander &e) const override
  {
    return e.use_exact_insn (CODE_FOR_MODE (e.vector_mode (N)));
  }
};

/* A function that uses code_for_<PATTERN> (M), where M is the vector
   mode associated with the first type suffix.  */
#define CODE_FOR_MODE0(PATTERN) code_for_mode_function<code_for_##PATTERN, 0>

/* Likewise for the second type suffix.  */
#define CODE_FOR_MODE1(PATTERN) code_for_mode_function<code_for_##PATTERN, 1>

/* Like CODE_FOR_MODE0, but the function doesn't raise exceptions when
   operating on floating-point data.  */
#define QUIET_CODE_FOR_MODE0(PATTERN) \
  quiet< code_for_mode_function<code_for_##PATTERN, 0> >
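
/* For example, CODE_FOR_MODE0 (aarch64_sve_tbl) would name the class
   code_for_mode_function<code_for_aarch64_sve_tbl, 0>, which expands
   calls using the aarch64_sve_tbl pattern for the mode of type suffix 0.
   (aarch64_sve_tbl is used here purely as an illustration.)  */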

/* A function_base for functions that always expand to a fixed insn pattern,
   regardless of what the suffixes are.  */
class fixed_insn_function : public function_base
{
public:
  CONSTEXPR fixed_insn_function (insn_code code) : m_code (code) {}

  rtx
  expand (function_expander &e) const override
  {
    return e.use_exact_insn (m_code);
  }

  /* The instruction to use.  */
  insn_code m_code;
};

/* A function_base for functions that permute their arguments.  */
class permute : public quiet<function_base>
{
public:
  /* Fold a unary or binary permute with the permute vector given by
     BUILDER.  */
  gimple *
  fold_permute (const gimple_folder &f, const vec_perm_builder &builder) const
  {
    /* Punt for now on _b16 and wider; we'd need more complex evpc logic
       to rerecognize the result.  */
    if (f.type_suffix (0).bool_p && f.type_suffix (0).element_bits > 8)
      return NULL;

    unsigned int nargs = gimple_call_num_args (f.call);
    poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (TREE_TYPE (f.lhs));
    vec_perm_indices indices (builder, nargs, nelts);
    tree perm_type = build_vector_type (ssizetype, nelts);
    return gimple_build_assign (f.lhs, VEC_PERM_EXPR,
				gimple_call_arg (f.call, 0),
				gimple_call_arg (f.call, nargs - 1),
				vec_perm_indices_to_tree (perm_type, indices));
  }
};

/* A function_base for functions that permute two vectors using a fixed
   choice of indices.  */
class binary_permute : public permute
{
public:
  CONSTEXPR binary_permute (int unspec) : m_unspec (unspec) {}

  rtx
  expand (function_expander &e) const override
  {
    insn_code icode = code_for_aarch64_sve (m_unspec, e.vector_mode (0));
    return e.use_exact_insn (icode);
  }

  /* The unspec code associated with the operation.  */
  int m_unspec;
};

/* A function_base for functions that reduce a vector to a scalar.  */
class reduction : public function_base
{
public:
  CONSTEXPR reduction (int unspec)
    : m_unspec_for_sint (unspec),
      m_unspec_for_uint (unspec),
      m_unspec_for_fp (unspec)
  {}

  CONSTEXPR reduction (int unspec_for_sint, int unspec_for_uint,
		       int unspec_for_fp)
    : m_unspec_for_sint (unspec_for_sint),
      m_unspec_for_uint (unspec_for_uint),
      m_unspec_for_fp (unspec_for_fp)
  {}

  rtx
  expand (function_expander &e) const override
  {
    machine_mode mode = e.vector_mode (0);
    int unspec = (!e.type_suffix (0).integer_p ? m_unspec_for_fp
		  : e.type_suffix (0).unsigned_p ? m_unspec_for_uint
		  : m_unspec_for_sint);
    /* There's no distinction between SADDV and UADDV for 64-bit elements;
       the signed versions only exist for narrower elements.  */
    if (GET_MODE_UNIT_BITSIZE (mode) == 64 && unspec == UNSPEC_SADDV)
      unspec = UNSPEC_UADDV;
    return e.use_exact_insn (code_for_aarch64_pred_reduc (unspec, mode));
  }

  /* The unspec code associated with signed-integer, unsigned-integer
     and floating-point operations respectively.  */
  int m_unspec_for_sint;
  int m_unspec_for_uint;
  int m_unspec_for_fp;
};
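
/* For example, an addition reduction like svaddv could be implemented as
   reduction (UNSPEC_SADDV, UNSPEC_UADDV, UNSPEC_FADDV), while reductions
   that use a single unspec for all type suffixes can use the
   one-argument constructor.  (Illustrative only; see
   aarch64-sve-builtins-base.cc for the actual uses.)  */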

/* A function_base for functions that shift narrower-than-64-bit values
   by 64-bit amounts.  */
class shift_wide : public function_base
{
public:
  CONSTEXPR shift_wide (rtx_code code, int wide_unspec)
    : m_code (code), m_wide_unspec (wide_unspec) {}

  rtx
  expand (function_expander &e) const override
  {
    machine_mode mode = e.vector_mode (0);
    machine_mode elem_mode = GET_MODE_INNER (mode);

    /* If the argument is a constant that the normal shifts can handle
       directly, use them instead.  */
    rtx shift = unwrap_const_vec_duplicate (e.args.last ());
    if (aarch64_simd_shift_imm_p (shift, elem_mode, m_code == ASHIFT))
      {
	e.args.last () = shift;
	return e.map_to_rtx_codes (m_code, m_code, -1);
      }

    if (e.pred == PRED_x)
      return e.use_unpred_insn (code_for_aarch64_sve (m_wide_unspec, mode));

    return e.use_cond_insn (code_for_cond (m_wide_unspec, mode));
  }

  /* The rtx code associated with a "normal" shift.  */
  rtx_code m_code;

  /* The unspec code associated with the wide shift.  */
  int m_wide_unspec;
};

/* A function_base for unary functions that count bits.  */
class unary_count : public quiet<function_base>
{
public:
  CONSTEXPR unary_count (rtx_code code) : m_code (code) {}

  rtx
  expand (function_expander &e) const override
  {
    /* The md patterns treat the operand as an integer.  */
    machine_mode mode = aarch64_sve_int_mode (e.vector_mode (0));
    e.args.last () = gen_lowpart (mode, e.args.last ());

    if (e.pred == PRED_x)
      return e.use_pred_x_insn (code_for_aarch64_pred (m_code, mode));

    return e.use_cond_insn (code_for_cond (m_code, mode));
  }

  /* The rtx code associated with the operation.  */
  rtx_code m_code;
};

/* A function_base for svwhile* functions.  */
class while_comparison : public function_base
{
public:
  CONSTEXPR while_comparison (int unspec_for_sint, int unspec_for_uint)
    : m_unspec_for_sint (unspec_for_sint),
      m_unspec_for_uint (unspec_for_uint)
  {}

  rtx
  expand (function_expander &e) const override
  {
    /* Suffix 0 determines the predicate mode, suffix 1 determines the
       scalar mode and signedness.  */
    int unspec = (e.type_suffix (1).unsigned_p
		  ? m_unspec_for_uint
		  : m_unspec_for_sint);
    machine_mode pred_mode = e.vector_mode (0);
    scalar_mode reg_mode = GET_MODE_INNER (e.vector_mode (1));
    return e.use_exact_insn (code_for_while (unspec, reg_mode, pred_mode));
  }

  /* The unspec codes associated with signed and unsigned operations
     respectively.  */
  int m_unspec_for_sint;
  int m_unspec_for_uint;
};
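
/* For example, svwhilelt could be implemented as
   while_comparison (UNSPEC_WHILELT, UNSPEC_WHILELO): the signed or
   unsigned unspec is chosen from type suffix 1 (the scalar arguments)
   and the predicate mode comes from type suffix 0.  (Illustrative
   only.)  */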

}

/* Declare the global function base NAME, creating it from an instance
   of class CLASS with constructor arguments ARGS.  */
#define FUNCTION(NAME, CLASS, ARGS) \
  namespace { static CONSTEXPR const CLASS NAME##_obj ARGS; } \
  namespace functions { const function_base *const NAME = &NAME##_obj; }
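
/* As an illustrative sketch, an invocation such as
   FUNCTION (svabs, quiet<rtx_code_function>, (ABS, ABS, UNSPEC_COND_FABS))
   would create a static svabs_obj instance in an anonymous namespace and
   expose a pointer to it as functions::svabs.  The real invocations are
   in the *.cc files that include this header.  */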

#endif