(root)/
gcc-13.2.0/
libgomp/
config/
nvptx/
bar.h
       1  /* Copyright (C) 2015-2023 Free Software Foundation, Inc.
       2     Contributed by Alexander Monakov <amonakov@ispras.ru>
       3  
       4     This file is part of the GNU Offloading and Multi Processing Library
       5     (libgomp).
       6  
       7     Libgomp is free software; you can redistribute it and/or modify it
       8     under the terms of the GNU General Public License as published by
       9     the Free Software Foundation; either version 3, or (at your option)
      10     any later version.
      11  
      12     Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
      13     WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
      14     FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
      15     more details.
      16  
      17     Under Section 7 of GPL version 3, you are granted additional
      18     permissions described in the GCC Runtime Library Exception, version
      19     3.1, as published by the Free Software Foundation.
      20  
      21     You should have received a copy of the GNU General Public License and
      22     a copy of the GCC Runtime Library Exception along with this program;
      23     see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
      24     <http://www.gnu.org/licenses/>.  */
      25  
      26  /* This is an NVPTX specific implementation of a barrier synchronization
      27     mechanism for libgomp.  This type is private to the library.  This
      28     implementation uses atomic instructions and bar.sync instruction.  */
      29  
      30  #ifndef GOMP_BARRIER_H
      31  #define GOMP_BARRIER_H 1
      32  
      33  #include "mutex.h"
      34  
typedef struct
{
  unsigned total;		/* Number of threads participating.  */
  unsigned generation;		/* Generation counter in the high bits plus
				   flag bits in the low bits (see BAR_*).  */
  unsigned awaited;		/* Threads yet to arrive; counts down from
				   TOTAL, reset each generation.  */
  unsigned awaited_final;	/* Separate arrival countdown used only by
				   gomp_barrier_wait_final_start for the
				   team-end barrier.  */
} gomp_barrier_t;
      42  
      43  typedef unsigned int gomp_barrier_state_t;
      44  
      45  /* The generation field contains a counter in the high bits, with a few
      46     low bits dedicated to flags.  Note that TASK_PENDING and WAS_LAST can
      47     share space because WAS_LAST is never stored back to generation.  */
      48  #define BAR_TASK_PENDING	1
      49  #define BAR_WAS_LAST		1
      50  #define BAR_WAITING_FOR_TASK	2
      51  #define BAR_CANCELLED		4
      52  #define BAR_INCR		8
      53  
      54  static inline void gomp_barrier_init (gomp_barrier_t *bar, unsigned count)
      55  {
      56    bar->total = count;
      57    bar->awaited = count;
      58    bar->awaited_final = count;
      59    bar->generation = 0;
      60  }
      61  
      62  static inline void gomp_barrier_reinit (gomp_barrier_t *bar, unsigned count)
      63  {
      64    __atomic_add_fetch (&bar->awaited, count - bar->total, MEMMODEL_ACQ_REL);
      65    bar->total = count;
      66  }
      67  
/* An nvptx barrier holds no dynamically-allocated resources, so there is
   nothing to tear down.  */
static inline void gomp_barrier_destroy (gomp_barrier_t *bar)
{
}
      71  
      72  extern void gomp_barrier_wait (gomp_barrier_t *);
      73  extern void gomp_barrier_wait_last (gomp_barrier_t *);
      74  extern void gomp_barrier_wait_end (gomp_barrier_t *, gomp_barrier_state_t);
      75  extern void gomp_team_barrier_wait (gomp_barrier_t *);
      76  extern void gomp_team_barrier_wait_final (gomp_barrier_t *);
      77  extern void gomp_team_barrier_wait_end (gomp_barrier_t *,
      78  					gomp_barrier_state_t);
      79  extern bool gomp_team_barrier_wait_cancel (gomp_barrier_t *);
      80  extern bool gomp_team_barrier_wait_cancel_end (gomp_barrier_t *,
      81  					       gomp_barrier_state_t);
      82  struct gomp_team;
      83  extern void gomp_team_barrier_cancel (struct gomp_team *);
      84  
static inline void
gomp_team_barrier_wake (gomp_barrier_t *bar, int count)
{
  /* We never "wake up" threads on nvptx.  Threads wait at barrier
     instructions until the barrier is fulfilled, so COUNT is ignored
     and there is nothing to do here.  */
}
      91  
/* Enter the barrier: snapshot the generation (keeping the counter bits
   and the CANCELLED flag) and atomically record this thread's arrival.
   The last thread to arrive gets BAR_WAS_LAST set in the returned
   state.  */
static inline gomp_barrier_state_t
gomp_barrier_wait_start (gomp_barrier_t *bar)
{
  unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
  /* -BAR_INCR masks off the low flag bits, keeping the generation
     counter; BAR_CANCELLED is OR'ed back in so callers can observe
     cancellation.  */
  ret &= -BAR_INCR | BAR_CANCELLED;
  /* A memory barrier is needed before exiting from the various forms
     of gomp_barrier_wait, to satisfy OpenMP API version 3.1 section
     2.8.6 flush Construct, which says there is an implicit flush during
     a barrier region.  This is a convenient place to add the barrier,
     so we use MEMMODEL_ACQ_REL here rather than MEMMODEL_ACQUIRE.  */
  if (__atomic_add_fetch (&bar->awaited, -1, MEMMODEL_ACQ_REL) == 0)
    ret |= BAR_WAS_LAST;
  return ret;
}
     106  
     107  static inline gomp_barrier_state_t
     108  gomp_barrier_wait_cancel_start (gomp_barrier_t *bar)
     109  {
     110    return gomp_barrier_wait_start (bar);
     111  }
     112  
/* This is like gomp_barrier_wait_start, except it decrements
   bar->awaited_final rather than bar->awaited and should be used
   for the gomp_team_end barrier only.  */
static inline gomp_barrier_state_t
gomp_barrier_wait_final_start (gomp_barrier_t *bar)
{
  unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
  /* Keep only the generation counter bits plus the CANCELLED flag.  */
  ret &= -BAR_INCR | BAR_CANCELLED;
  /* See above gomp_barrier_wait_start comment.  */
  if (__atomic_add_fetch (&bar->awaited_final, -1, MEMMODEL_ACQ_REL) == 0)
    ret |= BAR_WAS_LAST;
  return ret;
}
     126  
     127  static inline bool
     128  gomp_barrier_last_thread (gomp_barrier_state_t state)
     129  {
     130    return state & BAR_WAS_LAST;
     131  }
     132  
     133  /* All the inlines below must be called with team->task_lock
     134     held.  */
     135  
     136  static inline void
     137  gomp_team_barrier_set_task_pending (gomp_barrier_t *bar)
     138  {
     139    bar->generation |= BAR_TASK_PENDING;
     140  }
     141  
     142  static inline void
     143  gomp_team_barrier_clear_task_pending (gomp_barrier_t *bar)
     144  {
     145    bar->generation &= ~BAR_TASK_PENDING;
     146  }
     147  
     148  static inline void
     149  gomp_team_barrier_set_waiting_for_tasks (gomp_barrier_t *bar)
     150  {
     151    bar->generation |= BAR_WAITING_FOR_TASK;
     152  }
     153  
     154  static inline bool
     155  gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar)
     156  {
     157    return (bar->generation & BAR_WAITING_FOR_TASK) != 0;
     158  }
     159  
     160  static inline bool
     161  gomp_team_barrier_cancelled (gomp_barrier_t *bar)
     162  {
     163    return __builtin_expect ((bar->generation & BAR_CANCELLED) != 0, 0);
     164  }
     165  
     166  static inline void
     167  gomp_team_barrier_done (gomp_barrier_t *bar, gomp_barrier_state_t state)
     168  {
     169    bar->generation = (state & -BAR_INCR) + BAR_INCR;
     170  }
     171  
     172  #endif /* GOMP_BARRIER_H */