/* { dg-do compile } */
/* { dg-options "-O2 -w -fdump-tree-optimized" } */
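
/* Reduced testcase, apparently derived from the Linux kernel's
   kmalloc_node()/tracepoint code.  The dg-final scan at the bottom
   checks that exactly one call to XYZZY survives in the "optimized"
   dump, i.e. that the error path in process_zones is neither
   optimized away nor duplicated.  */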

extern void XYZZY (void);
typedef unsigned long __kernel_size_t;
typedef __kernel_size_t size_t;
typedef unsigned gfp_t;
struct per_cpu_pageset { } __attribute__ ((__aligned__ ((1 << (6)))));
struct zone { struct per_cpu_pageset *pageset[64]; } zone_flags_t;
typedef struct pglist_data { struct zone node_zones[4]; } pg_data_t;
extern struct pglist_data *first_online_pgdat (void);
extern struct zone *next_zone (struct zone *zone);
extern volatile int per_cpu__x86_cpu_to_node_map[];
struct kmem_cache { int size; };
extern struct kmem_cache kmalloc_caches[(12 + 2)];
struct tracepoint { void **funcs; } __attribute__ ((aligned (32)));
extern struct tracepoint __tracepoint_kmalloc_node;
void *__kmalloc_node (size_t size, gfp_t flags, int node);

static inline int
cpu_to_node (int cpu)
{
  return per_cpu__x86_cpu_to_node_map[cpu];
}

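/* The asm goto below adds a control-flow edge to trace_label, so the
   body of the otherwise unreachable if (0) block must be kept live by
   the optimizers.  */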
static inline void
trace_kmalloc_node (unsigned long call_site, const void *ptr,
                    size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags,
                    int node)
{
  asm goto ("" : : : : trace_label);
  if (0)
    {
      void **it_func;
    trace_label:
      asm ("" : "=r" (it_func) : "0" (&__tracepoint_kmalloc_node.funcs));
    }
}

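/* For the small constant size allocated in process_zones this always
   folds to 6, so the index == 0 check in kmalloc_slab is dead once
   the always_inline helpers are folded.  */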
static inline __attribute__ ((always_inline)) int
kmalloc_index (size_t size)
{
  if (size <= 64)
    return 6;
  return -1;
}

static inline __attribute__ ((always_inline)) struct kmem_cache *
kmalloc_slab (size_t size)
{
  int index = kmalloc_index (size);
  if (index == 0)
    return ((void *) 0);
  return &kmalloc_caches[index];
}

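/* Fast path modeled on the kernel's kmalloc_node(): for a small
   constant size the slab cache is looked up at compile time.  Note
   that ret reaches trace_kmalloc_node uninitialized; that is
   presumably a testcase-reduction leftover, and -w keeps it quiet.  */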
static inline __attribute__ ((always_inline)) void *
kmalloc_node (size_t size, gfp_t flags, int node)
{
  void *ret;
  if (__builtin_constant_p (size) && size <= (2 * ((1UL) << 12))
      && !(flags & ((gfp_t) 0x01u)))
    {
      struct kmem_cache *s = kmalloc_slab (size);
      if (!s)
        return ((void *) 16);
      trace_kmalloc_node (({ __here: (unsigned long) &&__here; }),
                          ret, size, s->size, flags, node);
    }
  return __kmalloc_node (size, flags, node);
}

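/* Inlining the helpers above into this loop is presumably what
   exercises the optimizers; the XYZZY call on the bad: path is the
   single occurrence the dg-final scan expects.  */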
int
process_zones (int cpu)
{
  struct zone *zone, *dzone;
  int node = cpu_to_node (cpu);
  for (zone = (first_online_pgdat ())->node_zones;
       zone; zone = next_zone (zone))
    {
      ((zone)->pageset[(cpu)]) =
        kmalloc_node (sizeof (struct per_cpu_pageset),
                      (((gfp_t) 0x10u) | ((gfp_t) 0x40u) | ((gfp_t) 0x80u)),
                      node);
      if (!((zone)->pageset[(cpu)]))
        goto bad;
    }
  return 0;
bad:
  XYZZY ();
  return -12;
}

/* { dg-final { scan-tree-dump-times "XYZZY" 1 "optimized" } } */