1 /*
2 * Copyright © 2011 Ryan Lortie
3 *
4 * SPDX-License-Identifier: LGPL-2.1-or-later
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 *
19 * Author: Ryan Lortie <desrt@desrt.ca>
20 */
21
22 #include "config.h"
23
24 #include "gatomic.h"
25
26 /**
27 * G_ATOMIC_LOCK_FREE:
28 *
29 * This macro is defined if the atomic operations of GLib are
30 * implemented using real hardware atomic operations. This means that
31 * the GLib atomic API can be used between processes and safely mixed
32 * with other (hardware) atomic APIs.
33 *
34 * If this macro is not defined, the atomic operations may be
35 * emulated using a mutex. In that case, the GLib atomic operations are
36 * only atomic relative to themselves and within a single process.
37 **/
38
39 /* NOTE CAREFULLY:
40 *
41 * This file is the lowest-level part of GLib.
42 *
43 * Other lowlevel parts of GLib (threads, slice allocator, g_malloc,
44 * messages, etc) call into these functions and macros to get work done.
45 *
46 * As such, these functions can not call back into any part of GLib
47 * without risking recursion.
48 */
49
50 #ifdef G_ATOMIC_LOCK_FREE
51
52 /* if G_ATOMIC_LOCK_FREE was defined by `meson configure` then we MUST
53 * implement the atomic operations in a lock-free manner.
54 */
55
56 #if defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
57
58 /**
59 * g_atomic_int_get:
60 * @atomic: a pointer to a #gint or #guint
61 *
62 * Gets the current value of @atomic.
63 *
64 * This call acts as a full compiler and hardware
65 * memory barrier (before the get).
66 *
67 * While @atomic has a `volatile` qualifier, this is a historical artifact and
68 * the pointer passed to it should not be `volatile`.
69 *
70 * Returns: the value of the integer
71 *
72 * Since: 2.4
73 **/
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  /* Parenthesising the name above suppresses the g_atomic_int_get() macro so
   * a real linkable symbol is emitted; the call below expands the macro. */
  return g_atomic_int_get (atomic);
}
79
80 /**
81 * g_atomic_int_set:
82 * @atomic: a pointer to a #gint or #guint
83 * @newval: a new value to store
84 *
85 * Sets the value of @atomic to @newval.
86 *
87 * This call acts as a full compiler and hardware
88 * memory barrier (after the set).
89 *
90 * While @atomic has a `volatile` qualifier, this is a historical artifact and
91 * the pointer passed to it should not be `volatile`.
92 *
93 * Since: 2.4
94 */
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  /* Out-of-line copy; forwards to the g_atomic_int_set() macro. */
  g_atomic_int_set (atomic, newval);
}
101
102 /**
103 * g_atomic_int_inc:
104 * @atomic: a pointer to a #gint or #guint
105 *
106 * Increments the value of @atomic by 1.
107 *
108 * Think of this operation as an atomic version of `{ *atomic += 1; }`.
109 *
110 * This call acts as a full compiler and hardware memory barrier.
111 *
112 * While @atomic has a `volatile` qualifier, this is a historical artifact and
113 * the pointer passed to it should not be `volatile`.
114 *
115 * Since: 2.4
116 **/
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  /* Out-of-line copy; forwards to the g_atomic_int_inc() macro. */
  g_atomic_int_inc (atomic);
}
122
123 /**
124 * g_atomic_int_dec_and_test:
125 * @atomic: a pointer to a #gint or #guint
126 *
127 * Decrements the value of @atomic by 1.
128 *
129 * Think of this operation as an atomic version of
130 * `{ *atomic -= 1; return (*atomic == 0); }`.
131 *
132 * This call acts as a full compiler and hardware memory barrier.
133 *
134 * While @atomic has a `volatile` qualifier, this is a historical artifact and
135 * the pointer passed to it should not be `volatile`.
136 *
137 * Returns: %TRUE if the resultant value is zero
138 *
139 * Since: 2.4
140 **/
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  /* Out-of-line copy; forwards to the g_atomic_int_dec_and_test() macro. */
  return g_atomic_int_dec_and_test (atomic);
}
146
147 /**
148 * g_atomic_int_compare_and_exchange:
149 * @atomic: a pointer to a #gint or #guint
150 * @oldval: the value to compare with
151 * @newval: the value to conditionally replace with
152 *
153 * Compares @atomic to @oldval and, if equal, sets it to @newval.
154 * If @atomic was not equal to @oldval then no change occurs.
155 *
156 * This compare and exchange is done atomically.
157 *
158 * Think of this operation as an atomic version of
159 * `{ if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`.
160 *
161 * This call acts as a full compiler and hardware memory barrier.
162 *
163 * While @atomic has a `volatile` qualifier, this is a historical artifact and
164 * the pointer passed to it should not be `volatile`.
165 *
166 * Returns: %TRUE if the exchange took place
167 *
168 * Since: 2.4
169 **/
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  /* Out-of-line copy; forwards to the g_atomic_int_compare_and_exchange() macro. */
  return g_atomic_int_compare_and_exchange (atomic, oldval, newval);
}
177
178 /**
179 * g_atomic_int_compare_and_exchange_full:
180 * @atomic: a pointer to a #gint or #guint
181 * @oldval: the value to compare with
182 * @newval: the value to conditionally replace with
183 * @preval: (out): the contents of @atomic before this operation
184 *
185 * Compares @atomic to @oldval and, if equal, sets it to @newval.
186 * If @atomic was not equal to @oldval then no change occurs.
187 * In any case the value of @atomic before this operation is stored in @preval.
188 *
189 * This compare and exchange is done atomically.
190 *
191 * Think of this operation as an atomic version of
192 * `{ *preval = *atomic; if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`.
193 *
194 * This call acts as a full compiler and hardware memory barrier.
195 *
196 * See also g_atomic_int_compare_and_exchange()
197 *
198 * Returns: %TRUE if the exchange took place
199 *
200 * Since: 2.74
201 **/
gboolean
(g_atomic_int_compare_and_exchange_full) (gint *atomic,
                                          gint  oldval,
                                          gint  newval,
                                          gint *preval)
{
  /* Out-of-line copy; forwards to the macro, which also stores the
   * pre-operation value through @preval. */
  return g_atomic_int_compare_and_exchange_full (atomic, oldval, newval, preval);
}
210
211 /**
212 * g_atomic_int_exchange:
213 * @atomic: a pointer to a #gint or #guint
214 * @newval: the value to replace with
215 *
216 * Sets the @atomic to @newval and returns the old value from @atomic.
217 *
218 * This exchange is done atomically.
219 *
220 * Think of this operation as an atomic version of
221 * `{ tmp = *atomic; *atomic = val; return tmp; }`.
222 *
223 * This call acts as a full compiler and hardware memory barrier.
224 *
225 * Returns: the value of @atomic before the exchange, signed
226 *
227 * Since: 2.74
228 **/
gint
(g_atomic_int_exchange) (gint *atomic,
                         gint  newval)
{
  /* Out-of-line copy; forwards to the g_atomic_int_exchange() macro. */
  return g_atomic_int_exchange (atomic, newval);
}
235
236 /**
237 * g_atomic_int_add:
238 * @atomic: a pointer to a #gint or #guint
239 * @val: the value to add
240 *
241 * Atomically adds @val to the value of @atomic.
242 *
243 * Think of this operation as an atomic version of
244 * `{ tmp = *atomic; *atomic += val; return tmp; }`.
245 *
246 * This call acts as a full compiler and hardware memory barrier.
247 *
248 * Before version 2.30, this function did not return a value
249 * (but g_atomic_int_exchange_and_add() did, and had the same meaning).
250 *
251 * While @atomic has a `volatile` qualifier, this is a historical artifact and
252 * the pointer passed to it should not be `volatile`.
253 *
254 * Returns: the value of @atomic before the add, signed
255 *
256 * Since: 2.4
257 **/
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  /* Out-of-line copy; forwards to the g_atomic_int_add() macro. */
  return g_atomic_int_add (atomic, val);
}
264
265 /**
266 * g_atomic_int_and:
267 * @atomic: a pointer to a #gint or #guint
268 * @val: the value to 'and'
269 *
270 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
271 * storing the result back in @atomic.
272 *
273 * This call acts as a full compiler and hardware memory barrier.
274 *
275 * Think of this operation as an atomic version of
276 * `{ tmp = *atomic; *atomic &= val; return tmp; }`.
277 *
278 * While @atomic has a `volatile` qualifier, this is a historical artifact and
279 * the pointer passed to it should not be `volatile`.
280 *
281 * Returns: the value of @atomic before the operation, unsigned
282 *
283 * Since: 2.30
284 **/
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  /* Out-of-line copy; forwards to the g_atomic_int_and() macro. */
  return g_atomic_int_and (atomic, val);
}
291
292 /**
293 * g_atomic_int_or:
294 * @atomic: a pointer to a #gint or #guint
295 * @val: the value to 'or'
296 *
297 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
298 * storing the result back in @atomic.
299 *
300 * Think of this operation as an atomic version of
301 * `{ tmp = *atomic; *atomic |= val; return tmp; }`.
302 *
303 * This call acts as a full compiler and hardware memory barrier.
304 *
305 * While @atomic has a `volatile` qualifier, this is a historical artifact and
306 * the pointer passed to it should not be `volatile`.
307 *
308 * Returns: the value of @atomic before the operation, unsigned
309 *
310 * Since: 2.30
311 **/
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  /* Out-of-line copy; forwards to the g_atomic_int_or() macro. */
  return g_atomic_int_or (atomic, val);
}
318
319 /**
320 * g_atomic_int_xor:
321 * @atomic: a pointer to a #gint or #guint
322 * @val: the value to 'xor'
323 *
324 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
325 * storing the result back in @atomic.
326 *
327 * Think of this operation as an atomic version of
328 * `{ tmp = *atomic; *atomic ^= val; return tmp; }`.
329 *
330 * This call acts as a full compiler and hardware memory barrier.
331 *
332 * While @atomic has a `volatile` qualifier, this is a historical artifact and
333 * the pointer passed to it should not be `volatile`.
334 *
335 * Returns: the value of @atomic before the operation, unsigned
336 *
337 * Since: 2.30
338 **/
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  /* Out-of-line copy; forwards to the g_atomic_int_xor() macro. */
  return g_atomic_int_xor (atomic, val);
}
345
346
347 /**
348 * g_atomic_pointer_get:
349 * @atomic: (not nullable): a pointer to a #gpointer-sized value
350 *
351 * Gets the current value of @atomic.
352 *
353 * This call acts as a full compiler and hardware
354 * memory barrier (before the get).
355 *
356 * While @atomic has a `volatile` qualifier, this is a historical artifact and
357 * the pointer passed to it should not be `volatile`.
358 *
359 * Returns: the value of the pointer
360 *
361 * Since: 2.4
362 **/
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  /* Cast to gpointer* so the macro dereferences a typed lvalue. */
  return g_atomic_pointer_get ((gpointer *) atomic);
}
368
369 /**
370 * g_atomic_pointer_set:
371 * @atomic: (not nullable): a pointer to a #gpointer-sized value
372 * @newval: a new value to store
373 *
374 * Sets the value of @atomic to @newval.
375 *
376 * This call acts as a full compiler and hardware
377 * memory barrier (after the set).
378 *
379 * While @atomic has a `volatile` qualifier, this is a historical artifact and
380 * the pointer passed to it should not be `volatile`.
381 *
382 * Since: 2.4
383 **/
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  /* Cast to gpointer* so the macro stores through a typed lvalue. */
  g_atomic_pointer_set ((gpointer *) atomic, newval);
}
390
391 /**
392 * g_atomic_pointer_compare_and_exchange:
393 * @atomic: (not nullable): a pointer to a #gpointer-sized value
394 * @oldval: the value to compare with
395 * @newval: the value to conditionally replace with
396 *
397 * Compares @atomic to @oldval and, if equal, sets it to @newval.
398 * If @atomic was not equal to @oldval then no change occurs.
399 *
400 * This compare and exchange is done atomically.
401 *
402 * Think of this operation as an atomic version of
403 * `{ if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`.
404 *
405 * This call acts as a full compiler and hardware memory barrier.
406 *
407 * While @atomic has a `volatile` qualifier, this is a historical artifact and
408 * the pointer passed to it should not be `volatile`.
409 *
410 * Returns: %TRUE if the exchange took place
411 *
412 * Since: 2.4
413 **/
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  /* Out-of-line copy; cast keeps the macro's pointer arithmetic typed. */
  return g_atomic_pointer_compare_and_exchange ((gpointer *) atomic,
                                                oldval, newval);
}
422
423 /**
424 * g_atomic_pointer_compare_and_exchange_full:
425 * @atomic: (not nullable): a pointer to a #gpointer-sized value
426 * @oldval: the value to compare with
427 * @newval: the value to conditionally replace with
428 * @preval: (not nullable) (out): the contents of @atomic before this operation
429 *
430 * Compares @atomic to @oldval and, if equal, sets it to @newval.
431 * If @atomic was not equal to @oldval then no change occurs.
432 * In any case the value of @atomic before this operation is stored in @preval.
433 *
434 * This compare and exchange is done atomically.
435 *
436 * Think of this operation as an atomic version of
437 * `{ *preval = *atomic; if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`.
438 *
439 * This call acts as a full compiler and hardware memory barrier.
440 *
441 * See also g_atomic_pointer_compare_and_exchange()
442 *
443 * Returns: %TRUE if the exchange took place
444 *
445 * Since: 2.74
446 **/
gboolean
(g_atomic_pointer_compare_and_exchange_full) (void     *atomic,
                                              gpointer  oldval,
                                              gpointer  newval,
                                              void     *preval)
{
  /* Out-of-line copy; both void* arguments are cast so the macro reads
   * and writes through gpointer lvalues. */
  return g_atomic_pointer_compare_and_exchange_full ((gpointer *) atomic,
                                                     oldval, newval,
                                                     (gpointer *) preval);
}
457
458 /**
459 * g_atomic_pointer_exchange:
460 * @atomic: a pointer to a #gpointer-sized value
461 * @newval: the value to replace with
462 *
463 * Sets the @atomic to @newval and returns the old value from @atomic.
464 *
465 * This exchange is done atomically.
466 *
467 * Think of this operation as an atomic version of
468 * `{ tmp = *atomic; *atomic = val; return tmp; }`.
469 *
470 * This call acts as a full compiler and hardware memory barrier.
471 *
472 * Returns: the value of @atomic before the exchange
473 *
474 * Since: 2.74
475 **/
gpointer
(g_atomic_pointer_exchange) (void     *atomic,
                             gpointer  newval)
{
  /* Out-of-line copy; forwards to the g_atomic_pointer_exchange() macro. */
  return g_atomic_pointer_exchange ((gpointer *) atomic, newval);
}
482
483 /**
484 * g_atomic_pointer_add:
485 * @atomic: (not nullable): a pointer to a #gpointer-sized value
486 * @val: the value to add
487 *
488 * Atomically adds @val to the value of @atomic.
489 *
490 * Think of this operation as an atomic version of
491 * `{ tmp = *atomic; *atomic += val; return tmp; }`.
492 *
493 * This call acts as a full compiler and hardware memory barrier.
494 *
495 * While @atomic has a `volatile` qualifier, this is a historical artifact and
496 * the pointer passed to it should not be `volatile`.
497 *
498 * In GLib 2.80, the return type was changed from #gssize to #gintptr to add
499 * support for platforms with 128-bit pointers. This should not affect existing
500 * code.
501 *
502 * Returns: the value of @atomic before the add, signed
503 *
504 * Since: 2.30
505 **/
gintptr
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  /* Out-of-line copy; forwards to the g_atomic_pointer_add() macro. */
  return g_atomic_pointer_add ((gpointer *) atomic, val);
}
512
513 /**
514 * g_atomic_pointer_and:
515 * @atomic: (not nullable): a pointer to a #gpointer-sized value
516 * @val: the value to 'and'
517 *
518 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
519 * storing the result back in @atomic.
520 *
521 * Think of this operation as an atomic version of
522 * `{ tmp = *atomic; *atomic &= val; return tmp; }`.
523 *
524 * This call acts as a full compiler and hardware memory barrier.
525 *
526 * While @atomic has a `volatile` qualifier, this is a historical artifact and
527 * the pointer passed to it should not be `volatile`.
528 *
529 * In GLib 2.80, the return type was changed from #gsize to #guintptr to add
530 * support for platforms with 128-bit pointers. This should not affect existing
531 * code.
532 *
533 * Returns: the value of @atomic before the operation, unsigned
534 *
535 * Since: 2.30
536 **/
guintptr
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  /* Out-of-line copy; forwards to the g_atomic_pointer_and() macro. */
  return g_atomic_pointer_and ((gpointer *) atomic, val);
}
543
544 /**
545 * g_atomic_pointer_or:
546 * @atomic: (not nullable): a pointer to a #gpointer-sized value
547 * @val: the value to 'or'
548 *
549 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
550 * storing the result back in @atomic.
551 *
552 * Think of this operation as an atomic version of
553 * `{ tmp = *atomic; *atomic |= val; return tmp; }`.
554 *
555 * This call acts as a full compiler and hardware memory barrier.
556 *
557 * While @atomic has a `volatile` qualifier, this is a historical artifact and
558 * the pointer passed to it should not be `volatile`.
559 *
560 * In GLib 2.80, the return type was changed from #gsize to #guintptr to add
561 * support for platforms with 128-bit pointers. This should not affect existing
562 * code.
563 *
564 * Returns: the value of @atomic before the operation, unsigned
565 *
566 * Since: 2.30
567 **/
guintptr
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  /* Out-of-line copy; forwards to the g_atomic_pointer_or() macro. */
  return g_atomic_pointer_or ((gpointer *) atomic, val);
}
574
575 /**
576 * g_atomic_pointer_xor:
577 * @atomic: (not nullable): a pointer to a #gpointer-sized value
578 * @val: the value to 'xor'
579 *
580 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
581 * storing the result back in @atomic.
582 *
583 * Think of this operation as an atomic version of
584 * `{ tmp = *atomic; *atomic ^= val; return tmp; }`.
585 *
586 * This call acts as a full compiler and hardware memory barrier.
587 *
588 * While @atomic has a `volatile` qualifier, this is a historical artifact and
589 * the pointer passed to it should not be `volatile`.
590 *
591 * In GLib 2.80, the return type was changed from #gsize to #guintptr to add
592 * support for platforms with 128-bit pointers. This should not affect existing
593 * code.
594 *
595 * Returns: the value of @atomic before the operation, unsigned
596 *
597 * Since: 2.30
598 **/
guintptr
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  /* Out-of-line copy; forwards to the g_atomic_pointer_xor() macro. */
  return g_atomic_pointer_xor ((gpointer *) atomic, val);
}
605
606 #elif defined (G_PLATFORM_WIN32)
607
608 #include <windows.h>
609 #if !defined(_M_AMD64) && !defined (_M_IA64) && !defined(_M_X64) && !(defined _MSC_VER && _MSC_VER <= 1200)
610 #define InterlockedAnd _InterlockedAnd
611 #define InterlockedOr _InterlockedOr
612 #define InterlockedXor _InterlockedXor
613 #endif
614
615 #if !defined (_MSC_VER) || _MSC_VER <= 1200
616 #include "gmessages.h"
617 /* Inlined versions for older compiler */
/* CAS-loop fallback for old compilers that lack InterlockedAnd. */
static LONG
_gInterlockedAnd (volatile guint *atomic,
                  guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    /* Retry until no other thread changed *atomic between read and CAS;
     * the loop exits with j holding the pre-operation value. */
    j = InterlockedCompareExchange(atomic, i & val, i);
  } while (i != j);

  return j;
}
#define InterlockedAnd(a,b) _gInterlockedAnd(a,b)
/* CAS-loop fallback for old compilers that lack InterlockedOr. */
static LONG
_gInterlockedOr (volatile guint *atomic,
                 guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    /* Loop until the CAS succeeds; j is the pre-operation value. */
    j = InterlockedCompareExchange(atomic, i | val, i);
  } while (i != j);

  return j;
}
#define InterlockedOr(a,b) _gInterlockedOr(a,b)
/* CAS-loop fallback for old compilers that lack InterlockedXor. */
static LONG
_gInterlockedXor (volatile guint *atomic,
                  guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    /* Loop until the CAS succeeds; j is the pre-operation value. */
    j = InterlockedCompareExchange(atomic, i ^ val, i);
  } while (i != j);

  return j;
}
#define InterlockedXor(a,b) _gInterlockedXor(a,b)
663 #endif
664
665 /*
666 * http://msdn.microsoft.com/en-us/library/ms684122(v=vs.85).aspx
667 */
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  /* Full barrier before a plain load (see MSDN link above); aligned
   * 32-bit reads are themselves atomic on Windows. */
  MemoryBarrier ();
  return *atomic;
}
674
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  /* Plain aligned store, then a full barrier, matching the documented
   * "barrier after the set" contract. */
  *atomic = newval;
  MemoryBarrier ();
}
682
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  /* InterlockedIncrement is a full-barrier atomic; result discarded. */
  InterlockedIncrement (atomic);
}
688
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  /* InterlockedDecrement returns the *new* value; test it for zero. */
  return InterlockedDecrement (atomic) == 0;
}
694
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  /* Returns the prior value; the swap happened iff it equalled @oldval. */
  return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
}
702
gboolean
(g_atomic_int_compare_and_exchange_full) (gint *atomic,
                                          gint  oldval,
                                          gint  newval,
                                          gint *preval)
{
  /* InterlockedCompareExchange returns the prior value, which is exactly
   * what @preval must receive; success iff it equalled @oldval. */
  *preval = InterlockedCompareExchange (atomic, newval, oldval);
  return *preval == oldval;
}
712
gint
(g_atomic_int_exchange) (gint *atomic,
                         gint  newval)
{
  /* InterlockedExchange returns the value replaced. */
  return InterlockedExchange (atomic, newval);
}
719
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  /* InterlockedExchangeAdd returns the pre-add value. */
  return InterlockedExchangeAdd (atomic, val);
}
726
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  /* May be the _gInterlockedAnd CAS fallback on old MSVC (see above). */
  return InterlockedAnd (atomic, val);
}
733
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  /* May be the _gInterlockedOr CAS fallback on old MSVC (see above). */
  return InterlockedOr (atomic, val);
}
740
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  /* May be the _gInterlockedXor CAS fallback on old MSVC (see above). */
  return InterlockedXor (atomic, val);
}
747
748
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  /* Type the raw pointer so we load a gpointer-sized value. */
  const gpointer *ptr = atomic;

  /* Full barrier before the (naturally atomic) aligned load. */
  MemoryBarrier ();
  return *ptr;
}
757
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  gpointer *ptr = atomic;

  /* Aligned pointer-sized store, then a full barrier (barrier-after-set). */
  *ptr = newval;
  MemoryBarrier ();
}
767
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  /* Returns the prior value; the swap happened iff it equalled @oldval. */
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
}
775
gboolean
(g_atomic_pointer_compare_and_exchange_full) (void     *atomic,
                                              gpointer  oldval,
                                              gpointer  newval,
                                              void     *preval)
{
  gpointer *pre = preval;

  /* The prior value returned by the CAS is stored into @preval;
   * success iff it equalled @oldval. */
  *pre = InterlockedCompareExchangePointer (atomic, newval, oldval);

  return *pre == oldval;
}
788
gpointer
(g_atomic_pointer_exchange) (void     *atomic,
                             gpointer  newval)
{
  /* InterlockedExchangePointer returns the pointer replaced. */
  return InterlockedExchangePointer (atomic, newval);
}
795
gintptr
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  /* Pick the Interlocked variant matching the pointer width; both
   * return the pre-add value. */
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedExchangeAdd64 (atomic, val);
#else
  return InterlockedExchangeAdd (atomic, val);
#endif
}
806
guintptr
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  /* Pointer-width 'and'; returns the pre-operation value. */
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedAnd64 (atomic, val);
#else
  return InterlockedAnd (atomic, val);
#endif
}
817
guintptr
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  /* Pointer-width 'or'; returns the pre-operation value. */
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedOr64 (atomic, val);
#else
  return InterlockedOr (atomic, val);
#endif
}
828
guintptr
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  /* Pointer-width 'xor'; returns the pre-operation value. */
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedXor64 (atomic, val);
#else
  return InterlockedXor (atomic, val);
#endif
}
839 #else
840
841 /* This error occurs when `meson configure` decided that we should be capable
842 * of lock-free atomics but we find at compile-time that we are not.
843 */
844 #error G_ATOMIC_LOCK_FREE defined, but incapable of lock-free atomics.
845
846 #endif /* defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */
847
848 #else /* G_ATOMIC_LOCK_FREE */
849
850 /* We are not permitted to call into any GLib functions from here, so we
851 * can not use GMutex.
852 *
853 * Fortunately, we already take care of the Windows case above, and all
854 * non-Windows platforms on which glib runs have pthreads. Use those.
855 */
856 #include <pthread.h>
857
858 static pthread_mutex_t g_atomic_lock = PTHREAD_MUTEX_INITIALIZER;
859
860 gint
861 (g_atomic_int_get) (const volatile gint *atomic)
862 {
863 gint value;
864
865 pthread_mutex_lock (&g_atomic_lock);
866 value = *atomic;
867 pthread_mutex_unlock (&g_atomic_lock);
868
869 return value;
870 }
871
/* Mutex-emulated atomic store. */
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           value)
{
  pthread_mutex_lock (&g_atomic_lock);
  *atomic = value;
  pthread_mutex_unlock (&g_atomic_lock);
}
880
/* Mutex-emulated atomic increment. */
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  pthread_mutex_lock (&g_atomic_lock);
  (*atomic)++;
  pthread_mutex_unlock (&g_atomic_lock);
}
888
889 gboolean
890 (g_atomic_int_dec_and_test) (volatile gint *atomic)
891 {
892 gboolean is_zero;
893
894 pthread_mutex_lock (&g_atomic_lock);
895 is_zero = --(*atomic) == 0;
896 pthread_mutex_unlock (&g_atomic_lock);
897
898 return is_zero;
899 }
900
901 gboolean
902 (g_atomic_int_compare_and_exchange) (volatile gint *atomic,
903 gint oldval,
904 gint newval)
905 {
906 gboolean success;
907
908 pthread_mutex_lock (&g_atomic_lock);
909
910 if ((success = (*atomic == oldval)))
911 *atomic = newval;
912
913 pthread_mutex_unlock (&g_atomic_lock);
914
915 return success;
916 }
917
918 gboolean
919 (g_atomic_int_compare_and_exchange_full) (gint *atomic,
920 gint oldval,
921 gint newval,
922 gint *preval)
923 {
924 gboolean success;
925
926 pthread_mutex_lock (&g_atomic_lock);
927
928 *preval = *atomic;
929
930 if ((success = (*atomic == oldval)))
931 *atomic = newval;
932
933 pthread_mutex_unlock (&g_atomic_lock);
934
935 return success;
936 }
937
938 gint
939 (g_atomic_int_exchange) (gint *atomic,
940 gint newval)
941 {
942 gint *ptr = atomic;
943 gint oldval;
944
945 pthread_mutex_lock (&g_atomic_lock);
946 oldval = *ptr;
947 *ptr = newval;
948 pthread_mutex_unlock (&g_atomic_lock);
949
950 return oldval;
951 }
952
953 gint
954 (g_atomic_int_add) (volatile gint *atomic,
955 gint val)
956 {
957 gint oldval;
958
959 pthread_mutex_lock (&g_atomic_lock);
960 oldval = *atomic;
961 *atomic = oldval + val;
962 pthread_mutex_unlock (&g_atomic_lock);
963
964 return oldval;
965 }
966
967 guint
968 (g_atomic_int_and) (volatile guint *atomic,
969 guint val)
970 {
971 guint oldval;
972
973 pthread_mutex_lock (&g_atomic_lock);
974 oldval = *atomic;
975 *atomic = oldval & val;
976 pthread_mutex_unlock (&g_atomic_lock);
977
978 return oldval;
979 }
980
981 guint
982 (g_atomic_int_or) (volatile guint *atomic,
983 guint val)
984 {
985 guint oldval;
986
987 pthread_mutex_lock (&g_atomic_lock);
988 oldval = *atomic;
989 *atomic = oldval | val;
990 pthread_mutex_unlock (&g_atomic_lock);
991
992 return oldval;
993 }
994
995 guint
996 (g_atomic_int_xor) (volatile guint *atomic,
997 guint val)
998 {
999 guint oldval;
1000
1001 pthread_mutex_lock (&g_atomic_lock);
1002 oldval = *atomic;
1003 *atomic = oldval ^ val;
1004 pthread_mutex_unlock (&g_atomic_lock);
1005
1006 return oldval;
1007 }
1008
1009
1010 gpointer
1011 (g_atomic_pointer_get) (const volatile void *atomic)
1012 {
1013 const gpointer *ptr = atomic;
1014 gpointer value;
1015
1016 pthread_mutex_lock (&g_atomic_lock);
1017 value = *ptr;
1018 pthread_mutex_unlock (&g_atomic_lock);
1019
1020 return value;
1021 }
1022
1023 void
1024 (g_atomic_pointer_set) (volatile void *atomic,
1025 gpointer newval)
1026 {
1027 gpointer *ptr = atomic;
1028
1029 pthread_mutex_lock (&g_atomic_lock);
1030 *ptr = newval;
1031 pthread_mutex_unlock (&g_atomic_lock);
1032 }
1033
1034 gboolean
1035 (g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
1036 gpointer oldval,
1037 gpointer newval)
1038 {
1039 gpointer *ptr = atomic;
1040 gboolean success;
1041
1042 pthread_mutex_lock (&g_atomic_lock);
1043
1044 if ((success = (*ptr == oldval)))
1045 *ptr = newval;
1046
1047 pthread_mutex_unlock (&g_atomic_lock);
1048
1049 return success;
1050 }
1051
1052 gboolean
1053 (g_atomic_pointer_compare_and_exchange_full) (void *atomic,
1054 gpointer oldval,
1055 gpointer newval,
1056 void *preval)
1057 {
1058 gpointer *ptr = atomic;
1059 gpointer *pre = preval;
1060 gboolean success;
1061
1062 pthread_mutex_lock (&g_atomic_lock);
1063
1064 *pre = *ptr;
1065 if ((success = (*ptr == oldval)))
1066 *ptr = newval;
1067
1068 pthread_mutex_unlock (&g_atomic_lock);
1069
1070 return success;
1071 }
1072
1073 gpointer
1074 (g_atomic_pointer_exchange) (void *atomic,
1075 gpointer newval)
1076 {
1077 gpointer *ptr = atomic;
1078 gpointer oldval;
1079
1080 pthread_mutex_lock (&g_atomic_lock);
1081 oldval = *ptr;
1082 *ptr = newval;
1083 pthread_mutex_unlock (&g_atomic_lock);
1084
1085 return oldval;
1086 }
1087
1088 gintptr
1089 (g_atomic_pointer_add) (volatile void *atomic,
1090 gssize val)
1091 {
1092 gintptr *ptr = atomic;
1093 gintptr oldval;
1094
1095 pthread_mutex_lock (&g_atomic_lock);
1096 oldval = *ptr;
1097 *ptr = oldval + val;
1098 pthread_mutex_unlock (&g_atomic_lock);
1099
1100 return oldval;
1101 }
1102
1103 guintptr
1104 (g_atomic_pointer_and) (volatile void *atomic,
1105 gsize val)
1106 {
1107 guintptr *ptr = atomic;
1108 guintptr oldval;
1109
1110 pthread_mutex_lock (&g_atomic_lock);
1111 oldval = *ptr;
1112 *ptr = oldval & val;
1113 pthread_mutex_unlock (&g_atomic_lock);
1114
1115 return oldval;
1116 }
1117
1118 guintptr
1119 (g_atomic_pointer_or) (volatile void *atomic,
1120 gsize val)
1121 {
1122 guintptr *ptr = atomic;
1123 guintptr oldval;
1124
1125 pthread_mutex_lock (&g_atomic_lock);
1126 oldval = *ptr;
1127 *ptr = oldval | val;
1128 pthread_mutex_unlock (&g_atomic_lock);
1129
1130 return oldval;
1131 }
1132
1133 guintptr
1134 (g_atomic_pointer_xor) (volatile void *atomic,
1135 gsize val)
1136 {
1137 guintptr *ptr = atomic;
1138 guintptr oldval;
1139
1140 pthread_mutex_lock (&g_atomic_lock);
1141 oldval = *ptr;
1142 *ptr = oldval ^ val;
1143 pthread_mutex_unlock (&g_atomic_lock);
1144
1145 return oldval;
1146 }
1147
1148 #endif
1149
1150 /**
1151 * g_atomic_int_exchange_and_add:
1152 * @atomic: a pointer to a #gint
1153 * @val: the value to add
1154 *
1155 * This function existed before g_atomic_int_add() returned the prior
1156 * value of the integer (which it now does). It is retained only for
1157 * compatibility reasons. Don't use this function in new code.
1158 *
1159 * Returns: the value of @atomic before the add, signed
1160 * Since: 2.4
1161 * Deprecated: 2.30: Use g_atomic_int_add() instead.
1162 **/
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  /* Deprecated compatibility alias: the parenthesised callee forces the
   * out-of-line g_atomic_int_add() function rather than the macro. */
  return (g_atomic_int_add) ((gint *) atomic, val);
}