File: /usr/src/linux-headers-5.15.0-156-generic/arch/arm/include/asm/sync_bitops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SYNC_BITOPS_H__
#define __ASM_SYNC_BITOPS_H__

#include <asm/bitops.h>

/* The sync_bitops functions are equivalent to the SMP implementation of
 * the original functions, independently of whether CONFIG_SMP is defined.
 *
 * We need them because _set_bit etc. are not SMP safe if !CONFIG_SMP. But
 * under Xen you might be communicating with a completely external entity
 * which might be on another CPU (e.g. two uniprocessor guests communicating
 * via event channels and grant tables). So we need a variant of the bit
 * ops which is SMP safe even on a UP kernel.
 */

/*
 * Unordered
 */

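/* Atomic even on !CONFIG_SMP, but with no implied memory barriers;
 * sync_test_bit() is a plain read of the bit.
 */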
#define sync_set_bit(nr, p)		_set_bit(nr, p)
#define sync_clear_bit(nr, p)		_clear_bit(nr, p)
#define sync_change_bit(nr, p)		_change_bit(nr, p)
#define sync_test_bit(nr, addr)		test_bit(nr, addr)

/*
 * Fully ordered
 */

int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
#define sync_test_and_set_bit(nr, p)	_sync_test_and_set_bit(nr, p)

int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
#define sync_test_and_clear_bit(nr, p)	_sync_test_and_clear_bit(nr, p)

int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
#define sync_test_and_change_bit(nr, p)	_sync_test_and_change_bit(nr, p)

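/*
 * Fully ordered cmpxchg: the relaxed arch_cmpxchg_relaxed() is bracketed
 * by __smp_mb__before_atomic()/__smp_mb__after_atomic() so that the
 * exchange also acts as a full memory barrier.
 */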
#define arch_sync_cmpxchg(ptr, old, new)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__smp_mb__before_atomic();					\
	__ret = arch_cmpxchg_relaxed((ptr), (old), (new));		\
	__smp_mb__after_atomic();					\
	__ret;								\
})

#endif
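
As a minimal usage sketch only (not part of the header): the comment above mentions two uniprocessor guests communicating through shared memory, so the example below shows one hypothetical producer/consumer pair built on sync_set_bit() and sync_test_and_clear_bit(). The shared_pending bitmap and the post_request()/claim_request() helpers are invented names for illustration; everything else is assumed standard kernel infrastructure.

#include <linux/bitops.h>
#include <linux/types.h>
#include <asm/sync_bitops.h>

/* Hypothetical bitmap living in a page shared with another domain. */
static unsigned long shared_pending[BITS_TO_LONGS(64)];

/* Producer: mark request 'nr' as pending.  sync_set_bit() performs an
 * SMP-safe read-modify-write even on a !CONFIG_SMP kernel, so the peer
 * domain never observes a lost update. */
static void post_request(int nr)
{
	sync_set_bit(nr, shared_pending);
}

/* Consumer: atomically claim request 'nr'.  Returns the old bit value,
 * so a nonzero result means this caller cleared the bit and owns the
 * request; being one of the fully ordered variants, it also acts as a
 * full barrier around the claim. */
static int claim_request(int nr)
{
	return sync_test_and_clear_bit(nr, shared_pending);
}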