
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/bitops.h>
#include <asm/cmpxchg.h>

unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
{
	u32 old32, new32, load32, mask;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask value to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	val &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * exchange within the naturally aligned 4 byte integer that includes
	 * it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;
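
	/*
	 * Worked example (illustrative, not part of the original source):
	 * for a 1 byte exchange at an address with (ptr & 0x3) == 1, a
	 * little-endian CPU gets shift = 8 and mask = 0xff00, i.e. the byte
	 * occupies bits 8-15 of the containing word. A big-endian CPU XORs
	 * in (sizeof(u32) - 1) = 3 first, giving shift = 16 and
	 * mask = 0xff0000, matching that byte's position in big-endian
	 * byte order.
	 */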

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;

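	/*
	 * Splice the new value into the containing word and attempt to
	 * install it with a full-width cmpxchg. arch_cmpxchg() returns the
	 * value it actually found at ptr32, so a mismatch with old32 means
	 * another CPU changed the word in the meantime (possibly a byte
	 * other than ours) and we must recompute new32 and retry.
	 */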
	do {
		old32 = load32;
		new32 = (load32 & ~mask) | (val << shift);
		load32 = arch_cmpxchg(ptr32, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> shift;
}

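/*
 * Usage sketch, illustrative only (not part of the original file): how a
 * 2 byte operand might reach __xchg_small(). In the real kernel the size
 * dispatch lives in asm/cmpxchg.h; the helper name below is hypothetical.
 */
static inline u16 example_xchg_u16(volatile u16 *p, u16 val)
{
	/* Atomically replace *p with val, returning the previous value. */
	return (u16)__xchg_small(p, val, sizeof(*p));
}
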
unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
			      unsigned long new, unsigned int size)
{
	u32 mask, old32, new32, load32, load;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;

	while (true) {
		/*
		 * Ensure the byte we want to exchange matches the expected
		 * old value, and if not then bail.
		 */
		load = (load32 & mask) >> shift;
		if (load != old)
			return load;

		/*
		 * Calculate the old & new values of the naturally aligned
		 * 4 byte integer that include the byte we want to exchange.
		 * Attempt to exchange the old value for the new value, and
		 * return if we succeed.
		 */
		old32 = (load32 & ~mask) | (old << shift);
		new32 = (load32 & ~mask) | (new << shift);
		load32 = arch_cmpxchg(ptr32, old32, new32);
		if (load32 == old32)
			return old;
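
		/*
		 * The full-width cmpxchg failed: another CPU modified the
		 * containing word, possibly a byte other than ours. Loop
		 * around with the freshly observed value; if our byte still
		 * matches the expected old value we retry, otherwise the
		 * check at the top of the loop bails out.
		 */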
	}
}
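
/*
 * Usage sketch, illustrative only (not part of the original file):
 * atomically claiming a one-byte flag with compare-and-exchange. The
 * helper name is hypothetical; real callers go through the cmpxchg()
 * macro in asm/cmpxchg.h.
 */
static inline bool example_try_claim_flag(volatile u8 *flag)
{
	/* Set the flag to 1 only if it is currently 0; report success. */
	return __cmpxchg_small(flag, 0, 1, sizeof(*flag)) == 0;
}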