/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_AtomicOperationsGenerated_h
#define jit_AtomicOperationsGenerated_h
/* This file is generated by jit/GenerateAtomicOperations.py. Do not edit! */
#include "mozilla/Attributes.h"
namespace js {
namespace jit {
#define JS_HAVE_GENERATED_ATOMIC_OPS 1
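// Hand-written inline-assembly atomics for 32-bit ARM. SpiderMonkey uses
// these rather than C++ <atomic> so that the exact instruction sequences C++
// uses to touch shared memory are known and stay compatible with the code
// the JIT emits, and so that the racy accesses permitted by the JS memory
// model cannot be miscompiled as C++ undefined behavior.
// JS_HAVE_GENERATED_ATOMIC_OPS tells the including code that these
// implementations are available. All SeqCst variants bracket their memory
// access with "dmb sy", a full data memory barrier.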
inline void AtomicFenceSeqCst() {
asm volatile ("dmb sy\n\t" ::: "memory");
}
inline uint8_t AtomicLoad8SeqCst(const uint8_t* arg) {
uint8_t res;
asm volatile ("ldrb %[res], [%[arg]]\n\t"
"dmb sy\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
inline uint16_t AtomicLoad16SeqCst(const uint16_t* arg) {
uint16_t res;
asm volatile ("ldrh %[res], [%[arg]]\n\t"
"dmb sy\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
inline uint32_t AtomicLoad32SeqCst(const uint32_t* arg) {
uint32_t res;
asm volatile ("ldr %[res], [%[arg]]\n\t"
"dmb sy\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
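// The SeqCst loads above are a plain load followed by "dmb sy": the barrier
// keeps the load from being reordered with any later memory access, and the
// barriers in the SeqCst stores below provide the matching ordering on the
// store side.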
inline uint8_t AtomicLoad8Unsynchronized(const uint8_t* arg) {
uint8_t res;
asm volatile ("ldrb %[res], [%[arg]]\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
inline uint16_t AtomicLoad16Unsynchronized(const uint16_t* arg) {
uint16_t res;
asm volatile ("ldrh %[res], [%[arg]]\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
inline uint32_t AtomicLoad32Unsynchronized(const uint32_t* arg) {
uint32_t res;
asm volatile ("ldr %[res], [%[arg]]\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
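// The Unsynchronized variants compile to a single load with no barrier.
// Routing even racy accesses through inline assembly keeps the C++ compiler
// from tearing, duplicating, or eliminating them, which is what the JS
// memory model requires of unordered shared-memory accesses.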
inline void AtomicStore8SeqCst(uint8_t* addr, uint8_t val) {
asm volatile ("dmb sy\n\t"
"strb %[val], [%[addr]]\n\t"
"dmb sy\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore16SeqCst(uint16_t* addr, uint16_t val) {
asm volatile ("dmb sy\n\t"
"strh %[val], [%[addr]]\n\t"
"dmb sy\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore32SeqCst(uint32_t* addr, uint32_t val) {
asm volatile ("dmb sy\n\t"
"str %[val], [%[addr]]\n\t"
"dmb sy\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore8Unsynchronized(uint8_t* addr, uint8_t val) {
asm volatile ("strb %[val], [%[addr]]\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore16Unsynchronized(uint16_t* addr, uint16_t val) {
asm volatile ("strh %[val], [%[addr]]\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore32Unsynchronized(uint32_t* addr, uint32_t val) {
asm volatile ("str %[val], [%[addr]]\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
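// SeqCst stores are bracketed by two barriers: the leading "dmb sy" orders
// all earlier accesses before the store, and the trailing one keeps the
// store from being reordered with anything that follows.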
inline uint8_t AtomicExchange8SeqCst(uint8_t* addr, uint8_t val) {
uint8_t res;
uint32_t scratch;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"ldrexb %[res], [%[addr]]\n\t"
"strexb %[scratch], %[val], [%[addr]]\n\t"
"cmp %[scratch], #1\n\t"
"beq 0b\n\t"
"dmb sy\n\t"
: [res] "=&r"(res), [scratch] "=&r"(scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint16_t AtomicExchange16SeqCst(uint16_t* addr, uint16_t val) {
uint16_t res;
uint32_t scratch;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"ldrexh %[res], [%[addr]]\n\t"
"strexh %[scratch], %[val], [%[addr]]\n\t"
"cmp %[scratch], #1\n\t"
"beq 0b\n\t"
"dmb sy\n\t"
: [res] "=&r"(res), [scratch] "=&r"(scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint32_t AtomicExchange32SeqCst(uint32_t* addr, uint32_t val) {
uint32_t res;
uint32_t scratch;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"ldrex %[res], [%[addr]]\n\t"
"strex %[scratch], %[val], [%[addr]]\n\t"
"cmp %[scratch], #1\n\t"
"beq 0b\n\t"
"dmb sy\n\t"
: [res] "=&r"(res), [scratch] "=&r"(scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
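// The read-modify-write operations below are load-linked/store-conditional
// loops: ldrex* takes out an exclusive monitor on the address, strex* writes
// 0 to its status register on success and 1 if the reservation was lost to
// another agent, and the "cmp ... / beq 0b" pair retries until the store
// lands.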
inline uint8_t AtomicCmpXchg8SeqCst(uint8_t* addr,
uint8_t oldval,
uint8_t newval) {
uint8_t res, scratch;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"uxtb %[scratch], %[oldval]\n\t"
"ldrexb %[res], [%[addr]]\n\t"
"cmp %[res], %[scratch]\n\t"
"bne 1f\n\t"
"strexb %[scratch], %[newval], [%[addr]]\n\t"
"cmp %[scratch], #1\n\t"
"beq 0b\n\t"
"1: dmb sy\n\t"
: [res] "=&r" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [oldval] "r"(oldval), [newval] "r" (newval)
: "memory", "cc");
return res;
}
inline uint16_t AtomicCmpXchg16SeqCst(uint16_t* addr,
uint16_t oldval,
uint16_t newval) {
uint16_t res, scratch;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"uxth %[scratch], %[oldval]\n\t"
"ldrexh %[res], [%[addr]]\n\t"
"cmp %[res], %[scratch]\n\t"
"bne 1f\n\t"
"strexh %[scratch], %[newval], [%[addr]]\n\t"
"cmp %[scratch], #1\n\t"
"beq 0b\n\t"
"1: dmb sy\n\t"
: [res] "=&r" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [oldval] "r"(oldval), [newval] "r" (newval)
: "memory", "cc");
return res;
}
inline uint32_t AtomicCmpXchg32SeqCst(uint32_t* addr,
uint32_t oldval,
uint32_t newval) {
uint32_t res, scratch;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"mov %[scratch], %[oldval]\n\t"
"ldrex %[res], [%[addr]]\n\t"
"cmp %[res], %[scratch]\n\t"
"bne 1f\n\t"
"strex %[scratch], %[newval], [%[addr]]\n\t"
"cmp %[scratch], #1\n\t"
"beq 0b\n\t"
"1: dmb sy\n\t"
: [res] "=&r" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [oldval] "r"(oldval), [newval] "r" (newval)
: "memory", "cc");
return res;
}
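// In the 8- and 16-bit compare-exchange, uxtb/uxth zero-extends oldval
// before the comparison: ldrexb/ldrexh zero-extend what they load, so stale
// high bits in the oldval register could otherwise make equal values compare
// unequal. On mismatch the code branches past the strex, so no store is
// attempted; the caller detects failure by comparing the returned (observed)
// value against oldval.
//
// Hypothetical usage sketch, not part of the generated file: a lock-free
// atomic-maximum built from the 32-bit compare-exchange above.
inline uint32_t ExampleAtomicMax32SeqCst(uint32_t* addr, uint32_t val) {
  uint32_t old = AtomicLoad32SeqCst(addr);
  while (old < val) {
    uint32_t observed = AtomicCmpXchg32SeqCst(addr, old, val);
    if (observed == old) {
      break;  // CAS succeeded; *addr held old and now holds val.
    }
    old = observed;  // Lost a race; retry against the value we saw.
  }
  return old;
}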
inline uint64_t AtomicCmpXchg64SeqCst(uint64_t* addr,
uint64_t oldval,
uint64_t newval) {
uint32_t oldval0 = oldval & 0xffff'ffff;
uint32_t oldval1 = oldval >> 32;
uint32_t newval0 = newval & 0xffff'ffff;
uint32_t newval1 = newval >> 32;
asm volatile (
"dmb sy\n\t"
"0: ldrexd r0, r1, [%[addr]]\n\t"
"cmp r0, %[oldval0]\n\t"
"bne 1f\n\t"
"cmp r1, %[oldval1]\n\t"
"bne 1f\n\t"
"mov r2, %[newval0]\n\t"
"mov r3, %[newval1]\n\t"
"strexd r4, r2, r3, [%[addr]]\n\t"
"cmp r4, #1\n\t"
"beq 0b\n\t"
"1: dmb sy\n\t"
"mov %[oldval0], r0\n\t"
"mov %[oldval1], r1\n\t"
: [oldval0] "+&r" (oldval0), [oldval1] "+&r"(oldval1)
: [addr] "r" (addr), [newval0] "r" (newval0), [newval1] "r" (newval1)
: "memory", "cc", "r0", "r1", "r2", "r3", "r4");
return uint64_t(oldval0) | (uint64_t(oldval1) << 32);
}
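// The 64-bit compare-exchange must use ldrexd/strexd, which require an
// even/odd register pair, so the operands are moved through r0-r4 explicitly
// (and those registers clobbered) rather than letting the register allocator
// choose. As with the narrower variants, the observed value is returned
// whether or not the exchange happened.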
inline uint8_t AtomicAdd8SeqCst(uint8_t* addr, uint8_t val) {
uint8_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"ldrexb %[res], [%[addr]]\n\t"
"add %[scratch1], %[res], %[val]\n\t"
"strexb %[scratch2], %[scratch1], [%[addr]]\n\t"
"cmp %[scratch2], #1\n\t"
"beq 0b\n\t"
"dmb sy\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint16_t AtomicAdd16SeqCst(uint16_t* addr, uint16_t val) {
uint16_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"ldrexh %[res], [%[addr]]\n\t"
"add %[scratch1], %[res], %[val]\n\t"
"strexh %[scratch2], %[scratch1], [%[addr]]\n\t"
"cmp %[scratch2], #1\n\t"
"beq 0b\n\t"
"dmb sy\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint32_t AtomicAdd32SeqCst(uint32_t* addr, uint32_t val) {
uint32_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"ldrex %[res], [%[addr]]\n\t"
"add %[scratch1], %[res], %[val]\n\t"
"strex %[scratch2], %[scratch1], [%[addr]]\n\t"
"cmp %[scratch2], #1\n\t"
"beq 0b\n\t"
"dmb sy\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
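// AtomicAdd*SeqCst is a fetch-and-add: it returns the value the location
// held before the addition. For example, two threads each running
// AtomicAdd32SeqCst(&x, 1) against x == 0 observe 0 and 1 in some order,
// and x ends up as 2. The And/Or/Xor families below are the same LL/SC
// skeleton with the "add" replaced by "and", "orr", and "eor".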
inline uint8_t AtomicAnd8SeqCst(uint8_t* addr, uint8_t val) {
uint8_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"ldrexb %[res], [%[addr]]\n\t"
"and %[scratch1], %[res], %[val]\n\t"
"strexb %[scratch2], %[scratch1], [%[addr]]\n\t"
"cmp %[scratch2], #1\n\t"
"beq 0b\n\t"
"dmb sy\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint16_t AtomicAnd16SeqCst(uint16_t* addr, uint16_t val) {
uint16_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"ldrexh %[res], [%[addr]]\n\t"
"and %[scratch1], %[res], %[val]\n\t"
"strexh %[scratch2], %[scratch1], [%[addr]]\n\t"
"cmp %[scratch2], #1\n\t"
"beq 0b\n\t"
"dmb sy\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint32_t AtomicAnd32SeqCst(uint32_t* addr, uint32_t val) {
uint32_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"ldrex %[res], [%[addr]]\n\t"
"and %[scratch1], %[res], %[val]\n\t"
"strex %[scratch2], %[scratch1], [%[addr]]\n\t"
"cmp %[scratch2], #1\n\t"
"beq 0b\n\t"
"dmb sy\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint8_t AtomicOr8SeqCst(uint8_t* addr, uint8_t val) {
uint8_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"ldrexb %[res], [%[addr]]\n\t"
"orr %[scratch1], %[res], %[val]\n\t"
"strexb %[scratch2], %[scratch1], [%[addr]]\n\t"
"cmp %[scratch2], #1\n\t"
"beq 0b\n\t"
"dmb sy\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint16_t AtomicOr16SeqCst(uint16_t* addr, uint16_t val) {
uint16_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"ldrexh %[res], [%[addr]]\n\t"
"orr %[scratch1], %[res], %[val]\n\t"
"strexh %[scratch2], %[scratch1], [%[addr]]\n\t"
"cmp %[scratch2], #1\n\t"
"beq 0b\n\t"
"dmb sy\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint32_t AtomicOr32SeqCst(uint32_t* addr, uint32_t val) {
uint32_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"ldrex %[res], [%[addr]]\n\t"
"orr %[scratch1], %[res], %[val]\n\t"
"strex %[scratch2], %[scratch1], [%[addr]]\n\t"
"cmp %[scratch2], #1\n\t"
"beq 0b\n\t"
"dmb sy\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint8_t AtomicXor8SeqCst(uint8_t* addr, uint8_t val) {
uint8_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"ldrexb %[res], [%[addr]]\n\t"
"eor %[scratch1], %[res], %[val]\n\t"
"strexb %[scratch2], %[scratch1], [%[addr]]\n\t"
"cmp %[scratch2], #1\n\t"
"beq 0b\n\t"
"dmb sy\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint16_t AtomicXor16SeqCst(uint16_t* addr, uint16_t val) {
uint16_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"ldrexh %[res], [%[addr]]\n\t"
"eor %[scratch1], %[res], %[val]\n\t"
"strexh %[scratch2], %[scratch1], [%[addr]]\n\t"
"cmp %[scratch2], #1\n\t"
"beq 0b\n\t"
"dmb sy\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint32_t AtomicXor32SeqCst(uint32_t* addr, uint32_t val) {
uint32_t res;
uintptr_t scratch1, scratch2;
asm volatile ("dmb sy\n\t"
"0:\n\t"
"ldrex %[res], [%[addr]]\n\t"
"eor %[scratch1], %[res], %[val]\n\t"
"strex %[scratch2], %[scratch1], [%[addr]]\n\t"
"cmp %[scratch2], #1\n\t"
"beq 0b\n\t"
"dmb sy\n\t"
: [res] "=&r" (res), [scratch1] "=&r" (scratch1), [scratch2] "=&r"(scratch2)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
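// The remaining helpers are unsynchronized copies used to move shared memory
// without tearing the individual loads and stores. The "Unaligned" variants
// copy byte by byte; the others assume word-aligned pointers and copy four
// bytes at a time. "Down" copies from the lowest address upward and "Up"
// from the highest downward, so a caller can pick the direction that is safe
// when source and destination overlap, as in memmove.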
inline void AtomicCopyUnalignedBlockDownUnsynchronized(uint8_t* dst, const uint8_t* src) {
uint8_t* dst_ = reinterpret_cast<uint8_t*>(dst);
const uint8_t* src_ = reinterpret_cast<const uint8_t*>(src);
uint8_t scratch;
asm volatile ("ldrb %[scratch], [%[src], #0]\n\t"
"strb %[scratch], [%[dst], #0]\n\t"
"ldrb %[scratch], [%[src], #1]\n\t"
"strb %[scratch], [%[dst], #1]\n\t"
"ldrb %[scratch], [%[src], #2]\n\t"
"strb %[scratch], [%[dst], #2]\n\t"
"ldrb %[scratch], [%[src], #3]\n\t"
"strb %[scratch], [%[dst], #3]\n\t"
"ldrb %[scratch], [%[src], #4]\n\t"
"strb %[scratch], [%[dst], #4]\n\t"
"ldrb %[scratch], [%[src], #5]\n\t"
"strb %[scratch], [%[dst], #5]\n\t"
"ldrb %[scratch], [%[src], #6]\n\t"
"strb %[scratch], [%[dst], #6]\n\t"
"ldrb %[scratch], [%[src], #7]\n\t"
"strb %[scratch], [%[dst], #7]\n\t"
"ldrb %[scratch], [%[src], #8]\n\t"
"strb %[scratch], [%[dst], #8]\n\t"
"ldrb %[scratch], [%[src], #9]\n\t"
"strb %[scratch], [%[dst], #9]\n\t"
"ldrb %[scratch], [%[src], #10]\n\t"
"strb %[scratch], [%[dst], #10]\n\t"
"ldrb %[scratch], [%[src], #11]\n\t"
"strb %[scratch], [%[dst], #11]\n\t"
"ldrb %[scratch], [%[src], #12]\n\t"
"strb %[scratch], [%[dst], #12]\n\t"
"ldrb %[scratch], [%[src], #13]\n\t"
"strb %[scratch], [%[dst], #13]\n\t"
"ldrb %[scratch], [%[src], #14]\n\t"
"strb %[scratch], [%[dst], #14]\n\t"
"ldrb %[scratch], [%[src], #15]\n\t"
"strb %[scratch], [%[dst], #15]\n\t"
"ldrb %[scratch], [%[src], #16]\n\t"
"strb %[scratch], [%[dst], #16]\n\t"
"ldrb %[scratch], [%[src], #17]\n\t"
"strb %[scratch], [%[dst], #17]\n\t"
"ldrb %[scratch], [%[src], #18]\n\t"
"strb %[scratch], [%[dst], #18]\n\t"
"ldrb %[scratch], [%[src], #19]\n\t"
"strb %[scratch], [%[dst], #19]\n\t"
"ldrb %[scratch], [%[src], #20]\n\t"
"strb %[scratch], [%[dst], #20]\n\t"
"ldrb %[scratch], [%[src], #21]\n\t"
"strb %[scratch], [%[dst], #21]\n\t"
"ldrb %[scratch], [%[src], #22]\n\t"
"strb %[scratch], [%[dst], #22]\n\t"
"ldrb %[scratch], [%[src], #23]\n\t"
"strb %[scratch], [%[dst], #23]\n\t"
"ldrb %[scratch], [%[src], #24]\n\t"
"strb %[scratch], [%[dst], #24]\n\t"
"ldrb %[scratch], [%[src], #25]\n\t"
"strb %[scratch], [%[dst], #25]\n\t"
"ldrb %[scratch], [%[src], #26]\n\t"
"strb %[scratch], [%[dst], #26]\n\t"
"ldrb %[scratch], [%[src], #27]\n\t"
"strb %[scratch], [%[dst], #27]\n\t"
"ldrb %[scratch], [%[src], #28]\n\t"
"strb %[scratch], [%[dst], #28]\n\t"
"ldrb %[scratch], [%[src], #29]\n\t"
"strb %[scratch], [%[dst], #29]\n\t"
"ldrb %[scratch], [%[src], #30]\n\t"
"strb %[scratch], [%[dst], #30]\n\t"
"ldrb %[scratch], [%[src], #31]\n\t"
"strb %[scratch], [%[dst], #31]\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyUnalignedBlockUpUnsynchronized(uint8_t* dst, const uint8_t* src) {
uint8_t* dst_ = reinterpret_cast<uint8_t*>(dst);
const uint8_t* src_ = reinterpret_cast<const uint8_t*>(src);
uint8_t scratch;
asm volatile ("ldrb %[scratch], [%[src], #31]\n\t"
"strb %[scratch], [%[dst], #31]\n\t"
"ldrb %[scratch], [%[src], #30]\n\t"
"strb %[scratch], [%[dst], #30]\n\t"
"ldrb %[scratch], [%[src], #29]\n\t"
"strb %[scratch], [%[dst], #29]\n\t"
"ldrb %[scratch], [%[src], #28]\n\t"
"strb %[scratch], [%[dst], #28]\n\t"
"ldrb %[scratch], [%[src], #27]\n\t"
"strb %[scratch], [%[dst], #27]\n\t"
"ldrb %[scratch], [%[src], #26]\n\t"
"strb %[scratch], [%[dst], #26]\n\t"
"ldrb %[scratch], [%[src], #25]\n\t"
"strb %[scratch], [%[dst], #25]\n\t"
"ldrb %[scratch], [%[src], #24]\n\t"
"strb %[scratch], [%[dst], #24]\n\t"
"ldrb %[scratch], [%[src], #23]\n\t"
"strb %[scratch], [%[dst], #23]\n\t"
"ldrb %[scratch], [%[src], #22]\n\t"
"strb %[scratch], [%[dst], #22]\n\t"
"ldrb %[scratch], [%[src], #21]\n\t"
"strb %[scratch], [%[dst], #21]\n\t"
"ldrb %[scratch], [%[src], #20]\n\t"
"strb %[scratch], [%[dst], #20]\n\t"
"ldrb %[scratch], [%[src], #19]\n\t"
"strb %[scratch], [%[dst], #19]\n\t"
"ldrb %[scratch], [%[src], #18]\n\t"
"strb %[scratch], [%[dst], #18]\n\t"
"ldrb %[scratch], [%[src], #17]\n\t"
"strb %[scratch], [%[dst], #17]\n\t"
"ldrb %[scratch], [%[src], #16]\n\t"
"strb %[scratch], [%[dst], #16]\n\t"
"ldrb %[scratch], [%[src], #15]\n\t"
"strb %[scratch], [%[dst], #15]\n\t"
"ldrb %[scratch], [%[src], #14]\n\t"
"strb %[scratch], [%[dst], #14]\n\t"
"ldrb %[scratch], [%[src], #13]\n\t"
"strb %[scratch], [%[dst], #13]\n\t"
"ldrb %[scratch], [%[src], #12]\n\t"
"strb %[scratch], [%[dst], #12]\n\t"
"ldrb %[scratch], [%[src], #11]\n\t"
"strb %[scratch], [%[dst], #11]\n\t"
"ldrb %[scratch], [%[src], #10]\n\t"
"strb %[scratch], [%[dst], #10]\n\t"
"ldrb %[scratch], [%[src], #9]\n\t"
"strb %[scratch], [%[dst], #9]\n\t"
"ldrb %[scratch], [%[src], #8]\n\t"
"strb %[scratch], [%[dst], #8]\n\t"
"ldrb %[scratch], [%[src], #7]\n\t"
"strb %[scratch], [%[dst], #7]\n\t"
"ldrb %[scratch], [%[src], #6]\n\t"
"strb %[scratch], [%[dst], #6]\n\t"
"ldrb %[scratch], [%[src], #5]\n\t"
"strb %[scratch], [%[dst], #5]\n\t"
"ldrb %[scratch], [%[src], #4]\n\t"
"strb %[scratch], [%[dst], #4]\n\t"
"ldrb %[scratch], [%[src], #3]\n\t"
"strb %[scratch], [%[dst], #3]\n\t"
"ldrb %[scratch], [%[src], #2]\n\t"
"strb %[scratch], [%[dst], #2]\n\t"
"ldrb %[scratch], [%[src], #1]\n\t"
"strb %[scratch], [%[dst], #1]\n\t"
"ldrb %[scratch], [%[src], #0]\n\t"
"strb %[scratch], [%[dst], #0]\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyUnalignedWordDownUnsynchronized(uint8_t* dst, const uint8_t* src) {
uint8_t* dst_ = reinterpret_cast<uint8_t*>(dst);
const uint8_t* src_ = reinterpret_cast<const uint8_t*>(src);
uint8_t scratch;
asm volatile ("ldrb %[scratch], [%[src], #0]\n\t"
"strb %[scratch], [%[dst], #0]\n\t"
"ldrb %[scratch], [%[src], #1]\n\t"
"strb %[scratch], [%[dst], #1]\n\t"
"ldrb %[scratch], [%[src], #2]\n\t"
"strb %[scratch], [%[dst], #2]\n\t"
"ldrb %[scratch], [%[src], #3]\n\t"
"strb %[scratch], [%[dst], #3]\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyUnalignedWordUpUnsynchronized(uint8_t* dst, const uint8_t* src) {
uint8_t* dst_ = reinterpret_cast<uint8_t*>(dst);
const uint8_t* src_ = reinterpret_cast<const uint8_t*>(src);
uint8_t scratch;
asm volatile ("ldrb %[scratch], [%[src], #3]\n\t"
"strb %[scratch], [%[dst], #3]\n\t"
"ldrb %[scratch], [%[src], #2]\n\t"
"strb %[scratch], [%[dst], #2]\n\t"
"ldrb %[scratch], [%[src], #1]\n\t"
"strb %[scratch], [%[dst], #1]\n\t"
"ldrb %[scratch], [%[src], #0]\n\t"
"strb %[scratch], [%[dst], #0]\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyBlockDownUnsynchronized(uint8_t* dst, const uint8_t* src) {
uintptr_t* dst_ = reinterpret_cast<uintptr_t*>(dst);
const uintptr_t* src_ = reinterpret_cast<const uintptr_t*>(src);
uintptr_t scratch;
asm volatile ("ldr %[scratch], [%[src], #0]\n\t"
"str %[scratch], [%[dst], #0]\n\t"
"ldr %[scratch], [%[src], #4]\n\t"
"str %[scratch], [%[dst], #4]\n\t"
"ldr %[scratch], [%[src], #8]\n\t"
"str %[scratch], [%[dst], #8]\n\t"
"ldr %[scratch], [%[src], #12]\n\t"
"str %[scratch], [%[dst], #12]\n\t"
"ldr %[scratch], [%[src], #16]\n\t"
"str %[scratch], [%[dst], #16]\n\t"
"ldr %[scratch], [%[src], #20]\n\t"
"str %[scratch], [%[dst], #20]\n\t"
"ldr %[scratch], [%[src], #24]\n\t"
"str %[scratch], [%[dst], #24]\n\t"
"ldr %[scratch], [%[src], #28]\n\t"
"str %[scratch], [%[dst], #28]\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyBlockUpUnsynchronized(uint8_t* dst, const uint8_t* src) {
uintptr_t* dst_ = reinterpret_cast<uintptr_t*>(dst);
const uintptr_t* src_ = reinterpret_cast<const uintptr_t*>(src);
uintptr_t scratch;
asm volatile ("ldr %[scratch], [%[src], #28]\n\t"
"str %[scratch], [%[dst], #28]\n\t"
"ldr %[scratch], [%[src], #24]\n\t"
"str %[scratch], [%[dst], #24]\n\t"
"ldr %[scratch], [%[src], #20]\n\t"
"str %[scratch], [%[dst], #20]\n\t"
"ldr %[scratch], [%[src], #16]\n\t"
"str %[scratch], [%[dst], #16]\n\t"
"ldr %[scratch], [%[src], #12]\n\t"
"str %[scratch], [%[dst], #12]\n\t"
"ldr %[scratch], [%[src], #8]\n\t"
"str %[scratch], [%[dst], #8]\n\t"
"ldr %[scratch], [%[src], #4]\n\t"
"str %[scratch], [%[dst], #4]\n\t"
"ldr %[scratch], [%[src], #0]\n\t"
"str %[scratch], [%[dst], #0]\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyWordUnsynchronized(uint8_t* dst, const uint8_t* src) {
uintptr_t* dst_ = reinterpret_cast<uintptr_t*>(dst);
const uintptr_t* src_ = reinterpret_cast<const uintptr_t*>(src);
uintptr_t scratch;
asm volatile ("ldr %[scratch], [%[src], #0]\n\t"
"str %[scratch], [%[dst], #0]\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyByteUnsynchronized(uint8_t* dst, const uint8_t* src) {
uint8_t* dst_ = reinterpret_cast<uint8_t*>(dst);
const uint8_t* src_ = reinterpret_cast<const uint8_t*>(src);
uint8_t scratch;
asm volatile ("ldrb %[scratch], [%[src], #0]\n\t"
"strb %[scratch], [%[dst], #0]\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
constexpr size_t JS_GENERATED_ATOMICS_BLOCKSIZE = 32;
constexpr size_t JS_GENERATED_ATOMICS_WORDSIZE = 4;
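// Hypothetical usage sketch, not part of the generated file: the constants
// above advertise the copy units. A caller might stitch the helpers into a
// racy downward copy (destination below source) roughly like this, assuming
// both pointers are already word-aligned:
inline void ExampleMemcpyDownUnsynchronized(uint8_t* dst, const uint8_t* src,
                                            size_t nbytes) {
  while (nbytes >= JS_GENERATED_ATOMICS_BLOCKSIZE) {
    AtomicCopyBlockDownUnsynchronized(dst, src);
    dst += JS_GENERATED_ATOMICS_BLOCKSIZE;
    src += JS_GENERATED_ATOMICS_BLOCKSIZE;
    nbytes -= JS_GENERATED_ATOMICS_BLOCKSIZE;
  }
  while (nbytes >= JS_GENERATED_ATOMICS_WORDSIZE) {
    AtomicCopyWordUnsynchronized(dst, src);
    dst += JS_GENERATED_ATOMICS_WORDSIZE;
    src += JS_GENERATED_ATOMICS_WORDSIZE;
    nbytes -= JS_GENERATED_ATOMICS_WORDSIZE;
  }
  while (nbytes-- > 0) {
    AtomicCopyByteUnsynchronized(dst++, src++);
  }
}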
} // namespace jit
} // namespace js
#endif // jit_AtomicOperationsGenerated_h