/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_AtomicOperationsGenerated_h
#define jit_AtomicOperationsGenerated_h

/* This file is generated by jit/GenerateAtomicOperations.py. Do not edit! */

#include "mozilla/Attributes.h"

#include <stddef.h>  // size_t
#include <stdint.h>  // uint8_t .. uint64_t

namespace js {
namespace jit {

#define JS_HAVE_GENERATED_ATOMIC_OPS 1
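
// Full memory fence: mfence orders all earlier loads and stores before all
// later ones.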
inline void AtomicFenceSeqCst() {
asm volatile ("mfence\n\t" ::: "memory");
}
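
// Sequentially consistent loads. Under the x86-TSO memory model a plain load
// is sufficient for seq_cst as long as seq_cst stores are fenced (see the
// store functions below); the "memory" clobber keeps the compiler from
// reordering the access.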
inline uint8_t AtomicLoad8SeqCst(const uint8_t* arg) {
uint8_t res;
asm volatile ("movb (%[arg]), %[res]\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
inline uint16_t AtomicLoad16SeqCst(const uint16_t* arg) {
uint16_t res;
asm volatile ("movw (%[arg]), %[res]\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
inline uint32_t AtomicLoad32SeqCst(const uint32_t* arg) {
uint32_t res;
asm volatile ("movl (%[arg]), %[res]\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
inline uint64_t AtomicLoad64SeqCst(const uint64_t* arg) {
uint64_t res;
asm volatile ("movq (%[arg]), %[res]\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
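
// Unsynchronized (relaxed) loads. Writing them in assembly guarantees a
// single load of the naturally aligned size, so the access can neither tear
// nor be optimized away.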
inline uint8_t AtomicLoad8Unsynchronized(const uint8_t* arg) {
uint8_t res;
asm volatile ("movb (%[arg]), %[res]\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
inline uint16_t AtomicLoad16Unsynchronized(const uint16_t* arg) {
uint16_t res;
asm volatile ("movw (%[arg]), %[res]\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
inline uint32_t AtomicLoad32Unsynchronized(const uint32_t* arg) {
uint32_t res;
asm volatile ("movl (%[arg]), %[res]\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
inline uint64_t AtomicLoad64Unsynchronized(const uint64_t* arg) {
uint64_t res;
asm volatile ("movq (%[arg]), %[res]\n\t"
: [res] "=r" (res)
: [arg] "r" (arg)
: "memory");
return res;
}
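
// Sequentially consistent stores: a plain store followed by mfence, the
// standard seq_cst store mapping on x86.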
inline void AtomicStore8SeqCst(uint8_t* addr, uint8_t val) {
asm volatile ("movb %[val], (%[addr])\n\t"
"mfence\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore16SeqCst(uint16_t* addr, uint16_t val) {
asm volatile ("movw %[val], (%[addr])\n\t"
"mfence\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore32SeqCst(uint32_t* addr, uint32_t val) {
asm volatile ("movl %[val], (%[addr])\n\t"
"mfence\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore64SeqCst(uint64_t* addr, uint64_t val) {
asm volatile ("movq %[val], (%[addr])\n\t"
"mfence\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
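
// Unsynchronized (relaxed) stores; a single store of the naturally aligned
// size cannot tear.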
inline void AtomicStore8Unsynchronized(uint8_t* addr, uint8_t val) {
asm volatile ("movb %[val], (%[addr])\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore16Unsynchronized(uint16_t* addr, uint16_t val) {
asm volatile ("movw %[val], (%[addr])\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore32Unsynchronized(uint32_t* addr, uint32_t val) {
asm volatile ("movl %[val], (%[addr])\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
inline void AtomicStore64Unsynchronized(uint64_t* addr, uint64_t val) {
asm volatile ("movq %[val], (%[addr])\n\t"
:
: [addr] "r" (addr), [val] "r"(val)
: "memory");
}
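
// Atomic exchange. xchg with a memory operand carries an implicit lock
// prefix, so no explicit fence is needed; the previous memory value is
// returned in the register operand.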
inline uint8_t AtomicExchange8SeqCst(uint8_t* addr, uint8_t val) {
asm volatile ("xchgb %[val], (%[addr])\n\t"
: [val] "+r" (val)
: [addr] "r" (addr)
: "memory");
return val;
}
inline uint16_t AtomicExchange16SeqCst(uint16_t* addr, uint16_t val) {
asm volatile ("xchgw %[val], (%[addr])\n\t"
: [val] "+r" (val)
: [addr] "r" (addr)
: "memory");
return val;
}
inline uint32_t AtomicExchange32SeqCst(uint32_t* addr, uint32_t val) {
asm volatile ("xchgl %[val], (%[addr])\n\t"
: [val] "+r" (val)
: [addr] "r" (addr)
: "memory");
return val;
}
inline uint64_t AtomicExchange64SeqCst(uint64_t* addr, uint64_t val) {
asm volatile ("xchgq %[val], (%[addr])\n\t"
: [val] "+r" (val)
: [addr] "r" (addr)
: "memory");
return val;
}
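
// Compare-and-exchange. lock cmpxchg compares the accumulator (the "a"
// constraint, i.e. oldval) with memory and stores newval on a match; in
// either case the previous memory value ends up in the accumulator, which is
// what gets returned.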
inline uint8_t AtomicCmpXchg8SeqCst(uint8_t* addr,
uint8_t oldval,
uint8_t newval) {
asm volatile ("lock; cmpxchgb %[newval], (%[addr])\n\t"
: [oldval] "+a" (oldval)
: [addr] "r" (addr), [newval] "r" (newval)
: "memory", "cc");
return oldval;
}
inline uint16_t AtomicCmpXchg16SeqCst(uint16_t* addr,
uint16_t oldval,
uint16_t newval) {
asm volatile ("lock; cmpxchgw %[newval], (%[addr])\n\t"
: [oldval] "+a" (oldval)
: [addr] "r" (addr), [newval] "r" (newval)
: "memory", "cc");
return oldval;
}
inline uint32_t AtomicCmpXchg32SeqCst(uint32_t* addr,
uint32_t oldval,
uint32_t newval) {
asm volatile ("lock; cmpxchgl %[newval], (%[addr])\n\t"
: [oldval] "+a" (oldval)
: [addr] "r" (addr), [newval] "r" (newval)
: "memory", "cc");
return oldval;
}
inline uint64_t AtomicCmpXchg64SeqCst(uint64_t* addr,
uint64_t oldval,
uint64_t newval) {
asm volatile ("lock; cmpxchgq %[newval], (%[addr])\n\t"
: [oldval] "+a" (oldval)
: [addr] "r" (addr), [newval] "r" (newval)
: "memory", "cc");
return oldval;
}
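
// Fetch-and-add. lock xadd adds val to memory and returns the previous
// memory value in the val register.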
inline uint8_t AtomicAdd8SeqCst(uint8_t* addr, uint8_t val) {
asm volatile ("lock; xaddb %[val], (%[addr])\n\t"
: [val] "+&r" (val)
: [addr] "r" (addr)
: "memory", "cc");
return val;
}
inline uint16_t AtomicAdd16SeqCst(uint16_t* addr, uint16_t val) {
asm volatile ("lock; xaddw %[val], (%[addr])\n\t"
: [val] "+&r" (val)
: [addr] "r" (addr)
: "memory", "cc");
return val;
}
inline uint32_t AtomicAdd32SeqCst(uint32_t* addr, uint32_t val) {
asm volatile ("lock; xaddl %[val], (%[addr])\n\t"
: [val] "+&r" (val)
: [addr] "r" (addr)
: "memory", "cc");
return val;
}
inline uint64_t AtomicAdd64SeqCst(uint64_t* addr, uint64_t val) {
asm volatile ("lock; xaddq %[val], (%[addr])\n\t"
: [val] "+&r" (val)
: [addr] "r" (addr)
: "memory", "cc");
return val;
}
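
// Fetch-and-and/or/xor. x86 has no single instruction that applies a bitwise
// operation and also returns the old value, so these are compare-and-swap
// loops: load the old value, compute old OP val into a scratch register, and
// retry the lock cmpxchg until it succeeds. The old value is returned.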
inline uint8_t AtomicAnd8SeqCst(uint8_t* addr, uint8_t val) {
uint8_t res, scratch;
asm volatile ("movb (%[addr]), %[res]\n\t"
"0: movb %[res], %[scratch]\n\t"
"andb %[val], %[scratch]\n\t"
"lock; cmpxchgb %[scratch], (%[addr])\n\t"
"jnz 0b\n\t"
: [res] "=&a" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint16_t AtomicAnd16SeqCst(uint16_t* addr, uint16_t val) {
uint16_t res, scratch;
asm volatile ("movw (%[addr]), %[res]\n\t"
"0: movw %[res], %[scratch]\n\t"
"andw %[val], %[scratch]\n\t"
"lock; cmpxchgw %[scratch], (%[addr])\n\t"
"jnz 0b\n\t"
: [res] "=&a" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint32_t AtomicAnd32SeqCst(uint32_t* addr, uint32_t val) {
uint32_t res, scratch;
asm volatile ("movl (%[addr]), %[res]\n\t"
"0: movl %[res], %[scratch]\n\t"
"andl %[val], %[scratch]\n\t"
"lock; cmpxchgl %[scratch], (%[addr])\n\t"
"jnz 0b\n\t"
: [res] "=&a" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint64_t AtomicAnd64SeqCst(uint64_t* addr, uint64_t val) {
uint64_t res, scratch;
asm volatile ("movq (%[addr]), %[res]\n\t"
"0: movq %[res], %[scratch]\n\t"
"andq %[val], %[scratch]\n\t"
"lock; cmpxchgq %[scratch], (%[addr])\n\t"
"jnz 0b\n\t"
: [res] "=&a" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint8_t AtomicOr8SeqCst(uint8_t* addr, uint8_t val) {
uint8_t res, scratch;
asm volatile ("movb (%[addr]), %[res]\n\t"
"0: movb %[res], %[scratch]\n\t"
"orb %[val], %[scratch]\n\t"
"lock; cmpxchgb %[scratch], (%[addr])\n\t"
"jnz 0b\n\t"
: [res] "=&a" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint16_t AtomicOr16SeqCst(uint16_t* addr, uint16_t val) {
uint16_t res, scratch;
asm volatile ("movw (%[addr]), %[res]\n\t"
"0: movw %[res], %[scratch]\n\t"
"orw %[val], %[scratch]\n\t"
"lock; cmpxchgw %[scratch], (%[addr])\n\t"
"jnz 0b\n\t"
: [res] "=&a" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint32_t AtomicOr32SeqCst(uint32_t* addr, uint32_t val) {
uint32_t res, scratch;
asm volatile ("movl (%[addr]), %[res]\n\t"
"0: movl %[res], %[scratch]\n\t"
"orl %[val], %[scratch]\n\t"
"lock; cmpxchgl %[scratch], (%[addr])\n\t"
"jnz 0b\n\t"
: [res] "=&a" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint64_t AtomicOr64SeqCst(uint64_t* addr, uint64_t val) {
uint64_t res, scratch;
asm volatile ("movq (%[addr]), %[res]\n\t"
"0: movq %[res], %[scratch]\n\t"
"orq %[val], %[scratch]\n\t"
"lock; cmpxchgq %[scratch], (%[addr])\n\t"
"jnz 0b\n\t"
: [res] "=&a" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint8_t AtomicXor8SeqCst(uint8_t* addr, uint8_t val) {
uint8_t res, scratch;
asm volatile ("movb (%[addr]), %[res]\n\t"
"0: movb %[res], %[scratch]\n\t"
"xorb %[val], %[scratch]\n\t"
"lock; cmpxchgb %[scratch], (%[addr])\n\t"
"jnz 0b\n\t"
: [res] "=&a" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint16_t AtomicXor16SeqCst(uint16_t* addr, uint16_t val) {
uint16_t res, scratch;
asm volatile ("movw (%[addr]), %[res]\n\t"
"0: movw %[res], %[scratch]\n\t"
"xorw %[val], %[scratch]\n\t"
"lock; cmpxchgw %[scratch], (%[addr])\n\t"
"jnz 0b\n\t"
: [res] "=&a" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint32_t AtomicXor32SeqCst(uint32_t* addr, uint32_t val) {
uint32_t res, scratch;
asm volatile ("movl (%[addr]), %[res]\n\t"
"0: movl %[res], %[scratch]\n\t"
"xorl %[val], %[scratch]\n\t"
"lock; cmpxchgl %[scratch], (%[addr])\n\t"
"jnz 0b\n\t"
: [res] "=&a" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
inline uint64_t AtomicXor64SeqCst(uint64_t* addr, uint64_t val) {
uint64_t res, scratch;
asm volatile ("movq (%[addr]), %[res]\n\t"
"0: movq %[res], %[scratch]\n\t"
"xorq %[val], %[scratch]\n\t"
"lock; cmpxchgq %[scratch], (%[addr])\n\t"
"jnz 0b\n\t"
: [res] "=&a" (res), [scratch] "=&r" (scratch)
: [addr] "r" (addr), [val] "r"(val)
: "memory", "cc");
return res;
}
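
// Spin-wait hint for busy-wait loops.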
inline void AtomicPause() {
asm volatile ("pause" :::);
}
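
// Unsynchronized copy helpers. Each element is moved with one load and one
// store so individual bytes or words are never torn, and the copies cannot
// be fused or elided by the compiler. The "Down" variants copy in ascending
// address order and the "Up" variants in descending order, which lets
// callers handle overlapping ranges by choosing the direction. Block
// variants copy 64 bytes, word variants 8 bytes.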
inline void AtomicCopyUnalignedBlockDownUnsynchronized(uint8_t* dst, const uint8_t* src) {
uint8_t* dst_ = reinterpret_cast<uint8_t*>(dst);
const uint8_t* src_ = reinterpret_cast<const uint8_t*>(src);
uint8_t scratch;
asm volatile ("movb 0(%[src]), %[scratch]\n\t"
"movb %[scratch], 0(%[dst])\n\t"
"movb 1(%[src]), %[scratch]\n\t"
"movb %[scratch], 1(%[dst])\n\t"
"movb 2(%[src]), %[scratch]\n\t"
"movb %[scratch], 2(%[dst])\n\t"
"movb 3(%[src]), %[scratch]\n\t"
"movb %[scratch], 3(%[dst])\n\t"
"movb 4(%[src]), %[scratch]\n\t"
"movb %[scratch], 4(%[dst])\n\t"
"movb 5(%[src]), %[scratch]\n\t"
"movb %[scratch], 5(%[dst])\n\t"
"movb 6(%[src]), %[scratch]\n\t"
"movb %[scratch], 6(%[dst])\n\t"
"movb 7(%[src]), %[scratch]\n\t"
"movb %[scratch], 7(%[dst])\n\t"
"movb 8(%[src]), %[scratch]\n\t"
"movb %[scratch], 8(%[dst])\n\t"
"movb 9(%[src]), %[scratch]\n\t"
"movb %[scratch], 9(%[dst])\n\t"
"movb 10(%[src]), %[scratch]\n\t"
"movb %[scratch], 10(%[dst])\n\t"
"movb 11(%[src]), %[scratch]\n\t"
"movb %[scratch], 11(%[dst])\n\t"
"movb 12(%[src]), %[scratch]\n\t"
"movb %[scratch], 12(%[dst])\n\t"
"movb 13(%[src]), %[scratch]\n\t"
"movb %[scratch], 13(%[dst])\n\t"
"movb 14(%[src]), %[scratch]\n\t"
"movb %[scratch], 14(%[dst])\n\t"
"movb 15(%[src]), %[scratch]\n\t"
"movb %[scratch], 15(%[dst])\n\t"
"movb 16(%[src]), %[scratch]\n\t"
"movb %[scratch], 16(%[dst])\n\t"
"movb 17(%[src]), %[scratch]\n\t"
"movb %[scratch], 17(%[dst])\n\t"
"movb 18(%[src]), %[scratch]\n\t"
"movb %[scratch], 18(%[dst])\n\t"
"movb 19(%[src]), %[scratch]\n\t"
"movb %[scratch], 19(%[dst])\n\t"
"movb 20(%[src]), %[scratch]\n\t"
"movb %[scratch], 20(%[dst])\n\t"
"movb 21(%[src]), %[scratch]\n\t"
"movb %[scratch], 21(%[dst])\n\t"
"movb 22(%[src]), %[scratch]\n\t"
"movb %[scratch], 22(%[dst])\n\t"
"movb 23(%[src]), %[scratch]\n\t"
"movb %[scratch], 23(%[dst])\n\t"
"movb 24(%[src]), %[scratch]\n\t"
"movb %[scratch], 24(%[dst])\n\t"
"movb 25(%[src]), %[scratch]\n\t"
"movb %[scratch], 25(%[dst])\n\t"
"movb 26(%[src]), %[scratch]\n\t"
"movb %[scratch], 26(%[dst])\n\t"
"movb 27(%[src]), %[scratch]\n\t"
"movb %[scratch], 27(%[dst])\n\t"
"movb 28(%[src]), %[scratch]\n\t"
"movb %[scratch], 28(%[dst])\n\t"
"movb 29(%[src]), %[scratch]\n\t"
"movb %[scratch], 29(%[dst])\n\t"
"movb 30(%[src]), %[scratch]\n\t"
"movb %[scratch], 30(%[dst])\n\t"
"movb 31(%[src]), %[scratch]\n\t"
"movb %[scratch], 31(%[dst])\n\t"
"movb 32(%[src]), %[scratch]\n\t"
"movb %[scratch], 32(%[dst])\n\t"
"movb 33(%[src]), %[scratch]\n\t"
"movb %[scratch], 33(%[dst])\n\t"
"movb 34(%[src]), %[scratch]\n\t"
"movb %[scratch], 34(%[dst])\n\t"
"movb 35(%[src]), %[scratch]\n\t"
"movb %[scratch], 35(%[dst])\n\t"
"movb 36(%[src]), %[scratch]\n\t"
"movb %[scratch], 36(%[dst])\n\t"
"movb 37(%[src]), %[scratch]\n\t"
"movb %[scratch], 37(%[dst])\n\t"
"movb 38(%[src]), %[scratch]\n\t"
"movb %[scratch], 38(%[dst])\n\t"
"movb 39(%[src]), %[scratch]\n\t"
"movb %[scratch], 39(%[dst])\n\t"
"movb 40(%[src]), %[scratch]\n\t"
"movb %[scratch], 40(%[dst])\n\t"
"movb 41(%[src]), %[scratch]\n\t"
"movb %[scratch], 41(%[dst])\n\t"
"movb 42(%[src]), %[scratch]\n\t"
"movb %[scratch], 42(%[dst])\n\t"
"movb 43(%[src]), %[scratch]\n\t"
"movb %[scratch], 43(%[dst])\n\t"
"movb 44(%[src]), %[scratch]\n\t"
"movb %[scratch], 44(%[dst])\n\t"
"movb 45(%[src]), %[scratch]\n\t"
"movb %[scratch], 45(%[dst])\n\t"
"movb 46(%[src]), %[scratch]\n\t"
"movb %[scratch], 46(%[dst])\n\t"
"movb 47(%[src]), %[scratch]\n\t"
"movb %[scratch], 47(%[dst])\n\t"
"movb 48(%[src]), %[scratch]\n\t"
"movb %[scratch], 48(%[dst])\n\t"
"movb 49(%[src]), %[scratch]\n\t"
"movb %[scratch], 49(%[dst])\n\t"
"movb 50(%[src]), %[scratch]\n\t"
"movb %[scratch], 50(%[dst])\n\t"
"movb 51(%[src]), %[scratch]\n\t"
"movb %[scratch], 51(%[dst])\n\t"
"movb 52(%[src]), %[scratch]\n\t"
"movb %[scratch], 52(%[dst])\n\t"
"movb 53(%[src]), %[scratch]\n\t"
"movb %[scratch], 53(%[dst])\n\t"
"movb 54(%[src]), %[scratch]\n\t"
"movb %[scratch], 54(%[dst])\n\t"
"movb 55(%[src]), %[scratch]\n\t"
"movb %[scratch], 55(%[dst])\n\t"
"movb 56(%[src]), %[scratch]\n\t"
"movb %[scratch], 56(%[dst])\n\t"
"movb 57(%[src]), %[scratch]\n\t"
"movb %[scratch], 57(%[dst])\n\t"
"movb 58(%[src]), %[scratch]\n\t"
"movb %[scratch], 58(%[dst])\n\t"
"movb 59(%[src]), %[scratch]\n\t"
"movb %[scratch], 59(%[dst])\n\t"
"movb 60(%[src]), %[scratch]\n\t"
"movb %[scratch], 60(%[dst])\n\t"
"movb 61(%[src]), %[scratch]\n\t"
"movb %[scratch], 61(%[dst])\n\t"
"movb 62(%[src]), %[scratch]\n\t"
"movb %[scratch], 62(%[dst])\n\t"
"movb 63(%[src]), %[scratch]\n\t"
"movb %[scratch], 63(%[dst])\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyUnalignedBlockUpUnsynchronized(uint8_t* dst, const uint8_t* src) {
uint8_t* dst_ = reinterpret_cast<uint8_t*>(dst);
const uint8_t* src_ = reinterpret_cast<const uint8_t*>(src);
uint8_t scratch;
asm volatile ("movb 63(%[src]), %[scratch]\n\t"
"movb %[scratch], 63(%[dst])\n\t"
"movb 62(%[src]), %[scratch]\n\t"
"movb %[scratch], 62(%[dst])\n\t"
"movb 61(%[src]), %[scratch]\n\t"
"movb %[scratch], 61(%[dst])\n\t"
"movb 60(%[src]), %[scratch]\n\t"
"movb %[scratch], 60(%[dst])\n\t"
"movb 59(%[src]), %[scratch]\n\t"
"movb %[scratch], 59(%[dst])\n\t"
"movb 58(%[src]), %[scratch]\n\t"
"movb %[scratch], 58(%[dst])\n\t"
"movb 57(%[src]), %[scratch]\n\t"
"movb %[scratch], 57(%[dst])\n\t"
"movb 56(%[src]), %[scratch]\n\t"
"movb %[scratch], 56(%[dst])\n\t"
"movb 55(%[src]), %[scratch]\n\t"
"movb %[scratch], 55(%[dst])\n\t"
"movb 54(%[src]), %[scratch]\n\t"
"movb %[scratch], 54(%[dst])\n\t"
"movb 53(%[src]), %[scratch]\n\t"
"movb %[scratch], 53(%[dst])\n\t"
"movb 52(%[src]), %[scratch]\n\t"
"movb %[scratch], 52(%[dst])\n\t"
"movb 51(%[src]), %[scratch]\n\t"
"movb %[scratch], 51(%[dst])\n\t"
"movb 50(%[src]), %[scratch]\n\t"
"movb %[scratch], 50(%[dst])\n\t"
"movb 49(%[src]), %[scratch]\n\t"
"movb %[scratch], 49(%[dst])\n\t"
"movb 48(%[src]), %[scratch]\n\t"
"movb %[scratch], 48(%[dst])\n\t"
"movb 47(%[src]), %[scratch]\n\t"
"movb %[scratch], 47(%[dst])\n\t"
"movb 46(%[src]), %[scratch]\n\t"
"movb %[scratch], 46(%[dst])\n\t"
"movb 45(%[src]), %[scratch]\n\t"
"movb %[scratch], 45(%[dst])\n\t"
"movb 44(%[src]), %[scratch]\n\t"
"movb %[scratch], 44(%[dst])\n\t"
"movb 43(%[src]), %[scratch]\n\t"
"movb %[scratch], 43(%[dst])\n\t"
"movb 42(%[src]), %[scratch]\n\t"
"movb %[scratch], 42(%[dst])\n\t"
"movb 41(%[src]), %[scratch]\n\t"
"movb %[scratch], 41(%[dst])\n\t"
"movb 40(%[src]), %[scratch]\n\t"
"movb %[scratch], 40(%[dst])\n\t"
"movb 39(%[src]), %[scratch]\n\t"
"movb %[scratch], 39(%[dst])\n\t"
"movb 38(%[src]), %[scratch]\n\t"
"movb %[scratch], 38(%[dst])\n\t"
"movb 37(%[src]), %[scratch]\n\t"
"movb %[scratch], 37(%[dst])\n\t"
"movb 36(%[src]), %[scratch]\n\t"
"movb %[scratch], 36(%[dst])\n\t"
"movb 35(%[src]), %[scratch]\n\t"
"movb %[scratch], 35(%[dst])\n\t"
"movb 34(%[src]), %[scratch]\n\t"
"movb %[scratch], 34(%[dst])\n\t"
"movb 33(%[src]), %[scratch]\n\t"
"movb %[scratch], 33(%[dst])\n\t"
"movb 32(%[src]), %[scratch]\n\t"
"movb %[scratch], 32(%[dst])\n\t"
"movb 31(%[src]), %[scratch]\n\t"
"movb %[scratch], 31(%[dst])\n\t"
"movb 30(%[src]), %[scratch]\n\t"
"movb %[scratch], 30(%[dst])\n\t"
"movb 29(%[src]), %[scratch]\n\t"
"movb %[scratch], 29(%[dst])\n\t"
"movb 28(%[src]), %[scratch]\n\t"
"movb %[scratch], 28(%[dst])\n\t"
"movb 27(%[src]), %[scratch]\n\t"
"movb %[scratch], 27(%[dst])\n\t"
"movb 26(%[src]), %[scratch]\n\t"
"movb %[scratch], 26(%[dst])\n\t"
"movb 25(%[src]), %[scratch]\n\t"
"movb %[scratch], 25(%[dst])\n\t"
"movb 24(%[src]), %[scratch]\n\t"
"movb %[scratch], 24(%[dst])\n\t"
"movb 23(%[src]), %[scratch]\n\t"
"movb %[scratch], 23(%[dst])\n\t"
"movb 22(%[src]), %[scratch]\n\t"
"movb %[scratch], 22(%[dst])\n\t"
"movb 21(%[src]), %[scratch]\n\t"
"movb %[scratch], 21(%[dst])\n\t"
"movb 20(%[src]), %[scratch]\n\t"
"movb %[scratch], 20(%[dst])\n\t"
"movb 19(%[src]), %[scratch]\n\t"
"movb %[scratch], 19(%[dst])\n\t"
"movb 18(%[src]), %[scratch]\n\t"
"movb %[scratch], 18(%[dst])\n\t"
"movb 17(%[src]), %[scratch]\n\t"
"movb %[scratch], 17(%[dst])\n\t"
"movb 16(%[src]), %[scratch]\n\t"
"movb %[scratch], 16(%[dst])\n\t"
"movb 15(%[src]), %[scratch]\n\t"
"movb %[scratch], 15(%[dst])\n\t"
"movb 14(%[src]), %[scratch]\n\t"
"movb %[scratch], 14(%[dst])\n\t"
"movb 13(%[src]), %[scratch]\n\t"
"movb %[scratch], 13(%[dst])\n\t"
"movb 12(%[src]), %[scratch]\n\t"
"movb %[scratch], 12(%[dst])\n\t"
"movb 11(%[src]), %[scratch]\n\t"
"movb %[scratch], 11(%[dst])\n\t"
"movb 10(%[src]), %[scratch]\n\t"
"movb %[scratch], 10(%[dst])\n\t"
"movb 9(%[src]), %[scratch]\n\t"
"movb %[scratch], 9(%[dst])\n\t"
"movb 8(%[src]), %[scratch]\n\t"
"movb %[scratch], 8(%[dst])\n\t"
"movb 7(%[src]), %[scratch]\n\t"
"movb %[scratch], 7(%[dst])\n\t"
"movb 6(%[src]), %[scratch]\n\t"
"movb %[scratch], 6(%[dst])\n\t"
"movb 5(%[src]), %[scratch]\n\t"
"movb %[scratch], 5(%[dst])\n\t"
"movb 4(%[src]), %[scratch]\n\t"
"movb %[scratch], 4(%[dst])\n\t"
"movb 3(%[src]), %[scratch]\n\t"
"movb %[scratch], 3(%[dst])\n\t"
"movb 2(%[src]), %[scratch]\n\t"
"movb %[scratch], 2(%[dst])\n\t"
"movb 1(%[src]), %[scratch]\n\t"
"movb %[scratch], 1(%[dst])\n\t"
"movb 0(%[src]), %[scratch]\n\t"
"movb %[scratch], 0(%[dst])\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyUnalignedWordDownUnsynchronized(uint8_t* dst, const uint8_t* src) {
uint8_t* dst_ = reinterpret_cast<uint8_t*>(dst);
const uint8_t* src_ = reinterpret_cast<const uint8_t*>(src);
uint8_t scratch;
asm volatile ("movb 0(%[src]), %[scratch]\n\t"
"movb %[scratch], 0(%[dst])\n\t"
"movb 1(%[src]), %[scratch]\n\t"
"movb %[scratch], 1(%[dst])\n\t"
"movb 2(%[src]), %[scratch]\n\t"
"movb %[scratch], 2(%[dst])\n\t"
"movb 3(%[src]), %[scratch]\n\t"
"movb %[scratch], 3(%[dst])\n\t"
"movb 4(%[src]), %[scratch]\n\t"
"movb %[scratch], 4(%[dst])\n\t"
"movb 5(%[src]), %[scratch]\n\t"
"movb %[scratch], 5(%[dst])\n\t"
"movb 6(%[src]), %[scratch]\n\t"
"movb %[scratch], 6(%[dst])\n\t"
"movb 7(%[src]), %[scratch]\n\t"
"movb %[scratch], 7(%[dst])\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyUnalignedWordUpUnsynchronized(uint8_t* dst, const uint8_t* src) {
uint8_t* dst_ = reinterpret_cast<uint8_t*>(dst);
const uint8_t* src_ = reinterpret_cast<const uint8_t*>(src);
uint8_t scratch;
asm volatile ("movb 7(%[src]), %[scratch]\n\t"
"movb %[scratch], 7(%[dst])\n\t"
"movb 6(%[src]), %[scratch]\n\t"
"movb %[scratch], 6(%[dst])\n\t"
"movb 5(%[src]), %[scratch]\n\t"
"movb %[scratch], 5(%[dst])\n\t"
"movb 4(%[src]), %[scratch]\n\t"
"movb %[scratch], 4(%[dst])\n\t"
"movb 3(%[src]), %[scratch]\n\t"
"movb %[scratch], 3(%[dst])\n\t"
"movb 2(%[src]), %[scratch]\n\t"
"movb %[scratch], 2(%[dst])\n\t"
"movb 1(%[src]), %[scratch]\n\t"
"movb %[scratch], 1(%[dst])\n\t"
"movb 0(%[src]), %[scratch]\n\t"
"movb %[scratch], 0(%[dst])\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyBlockDownUnsynchronized(uint8_t* dst, const uint8_t* src) {
uintptr_t* dst_ = reinterpret_cast<uintptr_t*>(dst);
const uintptr_t* src_ = reinterpret_cast<const uintptr_t*>(src);
uintptr_t scratch;
asm volatile ("movq 0(%[src]), %[scratch]\n\t"
"movq %[scratch], 0(%[dst])\n\t"
"movq 8(%[src]), %[scratch]\n\t"
"movq %[scratch], 8(%[dst])\n\t"
"movq 16(%[src]), %[scratch]\n\t"
"movq %[scratch], 16(%[dst])\n\t"
"movq 24(%[src]), %[scratch]\n\t"
"movq %[scratch], 24(%[dst])\n\t"
"movq 32(%[src]), %[scratch]\n\t"
"movq %[scratch], 32(%[dst])\n\t"
"movq 40(%[src]), %[scratch]\n\t"
"movq %[scratch], 40(%[dst])\n\t"
"movq 48(%[src]), %[scratch]\n\t"
"movq %[scratch], 48(%[dst])\n\t"
"movq 56(%[src]), %[scratch]\n\t"
"movq %[scratch], 56(%[dst])\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyBlockUpUnsynchronized(uint8_t* dst, const uint8_t* src) {
uintptr_t* dst_ = reinterpret_cast<uintptr_t*>(dst);
const uintptr_t* src_ = reinterpret_cast<const uintptr_t*>(src);
uintptr_t scratch;
asm volatile ("movq 56(%[src]), %[scratch]\n\t"
"movq %[scratch], 56(%[dst])\n\t"
"movq 48(%[src]), %[scratch]\n\t"
"movq %[scratch], 48(%[dst])\n\t"
"movq 40(%[src]), %[scratch]\n\t"
"movq %[scratch], 40(%[dst])\n\t"
"movq 32(%[src]), %[scratch]\n\t"
"movq %[scratch], 32(%[dst])\n\t"
"movq 24(%[src]), %[scratch]\n\t"
"movq %[scratch], 24(%[dst])\n\t"
"movq 16(%[src]), %[scratch]\n\t"
"movq %[scratch], 16(%[dst])\n\t"
"movq 8(%[src]), %[scratch]\n\t"
"movq %[scratch], 8(%[dst])\n\t"
"movq 0(%[src]), %[scratch]\n\t"
"movq %[scratch], 0(%[dst])\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyWordUnsynchronized(uint8_t* dst, const uint8_t* src) {
uintptr_t* dst_ = reinterpret_cast<uintptr_t*>(dst);
const uintptr_t* src_ = reinterpret_cast<const uintptr_t*>(src);
uintptr_t scratch;
asm volatile ("movq 0(%[src]), %[scratch]\n\t"
"movq %[scratch], 0(%[dst])\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
inline void AtomicCopyByteUnsynchronized(uint8_t* dst, const uint8_t* src) {
uint8_t* dst_ = reinterpret_cast<uint8_t*>(dst);
const uint8_t* src_ = reinterpret_cast<const uint8_t*>(src);
uint8_t scratch;
asm volatile ("movb 0(%[src]), %[scratch]\n\t"
"movb %[scratch], 0(%[dst])\n\t"
: [scratch] "=&r" (scratch)
: [dst] "r" (dst_), [src] "r"(src_)
: "memory");
}
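
// Sizes used by the copy helpers above: blocks are 64 bytes and words are
// 8 bytes on this target.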
constexpr size_t JS_GENERATED_ATOMICS_BLOCKSIZE = 64;
constexpr size_t JS_GENERATED_ATOMICS_WORDSIZE = 8;
} // namespace jit
} // namespace js
#endif // jit_AtomicOperationsGenerated_h