compiler: add support for atomic operations
This also implements DisableInterrupts/EnableInterrupts for RISC-V, as
those operations were needed to implement a few libcalls.
aykevl authored and deadprogram committed May 28, 2020
1 parent 734613c commit fed433c
Showing 10 changed files with 389 additions and 24 deletions.
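For context, the following minimal sketch (not part of this commit; names are hypothetical) shows the kind of sync/atomic calls that the new compiler pass lowers directly to LLVM atomic instructions:

package main

import "sync/atomic"

// counter and the helpers below are hypothetical example names.
var counter uint32

func increment() uint32 {
	// Lowered inline to an LLVM atomicrmw add; the compiler adds the value
	// again so the new value is returned rather than the old one.
	return atomic.AddUint32(&counter, 1)
}

func tryClaim() bool {
	// Lowered inline to an LLVM cmpxchg; the "swapped" flag is extracted
	// from the result tuple.
	return atomic.CompareAndSwapUint32(&counter, 0, 1)
}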
57 changes: 57 additions & 0 deletions compiler/atomic.go
@@ -0,0 +1,57 @@
package compiler

import (
	"golang.org/x/tools/go/ssa"
	"tinygo.org/x/go-llvm"
)

// createAtomicOp lowers a sync/atomic library call to the equivalent LLVM
// atomic operation. It returns the result of the operation and true if the
// call could be lowered inline, and false otherwise.
func (b *builder) createAtomicOp(call *ssa.CallCommon) (llvm.Value, bool) {
	name := call.Value.(*ssa.Function).Name()
	switch name {
	case "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr":
		ptr := b.getValue(call.Args[0])
		val := b.getValue(call.Args[1])
		oldVal := b.CreateAtomicRMW(llvm.AtomicRMWBinOpAdd, ptr, val, llvm.AtomicOrderingSequentiallyConsistent, true)
		// Return the new value, not the original value returned by atomicrmw.
		return b.CreateAdd(oldVal, val, ""), true
	case "SwapInt32", "SwapInt64", "SwapUint32", "SwapUint64", "SwapUintptr", "SwapPointer":
		ptr := b.getValue(call.Args[0])
		val := b.getValue(call.Args[1])
		isPointer := val.Type().TypeKind() == llvm.PointerTypeKind
		if isPointer {
			// atomicrmw only supports integers, so cast to an integer.
			val = b.CreatePtrToInt(val, b.uintptrType, "")
			ptr = b.CreateBitCast(ptr, llvm.PointerType(val.Type(), 0), "")
		}
		oldVal := b.CreateAtomicRMW(llvm.AtomicRMWBinOpXchg, ptr, val, llvm.AtomicOrderingSequentiallyConsistent, true)
		if isPointer {
			oldVal = b.CreateIntToPtr(oldVal, b.i8ptrType, "")
		}
		return oldVal, true
	case "CompareAndSwapInt32", "CompareAndSwapInt64", "CompareAndSwapUint32", "CompareAndSwapUint64", "CompareAndSwapUintptr", "CompareAndSwapPointer":
		ptr := b.getValue(call.Args[0])
		old := b.getValue(call.Args[1])
		newVal := b.getValue(call.Args[2])
		tuple := b.CreateAtomicCmpXchg(ptr, old, newVal, llvm.AtomicOrderingSequentiallyConsistent, llvm.AtomicOrderingSequentiallyConsistent, true)
		swapped := b.CreateExtractValue(tuple, 1, "")
		return swapped, true
	case "LoadInt32", "LoadInt64", "LoadUint32", "LoadUint64", "LoadUintptr", "LoadPointer":
		ptr := b.getValue(call.Args[0])
		val := b.CreateLoad(ptr, "")
		val.SetOrdering(llvm.AtomicOrderingSequentiallyConsistent)
		val.SetAlignment(b.targetData.PrefTypeAlignment(val.Type())) // required
		return val, true
	case "StoreInt32", "StoreInt64", "StoreUint32", "StoreUint64", "StoreUintptr", "StorePointer":
		ptr := b.getValue(call.Args[0])
		val := b.getValue(call.Args[1])
		store := b.CreateStore(val, ptr)
		store.SetOrdering(llvm.AtomicOrderingSequentiallyConsistent)
		store.SetAlignment(b.targetData.PrefTypeAlignment(val.Type())) // required
		return store, true
	default:
		return llvm.Value{}, false
	}
}
8 changes: 8 additions & 0 deletions compiler/compiler.go
@@ -1323,6 +1323,14 @@ func (b *builder) createFunctionCall(instr *ssa.CallCommon) (llvm.Value, error)
		return b.createVolatileLoad(instr)
	case strings.HasPrefix(name, "runtime/volatile.Store"):
		return b.createVolatileStore(instr)
	case strings.HasPrefix(name, "sync/atomic."):
		val, ok := b.createAtomicOp(instr)
		if ok {
			// This call could be lowered as an atomic operation.
			return val, nil
		}
		// This call couldn't be lowered as an atomic operation; it's
		// probably something else. Continue as usual.
	case name == "runtime/interrupt.New":
		return b.createInterruptGlobal(instr)
	}
16 changes: 16 additions & 0 deletions src/device/riscv/riscv.go
@@ -19,3 +19,19 @@ func Asm(asm string)
// You can use {} in the asm string (which expands to a register) to set the
// return value.
func AsmFull(asm string, regs map[string]interface{}) uintptr

// DisableInterrupts disables all interrupts, and returns the old interrupt
// state.
func DisableInterrupts() uintptr {
	// Note: this can be optimized with a CSRRW instruction, which atomically
	// swaps the value and returns the old value.
	mask := MIE.Get()
	MIE.Set(0)
	return mask
}

// EnableInterrupts enables all interrupts again. The value passed in must be
// the mask returned by DisableInterrupts.
func EnableInterrupts(mask uintptr) {
	MIE.Set(mask)
}
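The runtime files later in this commit use these two helpers as an interrupt-masking critical section. The following is a minimal sketch of that pattern; the atomically helper is hypothetical and not part of this commit:

package main

import "device/riscv"

// atomically runs update with interrupts masked: save the interrupt mask,
// do the non-atomic work, then restore the mask.
func atomically(update func()) {
	mask := riscv.DisableInterrupts()
	update()
	riscv.EnableInterrupts(mask)
}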
83 changes: 83 additions & 0 deletions src/runtime/arch_cortexm.go
@@ -19,3 +19,86 @@ func align(ptr uintptr) uintptr {
func getCurrentStackPointer() uintptr {
	return arm.AsmFull("mov {}, sp", nil)
}

// Documentation:
// * https://llvm.org/docs/Atomics.html
// * https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
//
// In the case of Cortex-M, some atomic operations are emitted inline while
// others are emitted as libcalls. How many are emitted as libcalls depends on
// the MCU core variant (M3 and higher support some 32-bit atomic operations
// while M0 and M0+ do not).

//export __sync_fetch_and_add_4
func __sync_fetch_and_add_4(ptr *uint32, value uint32) uint32 {
	mask := arm.DisableInterrupts()
	oldValue := *ptr
	*ptr = oldValue + value
	arm.EnableInterrupts(mask)
	return oldValue
}

//export __sync_fetch_and_add_8
func __sync_fetch_and_add_8(ptr *uint64, value uint64) uint64 {
	mask := arm.DisableInterrupts()
	oldValue := *ptr
	*ptr = oldValue + value
	arm.EnableInterrupts(mask)
	return oldValue
}

//export __sync_lock_test_and_set_4
func __sync_lock_test_and_set_4(ptr *uint32, value uint32) uint32 {
	mask := arm.DisableInterrupts()
	oldValue := *ptr
	*ptr = value
	arm.EnableInterrupts(mask)
	return oldValue
}

//export __sync_lock_test_and_set_8
func __sync_lock_test_and_set_8(ptr *uint64, value uint64) uint64 {
	mask := arm.DisableInterrupts()
	oldValue := *ptr
	*ptr = value
	arm.EnableInterrupts(mask)
	return oldValue
}

//export __sync_val_compare_and_swap_4
func __sync_val_compare_and_swap_4(ptr *uint32, expected, desired uint32) uint32 {
	mask := arm.DisableInterrupts()
	oldValue := *ptr
	if oldValue == expected {
		*ptr = desired
	}
	arm.EnableInterrupts(mask)
	return oldValue
}

//export __sync_val_compare_and_swap_8
func __sync_val_compare_and_swap_8(ptr *uint64, expected, desired uint64) uint64 {
	mask := arm.DisableInterrupts()
	oldValue := *ptr
	if oldValue == expected {
		*ptr = desired
	}
	arm.EnableInterrupts(mask)
	return oldValue
}

// The safest thing to do here would just be to disable interrupts for
// procPin/procUnpin. Note that a global variable is safe in this case, as any
// access to procPinnedMask will happen with interrupts disabled.

var procPinnedMask uintptr

//go:linkname procPin sync/atomic.runtime_procPin
func procPin() {
	procPinnedMask = arm.DisableInterrupts()
}

//go:linkname procUnpin sync/atomic.runtime_procUnpin
func procUnpin() {
	arm.EnableInterrupts(procPinnedMask)
}
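For reference, the standard library's atomic.Value calls runtime_procPin/runtime_procUnpin when storing a value, so code like the following hedged sketch (variable and function names are hypothetical, not part of this commit) is what ends up exercising the two helpers above:

package main

import "sync/atomic"

// cfg is a hypothetical example variable; atomic.Value.Store pins the calling
// goroutine via runtime_procPin while it publishes the stored type, which on
// Cortex-M now maps onto the interrupt-masking helpers defined above.
var cfg atomic.Value

func updateConfig(v string) {
	cfg.Store(v)
}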
73 changes: 73 additions & 0 deletions src/runtime/arch_tinygoriscv.go
@@ -17,3 +17,76 @@ func align(ptr uintptr) uintptr {
func getCurrentStackPointer() uintptr {
	return riscv.AsmFull("mv {}, sp", nil)
}

// Documentation:
// * https://llvm.org/docs/Atomics.html
// * https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
//
// In the case of RISC-V, some operations may be implemented with libcalls if
// the operation is too big to be handled by assembly. Officially, these calls
// should be implemented with a lock-free algorithm, but as (at the time of
// writing) all supported RISC-V chips have a single hart, we can simply
// disable interrupts to get the same behavior.

//export __atomic_load_8
func __atomic_load_8(ptr *uint64, ordering int32) uint64 {
	mask := riscv.DisableInterrupts()
	value := *ptr
	riscv.EnableInterrupts(mask)
	return value
}

//export __atomic_store_8
func __atomic_store_8(ptr *uint64, value uint64, ordering int32) {
	mask := riscv.DisableInterrupts()
	*ptr = value
	riscv.EnableInterrupts(mask)
}

//export __atomic_exchange_8
func __atomic_exchange_8(ptr *uint64, value uint64, ordering int32) uint64 {
	mask := riscv.DisableInterrupts()
	oldValue := *ptr
	*ptr = value
	riscv.EnableInterrupts(mask)
	return oldValue
}

//export __atomic_compare_exchange_8
func __atomic_compare_exchange_8(ptr, expected *uint64, desired uint64, success_ordering, failure_ordering int32) bool {
	mask := riscv.DisableInterrupts()
	oldValue := *ptr
	success := oldValue == *expected
	if success {
		*ptr = desired
	} else {
		*expected = oldValue
	}
	riscv.EnableInterrupts(mask)
	return success
}

//export __atomic_fetch_add_8
func __atomic_fetch_add_8(ptr *uint64, value uint64, ordering int32) uint64 {
	mask := riscv.DisableInterrupts()
	oldValue := *ptr
	*ptr = oldValue + value
	riscv.EnableInterrupts(mask)
	return oldValue
}

// The safest thing to do here would just be to disable interrupts for
// procPin/procUnpin. Note that a global variable is safe in this case, as any
// access to procPinnedMask will happen with interrupts disabled.

var procPinnedMask uintptr

//go:linkname procPin sync/atomic.runtime_procPin
func procPin() {
	procPinnedMask = riscv.DisableInterrupts()
}

//go:linkname procUnpin sync/atomic.runtime_procUnpin
func procUnpin() {
	riscv.EnableInterrupts(procPinnedMask)
}
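As a hedged illustration (not part of this commit), a 64-bit atomic add is the kind of operation that is too wide to lower inline on 32-bit RISC-V and therefore reaches __atomic_fetch_add_8 above; the names below are hypothetical:

package main

import "sync/atomic"

// ticks is a hypothetical example variable; on an RV32 target this AddUint64
// is emitted as a call to the __atomic_fetch_add_8 libcall defined above.
var ticks uint64

func tick() uint64 {
	return atomic.AddUint64(&ticks, 1)
}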
24 changes: 0 additions & 24 deletions src/runtime/atomic.go

This file was deleted.

11 changes: 11 additions & 0 deletions src/runtime/runtime_unix.go
@@ -94,3 +94,14 @@ func extalloc(size uintptr) unsafe.Pointer {

//export free
func extfree(ptr unsafe.Pointer)

// TinyGo does not yet support any form of parallelism on an OS, so these can be
// left empty.

//go:linkname procPin sync/atomic.runtime_procPin
func procPin() {
}

//go:linkname procUnpin sync/atomic.runtime_procUnpin
func procUnpin() {
}
11 changes: 11 additions & 0 deletions src/runtime/runtime_wasm.go
@@ -91,3 +91,14 @@ func ticks() timeUnit
func abort() {
	trap()
}

// TinyGo does not yet support any form of parallelism on WebAssembly, so these
// can be left empty.

//go:linkname procPin sync/atomic.runtime_procPin
func procPin() {
}

//go:linkname procUnpin sync/atomic.runtime_procUnpin
func procUnpin() {
}