// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
"os"
"strconv"
)

// A Config holds readonly compilation information.
// It is created once, early during compilation,
// and shared across all compilations.
type Config struct {
arch string // "amd64", etc.
PtrSize int64 // 4 or 8; copy of cmd/internal/sys.Arch.PtrSize
RegSize int64 // 4 or 8; copy of cmd/internal/sys.Arch.RegSize
Types Types
lowerBlock blockRewriter // lowering function
lowerValue valueRewriter // lowering function
registers []Register // machine registers
gpRegMask regMask // general purpose integer register mask
fpRegMask regMask // floating point register mask
specialRegMask regMask // special register mask
FPReg int8 // register number of frame pointer, -1 if not used
LinkReg int8 // register number of link register if it is a general purpose register, -1 if not used
hasGReg bool // has hardware g register
ctxt *obj.Link // Generic arch information
optimize bool // Do optimization
noDuffDevice bool // Don't use Duff's device
useSSE bool // Use SSE for non-float operations
nacl bool // GOOS=nacl
use387 bool // GO386=387
SoftFloat bool //
NeedsFpScratch bool // No direct move between GP and FP register sets
BigEndian bool //
sparsePhiCutoff uint64 // Sparse phi location algorithm used above this #blocks*#variables score
}

type (
blockRewriter func(*Block) bool
valueRewriter func(*Value) bool
)
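// The lowering functions are invoked by the compiler's lower pass. A minimal
// sketch of that pass, assuming a helper like applyRewrite that repeatedly
// applies the block and value rewrite rules until they stop firing
// (illustrative only; the real pass lives elsewhere in this package):
//
//	func lowerSketch(f *Func) {
//		// Rewrite generic ops into machine-specific ops for f's
//		// architecture, iterating to a fixed point.
//		applyRewrite(f, f.Config.lowerBlock, f.Config.lowerValue)
//	}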

type Types struct {
Bool *types.Type
Int8 *types.Type
Int16 *types.Type
Int32 *types.Type
Int64 *types.Type
UInt8 *types.Type
UInt16 *types.Type
UInt32 *types.Type
UInt64 *types.Type
Int *types.Type
Float32 *types.Type
Float64 *types.Type
UInt *types.Type
Uintptr *types.Type
String *types.Type
BytePtr *types.Type // TODO: use unsafe.Pointer instead?
Int32Ptr *types.Type
UInt32Ptr *types.Type
IntPtr *types.Type
UintptrPtr *types.Type
Float32Ptr *types.Type
Float64Ptr *types.Type
BytePtrPtr *types.Type
}

type Logger interface {
// Logf logs a message from the compiler.
Logf(string, ...interface{})
// Log returns true if logging is not a no-op;
// some logging calls account for more than a few heap allocations.
Log() bool
// Fatalf reports a compiler error and exits.
Fatalf(pos src.XPos, msg string, args ...interface{})
// Warnl writes compiler messages in the form expected by "errorcheck" tests.
Warnl(pos src.XPos, fmt_ string, args ...interface{})
// Forwards the Debug flags from gc.
Debug_checknil() bool
Debug_eagerwb() bool
}
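// A minimal usage sketch (illustrative only): expensive Logf calls are meant
// to be guarded by Log, since formatting the arguments can itself allocate.
//
//	if logger.Log() {
//		logger.Logf("rewrote %d values in %s", n, fname) // hypothetical message
//	}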

type Frontend interface {
CanSSA(t *types.Type) bool
Logger
// StringData returns a symbol pointing to the given string's contents.
StringData(string) interface{} // returns *gc.Sym
// Auto returns a Node for an auto variable of the given type.
// The SSA compiler uses this function to allocate space for spills.
Auto(src.XPos, *types.Type) GCNode
// Given the name for a compound type, returns the name we should use
// for the parts of that compound type.
SplitString(LocalSlot) (LocalSlot, LocalSlot)
SplitInterface(LocalSlot) (LocalSlot, LocalSlot)
SplitSlice(LocalSlot) (LocalSlot, LocalSlot, LocalSlot)
SplitComplex(LocalSlot) (LocalSlot, LocalSlot)
SplitStruct(LocalSlot, int) LocalSlot
SplitArray(LocalSlot) LocalSlot // array must be length 1
SplitInt64(LocalSlot) (LocalSlot, LocalSlot) // returns (hi, lo)
// DerefItab dereferences an itab function
// entry, given the symbol of the itab and
// the byte offset of the function pointer.
// It may return nil.
DerefItab(sym *obj.LSym, offset int64) *obj.LSym
// Line returns a string describing the given position.
Line(src.XPos) string
// AllocFrame assigns frame offsets to all live auto variables.
AllocFrame(f *Func)
// Syslook returns a symbol of the runtime function/variable with the
// given name.
Syslook(string) *obj.LSym
// UseWriteBarrier reports whether the write barrier is enabled.
UseWriteBarrier() bool
// SetWBPos indicates that a write barrier has been inserted
// in this function at position pos.
SetWBPos(pos src.XPos)
}
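// A minimal sketch of how the Split* helpers are meant to be used when
// decomposing compound values (the slot variables and field meanings below
// are illustrative, not taken from the real decompose pass):
//
//	ptr, length := fe.SplitString(slot) // string -> data pointer + length
//	hi, lo := fe.SplitInt64(slot)       // 64-bit int on a 32-bit arch -> high word + low word
//	elem := fe.SplitArray(slot)         // [1]T -> its single element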

// A GCNode is an interface used to hold a *gc.Node (a stack variable).
// We'd use *gc.Node directly but that would lead to an import cycle.
type GCNode interface {
Typ() *types.Type
String() string
StorageClass() StorageClass
}

type StorageClass uint8

const (
ClassAuto StorageClass = iota // local stack variable
ClassParam // argument
ClassParamOut // return value
)
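// A hedged sketch of how a frontend might map its own storage classes onto
// these values (PAUTO, PPARAM, and PPARAMOUT are the corresponding gc node
// classes; the switch is illustrative, not the actual gc code):
//
//	switch n.Class() {
//	case PAUTO:
//		return ClassAuto
//	case PPARAM:
//		return ClassParam
//	case PPARAMOUT:
//		return ClassParamOut
//	}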

// NewConfig returns a new configuration object for the given architecture.
func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config {
c := &Config{arch: arch, Types: types}
switch arch {
case "amd64":
c.PtrSize = 8
c.RegSize = 8
c.lowerBlock = rewriteBlockAMD64
c.lowerValue = rewriteValueAMD64
c.registers = registersAMD64[:]
c.gpRegMask = gpRegMaskAMD64
c.fpRegMask = fpRegMaskAMD64
c.FPReg = framepointerRegAMD64
c.LinkReg = linkRegAMD64
c.hasGReg = false
case "amd64p32":
c.PtrSize = 4
c.RegSize = 8
c.lowerBlock = rewriteBlockAMD64
c.lowerValue = rewriteValueAMD64
c.registers = registersAMD64[:]
c.gpRegMask = gpRegMaskAMD64
c.fpRegMask = fpRegMaskAMD64
c.FPReg = framepointerRegAMD64
c.LinkReg = linkRegAMD64
c.hasGReg = false
c.noDuffDevice = true
case "386":
c.PtrSize = 4
c.RegSize = 4
c.lowerBlock = rewriteBlock386
c.lowerValue = rewriteValue386
c.registers = registers386[:]
c.gpRegMask = gpRegMask386
c.fpRegMask = fpRegMask386
c.FPReg = framepointerReg386
c.LinkReg = linkReg386
c.hasGReg = false
case "arm":
c.PtrSize = 4
c.RegSize = 4
c.lowerBlock = rewriteBlockARM
c.lowerValue = rewriteValueARM
c.registers = registersARM[:]
c.gpRegMask = gpRegMaskARM
c.fpRegMask = fpRegMaskARM
c.FPReg = framepointerRegARM
c.LinkReg = linkRegARM
c.hasGReg = true
case "arm64":
c.PtrSize = 8
c.RegSize = 8
c.lowerBlock = rewriteBlockARM64
c.lowerValue = rewriteValueARM64
c.registers = registersARM64[:]
c.gpRegMask = gpRegMaskARM64
c.fpRegMask = fpRegMaskARM64
c.FPReg = framepointerRegARM64
c.LinkReg = linkRegARM64
c.hasGReg = true
c.noDuffDevice = objabi.GOOS == "darwin" // darwin linker cannot handle BR26 reloc with non-zero addend
case "ppc64":
c.BigEndian = true
fallthrough
case "ppc64le":
c.PtrSize = 8
c.RegSize = 8
c.lowerBlock = rewriteBlockPPC64
c.lowerValue = rewriteValuePPC64
c.registers = registersPPC64[:]
c.gpRegMask = gpRegMaskPPC64
c.fpRegMask = fpRegMaskPPC64
c.FPReg = framepointerRegPPC64
c.LinkReg = linkRegPPC64
c.noDuffDevice = true // TODO: Resolve PPC64 DuffDevice (has zero, but not copy)
c.hasGReg = true
case "mips64":
c.BigEndian = true
fallthrough
case "mips64le":
c.PtrSize = 8
c.RegSize = 8
c.lowerBlock = rewriteBlockMIPS64
c.lowerValue = rewriteValueMIPS64
c.registers = registersMIPS64[:]
c.gpRegMask = gpRegMaskMIPS64
c.fpRegMask = fpRegMaskMIPS64
c.specialRegMask = specialRegMaskMIPS64
c.FPReg = framepointerRegMIPS64
c.LinkReg = linkRegMIPS64
c.hasGReg = true
case "s390x":
c.PtrSize = 8
c.RegSize = 8
c.lowerBlock = rewriteBlockS390X
c.lowerValue = rewriteValueS390X
c.registers = registersS390X[:]
c.gpRegMask = gpRegMaskS390X
c.fpRegMask = fpRegMaskS390X
c.FPReg = framepointerRegS390X
c.LinkReg = linkRegS390X
c.hasGReg = true
c.noDuffDevice = true
c.BigEndian = true
case "mips":
c.BigEndian = true
fallthrough
case "mipsle":
c.PtrSize = 4
c.RegSize = 4
c.lowerBlock = rewriteBlockMIPS
c.lowerValue = rewriteValueMIPS
c.registers = registersMIPS[:]
c.gpRegMask = gpRegMaskMIPS
c.fpRegMask = fpRegMaskMIPS
c.specialRegMask = specialRegMaskMIPS
c.FPReg = framepointerRegMIPS
c.LinkReg = linkRegMIPS
c.hasGReg = true
c.noDuffDevice = true
default:
ctxt.Diag("arch %s not implemented", arch)
}
c.ctxt = ctxt
c.optimize = optimize
c.nacl = objabi.GOOS == "nacl"
c.useSSE = true
// Don't use Duff's device or SSE on Plan 9 AMD64, because
// floating point operations are not allowed in the note handler.
if objabi.GOOS == "plan9" && arch == "amd64" {
c.noDuffDevice = true
c.useSSE = false
}
if c.nacl {
c.noDuffDevice = true // Don't use Duff's device on NaCl
// the udiv runtime call clobbers R12 on nacl
opcodeTable[OpARMCALLudiv].reg.clobbers |= 1 << 12 // R12
}
// The cutoff is compared with the product of #blocks and #variables;
// if the product is smaller than the cutoff, the old non-sparse method is used.
// cutoff == 0 implies always sparse.
// cutoff == -1 implies never sparse.
// Good cutoff values seem to be O(million), depending on the constant-factor cost of the sparse method.
// TODO: get this from a flag, not an environment variable
c.sparsePhiCutoff = 2500000 // determined with crude experiments w/ make.bash; use 0 for testing (all sparse)
ev := os.Getenv("GO_SSA_PHI_LOC_CUTOFF")
if ev != "" {
v, err := strconv.ParseInt(ev, 10, 64)
if err != nil {
ctxt.Diag("Environment variable GO_SSA_PHI_LOC_CUTOFF (value '%s') did not parse as a number", ev)
}
c.sparsePhiCutoff = uint64(v) // a -1 converts to maxint, meaning the sparse method is never used
}
return c
}
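// A minimal usage sketch (illustrative only; the real call site is in the gc
// frontend, which passes a fully populated Types struct and its link context):
//
//	var t Types // assumed to be filled in with the frontend's *types.Type values
//	c := NewConfig(objabi.GOARCH, t, ctxt, true /* optimize */)
//	_ = c.SparsePhiCutoff()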

func (c *Config) Set387(b bool) {
c.NeedsFpScratch = b
c.use387 = b
}

func (c *Config) SparsePhiCutoff() uint64 { return c.sparsePhiCutoff }
func (c *Config) Ctxt() *obj.Link { return c.ctxt }
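// Illustrative use of the cutoff by phi placement (a sketch under the
// assumption that blocks and vars hold the function's block and variable
// counts; the real decision is made in the frontend's phi insertion code):
//
//	if uint64(blocks)*uint64(vars) > f.Config.SparsePhiCutoff() {
//		// use the sparse phi location algorithm
//	} else {
//		// use the simple, non-sparse algorithm
//	}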