Source file src/cmd/compile/internal/loong64/ssa.go

     1  // Copyright 2022 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package loong64
     6  
     7  import (
     8  	"math"
     9  
    10  	"cmd/compile/internal/base"
    11  	"cmd/compile/internal/ir"
    12  	"cmd/compile/internal/logopt"
    13  	"cmd/compile/internal/objw"
    14  	"cmd/compile/internal/ssa"
    15  	"cmd/compile/internal/ssagen"
    16  	"cmd/compile/internal/types"
    17  	"cmd/internal/obj"
    18  	"cmd/internal/obj/loong64"
    19  	"internal/abi"
    20  )
    21  
    22  // isFPreg reports whether r is an FP register.
    23  func isFPreg(r int16) bool {
    24  	return loong64.REG_F0 <= r && r <= loong64.REG_F31
    25  }
    26  
    27  // loadByType returns the load instruction of the given type.
    28  func loadByType(t *types.Type, r int16) obj.As {
    29  	if isFPreg(r) {
    30  		if t.Size() == 4 {
    31  			return loong64.AMOVF
    32  		} else {
    33  			return loong64.AMOVD
    34  		}
    35  	} else {
    36  		switch t.Size() {
    37  		case 1:
    38  			if t.IsSigned() {
    39  				return loong64.AMOVB
    40  			} else {
    41  				return loong64.AMOVBU
    42  			}
    43  		case 2:
    44  			if t.IsSigned() {
    45  				return loong64.AMOVH
    46  			} else {
    47  				return loong64.AMOVHU
    48  			}
    49  		case 4:
    50  			if t.IsSigned() {
    51  				return loong64.AMOVW
    52  			} else {
    53  				return loong64.AMOVWU
    54  			}
    55  		case 8:
    56  			return loong64.AMOVV
    57  		}
    58  	}
    59  	panic("bad load type")
    60  }
    61  
    62  // storeByType returns the store instruction of the given type.
    63  func storeByType(t *types.Type, r int16) obj.As {
    64  	if isFPreg(r) {
    65  		if t.Size() == 4 {
    66  			return loong64.AMOVF
    67  		} else {
    68  			return loong64.AMOVD
    69  		}
    70  	} else {
    71  		switch t.Size() {
    72  		case 1:
    73  			return loong64.AMOVB
    74  		case 2:
    75  			return loong64.AMOVH
    76  		case 4:
    77  			return loong64.AMOVW
    78  		case 8:
    79  			return loong64.AMOVV
    80  		}
    81  	}
    82  	panic("bad store type")
    83  }
    84  
    85  // largestMove returns the largest move instruction possible and its size,
    86  // given the alignment of the total size of the move.
    87  //
    88  // e.g., a 16-byte move may use MOVV, but an 11-byte move must use MOVB.
    89  //
    90  // Note that the moves may not be on naturally aligned addresses depending on
    91  // the source and destination.
    92  //
    93  // This matches the calculation in ssa.moveSize.
    94  func largestMove(alignment int64) (obj.As, int64) {
    95  	switch {
    96  	case alignment%8 == 0:
    97  		return loong64.AMOVV, 8
    98  	case alignment%4 == 0:
    99  		return loong64.AMOVW, 4
   100  	case alignment%2 == 0:
   101  		return loong64.AMOVH, 2
   102  	default:
   103  		return loong64.AMOVB, 1
   104  	}
   105  }
   106  
   107  func ssaGenValue(s *ssagen.State, v *ssa.Value) {
   108  	switch v.Op {
   109  	case ssa.OpCopy, ssa.OpLOONG64MOVVreg:
   110  		if v.Type.IsMemory() {
   111  			return
   112  		}
   113  		x := v.Args[0].Reg()
   114  		y := v.Reg()
   115  		if x == y {
   116  			return
   117  		}
   118  		as := loong64.AMOVV
   119  		if isFPreg(x) && isFPreg(y) {
   120  			as = loong64.AMOVD
   121  		}
   122  		p := s.Prog(as)
   123  		p.From.Type = obj.TYPE_REG
   124  		p.From.Reg = x
   125  		p.To.Type = obj.TYPE_REG
   126  		p.To.Reg = y
   127  	case ssa.OpLOONG64MOVVnop,
   128  		ssa.OpLOONG64LoweredRound32F,
   129  		ssa.OpLOONG64LoweredRound64F:
   130  		// nothing to do
   131  	case ssa.OpLoadReg:
   132  		if v.Type.IsFlags() {
   133  			v.Fatalf("load flags not implemented: %v", v.LongString())
   134  			return
   135  		}
   136  		r := v.Reg()
   137  		p := s.Prog(loadByType(v.Type, r))
   138  		ssagen.AddrAuto(&p.From, v.Args[0])
   139  		p.To.Type = obj.TYPE_REG
   140  		p.To.Reg = r
   141  	case ssa.OpStoreReg:
   142  		if v.Type.IsFlags() {
   143  			v.Fatalf("store flags not implemented: %v", v.LongString())
   144  			return
   145  		}
   146  		r := v.Args[0].Reg()
   147  		p := s.Prog(storeByType(v.Type, r))
   148  		p.From.Type = obj.TYPE_REG
   149  		p.From.Reg = r
   150  		ssagen.AddrAuto(&p.To, v)
   151  	case ssa.OpArgIntReg, ssa.OpArgFloatReg:
   152  		// The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
   153  		// The loop only runs once.
   154  		for _, a := range v.Block.Func.RegArgs {
   155  			// Pass the spill/unspill information along to the assembler, offset by size of
   156  			// the saved LR slot.
   157  			addr := ssagen.SpillSlotAddr(a, loong64.REGSP, base.Ctxt.Arch.FixedFrameSize)
   158  			s.FuncInfo().AddSpill(
   159  				obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type, a.Reg), Spill: storeByType(a.Type, a.Reg)})
   160  		}
   161  		v.Block.Func.RegArgs = nil
   162  		ssagen.CheckArgReg(v)
   163  	case ssa.OpLOONG64ADDV,
   164  		ssa.OpLOONG64SUBV,
   165  		ssa.OpLOONG64AND,
   166  		ssa.OpLOONG64OR,
   167  		ssa.OpLOONG64XOR,
   168  		ssa.OpLOONG64NOR,
   169  		ssa.OpLOONG64ANDN,
   170  		ssa.OpLOONG64ORN,
   171  		ssa.OpLOONG64SLL,
   172  		ssa.OpLOONG64SLLV,
   173  		ssa.OpLOONG64SRL,
   174  		ssa.OpLOONG64SRLV,
   175  		ssa.OpLOONG64SRA,
   176  		ssa.OpLOONG64SRAV,
   177  		ssa.OpLOONG64ROTR,
   178  		ssa.OpLOONG64ROTRV,
   179  		ssa.OpLOONG64ADDF,
   180  		ssa.OpLOONG64ADDD,
   181  		ssa.OpLOONG64SUBF,
   182  		ssa.OpLOONG64SUBD,
   183  		ssa.OpLOONG64MULF,
   184  		ssa.OpLOONG64MULD,
   185  		ssa.OpLOONG64DIVF,
   186  		ssa.OpLOONG64DIVD,
   187  		ssa.OpLOONG64MULV, ssa.OpLOONG64MULHV, ssa.OpLOONG64MULHVU,
   188  		ssa.OpLOONG64DIVV, ssa.OpLOONG64REMV, ssa.OpLOONG64DIVVU, ssa.OpLOONG64REMVU,
   189  		ssa.OpLOONG64FCOPYSGD:
   190  		p := s.Prog(v.Op.Asm())
   191  		p.From.Type = obj.TYPE_REG
   192  		p.From.Reg = v.Args[1].Reg()
   193  		p.Reg = v.Args[0].Reg()
   194  		p.To.Type = obj.TYPE_REG
   195  		p.To.Reg = v.Reg()
   196  
   197  	case ssa.OpLOONG64BSTRPICKV,
   198  		ssa.OpLOONG64BSTRPICKW:
   199  		p := s.Prog(v.Op.Asm())
   200  		p.From.Type = obj.TYPE_CONST
   201  		if v.Op == ssa.OpLOONG64BSTRPICKW {
   202  			p.From.Offset = v.AuxInt >> 5
   203  			p.AddRestSourceConst(v.AuxInt & 0x1f)
   204  		} else {
   205  			p.From.Offset = v.AuxInt >> 6
   206  			p.AddRestSourceConst(v.AuxInt & 0x3f)
   207  		}
   208  		p.Reg = v.Args[0].Reg()
   209  		p.To.Type = obj.TYPE_REG
   210  		p.To.Reg = v.Reg()
   211  
   212  	case ssa.OpLOONG64FMINF,
   213  		ssa.OpLOONG64FMIND,
   214  		ssa.OpLOONG64FMAXF,
   215  		ssa.OpLOONG64FMAXD:
   216  		// ADDD Rarg0, Rarg1, Rout
   217  		// CMPEQD Rarg0, Rarg0, FCC0
   218  		// bceqz FCC0, end
   219  		// CMPEQD Rarg1, Rarg1, FCC0
   220  		// bceqz FCC0, end
   221  		// F(MIN|MAX)(F|D)
   222  
   223  		r0 := v.Args[0].Reg()
   224  		r1 := v.Args[1].Reg()
   225  		out := v.Reg()
   226  		add, fcmp := loong64.AADDD, loong64.ACMPEQD
   227  		if v.Op == ssa.OpLOONG64FMINF || v.Op == ssa.OpLOONG64FMAXF {
   228  			add = loong64.AADDF
   229  			fcmp = loong64.ACMPEQF
   230  		}
   231  		p1 := s.Prog(add)
   232  		p1.From.Type = obj.TYPE_REG
   233  		p1.From.Reg = r0
   234  		p1.Reg = r1
   235  		p1.To.Type = obj.TYPE_REG
   236  		p1.To.Reg = out
   237  
   238  		p2 := s.Prog(fcmp)
   239  		p2.From.Type = obj.TYPE_REG
   240  		p2.From.Reg = r0
   241  		p2.Reg = r0
   242  		p2.To.Type = obj.TYPE_REG
   243  		p2.To.Reg = loong64.REG_FCC0
   244  
   245  		p3 := s.Prog(loong64.ABFPF)
   246  		p3.To.Type = obj.TYPE_BRANCH
   247  
   248  		p4 := s.Prog(fcmp)
   249  		p4.From.Type = obj.TYPE_REG
   250  		p4.From.Reg = r1
   251  		p4.Reg = r1
   252  		p4.To.Type = obj.TYPE_REG
   253  		p4.To.Reg = loong64.REG_FCC0
   254  
   255  		p5 := s.Prog(loong64.ABFPF)
   256  		p5.To.Type = obj.TYPE_BRANCH
   257  
   258  		p6 := s.Prog(v.Op.Asm())
   259  		p6.From.Type = obj.TYPE_REG
   260  		p6.From.Reg = r1
   261  		p6.Reg = r0
   262  		p6.To.Type = obj.TYPE_REG
   263  		p6.To.Reg = out
   264  
   265  		nop := s.Prog(obj.ANOP)
   266  		p3.To.SetTarget(nop)
   267  		p5.To.SetTarget(nop)
   268  
   269  	case ssa.OpLOONG64SGT,
   270  		ssa.OpLOONG64SGTU:
   271  		p := s.Prog(v.Op.Asm())
   272  		p.From.Type = obj.TYPE_REG
   273  		p.From.Reg = v.Args[0].Reg()
   274  		p.Reg = v.Args[1].Reg()
   275  		p.To.Type = obj.TYPE_REG
   276  		p.To.Reg = v.Reg()
   277  	case ssa.OpLOONG64ADDVconst,
   278  		ssa.OpLOONG64SUBVconst,
   279  		ssa.OpLOONG64ANDconst,
   280  		ssa.OpLOONG64ORconst,
   281  		ssa.OpLOONG64XORconst,
   282  		ssa.OpLOONG64SLLconst,
   283  		ssa.OpLOONG64SLLVconst,
   284  		ssa.OpLOONG64SRLconst,
   285  		ssa.OpLOONG64SRLVconst,
   286  		ssa.OpLOONG64SRAconst,
   287  		ssa.OpLOONG64SRAVconst,
   288  		ssa.OpLOONG64ROTRconst,
   289  		ssa.OpLOONG64ROTRVconst,
   290  		ssa.OpLOONG64SGTconst,
   291  		ssa.OpLOONG64SGTUconst:
   292  		p := s.Prog(v.Op.Asm())
   293  		p.From.Type = obj.TYPE_CONST
   294  		p.From.Offset = v.AuxInt
   295  		p.Reg = v.Args[0].Reg()
   296  		p.To.Type = obj.TYPE_REG
   297  		p.To.Reg = v.Reg()
   298  
   299  	case ssa.OpLOONG64NORconst:
   300  		// MOVV $const, Rtmp
   301  		// NOR  Rtmp, Rarg0, Rout
   302  		p := s.Prog(loong64.AMOVV)
   303  		p.From.Type = obj.TYPE_CONST
   304  		p.From.Offset = v.AuxInt
   305  		p.To.Type = obj.TYPE_REG
   306  		p.To.Reg = loong64.REGTMP
   307  
   308  		p2 := s.Prog(v.Op.Asm())
   309  		p2.From.Type = obj.TYPE_REG
   310  		p2.From.Reg = loong64.REGTMP
   311  		p2.Reg = v.Args[0].Reg()
   312  		p2.To.Type = obj.TYPE_REG
   313  		p2.To.Reg = v.Reg()
   314  
   315  	case ssa.OpLOONG64MOVVconst:
   316  		r := v.Reg()
   317  		p := s.Prog(v.Op.Asm())
   318  		p.From.Type = obj.TYPE_CONST
   319  		p.From.Offset = v.AuxInt
   320  		p.To.Type = obj.TYPE_REG
   321  		p.To.Reg = r
   322  		if isFPreg(r) {
   323  			// cannot move into FP or special registers, use TMP as intermediate
   324  			p.To.Reg = loong64.REGTMP
   325  			p = s.Prog(loong64.AMOVV)
   326  			p.From.Type = obj.TYPE_REG
   327  			p.From.Reg = loong64.REGTMP
   328  			p.To.Type = obj.TYPE_REG
   329  			p.To.Reg = r
   330  		}
   331  	case ssa.OpLOONG64MOVFconst,
   332  		ssa.OpLOONG64MOVDconst:
   333  		p := s.Prog(v.Op.Asm())
   334  		p.From.Type = obj.TYPE_FCONST
   335  		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
   336  		p.To.Type = obj.TYPE_REG
   337  		p.To.Reg = v.Reg()
   338  	case ssa.OpLOONG64CMPEQF,
   339  		ssa.OpLOONG64CMPEQD,
   340  		ssa.OpLOONG64CMPGEF,
   341  		ssa.OpLOONG64CMPGED,
   342  		ssa.OpLOONG64CMPGTF,
   343  		ssa.OpLOONG64CMPGTD:
   344  		p := s.Prog(v.Op.Asm())
   345  		p.From.Type = obj.TYPE_REG
   346  		p.From.Reg = v.Args[0].Reg()
   347  		p.Reg = v.Args[1].Reg()
   348  		p.To.Type = obj.TYPE_REG
   349  		p.To.Reg = loong64.REG_FCC0
   350  
   351  	case ssa.OpLOONG64FMADDF,
   352  		ssa.OpLOONG64FMADDD,
   353  		ssa.OpLOONG64FMSUBF,
   354  		ssa.OpLOONG64FMSUBD,
   355  		ssa.OpLOONG64FNMADDF,
   356  		ssa.OpLOONG64FNMADDD,
   357  		ssa.OpLOONG64FNMSUBF,
   358  		ssa.OpLOONG64FNMSUBD:
   359  		p := s.Prog(v.Op.Asm())
   360  		// r=(FMA x y z) -> FMADDD z, y, x, r
   361  		// the SSA operand order is for taking advantage of
   362  		// commutativity (that only applies for the first two operands)
   363  		r := v.Reg()
   364  		x := v.Args[0].Reg()
   365  		y := v.Args[1].Reg()
   366  		z := v.Args[2].Reg()
   367  		p.From.Type = obj.TYPE_REG
   368  		p.From.Reg = z
   369  		p.Reg = y
   370  		p.AddRestSourceReg(x)
   371  		p.To.Type = obj.TYPE_REG
   372  		p.To.Reg = r
   373  
   374  	case ssa.OpLOONG64MOVVaddr:
   375  		p := s.Prog(loong64.AMOVV)
   376  		p.From.Type = obj.TYPE_ADDR
   377  		p.From.Reg = v.Args[0].Reg()
   378  		var wantreg string
   379  		// MOVV $sym+off(base), R
   380  		// the assembler expands it as the following:
   381  		// - base is SP: add constant offset to SP (R3)
   382  		// when constant is large, tmp register (R30) may be used
   383  		// - base is SB: load external address with relocation
   384  		switch v.Aux.(type) {
   385  		default:
   386  			v.Fatalf("aux is of unknown type %T", v.Aux)
   387  		case *obj.LSym:
   388  			wantreg = "SB"
   389  			ssagen.AddAux(&p.From, v)
   390  		case *ir.Name:
   391  			wantreg = "SP"
   392  			ssagen.AddAux(&p.From, v)
   393  		case nil:
   394  			// No sym, just MOVV $off(SP), R
   395  			wantreg = "SP"
   396  			p.From.Offset = v.AuxInt
   397  		}
   398  		if reg := v.Args[0].RegName(); reg != wantreg {
   399  			v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
   400  		}
   401  		p.To.Type = obj.TYPE_REG
   402  		p.To.Reg = v.Reg()
   403  
   404  	case ssa.OpLOONG64MOVBloadidx,
   405  		ssa.OpLOONG64MOVBUloadidx,
   406  		ssa.OpLOONG64MOVHloadidx,
   407  		ssa.OpLOONG64MOVHUloadidx,
   408  		ssa.OpLOONG64MOVWloadidx,
   409  		ssa.OpLOONG64MOVWUloadidx,
   410  		ssa.OpLOONG64MOVVloadidx,
   411  		ssa.OpLOONG64MOVFloadidx,
   412  		ssa.OpLOONG64MOVDloadidx:
   413  		p := s.Prog(v.Op.Asm())
   414  		p.From.Type = obj.TYPE_MEM
   415  		p.From.Name = obj.NAME_NONE
   416  		p.From.Reg = v.Args[0].Reg()
   417  		p.From.Index = v.Args[1].Reg()
   418  		p.To.Type = obj.TYPE_REG
   419  		p.To.Reg = v.Reg()
   420  
   421  	case ssa.OpLOONG64MOVBstoreidx,
   422  		ssa.OpLOONG64MOVHstoreidx,
   423  		ssa.OpLOONG64MOVWstoreidx,
   424  		ssa.OpLOONG64MOVVstoreidx,
   425  		ssa.OpLOONG64MOVFstoreidx,
   426  		ssa.OpLOONG64MOVDstoreidx:
   427  		p := s.Prog(v.Op.Asm())
   428  		p.From.Type = obj.TYPE_REG
   429  		p.From.Reg = v.Args[2].Reg()
   430  		p.To.Type = obj.TYPE_MEM
   431  		p.To.Name = obj.NAME_NONE
   432  		p.To.Reg = v.Args[0].Reg()
   433  		p.To.Index = v.Args[1].Reg()
   434  
   435  	case ssa.OpLOONG64MOVBstorezeroidx,
   436  		ssa.OpLOONG64MOVHstorezeroidx,
   437  		ssa.OpLOONG64MOVWstorezeroidx,
   438  		ssa.OpLOONG64MOVVstorezeroidx:
   439  		p := s.Prog(v.Op.Asm())
   440  		p.From.Type = obj.TYPE_REG
   441  		p.From.Reg = loong64.REGZERO
   442  		p.To.Type = obj.TYPE_MEM
   443  		p.To.Name = obj.NAME_NONE
   444  		p.To.Reg = v.Args[0].Reg()
   445  		p.To.Index = v.Args[1].Reg()
   446  
   447  	case ssa.OpLOONG64MOVBload,
   448  		ssa.OpLOONG64MOVBUload,
   449  		ssa.OpLOONG64MOVHload,
   450  		ssa.OpLOONG64MOVHUload,
   451  		ssa.OpLOONG64MOVWload,
   452  		ssa.OpLOONG64MOVWUload,
   453  		ssa.OpLOONG64MOVVload,
   454  		ssa.OpLOONG64MOVFload,
   455  		ssa.OpLOONG64MOVDload:
   456  		p := s.Prog(v.Op.Asm())
   457  		p.From.Type = obj.TYPE_MEM
   458  		p.From.Reg = v.Args[0].Reg()
   459  		ssagen.AddAux(&p.From, v)
   460  		p.To.Type = obj.TYPE_REG
   461  		p.To.Reg = v.Reg()
   462  	case ssa.OpLOONG64MOVBstore,
   463  		ssa.OpLOONG64MOVHstore,
   464  		ssa.OpLOONG64MOVWstore,
   465  		ssa.OpLOONG64MOVVstore,
   466  		ssa.OpLOONG64MOVFstore,
   467  		ssa.OpLOONG64MOVDstore:
   468  		p := s.Prog(v.Op.Asm())
   469  		p.From.Type = obj.TYPE_REG
   470  		p.From.Reg = v.Args[1].Reg()
   471  		p.To.Type = obj.TYPE_MEM
   472  		p.To.Reg = v.Args[0].Reg()
   473  		ssagen.AddAux(&p.To, v)
   474  	case ssa.OpLOONG64MOVBstorezero,
   475  		ssa.OpLOONG64MOVHstorezero,
   476  		ssa.OpLOONG64MOVWstorezero,
   477  		ssa.OpLOONG64MOVVstorezero:
   478  		p := s.Prog(v.Op.Asm())
   479  		p.From.Type = obj.TYPE_REG
   480  		p.From.Reg = loong64.REGZERO
   481  		p.To.Type = obj.TYPE_MEM
   482  		p.To.Reg = v.Args[0].Reg()
   483  		ssagen.AddAux(&p.To, v)
   484  	case ssa.OpLOONG64MOVBreg,
   485  		ssa.OpLOONG64MOVBUreg,
   486  		ssa.OpLOONG64MOVHreg,
   487  		ssa.OpLOONG64MOVHUreg,
   488  		ssa.OpLOONG64MOVWreg,
   489  		ssa.OpLOONG64MOVWUreg:
   490  		a := v.Args[0]
   491  		for a.Op == ssa.OpCopy || a.Op == ssa.OpLOONG64MOVVreg {
   492  			a = a.Args[0]
   493  		}
   494  		if a.Op == ssa.OpLoadReg && loong64.REG_R0 <= a.Reg() && a.Reg() <= loong64.REG_R31 {
   495  			// LoadReg from a narrower type does an extension, except loading
   496  			// to a floating point register. So only eliminate the extension
   497  			// if it is loaded to an integer register.
   498  
   499  			t := a.Type
   500  			switch {
   501  			case v.Op == ssa.OpLOONG64MOVBreg && t.Size() == 1 && t.IsSigned(),
   502  				v.Op == ssa.OpLOONG64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
   503  				v.Op == ssa.OpLOONG64MOVHreg && t.Size() == 2 && t.IsSigned(),
   504  				v.Op == ssa.OpLOONG64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
   505  				v.Op == ssa.OpLOONG64MOVWreg && t.Size() == 4 && t.IsSigned(),
   506  				v.Op == ssa.OpLOONG64MOVWUreg && t.Size() == 4 && !t.IsSigned():
   507  				// arg is a proper-typed load, already zero/sign-extended, don't extend again
   508  				if v.Reg() == v.Args[0].Reg() {
   509  					return
   510  				}
   511  				p := s.Prog(loong64.AMOVV)
   512  				p.From.Type = obj.TYPE_REG
   513  				p.From.Reg = v.Args[0].Reg()
   514  				p.To.Type = obj.TYPE_REG
   515  				p.To.Reg = v.Reg()
   516  				return
   517  			default:
   518  			}
   519  		}
   520  		fallthrough
   521  
   522  	case ssa.OpLOONG64MOVWF,
   523  		ssa.OpLOONG64MOVWD,
   524  		ssa.OpLOONG64TRUNCFW,
   525  		ssa.OpLOONG64TRUNCDW,
   526  		ssa.OpLOONG64MOVVF,
   527  		ssa.OpLOONG64MOVVD,
   528  		ssa.OpLOONG64TRUNCFV,
   529  		ssa.OpLOONG64TRUNCDV,
   530  		ssa.OpLOONG64MOVFD,
   531  		ssa.OpLOONG64MOVDF,
   532  		ssa.OpLOONG64MOVWfpgp,
   533  		ssa.OpLOONG64MOVWgpfp,
   534  		ssa.OpLOONG64MOVVfpgp,
   535  		ssa.OpLOONG64MOVVgpfp,
   536  		ssa.OpLOONG64NEGF,
   537  		ssa.OpLOONG64NEGD,
   538  		ssa.OpLOONG64CLZW,
   539  		ssa.OpLOONG64CLZV,
   540  		ssa.OpLOONG64CTZW,
   541  		ssa.OpLOONG64CTZV,
   542  		ssa.OpLOONG64SQRTD,
   543  		ssa.OpLOONG64SQRTF,
   544  		ssa.OpLOONG64REVB2H,
   545  		ssa.OpLOONG64REVB2W,
   546  		ssa.OpLOONG64REVBV,
   547  		ssa.OpLOONG64BITREV4B,
   548  		ssa.OpLOONG64BITREVW,
   549  		ssa.OpLOONG64BITREVV,
   550  		ssa.OpLOONG64ABSD:
   551  		p := s.Prog(v.Op.Asm())
   552  		p.From.Type = obj.TYPE_REG
   553  		p.From.Reg = v.Args[0].Reg()
   554  		p.To.Type = obj.TYPE_REG
   555  		p.To.Reg = v.Reg()
   556  
   557  	case ssa.OpLOONG64VPCNT64,
   558  		ssa.OpLOONG64VPCNT32,
   559  		ssa.OpLOONG64VPCNT16:
   560  		p := s.Prog(v.Op.Asm())
   561  		p.From.Type = obj.TYPE_REG
   562  		p.From.Reg = ((v.Args[0].Reg() - loong64.REG_F0) & 31) + loong64.REG_V0
   563  		p.To.Type = obj.TYPE_REG
   564  		p.To.Reg = ((v.Reg() - loong64.REG_F0) & 31) + loong64.REG_V0
   565  
   566  	case ssa.OpLOONG64NEGV:
   567  		// SUB from REGZERO
   568  		p := s.Prog(loong64.ASUBVU)
   569  		p.From.Type = obj.TYPE_REG
   570  		p.From.Reg = v.Args[0].Reg()
   571  		p.Reg = loong64.REGZERO
   572  		p.To.Type = obj.TYPE_REG
   573  		p.To.Reg = v.Reg()
   574  
   575  	case ssa.OpLOONG64DUFFZERO:
   576  		// runtime.duffzero expects start address in R20
   577  		p := s.Prog(obj.ADUFFZERO)
   578  		p.To.Type = obj.TYPE_MEM
   579  		p.To.Name = obj.NAME_EXTERN
   580  		p.To.Sym = ir.Syms.Duffzero
   581  		p.To.Offset = v.AuxInt
   582  	case ssa.OpLOONG64LoweredZero:
   583  		// MOVx	R0, (Rarg0)
   584  		// ADDV	$sz, Rarg0
   585  		// BGEU	Rarg1, Rarg0, -2(PC)
   586  		mov, sz := largestMove(v.AuxInt)
   587  		p := s.Prog(mov)
   588  		p.From.Type = obj.TYPE_REG
   589  		p.From.Reg = loong64.REGZERO
   590  		p.To.Type = obj.TYPE_MEM
   591  		p.To.Reg = v.Args[0].Reg()
   592  
   593  		p2 := s.Prog(loong64.AADDVU)
   594  		p2.From.Type = obj.TYPE_CONST
   595  		p2.From.Offset = sz
   596  		p2.To.Type = obj.TYPE_REG
   597  		p2.To.Reg = v.Args[0].Reg()
   598  
   599  		p3 := s.Prog(loong64.ABGEU)
   600  		p3.From.Type = obj.TYPE_REG
   601  		p3.From.Reg = v.Args[1].Reg()
   602  		p3.Reg = v.Args[0].Reg()
   603  		p3.To.Type = obj.TYPE_BRANCH
   604  		p3.To.SetTarget(p)
   605  
   606  	case ssa.OpLOONG64DUFFCOPY:
   607  		p := s.Prog(obj.ADUFFCOPY)
   608  		p.To.Type = obj.TYPE_MEM
   609  		p.To.Name = obj.NAME_EXTERN
   610  		p.To.Sym = ir.Syms.Duffcopy
   611  		p.To.Offset = v.AuxInt
   612  	case ssa.OpLOONG64LoweredMove:
   613  		// MOVx	(Rarg1), Rtmp
   614  		// MOVx	Rtmp, (Rarg0)
   615  		// ADDV	$sz, Rarg1
   616  		// ADDV	$sz, Rarg0
   617  		// BGEU	Rarg2, Rarg0, -4(PC)
   618  		mov, sz := largestMove(v.AuxInt)
   619  		p := s.Prog(mov)
   620  		p.From.Type = obj.TYPE_MEM
   621  		p.From.Reg = v.Args[1].Reg()
   622  		p.To.Type = obj.TYPE_REG
   623  		p.To.Reg = loong64.REGTMP
   624  
   625  		p2 := s.Prog(mov)
   626  		p2.From.Type = obj.TYPE_REG
   627  		p2.From.Reg = loong64.REGTMP
   628  		p2.To.Type = obj.TYPE_MEM
   629  		p2.To.Reg = v.Args[0].Reg()
   630  
   631  		p3 := s.Prog(loong64.AADDVU)
   632  		p3.From.Type = obj.TYPE_CONST
   633  		p3.From.Offset = sz
   634  		p3.To.Type = obj.TYPE_REG
   635  		p3.To.Reg = v.Args[1].Reg()
   636  
   637  		p4 := s.Prog(loong64.AADDVU)
   638  		p4.From.Type = obj.TYPE_CONST
   639  		p4.From.Offset = sz
   640  		p4.To.Type = obj.TYPE_REG
   641  		p4.To.Reg = v.Args[0].Reg()
   642  
   643  		p5 := s.Prog(loong64.ABGEU)
   644  		p5.From.Type = obj.TYPE_REG
   645  		p5.From.Reg = v.Args[2].Reg()
   646  		p5.Reg = v.Args[1].Reg()
   647  		p5.To.Type = obj.TYPE_BRANCH
   648  		p5.To.SetTarget(p)
   649  
   650  	case ssa.OpLOONG64CALLstatic, ssa.OpLOONG64CALLclosure, ssa.OpLOONG64CALLinter:
   651  		s.Call(v)
   652  	case ssa.OpLOONG64CALLtail:
   653  		s.TailCall(v)
   654  	case ssa.OpLOONG64LoweredWB:
   655  		p := s.Prog(obj.ACALL)
   656  		p.To.Type = obj.TYPE_MEM
   657  		p.To.Name = obj.NAME_EXTERN
   658  		// AuxInt encodes how many buffer entries we need.
   659  		p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
   660  
   661  	case ssa.OpLOONG64LoweredPubBarrier:
   662  		// DBAR 0x1A
   663  		p := s.Prog(v.Op.Asm())
   664  		p.From.Type = obj.TYPE_CONST
   665  		p.From.Offset = 0x1A
   666  
   667  	case ssa.OpLOONG64LoweredPanicBoundsRR, ssa.OpLOONG64LoweredPanicBoundsRC, ssa.OpLOONG64LoweredPanicBoundsCR, ssa.OpLOONG64LoweredPanicBoundsCC:
   668  		// Compute the constant we put in the PCData entry for this call.
   669  		code, signed := ssa.BoundsKind(v.AuxInt).Code()
   670  		xIsReg := false
   671  		yIsReg := false
   672  		xVal := 0
   673  		yVal := 0
   674  		switch v.Op {
   675  		case ssa.OpLOONG64LoweredPanicBoundsRR:
   676  			xIsReg = true
   677  			xVal = int(v.Args[0].Reg() - loong64.REG_R4)
   678  			yIsReg = true
   679  			yVal = int(v.Args[1].Reg() - loong64.REG_R4)
   680  		case ssa.OpLOONG64LoweredPanicBoundsRC:
   681  			xIsReg = true
   682  			xVal = int(v.Args[0].Reg() - loong64.REG_R4)
   683  			c := v.Aux.(ssa.PanicBoundsC).C
   684  			if c >= 0 && c <= abi.BoundsMaxConst {
   685  				yVal = int(c)
   686  			} else {
   687  				// Move constant to a register
   688  				yIsReg = true
   689  				if yVal == xVal {
   690  					yVal = 1
   691  				}
   692  				p := s.Prog(loong64.AMOVV)
   693  				p.From.Type = obj.TYPE_CONST
   694  				p.From.Offset = c
   695  				p.To.Type = obj.TYPE_REG
   696  				p.To.Reg = loong64.REG_R4 + int16(yVal)
   697  			}
   698  		case ssa.OpLOONG64LoweredPanicBoundsCR:
   699  			yIsReg = true
   700  			yVal := int(v.Args[0].Reg() - loong64.REG_R4)
   701  			c := v.Aux.(ssa.PanicBoundsC).C
   702  			if c >= 0 && c <= abi.BoundsMaxConst {
   703  				xVal = int(c)
   704  			} else {
   705  				// Move constant to a register
   706  				xIsReg = true
   707  				if xVal == yVal {
   708  					xVal = 1
   709  				}
   710  				p := s.Prog(loong64.AMOVV)
   711  				p.From.Type = obj.TYPE_CONST
   712  				p.From.Offset = c
   713  				p.To.Type = obj.TYPE_REG
   714  				p.To.Reg = loong64.REG_R4 + int16(xVal)
   715  			}
   716  		case ssa.OpLOONG64LoweredPanicBoundsCC:
   717  			c := v.Aux.(ssa.PanicBoundsCC).Cx
   718  			if c >= 0 && c <= abi.BoundsMaxConst {
   719  				xVal = int(c)
   720  			} else {
   721  				// Move constant to a register
   722  				xIsReg = true
   723  				p := s.Prog(loong64.AMOVV)
   724  				p.From.Type = obj.TYPE_CONST
   725  				p.From.Offset = c
   726  				p.To.Type = obj.TYPE_REG
   727  				p.To.Reg = loong64.REG_R4 + int16(xVal)
   728  			}
   729  			c = v.Aux.(ssa.PanicBoundsCC).Cy
   730  			if c >= 0 && c <= abi.BoundsMaxConst {
   731  				yVal = int(c)
   732  			} else {
   733  				// Move constant to a register
   734  				yIsReg = true
   735  				yVal = 1
   736  				p := s.Prog(loong64.AMOVV)
   737  				p.From.Type = obj.TYPE_CONST
   738  				p.From.Offset = c
   739  				p.To.Type = obj.TYPE_REG
   740  				p.To.Reg = loong64.REG_R4 + int16(yVal)
   741  			}
   742  		}
   743  		c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
   744  
   745  		p := s.Prog(obj.APCDATA)
   746  		p.From.SetConst(abi.PCDATA_PanicBounds)
   747  		p.To.SetConst(int64(c))
   748  		p = s.Prog(obj.ACALL)
   749  		p.To.Type = obj.TYPE_MEM
   750  		p.To.Name = obj.NAME_EXTERN
   751  		p.To.Sym = ir.Syms.PanicBounds
   752  
   753  	case ssa.OpLOONG64LoweredAtomicLoad8, ssa.OpLOONG64LoweredAtomicLoad32, ssa.OpLOONG64LoweredAtomicLoad64:
   754  		// MOVB	(Rarg0), Rout
   755  		// DBAR	0x14
   756  		as := loong64.AMOVV
   757  		switch v.Op {
   758  		case ssa.OpLOONG64LoweredAtomicLoad8:
   759  			as = loong64.AMOVB
   760  		case ssa.OpLOONG64LoweredAtomicLoad32:
   761  			as = loong64.AMOVW
   762  		}
   763  		p := s.Prog(as)
   764  		p.From.Type = obj.TYPE_MEM
   765  		p.From.Reg = v.Args[0].Reg()
   766  		p.To.Type = obj.TYPE_REG
   767  		p.To.Reg = v.Reg0()
   768  		p1 := s.Prog(loong64.ADBAR)
   769  		p1.From.Type = obj.TYPE_CONST
   770  		p1.From.Offset = 0x14
   771  
   772  	case ssa.OpLOONG64LoweredAtomicStore8,
   773  		ssa.OpLOONG64LoweredAtomicStore32,
   774  		ssa.OpLOONG64LoweredAtomicStore64:
   775  		// DBAR 0x12
   776  		// MOVx (Rarg1), Rout
   777  		// DBAR 0x18
   778  		movx := loong64.AMOVV
   779  		switch v.Op {
   780  		case ssa.OpLOONG64LoweredAtomicStore8:
   781  			movx = loong64.AMOVB
   782  		case ssa.OpLOONG64LoweredAtomicStore32:
   783  			movx = loong64.AMOVW
   784  		}
   785  		p := s.Prog(loong64.ADBAR)
   786  		p.From.Type = obj.TYPE_CONST
   787  		p.From.Offset = 0x12
   788  
   789  		p1 := s.Prog(movx)
   790  		p1.From.Type = obj.TYPE_REG
   791  		p1.From.Reg = v.Args[1].Reg()
   792  		p1.To.Type = obj.TYPE_MEM
   793  		p1.To.Reg = v.Args[0].Reg()
   794  
   795  		p2 := s.Prog(loong64.ADBAR)
   796  		p2.From.Type = obj.TYPE_CONST
   797  		p2.From.Offset = 0x18
   798  
   799  	case ssa.OpLOONG64LoweredAtomicStore8Variant,
   800  		ssa.OpLOONG64LoweredAtomicStore32Variant,
   801  		ssa.OpLOONG64LoweredAtomicStore64Variant:
   802  		//AMSWAPx  Rarg1, (Rarg0), Rout
   803  		amswapx := loong64.AAMSWAPDBV
   804  		switch v.Op {
   805  		case ssa.OpLOONG64LoweredAtomicStore32Variant:
   806  			amswapx = loong64.AAMSWAPDBW
   807  		case ssa.OpLOONG64LoweredAtomicStore8Variant:
   808  			amswapx = loong64.AAMSWAPDBB
   809  		}
   810  		p := s.Prog(amswapx)
   811  		p.From.Type = obj.TYPE_REG
   812  		p.From.Reg = v.Args[1].Reg()
   813  		p.To.Type = obj.TYPE_MEM
   814  		p.To.Reg = v.Args[0].Reg()
   815  		p.RegTo2 = loong64.REGZERO
   816  
   817  	case ssa.OpLOONG64LoweredAtomicExchange32, ssa.OpLOONG64LoweredAtomicExchange64:
   818  		// AMSWAPx	Rarg1, (Rarg0), Rout
   819  		amswapx := loong64.AAMSWAPDBV
   820  		if v.Op == ssa.OpLOONG64LoweredAtomicExchange32 {
   821  			amswapx = loong64.AAMSWAPDBW
   822  		}
   823  		p := s.Prog(amswapx)
   824  		p.From.Type = obj.TYPE_REG
   825  		p.From.Reg = v.Args[1].Reg()
   826  		p.To.Type = obj.TYPE_MEM
   827  		p.To.Reg = v.Args[0].Reg()
   828  		p.RegTo2 = v.Reg0()
   829  
   830  	case ssa.OpLOONG64LoweredAtomicExchange8Variant:
   831  		// AMSWAPDBB	Rarg1, (Rarg0), Rout
   832  		p := s.Prog(loong64.AAMSWAPDBB)
   833  		p.From.Type = obj.TYPE_REG
   834  		p.From.Reg = v.Args[1].Reg()
   835  		p.To.Type = obj.TYPE_MEM
   836  		p.To.Reg = v.Args[0].Reg()
   837  		p.RegTo2 = v.Reg0()
   838  
   839  	case ssa.OpLOONG64LoweredAtomicAdd32, ssa.OpLOONG64LoweredAtomicAdd64:
   840  		// AMADDx  Rarg1, (Rarg0), Rout
   841  		// ADDV    Rarg1, Rout, Rout
   842  		amaddx := loong64.AAMADDDBV
   843  		addx := loong64.AADDV
   844  		if v.Op == ssa.OpLOONG64LoweredAtomicAdd32 {
   845  			amaddx = loong64.AAMADDDBW
   846  		}
   847  		p := s.Prog(amaddx)
   848  		p.From.Type = obj.TYPE_REG
   849  		p.From.Reg = v.Args[1].Reg()
   850  		p.To.Type = obj.TYPE_MEM
   851  		p.To.Reg = v.Args[0].Reg()
   852  		p.RegTo2 = v.Reg0()
   853  
   854  		p1 := s.Prog(addx)
   855  		p1.From.Type = obj.TYPE_REG
   856  		p1.From.Reg = v.Args[1].Reg()
   857  		p1.Reg = v.Reg0()
   858  		p1.To.Type = obj.TYPE_REG
   859  		p1.To.Reg = v.Reg0()
   860  
   861  	case ssa.OpLOONG64LoweredAtomicCas32, ssa.OpLOONG64LoweredAtomicCas64:
   862  		// MOVV $0, Rout
   863  		// DBAR 0x14
   864  		// LL	(Rarg0), Rtmp
   865  		// BNE	Rtmp, Rarg1, 4(PC)
   866  		// MOVV Rarg2, Rout
   867  		// SC	Rout, (Rarg0)
   868  		// BEQ	Rout, -4(PC)
   869  		// DBAR 0x12
   870  		ll := loong64.ALLV
   871  		sc := loong64.ASCV
   872  		if v.Op == ssa.OpLOONG64LoweredAtomicCas32 {
   873  			ll = loong64.ALL
   874  			sc = loong64.ASC
   875  		}
   876  
   877  		p := s.Prog(loong64.AMOVV)
   878  		p.From.Type = obj.TYPE_REG
   879  		p.From.Reg = loong64.REGZERO
   880  		p.To.Type = obj.TYPE_REG
   881  		p.To.Reg = v.Reg0()
   882  
   883  		p1 := s.Prog(loong64.ADBAR)
   884  		p1.From.Type = obj.TYPE_CONST
   885  		p1.From.Offset = 0x14
   886  
   887  		p2 := s.Prog(ll)
   888  		p2.From.Type = obj.TYPE_MEM
   889  		p2.From.Reg = v.Args[0].Reg()
   890  		p2.To.Type = obj.TYPE_REG
   891  		p2.To.Reg = loong64.REGTMP
   892  
   893  		p3 := s.Prog(loong64.ABNE)
   894  		p3.From.Type = obj.TYPE_REG
   895  		p3.From.Reg = v.Args[1].Reg()
   896  		p3.Reg = loong64.REGTMP
   897  		p3.To.Type = obj.TYPE_BRANCH
   898  
   899  		p4 := s.Prog(loong64.AMOVV)
   900  		p4.From.Type = obj.TYPE_REG
   901  		p4.From.Reg = v.Args[2].Reg()
   902  		p4.To.Type = obj.TYPE_REG
   903  		p4.To.Reg = v.Reg0()
   904  
   905  		p5 := s.Prog(sc)
   906  		p5.From.Type = obj.TYPE_REG
   907  		p5.From.Reg = v.Reg0()
   908  		p5.To.Type = obj.TYPE_MEM
   909  		p5.To.Reg = v.Args[0].Reg()
   910  
   911  		p6 := s.Prog(loong64.ABEQ)
   912  		p6.From.Type = obj.TYPE_REG
   913  		p6.From.Reg = v.Reg0()
   914  		p6.To.Type = obj.TYPE_BRANCH
   915  		p6.To.SetTarget(p2)
   916  
   917  		p7 := s.Prog(loong64.ADBAR)
   918  		p7.From.Type = obj.TYPE_CONST
   919  		p7.From.Offset = 0x12
   920  		p3.To.SetTarget(p7)
   921  
   922  	case ssa.OpLOONG64LoweredAtomicAnd32,
   923  		ssa.OpLOONG64LoweredAtomicOr32:
   924  		// AM{AND,OR}DBx  Rarg1, (Rarg0), RegZero
   925  		p := s.Prog(v.Op.Asm())
   926  		p.From.Type = obj.TYPE_REG
   927  		p.From.Reg = v.Args[1].Reg()
   928  		p.To.Type = obj.TYPE_MEM
   929  		p.To.Reg = v.Args[0].Reg()
   930  		p.RegTo2 = loong64.REGZERO
   931  
   932  	case ssa.OpLOONG64LoweredAtomicAnd32value,
   933  		ssa.OpLOONG64LoweredAtomicAnd64value,
   934  		ssa.OpLOONG64LoweredAtomicOr64value,
   935  		ssa.OpLOONG64LoweredAtomicOr32value:
   936  		// AM{AND,OR}DBx  Rarg1, (Rarg0), Rout
   937  		p := s.Prog(v.Op.Asm())
   938  		p.From.Type = obj.TYPE_REG
   939  		p.From.Reg = v.Args[1].Reg()
   940  		p.To.Type = obj.TYPE_MEM
   941  		p.To.Reg = v.Args[0].Reg()
   942  		p.RegTo2 = v.Reg0()
   943  
   944  	case ssa.OpLOONG64LoweredAtomicCas64Variant, ssa.OpLOONG64LoweredAtomicCas32Variant:
   945  		// MOVV         $0, Rout
   946  		// MOVV         Rarg1, Rtmp
   947  		// AMCASDBx     Rarg2, (Rarg0), Rtmp
   948  		// BNE          Rarg1, Rtmp, 2(PC)
   949  		// MOVV         $1, Rout
   950  		// NOP
   951  
   952  		amcasx := loong64.AAMCASDBV
   953  		if v.Op == ssa.OpLOONG64LoweredAtomicCas32Variant {
   954  			amcasx = loong64.AAMCASDBW
   955  		}
   956  
   957  		p := s.Prog(loong64.AMOVV)
   958  		p.From.Type = obj.TYPE_REG
   959  		p.From.Reg = loong64.REGZERO
   960  		p.To.Type = obj.TYPE_REG
   961  		p.To.Reg = v.Reg0()
   962  
   963  		p1 := s.Prog(loong64.AMOVV)
   964  		p1.From.Type = obj.TYPE_REG
   965  		p1.From.Reg = v.Args[1].Reg()
   966  		p1.To.Type = obj.TYPE_REG
   967  		p1.To.Reg = loong64.REGTMP
   968  
   969  		p2 := s.Prog(amcasx)
   970  		p2.From.Type = obj.TYPE_REG
   971  		p2.From.Reg = v.Args[2].Reg()
   972  		p2.To.Type = obj.TYPE_MEM
   973  		p2.To.Reg = v.Args[0].Reg()
   974  		p2.RegTo2 = loong64.REGTMP
   975  
   976  		p3 := s.Prog(loong64.ABNE)
   977  		p3.From.Type = obj.TYPE_REG
   978  		p3.From.Reg = v.Args[1].Reg()
   979  		p3.Reg = loong64.REGTMP
   980  		p3.To.Type = obj.TYPE_BRANCH
   981  
   982  		p4 := s.Prog(loong64.AMOVV)
   983  		p4.From.Type = obj.TYPE_CONST
   984  		p4.From.Offset = 0x1
   985  		p4.To.Type = obj.TYPE_REG
   986  		p4.To.Reg = v.Reg0()
   987  
   988  		p5 := s.Prog(obj.ANOP)
   989  		p3.To.SetTarget(p5)
   990  
   991  	case ssa.OpLOONG64LoweredNilCheck:
   992  		// Issue a load which will fault if arg is nil.
   993  		p := s.Prog(loong64.AMOVB)
   994  		p.From.Type = obj.TYPE_MEM
   995  		p.From.Reg = v.Args[0].Reg()
   996  		ssagen.AddAux(&p.From, v)
   997  		p.To.Type = obj.TYPE_REG
   998  		p.To.Reg = loong64.REGTMP
   999  		if logopt.Enabled() {
  1000  			logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
  1001  		}
  1002  		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
  1003  			base.WarnfAt(v.Pos, "generated nil check")
  1004  		}
  1005  	case ssa.OpLOONG64FPFlagTrue,
  1006  		ssa.OpLOONG64FPFlagFalse:
  1007  		// MOVV	$0, r
  1008  		// BFPF	2(PC)
  1009  		// MOVV	$1, r
  1010  		branch := loong64.ABFPF
  1011  		if v.Op == ssa.OpLOONG64FPFlagFalse {
  1012  			branch = loong64.ABFPT
  1013  		}
  1014  		p := s.Prog(loong64.AMOVV)
  1015  		p.From.Type = obj.TYPE_REG
  1016  		p.From.Reg = loong64.REGZERO
  1017  		p.To.Type = obj.TYPE_REG
  1018  		p.To.Reg = v.Reg()
  1019  		p2 := s.Prog(branch)
  1020  		p2.To.Type = obj.TYPE_BRANCH
  1021  		p3 := s.Prog(loong64.AMOVV)
  1022  		p3.From.Type = obj.TYPE_CONST
  1023  		p3.From.Offset = 1
  1024  		p3.To.Type = obj.TYPE_REG
  1025  		p3.To.Reg = v.Reg()
  1026  		p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land
  1027  		p2.To.SetTarget(p4)
  1028  	case ssa.OpLOONG64LoweredGetClosurePtr:
  1029  		// Closure pointer is R22 (loong64.REGCTXT).
  1030  		ssagen.CheckLoweredGetClosurePtr(v)
  1031  	case ssa.OpLOONG64LoweredGetCallerSP:
  1032  		// caller's SP is FixedFrameSize below the address of the first arg
  1033  		p := s.Prog(loong64.AMOVV)
  1034  		p.From.Type = obj.TYPE_ADDR
  1035  		p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
  1036  		p.From.Name = obj.NAME_PARAM
  1037  		p.To.Type = obj.TYPE_REG
  1038  		p.To.Reg = v.Reg()
  1039  	case ssa.OpLOONG64LoweredGetCallerPC:
  1040  		p := s.Prog(obj.AGETCALLERPC)
  1041  		p.To.Type = obj.TYPE_REG
  1042  		p.To.Reg = v.Reg()
  1043  	case ssa.OpLOONG64MASKEQZ, ssa.OpLOONG64MASKNEZ:
  1044  		p := s.Prog(v.Op.Asm())
  1045  		p.From.Type = obj.TYPE_REG
  1046  		p.From.Reg = v.Args[1].Reg()
  1047  		p.Reg = v.Args[0].Reg()
  1048  		p.To.Type = obj.TYPE_REG
  1049  		p.To.Reg = v.Reg()
  1050  
  1051  	case ssa.OpLOONG64PRELD:
  1052  		// PRELD (Rarg0), hint
  1053  		p := s.Prog(v.Op.Asm())
  1054  		p.From.Type = obj.TYPE_MEM
  1055  		p.From.Reg = v.Args[0].Reg()
  1056  		p.AddRestSourceConst(v.AuxInt & 0x1f)
  1057  
  1058  	case ssa.OpLOONG64PRELDX:
  1059  		// PRELDX (Rarg0), $n, $hint
  1060  		p := s.Prog(v.Op.Asm())
  1061  		p.From.Type = obj.TYPE_MEM
  1062  		p.From.Reg = v.Args[0].Reg()
  1063  		p.AddRestSourceArgs([]obj.Addr{
  1064  			{Type: obj.TYPE_CONST, Offset: int64((v.AuxInt >> 5) & 0x1fffffffff)},
  1065  			{Type: obj.TYPE_CONST, Offset: int64((v.AuxInt >> 0) & 0x1f)},
  1066  		})
  1067  
  1068  	case ssa.OpClobber, ssa.OpClobberReg:
  1069  		// TODO: implement for clobberdead experiment. Nop is ok for now.
  1070  	default:
  1071  		v.Fatalf("genValue not implemented: %s", v.LongString())
  1072  	}
  1073  }
  1074  
// blockJump maps each conditional block kind to the branch instruction
// that jumps when the condition holds (asm) and the inverted instruction
// (invasm) used when code falls through to the taken successor instead.
var blockJump = map[ssa.BlockKind]struct {
	asm, invasm obj.As
}{
	ssa.BlockLOONG64EQ:   {loong64.ABEQ, loong64.ABNE},
	ssa.BlockLOONG64NE:   {loong64.ABNE, loong64.ABEQ},
	ssa.BlockLOONG64LTZ:  {loong64.ABLTZ, loong64.ABGEZ},
	ssa.BlockLOONG64GEZ:  {loong64.ABGEZ, loong64.ABLTZ},
	ssa.BlockLOONG64LEZ:  {loong64.ABLEZ, loong64.ABGTZ},
	ssa.BlockLOONG64GTZ:  {loong64.ABGTZ, loong64.ABLEZ},
	ssa.BlockLOONG64FPT:  {loong64.ABFPT, loong64.ABFPF},
	ssa.BlockLOONG64FPF:  {loong64.ABFPF, loong64.ABFPT},
	ssa.BlockLOONG64BEQ:  {loong64.ABEQ, loong64.ABNE},
	ssa.BlockLOONG64BNE:  {loong64.ABNE, loong64.ABEQ},
	ssa.BlockLOONG64BGE:  {loong64.ABGE, loong64.ABLT},
	ssa.BlockLOONG64BLT:  {loong64.ABLT, loong64.ABGE},
	ssa.BlockLOONG64BLTU: {loong64.ABLTU, loong64.ABGEU},
	ssa.BlockLOONG64BGEU: {loong64.ABGEU, loong64.ABLTU},
}
  1093  
  1094  func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
  1095  	switch b.Kind {
  1096  	case ssa.BlockPlain, ssa.BlockDefer:
  1097  		if b.Succs[0].Block() != next {
  1098  			p := s.Prog(obj.AJMP)
  1099  			p.To.Type = obj.TYPE_BRANCH
  1100  			s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
  1101  		}
  1102  	case ssa.BlockExit, ssa.BlockRetJmp:
  1103  	case ssa.BlockRet:
  1104  		s.Prog(obj.ARET)
  1105  	case ssa.BlockLOONG64EQ, ssa.BlockLOONG64NE,
  1106  		ssa.BlockLOONG64LTZ, ssa.BlockLOONG64GEZ,
  1107  		ssa.BlockLOONG64LEZ, ssa.BlockLOONG64GTZ,
  1108  		ssa.BlockLOONG64BEQ, ssa.BlockLOONG64BNE,
  1109  		ssa.BlockLOONG64BLT, ssa.BlockLOONG64BGE,
  1110  		ssa.BlockLOONG64BLTU, ssa.BlockLOONG64BGEU,
  1111  		ssa.BlockLOONG64FPT, ssa.BlockLOONG64FPF:
  1112  		jmp := blockJump[b.Kind]
  1113  		var p *obj.Prog
  1114  		switch next {
  1115  		case b.Succs[0].Block():
  1116  			p = s.Br(jmp.invasm, b.Succs[1].Block())
  1117  		case b.Succs[1].Block():
  1118  			p = s.Br(jmp.asm, b.Succs[0].Block())
  1119  		default:
  1120  			if b.Likely != ssa.BranchUnlikely {
  1121  				p = s.Br(jmp.asm, b.Succs[0].Block())
  1122  				s.Br(obj.AJMP, b.Succs[1].Block())
  1123  			} else {
  1124  				p = s.Br(jmp.invasm, b.Succs[1].Block())
  1125  				s.Br(obj.AJMP, b.Succs[0].Block())
  1126  			}
  1127  		}
  1128  		switch b.Kind {
  1129  		case ssa.BlockLOONG64BEQ, ssa.BlockLOONG64BNE,
  1130  			ssa.BlockLOONG64BGE, ssa.BlockLOONG64BLT,
  1131  			ssa.BlockLOONG64BGEU, ssa.BlockLOONG64BLTU:
  1132  			p.From.Type = obj.TYPE_REG
  1133  			p.From.Reg = b.Controls[0].Reg()
  1134  			p.Reg = b.Controls[1].Reg()
  1135  		case ssa.BlockLOONG64EQ, ssa.BlockLOONG64NE,
  1136  			ssa.BlockLOONG64LTZ, ssa.BlockLOONG64GEZ,
  1137  			ssa.BlockLOONG64LEZ, ssa.BlockLOONG64GTZ,
  1138  			ssa.BlockLOONG64FPT, ssa.BlockLOONG64FPF:
  1139  			if !b.Controls[0].Type.IsFlags() {
  1140  				p.From.Type = obj.TYPE_REG
  1141  				p.From.Reg = b.Controls[0].Reg()
  1142  			}
  1143  		}
  1144  	default:
  1145  		b.Fatalf("branch not implemented: %s", b.LongString())
  1146  	}
  1147  }
  1148  
  1149  func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
  1150  	p := s.Prog(loadByType(t, reg))
  1151  	p.From.Type = obj.TYPE_MEM
  1152  	p.From.Name = obj.NAME_AUTO
  1153  	p.From.Sym = n.Linksym()
  1154  	p.From.Offset = n.FrameOffset() + off
  1155  	p.To.Type = obj.TYPE_REG
  1156  	p.To.Reg = reg
  1157  	return p
  1158  }
  1159  
  1160  func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
  1161  	p = pp.Append(p, storeByType(t, reg), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
  1162  	p.To.Name = obj.NAME_PARAM
  1163  	p.To.Sym = n.Linksym()
  1164  	p.Pos = p.Pos.WithNotStmt()
  1165  	return p
  1166  }
  1167  

View as plain text